the-stack_106_17336
|
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union, cast
import elasticsearch
import numpy as np
from eland.common import ensure_es_client, es_version
from eland.utils import deprecated_api
from .common import TYPE_CLASSIFICATION, TYPE_REGRESSION
from .transformers import get_model_transformer
if TYPE_CHECKING:
from elasticsearch import Elasticsearch
from numpy.typing import ArrayLike, DTypeLike
# Try importing each ML lib separately so mypy users don't have to
# have both installed to use type-checking.
try:
from sklearn.ensemble import ( # type: ignore # noqa: F401
RandomForestClassifier,
RandomForestRegressor,
)
from sklearn.tree import ( # type: ignore # noqa: F401
DecisionTreeClassifier,
DecisionTreeRegressor,
)
except ImportError:
pass
try:
from xgboost import XGBClassifier, XGBRegressor # type: ignore # noqa: F401
except ImportError:
pass
try:
from lightgbm import LGBMClassifier, LGBMRegressor # type: ignore # noqa: F401
except ImportError:
pass
class MLModel:
"""
A machine learning model managed by Elasticsearch.
(See https://www.elastic.co/guide/en/elasticsearch/reference/current/put-inference.html)
These models can be created by Elastic ML, or transformed from supported Python formats
such as scikit-learn or xgboost and imported into Elasticsearch.
The methods for this class attempt to mirror standard Python classes.
"""
def __init__(
self,
es_client: Union[str, List[str], Tuple[str, ...], "Elasticsearch"],
model_id: str,
):
"""
Parameters
----------
es_client: Elasticsearch client argument(s)
- elasticsearch-py parameters or
- elasticsearch-py instance
model_id: str
The unique identifier of the trained inference model in Elasticsearch.
"""
self._client = ensure_es_client(es_client)
self._model_id = model_id
self._trained_model_config_cache: Optional[Dict[str, Any]] = None
def predict(
self, X: Union["ArrayLike", List[float], List[List[float]]]
) -> "ArrayLike":
"""
Make a prediction using a trained model stored in Elasticsearch.
Parameters for this method are not yet fully compatible with standard sklearn.predict.
Parameters
----------
X: Input feature vector.
            Must be either a numpy ndarray, a list of floats, or a list of lists
            of floats. TODO: support DataFrame and other formats
Returns
-------
y: np.ndarray of dtype float for regressors or int for classifiers
Examples
--------
>>> from sklearn import datasets
>>> from xgboost import XGBRegressor
>>> from eland.ml import MLModel
>>> # Train model
>>> training_data = datasets.make_classification(n_features=6, random_state=0)
>>> test_data = [[-1, -2, -3, -4, -5, -6], [10, 20, 30, 40, 50, 60]]
>>> regressor = XGBRegressor(objective='reg:squarederror')
>>> regressor = regressor.fit(training_data[0], training_data[1])
>>> # Get some test results
>>> regressor.predict(np.array(test_data)) # doctest: +SKIP
array([0.06062475, 0.9990102 ], dtype=float32)
>>> # Serialise the model to Elasticsearch
>>> feature_names = ["f0", "f1", "f2", "f3", "f4", "f5"]
>>> model_id = "test_xgb_regressor"
>>> es_model = MLModel.import_model('localhost', model_id, regressor, feature_names, es_if_exists='replace')
>>> # Get some test results from Elasticsearch model
>>> es_model.predict(test_data) # doctest: +SKIP
array([0.0606248 , 0.99901026], dtype=float32)
>>> # Delete model from Elasticsearch
>>> es_model.delete_model()
"""
docs = []
if isinstance(X, np.ndarray):
def to_list_or_float(x: Any) -> Union[List[Any], float]:
if isinstance(x, np.ndarray):
return [to_list_or_float(i) for i in x.tolist()]
elif isinstance(x, list):
return [to_list_or_float(i) for i in x]
return float(x)
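            # e.g. np.array([[1, 2], [3, 4]]) -> [[1.0, 2.0], [3.0, 4.0]]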
X = to_list_or_float(X)
# Is it a list of floats?
if isinstance(X, list) and all(isinstance(i, (float, int)) for i in X):
features = cast(List[List[float]], [X])
        # If it is not a list of lists of floats either, we error out below.
elif isinstance(X, list) and all(
[
isinstance(i, list) and all([isinstance(ix, (float, int)) for ix in i])
for i in X
]
):
features = cast(List[List[float]], X)
else:
raise NotImplementedError(
f"Prediction for type {type(X)}, not supported: {X!r}"
)
for i in features:
doc = {"_source": dict(zip(self.feature_names, i))}
docs.append(doc)
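        # Each doc built above has the form {"_source": {"f0": ..., "f1": ...}},
        # keyed by the stored model's feature names.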
# field_mappings -> field_map in ES 7.7
field_map_name = (
"field_map" if es_version(self._client) >= (7, 7) else "field_mappings"
)
results = self._client.ingest.simulate(
body={
"pipeline": {
"processors": [
{
"inference": {
"model_id": self._model_id,
"inference_config": {self.model_type: {}},
field_map_name: {},
}
}
]
},
"docs": docs,
}
)
        # Unpack results into an array. Errors can be present
        # within the response even with a 2XX HTTP status code.
y = []
for res in results["docs"]:
if "error" in res:
raise RuntimeError(
f"Failed to run prediction for model ID {self._model_id!r}",
res["error"],
)
y.append(res["doc"]["_source"]["ml"]["inference"][self.results_field])
# Return results as np.ndarray of float32 or int (consistent with sklearn/xgboost)
if self.model_type == TYPE_CLASSIFICATION:
dt: "DTypeLike" = np.int_
else:
dt = np.float32
return np.asarray(y, dtype=dt)
@property
def model_type(self) -> str:
# Legacy way of finding model_type from the model definition.
if "inference_config" not in self._trained_model_config:
trained_model = self._trained_model_config["definition"]["trained_model"]
if "tree" in trained_model:
target_type = trained_model["tree"]["target_type"]
else:
target_type = trained_model["ensemble"]["target_type"]
return cast(str, target_type)
inference_config = self._trained_model_config["inference_config"]
if "classification" in inference_config:
return TYPE_CLASSIFICATION
elif "regression" in inference_config:
return TYPE_REGRESSION
raise ValueError("Unable to determine 'model_type' for MLModel")
@property
def feature_names(self) -> List[str]:
return list(self._trained_model_config["input"]["field_names"])
@property
def results_field(self) -> str:
if "inference_config" not in self._trained_model_config:
return "predicted_value"
return cast(
str,
self._trained_model_config["inference_config"][self.model_type][
"results_field"
],
)
@classmethod
def import_model(
cls,
es_client: Union[str, List[str], Tuple[str, ...], "Elasticsearch"],
model_id: str,
model: Union[
"DecisionTreeClassifier",
"DecisionTreeRegressor",
"RandomForestRegressor",
"RandomForestClassifier",
"XGBClassifier",
"XGBRegressor",
"LGBMRegressor",
"LGBMClassifier",
],
feature_names: List[str],
classification_labels: Optional[List[str]] = None,
classification_weights: Optional[List[float]] = None,
es_if_exists: Optional[str] = None,
es_compress_model_definition: bool = True,
) -> "MLModel":
"""
Transform and serialize a trained 3rd party model into Elasticsearch.
This model can then be used for inference in the Elastic Stack.
Parameters
----------
es_client: Elasticsearch client argument(s)
- elasticsearch-py parameters or
- elasticsearch-py instance
model_id: str
The unique identifier of the trained inference model in Elasticsearch.
model: An instance of a supported python model. We support the following model types:
- sklearn.tree.DecisionTreeClassifier
- sklearn.tree.DecisionTreeRegressor
- sklearn.ensemble.RandomForestRegressor
- sklearn.ensemble.RandomForestClassifier
- lightgbm.LGBMRegressor
- Categorical fields are expected to already be processed
- Only the following objectives are supported
- "regression"
- "regression_l1"
- "huber"
- "fair"
- "quantile"
- "mape"
- lightgbm.LGBMClassifier
- Categorical fields are expected to already be processed
- Only the following objectives are supported
- "binary"
- "multiclass"
- "multiclassova"
- xgboost.XGBClassifier
- only the following objectives are supported:
- "binary:logistic"
- "multi:softmax"
- "multi:softprob"
- xgboost.XGBRegressor
- only the following objectives are supported:
- "reg:squarederror"
- "reg:linear"
- "reg:squaredlogerror"
- "reg:logistic"
- "reg:pseudohubererror"
feature_names: List[str]
Names of the features (required)
classification_labels: List[str]
Labels of the classification targets
        classification_weights: List[float]
Weights of the classification targets
es_if_exists: {'fail', 'replace'} default 'fail'
How to behave if model already exists
            - fail: Raise a ValueError
- replace: Overwrite existing model
es_compress_model_definition: bool
If True will use 'compressed_definition' which uses gzipped
JSON instead of raw JSON to reduce the amount of data sent
over the wire in HTTP requests. Defaults to 'True'.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.tree import DecisionTreeClassifier
>>> from eland.ml import MLModel
>>> # Train model
>>> training_data = datasets.make_classification(n_features=5, random_state=0)
>>> test_data = [[-50.1, 0.2, 0.3, -0.5, 1.0], [1.6, 2.1, -10, 50, -1.0]]
>>> classifier = DecisionTreeClassifier()
>>> classifier = classifier.fit(training_data[0], training_data[1])
>>> # Get some test results
>>> classifier.predict(test_data)
array([0, 1])
>>> # Serialise the model to Elasticsearch
>>> feature_names = ["f0", "f1", "f2", "f3", "f4"]
>>> model_id = "test_decision_tree_classifier"
>>> es_model = MLModel.import_model(
... 'localhost',
... model_id=model_id,
... model=classifier,
... feature_names=feature_names,
... es_if_exists='replace'
... )
>>> # Get some test results from Elasticsearch model
>>> es_model.predict(test_data)
array([0, 1])
>>> # Delete model from Elasticsearch
>>> es_model.delete_model()
"""
es_client = ensure_es_client(es_client)
transformer = get_model_transformer(
model,
feature_names=feature_names,
classification_labels=classification_labels,
classification_weights=classification_weights,
)
serializer = transformer.transform()
model_type = transformer.model_type
if es_if_exists is None:
es_if_exists = "fail"
ml_model = MLModel(
es_client=es_client,
model_id=model_id,
)
if es_if_exists not in ("fail", "replace"):
raise ValueError("'es_if_exists' must be either 'fail' or 'replace'")
elif es_if_exists == "fail":
if ml_model.exists_model():
raise ValueError(
f"Trained machine learning model {model_id} already exists"
)
elif es_if_exists == "replace":
ml_model.delete_model()
body: Dict[str, Any] = {
"input": {"field_names": feature_names},
}
# 'inference_config' is required in 7.8+ but isn't available in <=7.7
if es_version(es_client) >= (7, 8):
body["inference_config"] = {model_type: {}}
if es_compress_model_definition:
body["compressed_definition"] = serializer.serialize_and_compress_model()
else:
body["definition"] = serializer.serialize_model()
ml_model._client.ml.put_trained_model(
model_id=model_id,
body=body,
)
return ml_model
def delete_model(self) -> None:
"""
Delete an inference model saved in Elasticsearch
If model doesn't exist, ignore failure.
"""
try:
self._client.ml.delete_trained_model(model_id=self._model_id, ignore=(404,))
except elasticsearch.NotFoundError:
pass
def exists_model(self) -> bool:
"""
Check if the model already exists in Elasticsearch
"""
try:
self._client.ml.get_trained_models(model_id=self._model_id)
except elasticsearch.NotFoundError:
return False
return True
@property
def _trained_model_config(self) -> Dict[str, Any]:
"""Lazily loads an ML models 'trained_model_config' information"""
if self._trained_model_config_cache is None:
# In Elasticsearch 7.7 and earlier you can't get
# target type without pulling the model definition
# so we check the version first.
if es_version(self._client) < (7, 8):
resp = self._client.ml.get_trained_models(
model_id=self._model_id, include_model_definition=True
)
else:
resp = self._client.ml.get_trained_models(model_id=self._model_id)
if resp["count"] > 1:
                raise ValueError(f"Model ID {self._model_id!r} was ambiguous")
elif resp["count"] == 0:
raise ValueError(f"Model with Model ID {self._model_id!r} wasn't found")
self._trained_model_config_cache = resp["trained_model_configs"][0]
return self._trained_model_config_cache
ImportedMLModel = deprecated_api("MLModel.import_model()")(MLModel.import_model)
|
the-stack_106_17338
|
# -*- coding: utf-8 -*-
r'''
Execution of Salt modules from within states
============================================
These states allow individual execution module calls to be made via states. To
call a single module function use a :mod:`module.run <salt.states.module.run>`
state:
.. code-block:: yaml
mine.send:
module.run:
- name: network.interfaces
Note that this example is probably unnecessary to use in practice, since the
``mine_functions`` and ``mine_interval`` config parameters can be used to
schedule updates for the mine (see :ref:`here <salt-mine>` for more
info).
It is sometimes desirable to trigger a function call after a state is executed,
for this the :mod:`module.wait <salt.states.module.wait>` state can be used:
.. code-block:: yaml
mine.send:
module.wait:
- name: network.interfaces
- watch:
- file: /etc/network/interfaces
All arguments that the ``module`` state does not consume are passed through to
the execution module function being executed:
.. code-block:: yaml
fetch_out_of_band:
module.run:
- name: git.fetch
- cwd: /path/to/my/repo
- user: myuser
- opts: '--all'
Due to how the state system works, if a module function accepts an
argument called ``name``, then ``m_name`` must be used to specify that
argument, to avoid a collision with the ``name`` argument.
Here is a list of keywords hidden by the state system, which must be prefixed
with ``m_``:
* fun
* name
* names
* state
* saltenv
For example:
.. code-block:: yaml
disable_nfs:
module.run:
- name: service.disable
- m_name: nfs
Note that some modules read all or some of the arguments from a list of keyword
arguments. For example:
.. code-block:: yaml
mine.send:
module.run:
- func: network.ip_addrs
- kwargs:
interface: eth0
.. code-block:: yaml
cloud.create:
module.run:
- func: cloud.create
- provider: test-provider
- m_names:
- test-vlad
- kwargs: {
ssh_username: 'ubuntu',
image: 'ami-8d6d9daa',
securitygroup: 'default',
size: 'c3.large',
location: 'ap-northeast-1',
delvol_on_destroy: 'True'
}
Another example that creates a recurring task that runs a batch file on a
Windows system:
.. code-block:: yaml
eventsviewer:
module.run:
- name: task.create_task
- m_name: 'events-viewer'
- user_name: System
- kwargs: {
action_type: 'Execute',
cmd: 'c:\netops\scripts\events_viewer.bat',
trigger_type: 'Daily',
start_date: '2017-1-20',
start_time: '11:59PM'
}
'''
from __future__ import absolute_import
# Import salt libs
import salt.loader
import salt.utils
import salt.utils.args
import salt.utils.jid
from salt.ext.six.moves import range
def wait(name, **kwargs):
'''
Run a single module function only if the watch statement calls it
``name``
The module function to execute
``**kwargs``
Pass any arguments needed to execute the function
.. note::
Like the :mod:`cmd.run <salt.states.cmd.run>` state, this state will
return ``True`` but not actually execute, unless one of the following
two things happens:
1. The state has a :ref:`watch requisite <requisites-watch>`, and
the state which it is watching changes.
2. Another state has a :ref:`watch_in requisite
<requisites-watch-in>` which references this state, and the state
           with the ``watch_in`` changes.
'''
return {'name': name,
'changes': {},
'result': True,
'comment': ''}
# Alias module.watch to module.wait
watch = salt.utils.alias_function(wait, 'watch')
def run(name, **kwargs):
'''
Run a single module function
``name``
The module function to execute
``returner``
Specify the returner to send the return of the module execution to
``kwargs``
Pass any arguments needed to execute the function
'''
ret = {'name': name,
'changes': {},
'comment': '',
'result': None}
if name not in __salt__:
ret['comment'] = 'Module function {0} is not available'.format(name)
ret['result'] = False
return ret
if __opts__['test']:
ret['comment'] = 'Module function {0} is set to execute'.format(name)
return ret
aspec = salt.utils.args.get_function_argspec(__salt__[name])
args = []
defaults = {}
arglen = 0
deflen = 0
if isinstance(aspec.args, list):
arglen = len(aspec.args)
if isinstance(aspec.defaults, tuple):
deflen = len(aspec.defaults)
# Match up the defaults with the respective args
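    # e.g. with aspec.args == ['a', 'b', 'c'] and aspec.defaults == (1, 2),
    # the loop below yields defaults == {'b': 1, 'c': 2}, since defaults align
    # with the trailing positional args.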
for ind in range(arglen - 1, -1, -1):
minus = arglen - ind
if deflen - minus > -1:
defaults[aspec.args[ind]] = aspec.defaults[-minus]
# overwrite passed default kwargs
for arg in defaults:
if arg == 'name':
if 'm_name' in kwargs:
defaults[arg] = kwargs.pop('m_name')
elif arg == 'fun':
if 'm_fun' in kwargs:
defaults[arg] = kwargs.pop('m_fun')
elif arg == 'state':
if 'm_state' in kwargs:
defaults[arg] = kwargs.pop('m_state')
elif arg == 'saltenv':
if 'm_saltenv' in kwargs:
defaults[arg] = kwargs.pop('m_saltenv')
if arg in kwargs:
defaults[arg] = kwargs.pop(arg)
missing = set()
for arg in aspec.args:
if arg == 'name':
rarg = 'm_name'
elif arg == 'fun':
rarg = 'm_fun'
elif arg == 'names':
rarg = 'm_names'
elif arg == 'state':
rarg = 'm_state'
elif arg == 'saltenv':
rarg = 'm_saltenv'
else:
rarg = arg
if rarg not in kwargs and arg not in defaults:
missing.add(rarg)
continue
if arg in defaults:
args.append(defaults[arg])
else:
args.append(kwargs.pop(rarg))
if missing:
comment = 'The following arguments are missing:'
for arg in missing:
comment += ' {0}'.format(arg)
ret['comment'] = comment
ret['result'] = False
return ret
if aspec.varargs and aspec.varargs in kwargs:
varargs = kwargs.pop(aspec.varargs)
if not isinstance(varargs, list):
msg = "'{0}' must be a list."
ret['comment'] = msg.format(aspec.varargs)
ret['result'] = False
return ret
args.extend(varargs)
nkwargs = {}
if aspec.keywords and aspec.keywords in kwargs:
nkwargs = kwargs.pop(aspec.keywords)
if not isinstance(nkwargs, dict):
msg = "'{0}' must be a dict."
ret['comment'] = msg.format(aspec.keywords)
ret['result'] = False
return ret
try:
if aspec.keywords:
mret = __salt__[name](*args, **nkwargs)
else:
mret = __salt__[name](*args)
except Exception as e:
ret['comment'] = 'Module function {0} threw an exception. Exception: {1}'.format(name, e)
ret['result'] = False
return ret
else:
        if mret is not None and mret != {}:
ret['changes']['ret'] = mret
if 'returner' in kwargs:
ret_ret = {
'id': __opts__['id'],
'ret': mret,
'fun': name,
'jid': salt.utils.jid.gen_jid()}
returners = salt.loader.returners(__opts__, __salt__)
if kwargs['returner'] in returners:
returners[kwargs['returner']](ret_ret)
ret['comment'] = 'Module function {0} executed'.format(name)
ret['result'] = True
# if mret is a dict and there is retcode and its non-zero
if isinstance(mret, dict) and mret.get('retcode', 0) != 0:
ret['result'] = False
# if its a boolean, return that as the result
elif isinstance(mret, bool):
ret['result'] = mret
else:
changes_ret = ret['changes'].get('ret', {})
if isinstance(changes_ret, dict):
if isinstance(changes_ret.get('result', {}), bool):
ret['result'] = changes_ret.get('result', {})
elif changes_ret.get('retcode', 0) != 0:
ret['result'] = False
return ret
mod_watch = salt.utils.alias_function(run, 'mod_watch')
|
the-stack_106_17341
|
import pytest
import torch
from utils.fakes import *
cuda_required = pytest.mark.skipif(not torch.cuda.is_available(),
reason="cuda enabled gpu is not available")
a3b3b3 = torch.ones([1, 3, 3, 3])
def test_model2half():
m = simple_cnn([3,6,6],bn=True)
m = model2half(m)
conv1 = m[0][0]
bn = m[0][2]
assert isinstance(conv1.weight, torch.HalfTensor)
assert isinstance(bn.weight, torch.FloatTensor)
@cuda_required
def test_model2half_forward():
learn = fake_learner()
x,y = next(iter(learn.data.train_dl))
res1 = learn.model(x)
learn.model = model2half(learn.model)
res2 = learn.model(x.half())
assert (res2.float() - res1).abs().sum() < 0.01
def test_to_half():
t1,t2 = torch.ones([1]).long(),torch.ones([1])
half = to_half([t1,t2])
assert isinstance(half[0],torch.LongTensor)
assert isinstance(half[1],torch.HalfTensor)
def test_batch_to_half():
t1,t2 = torch.ones([1]),torch.ones([1])
half = batch_to_half([t1,t2])
assert isinstance(half[0],torch.HalfTensor)
assert isinstance(half[1],torch.FloatTensor)
|
the-stack_106_17342
|
# Can we create a standalone executable?
# # target = "llvm -link-params"
# https://discuss.tvm.apache.org/t/can-we-create-a-standalone-executable/8773
import numpy as np
import tvm
import tvm.testing
import tvm.topi.testing
from tvm import relay
from tvm.contrib import graph_executor
import onnx
from onnx import TensorProto, helper, mapping, numpy_helper
import pdb
def get_input_data_shape_dict(graph_def, input_data):
if isinstance(input_data, list):
input_names = {}
shape_dict = {}
for i, _ in enumerate(input_data):
input_names[i] = graph_def.graph.input[i].name
shape_dict[input_names[i]] = input_data[i].shape
else:
input_names = graph_def.graph.input[0].name
shape_dict = {input_names: input_data.shape}
return input_names, shape_dict
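# e.g. a single ndarray input of shape (2, 3) for a graph whose first input is named
# "a" yields ("a", {"a": (2, 3)}); a list of inputs yields ({0: "a", 1: "b", ...}, shape_dict).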
def get_tvm_output_with_vm(
graph_def, input_data, target, device, opset=None, freeze_params=False, convert_to_static=False
):
"""Generic function to execute and get tvm output with vm executor"""
if not isinstance(input_data, list):
input_data = [input_data]
_, shape_dict = get_input_data_shape_dict(graph_def, input_data)
mod, params = relay.frontend.from_onnx(
graph_def, shape_dict, opset=opset, freeze_params=freeze_params
)
if convert_to_static:
mod = relay.transform.DynamicToStatic()(mod)
ex = relay.create_executor("vm", mod=mod, device=device, target=target)
result = ex.evaluate()(*input_data, **params)
if isinstance(result, tvm.runtime.NDArray):
return result.numpy()
return [r.numpy() for r in result]
def verify_simple_dynamic_model(a_shape, b_shape, target, dev):
def verify_model(ex, a_shape, b_shape):
a_array = np.random.uniform(size=a_shape).astype("float32")
b_array = np.random.uniform(size=b_shape).astype("float32")
# matmul
out_np = np.matmul(a_array, b_array)
# relu
out_np[out_np < 0] = 0
tvm_out = ex.evaluate()(a_array, b_array).numpy()
tvm.testing.assert_allclose(out_np, tvm_out, rtol=1e-5, atol=1e-5)
mul_node = helper.make_node("MatMul", ["a", "b"], ["out"])
relu_node = helper.make_node("Relu", ["out"], ["relu"])
a_array = np.random.uniform(size=a_shape).astype("float32")
b_array = np.random.uniform(size=b_shape).astype("float32")
# matmul
out_np = np.matmul(a_array, b_array)
graph = helper.make_graph(
[mul_node, relu_node],
"matmul_test",
inputs=[
helper.make_tensor_value_info("a", TensorProto.FLOAT, list(a_shape)),
helper.make_tensor_value_info("b", TensorProto.FLOAT, list(b_shape)),
],
outputs=[helper.make_tensor_value_info("relu", TensorProto.FLOAT, list(out_np.shape))],
)
model = helper.make_model(graph, producer_name="matmul_test")
a_anys = [relay.Any()] * len(a_shape)
b_anys = [relay.Any()] * len(b_shape)
mod, params = relay.frontend.from_onnx(model, {"a": a_anys, "b": b_anys})
# https://discuss.tvm.apache.org/t/relay-frontend-can-relay-take-none-include-shape/5772/2
# # xxxx8888
# # opt_level = 3
# # with tvm.transform.PassContext(opt_level=opt_level):
# executable = tvm.relay.backend.vm.compile(mod, target, params=params)
# # code, lib = executable.save()
# # Examples
# # --------------------------------------------
# # import numpy as np
# # import tvm
# # from tvm import te
# # from tvm import relay
# # # define a simple network.
# # x = relay.var('x', shape=(10, 10))
# # f = relay.Function([x], x + x)
# # mod = tvm.IRModule({"main": f})
# # # create a Relay VM.
# # dev = tvm.cpu()
# # target = "llvm"
# # executable = relay.vm.compile(mod, target)
# # code, lib = executable.save()
# # # save and load the code and lib file.
# # tmp = tvm.contrib.utils.tempdir()
# # path_lib = tmp.relpath("lib.so")
# # lib.export_library(path_lib)
# # with open(tmp.relpath("code.ro"), "wb") as fo:
# # fo.write(code)
# # loaded_lib = tvm.runtime.load_module(path_lib)
# # loaded_code = bytearray(open(tmp.relpath("code.ro"), "rb").read())
# # # deserialize.
# # des_exec = tvm.runtime.vm.Executable.load_exec(loaded_code, loaded_lib)
# # # execute the deserialized executable.
# # x_data = np.random.rand(10, 10).astype('float32')
# # des_vm = tvm.runtime.vm.VirtualMachine(des_exec, dev)
# # res = des_vm.run(x_data)
# # print(res.numpy())
# # pdb.set_trace()
    # 'ex' is needed by the verify_model() calls below.
    ex = relay.create_executor("vm", mod=mod, device=dev, target=target)
verify_model(ex, a_shape, b_shape)
verify_model(ex, [a * 2 for a in a_shape], [b * 2 for b in b_shape])
verify_model(ex, [a * 3 for a in a_shape], [b * 3 for b in b_shape])
pdb.set_trace()
# TODO(mbrookhart, electriclilies): Add CUDA as a target once batch matmul is fixed
@tvm.testing.parametrize_targets("llvm")
def test_batch_matmul_dynamic_model(target, dev):
verify_simple_dynamic_model((2, 3, 4, 3), (2, 3, 3, 4), target, dev)
verify_simple_dynamic_model((2, 4, 3), (3, 4), target, dev)
verify_simple_dynamic_model((2, 3, 4, 3), (3, 4), target, dev)
if __name__ == "__main__":
target = "llvm"
device = tvm.cpu(0)
test_batch_matmul_dynamic_model(target, device)
|
the-stack_106_17346
|
import tty
import sys
import curses
import datetime
import locale
from decimal import Decimal
import getpass
import logging
from typing import TYPE_CHECKING
import electrum
from electrum import util
from electrum.util import format_satoshis
from electrum.bitcoin import is_address, COIN
from electrum.transaction import PartialTxOutput
from electrum.wallet import Wallet
from electrum.wallet_db import WalletDB
from electrum.storage import WalletStorage
from electrum.network import NetworkParameters, TxBroadcastError, BestEffortRequestFailed
from electrum.interface import ServerAddr
if TYPE_CHECKING:
from electrum.daemon import Daemon
from electrum.simple_config import SimpleConfig
from electrum.plugin import Plugins
_ = lambda x:x # i18n
class ElectrumGui:
def __init__(self, config: 'SimpleConfig', daemon: 'Daemon', plugins: 'Plugins'):
self.config = config
self.network = daemon.network
storage = WalletStorage(config.get_wallet_path())
if not storage.file_exists():
print("Wallet not found. try 'electrum create'")
exit()
if storage.is_encrypted():
password = getpass.getpass('Password:', stream=None)
storage.decrypt(password)
db = WalletDB(storage.read(), manual_upgrades=False)
self.wallet = Wallet(db, storage, config=config)
self.wallet.start_network(self.network)
self.contacts = self.wallet.contacts
locale.setlocale(locale.LC_ALL, '')
self.encoding = locale.getpreferredencoding()
self.stdscr = curses.initscr()
curses.noecho()
curses.cbreak()
curses.start_color()
curses.use_default_colors()
curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLUE)
curses.init_pair(2, curses.COLOR_WHITE, curses.COLOR_CYAN)
curses.init_pair(3, curses.COLOR_BLACK, curses.COLOR_WHITE)
self.stdscr.keypad(1)
self.stdscr.border(0)
self.maxy, self.maxx = self.stdscr.getmaxyx()
self.set_cursor(0)
self.w = curses.newwin(10, 50, 5, 5)
self.tab = 0
self.pos = 0
self.popup_pos = 0
self.str_recipient = ""
self.str_description = ""
self.str_amount = ""
self.str_fee = ""
self.history = None
self.txid = []
util.register_callback(self.update, ['wallet_updated', 'network_updated'])
self.tab_names = [_("History"), _("Send"), _("Receive"), _("Addresses"), _("Contacts"), _("Banner")]
self.num_tabs = len(self.tab_names)
def set_cursor(self, x):
try:
curses.curs_set(x)
except Exception:
pass
def restore_or_create(self):
pass
def verify_seed(self):
pass
def get_string(self, y, x):
self.set_cursor(1)
curses.echo()
self.stdscr.addstr(y, x, " "*20, curses.A_REVERSE)
s = self.stdscr.getstr(y,x)
curses.noecho()
self.set_cursor(0)
return s
def update(self, event, *args):
self.update_history()
if self.tab == 0:
self.print_history()
self.refresh()
def print_history(self):
width = [20, 40, 14, 14]
delta = (self.maxx - sum(width) - 4)/3
format_str = "%"+"%d"%width[0]+"s"+"%"+"%d"%(width[1]+delta)+"s"+"%"+"%d"%(width[2]+delta)+"s"+"%"+"%d"%(width[3]+delta)+"s"
if self.history is None:
self.update_history()
self.print_list(self.history[::-1], format_str%(_("Date"), _("Description"), _("Amount"), _("Balance")))
def update_history(self):
width = [20, 40, 14, 14]
delta = (self.maxx - sum(width) - 4)/3
format_str = "%"+"%d"%width[0]+"s"+"%"+"%d"%(width[1]+delta)+"s"+"%"+"%d"%(width[2]+delta)+"s"+"%"+"%d"%(width[3]+delta)+"s"
b = 0
self.history = []
self.txid = []
for hist_item in self.wallet.get_history():
if hist_item.tx_mined_status.conf:
timestamp = hist_item.tx_mined_status.timestamp
try:
time_str = datetime.datetime.fromtimestamp(timestamp).isoformat(' ')[:-3]
except Exception:
time_str = "------"
else:
time_str = 'unconfirmed'
label = self.wallet.get_label_for_txid(hist_item.txid)
self.txid.insert(0, hist_item.txid)
if len(label) > 40:
label = label[0:37] + '...'
self.history.append(format_str % (time_str, label, format_satoshis(hist_item.delta, whitespaces=True),
format_satoshis(hist_item.balance, whitespaces=True)))
def print_balance(self):
if not self.network:
msg = _("Offline")
elif self.network.is_connected():
if not self.wallet.up_to_date:
msg = _("Synchronizing...")
else:
c, u, x = self.wallet.get_balance()
msg = _("Balance")+": %f "%(Decimal(c) / COIN)
if u:
msg += " [%f unconfirmed]"%(Decimal(u) / COIN)
if x:
msg += " [%f unmatured]"%(Decimal(x) / COIN)
else:
msg = _("Not connected")
self.stdscr.addstr(self.maxy -1, 3, msg)
for i in range(self.num_tabs):
self.stdscr.addstr(0, 2 + 2*i + len(''.join(self.tab_names[0:i])), ' '+self.tab_names[i]+' ', curses.A_BOLD if self.tab == i else 0)
self.stdscr.addstr(self.maxy -1, self.maxx-30, ' '.join([_("Settings"), _("Network"), _("Quit")]))
def print_receive(self):
addr = self.wallet.get_receiving_address()
self.stdscr.addstr(2, 1, "Address: "+addr)
self.print_qr(addr)
def print_contacts(self):
messages = map(lambda x: "%20s %45s "%(x[0], x[1][1]), self.contacts.items())
self.print_list(messages, "%19s %15s "%("Key", "Value"))
def print_addresses(self):
fmt = "%-35s %-30s"
messages = map(lambda addr: fmt % (addr, self.wallet.get_label(addr)), self.wallet.get_addresses())
self.print_list(messages, fmt % ("Address", "Label"))
def print_edit_line(self, y, label, text, index, size):
text += " "*(size - len(text))
self.stdscr.addstr(y, 2, label)
self.stdscr.addstr(y, 15, text, curses.A_REVERSE if self.pos%6==index else curses.color_pair(1))
def print_send_tab(self):
self.stdscr.clear()
self.print_edit_line(3, _("Pay to"), self.str_recipient, 0, 40)
self.print_edit_line(5, _("Description"), self.str_description, 1, 40)
self.print_edit_line(7, _("Amount"), self.str_amount, 2, 15)
self.print_edit_line(9, _("Fee"), self.str_fee, 3, 15)
self.stdscr.addstr(12, 15, _("[Send]"), curses.A_REVERSE if self.pos%6==4 else curses.color_pair(2))
self.stdscr.addstr(12, 25, _("[Clear]"), curses.A_REVERSE if self.pos%6==5 else curses.color_pair(2))
self.maxpos = 6
def print_banner(self):
if self.network and self.network.banner:
banner = self.network.banner
banner = banner.replace('\r', '')
self.print_list(banner.split('\n'))
def print_qr(self, data):
import qrcode
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
s = StringIO()
self.qr = qrcode.QRCode()
self.qr.add_data(data)
self.qr.print_ascii(out=s, invert=False)
msg = s.getvalue()
lines = msg.split('\n')
try:
for i, l in enumerate(lines):
l = l.encode("utf-8")
self.stdscr.addstr(i+5, 5, l, curses.color_pair(3))
except curses.error:
m = 'error. screen too small?'
m = m.encode(self.encoding)
self.stdscr.addstr(5, 1, m, 0)
def print_list(self, lst, firstline = None):
lst = list(lst)
self.maxpos = len(lst)
if not self.maxpos: return
if firstline:
firstline += " "*(self.maxx -2 - len(firstline))
self.stdscr.addstr(1, 1, firstline)
for i in range(self.maxy-4):
msg = lst[i] if i < len(lst) else ""
msg += " "*(self.maxx - 2 - len(msg))
m = msg[0:self.maxx - 2]
m = m.encode(self.encoding)
self.stdscr.addstr(i+2, 1, m, curses.A_REVERSE if i == (self.pos % self.maxpos) else 0)
def refresh(self):
if self.tab == -1: return
self.stdscr.border(0)
self.print_balance()
self.stdscr.refresh()
def main_command(self):
c = self.stdscr.getch()
print(c)
cc = curses.unctrl(c).decode()
if c == curses.KEY_RIGHT: self.tab = (self.tab + 1)%self.num_tabs
elif c == curses.KEY_LEFT: self.tab = (self.tab - 1)%self.num_tabs
elif c == curses.KEY_DOWN: self.pos +=1
elif c == curses.KEY_UP: self.pos -= 1
elif c == 9: self.pos +=1 # tab
elif cc in ['^W', '^C', '^X', '^Q']: self.tab = -1
elif cc in ['^N']: self.network_dialog()
elif cc == '^S': self.settings_dialog()
else: return c
if self.pos<0: self.pos=0
if self.pos>=self.maxpos: self.pos=self.maxpos - 1
def run_tab(self, i, print_func, exec_func):
while self.tab == i:
self.stdscr.clear()
print_func()
self.refresh()
c = self.main_command()
if c: exec_func(c)
def run_history_tab(self, c):
# Get txid from cursor position
if c == 10:
out = self.run_popup('', ['Transaction ID:', self.txid[self.pos]])
def edit_str(self, target, c, is_num=False):
# detect backspace
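        # e.g. self.edit_str("12", ord('3'), is_num=True) -> "123", while a backspace
        # keycode (8, 127 or 263) drops the last character: self.edit_str("abc", 127) -> "ab"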
cc = curses.unctrl(c).decode()
if c in [8, 127, 263] and target:
target = target[:-1]
elif not is_num or cc in '0123456789.':
target += cc
return target
def run_send_tab(self, c):
if self.pos%6 == 0:
self.str_recipient = self.edit_str(self.str_recipient, c)
if self.pos%6 == 1:
self.str_description = self.edit_str(self.str_description, c)
if self.pos%6 == 2:
self.str_amount = self.edit_str(self.str_amount, c, True)
elif self.pos%6 == 3:
self.str_fee = self.edit_str(self.str_fee, c, True)
elif self.pos%6==4:
if c == 10: self.do_send()
elif self.pos%6==5:
if c == 10: self.do_clear()
def run_receive_tab(self, c):
if c == 10:
out = self.run_popup('Address', ["Edit label", "Freeze", "Prioritize"])
def run_contacts_tab(self, c):
if c == 10 and self.contacts:
out = self.run_popup('Address', ["Copy", "Pay to", "Edit label", "Delete"]).get('button')
key = list(self.contacts.keys())[self.pos%len(self.contacts.keys())]
if out == "Pay to":
self.tab = 1
self.str_recipient = key
self.pos = 2
elif out == "Edit label":
s = self.get_string(6 + self.pos, 18)
if s:
self.wallet.set_label(key, s)
def run_banner_tab(self, c):
self.show_message(repr(c))
pass
def main(self):
tty.setraw(sys.stdin)
try:
while self.tab != -1:
self.run_tab(0, self.print_history, self.run_history_tab)
self.run_tab(1, self.print_send_tab, self.run_send_tab)
self.run_tab(2, self.print_receive, self.run_receive_tab)
self.run_tab(3, self.print_addresses, self.run_banner_tab)
self.run_tab(4, self.print_contacts, self.run_contacts_tab)
self.run_tab(5, self.print_banner, self.run_banner_tab)
except curses.error as e:
raise Exception("Error with curses. Is your screen too small?") from e
finally:
tty.setcbreak(sys.stdin)
curses.nocbreak()
self.stdscr.keypad(0)
curses.echo()
curses.endwin()
def stop(self):
pass
def do_clear(self):
self.str_amount = ''
self.str_recipient = ''
self.str_fee = ''
self.str_description = ''
def do_send(self):
if not is_address(self.str_recipient):
self.show_message(_('Invalid Baricoin address'))
return
try:
amount = int(Decimal(self.str_amount) * COIN)
except Exception:
self.show_message(_('Invalid Amount'))
return
try:
fee = int(Decimal(self.str_fee) * COIN)
except Exception:
self.show_message(_('Invalid Fee'))
return
if self.wallet.has_password():
password = self.password_dialog()
if not password:
return
else:
password = None
try:
tx = self.wallet.mktx(outputs=[PartialTxOutput.from_address_and_value(self.str_recipient, amount)],
password=password,
fee=fee)
except Exception as e:
self.show_message(repr(e))
return
if self.str_description:
self.wallet.set_label(tx.txid(), self.str_description)
self.show_message(_("Please wait..."), getchar=False)
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
msg = e.get_message_for_gui()
self.show_message(msg)
except BestEffortRequestFailed as e:
msg = repr(e)
self.show_message(msg)
else:
self.show_message(_('Payment sent.'))
self.do_clear()
#self.update_contacts_tab()
def show_message(self, message, getchar = True):
w = self.w
w.clear()
w.border(0)
for i, line in enumerate(message.split('\n')):
w.addstr(2+i,2,line)
w.refresh()
if getchar: c = self.stdscr.getch()
def run_popup(self, title, items):
return self.run_dialog(title, list(map(lambda x: {'type':'button','label':x}, items)), interval=1, y_pos = self.pos+3)
def network_dialog(self):
if not self.network:
return
net_params = self.network.get_parameters()
server_addr = net_params.server
proxy_config, auto_connect = net_params.proxy, net_params.auto_connect
srv = 'auto-connect' if auto_connect else str(self.network.default_server)
out = self.run_dialog('Network', [
{'label':'server', 'type':'str', 'value':srv},
{'label':'proxy', 'type':'str', 'value':self.config.get('proxy', '')},
], buttons = 1)
if out:
if out.get('server'):
server_str = out.get('server')
auto_connect = server_str == 'auto-connect'
if not auto_connect:
try:
server_addr = ServerAddr.from_str(server_str)
except Exception:
self.show_message("Error:" + server_str + "\nIn doubt, type \"auto-connect\"")
return False
if out.get('server') or out.get('proxy'):
proxy = electrum.network.deserialize_proxy(out.get('proxy')) if out.get('proxy') else proxy_config
net_params = NetworkParameters(server=server_addr,
proxy=proxy,
auto_connect=auto_connect)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def settings_dialog(self):
fee = str(Decimal(self.config.fee_per_kb()) / COIN)
out = self.run_dialog('Settings', [
{'label':'Default fee', 'type':'satoshis', 'value': fee}
], buttons = 1)
if out:
if out.get('Default fee'):
fee = int(Decimal(out['Default fee']) * COIN)
self.config.set_key('fee_per_kb', fee, True)
def password_dialog(self):
out = self.run_dialog('Password', [
{'label':'Password', 'type':'password', 'value':''}
], buttons = 1)
return out.get('Password')
def run_dialog(self, title, items, interval=2, buttons=None, y_pos=3):
self.popup_pos = 0
self.w = curses.newwin(5 + len(list(items))*interval + (2 if buttons else 0), 68, y_pos, 5)
w = self.w
out = {}
while True:
w.clear()
w.border(0)
w.addstr(0, 2, title)
num = len(list(items))
numpos = num
if buttons: numpos += 2
for i in range(num):
item = items[i]
label = item.get('label')
if item.get('type') == 'list':
value = item.get('value','')
elif item.get('type') == 'satoshis':
value = item.get('value','')
elif item.get('type') == 'str':
value = item.get('value','')
elif item.get('type') == 'password':
value = '*'*len(item.get('value',''))
else:
value = ''
if value is None:
value = ''
if len(value)<20:
value += ' '*(20-len(value))
if 'value' in item:
w.addstr(2+interval*i, 2, label)
w.addstr(2+interval*i, 15, value, curses.A_REVERSE if self.popup_pos%numpos==i else curses.color_pair(1))
else:
w.addstr(2+interval*i, 2, label, curses.A_REVERSE if self.popup_pos%numpos==i else 0)
if buttons:
w.addstr(5+interval*i, 10, "[ ok ]", curses.A_REVERSE if self.popup_pos%numpos==(numpos-2) else curses.color_pair(2))
w.addstr(5+interval*i, 25, "[cancel]", curses.A_REVERSE if self.popup_pos%numpos==(numpos-1) else curses.color_pair(2))
w.refresh()
c = self.stdscr.getch()
if c in [ord('q'), 27]: break
elif c in [curses.KEY_LEFT, curses.KEY_UP]: self.popup_pos -= 1
elif c in [curses.KEY_RIGHT, curses.KEY_DOWN]: self.popup_pos +=1
else:
i = self.popup_pos%numpos
if buttons and c==10:
if i == numpos-2:
return out
elif i == numpos -1:
return {}
item = items[i]
_type = item.get('type')
if _type == 'str':
item['value'] = self.edit_str(item['value'], c)
out[item.get('label')] = item.get('value')
elif _type == 'password':
item['value'] = self.edit_str(item['value'], c)
                    out[item.get('label')] = item['value']
elif _type == 'satoshis':
item['value'] = self.edit_str(item['value'], c, True)
out[item.get('label')] = item.get('value')
elif _type == 'list':
choices = item.get('choices')
try:
j = choices.index(item.get('value'))
except Exception:
j = 0
new_choice = choices[(j + 1)% len(choices)]
item['value'] = new_choice
out[item.get('label')] = item.get('value')
elif _type == 'button':
out['button'] = item.get('label')
break
return out
|
the-stack_106_17347
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
"""gluon backend rep for onnx test infrastructure"""
import numpy as np
try:
from onnx.backend.base import BackendRep
except ImportError:
raise ImportError("Onnx and protobuf need to be installed. Instructions to"
+ " install - https://github.com/onnx/onnx#installation")
import mxnet as mx
from mxnet import nd
# GluonBackendRep object will be returned by GluonBackend's prepare method which is used to
# execute a model repeatedly.
# Inputs will be passed to the run method of MXNetBackendRep class, it will perform computation and
# retrieve the corresponding results for comparison to the onnx backend.
# https://github.com/onnx/onnx/blob/master/onnx/backend/test/runner/__init__.py.
# Implemented by following onnx docs guide:
# https://github.com/onnx/onnx/blob/master/docs/ImplementingAnOnnxBackend.md
class GluonBackendRep(BackendRep):
"""Running model inference on gluon backend and return the result
to onnx test infrastructure for comparison."""
def __init__(self, net, device):
self.net = net
self.device = device
def run(self, inputs, **kwargs):
"""Run model inference and return the result
Parameters
----------
inputs : numpy array
input to run a layer on
Returns
-------
params : numpy array
result obtained after running the inference on mxnet
"""
# create module, passing cpu context
if self.device == 'CPU':
ctx = mx.cpu()
else:
raise NotImplementedError("ONNX tests are run only for CPU context.")
# run inference
net_inputs = [nd.array(input_data, ctx=ctx) for input_data in inputs]
net_outputs = self.net(*net_inputs)
results = []
results.extend([o for o in net_outputs.asnumpy()])
result = np.array(results)
return [result]
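# A minimal usage sketch (the toy Gluon net below is an assumption for illustration;
# in the ONNX test runner the net comes from GluonBackend's prepare method):
#
#   from mxnet.gluon import nn as gnn
#   net = gnn.Dense(2)
#   net.initialize(ctx=mx.cpu())
#   rep = GluonBackendRep(net, 'CPU')
#   outputs = rep.run([np.random.rand(1, 4).astype('float32')])  # -> list of numpy arrays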
|
the-stack_106_17348
|
"""AirSim Collection Script
"""
import sys
import json
import logging
import time
from pprint import pprint
import click
import numpy as np
from airsimcollect.helper.helper import update, update_collectors, DEFAULT_CONFIG
from airsimcollect import AirSimCollect
logger = logging.getLogger("AirSimCollect")
logger.setLevel(logging.INFO)
def validate_collect_config(config, segmentation_only=False):
"""Validates the configuration file
Arguments:
config {dict} -- Dictionary of configuration
"""
if segmentation_only:
points = np.array([])
else:
collection_points_file = config['collection_points']
points = np.load(collection_points_file)
return points
@click.group()
def cli():
"""Collects data from UE4 AirSim World"""
pass
@cli.command()
@click.option('-c', '--config-file', type=click.Path(exists=True), required=True,
help='File to configure collection')
@click.option('-so', '--segmentation-only', is_flag=True,
help="Only apply segmentation codes")
@click.option('-d', '--debug', is_flag=True,
help="Set debug mode. Verbose logs")
def collect(config_file, segmentation_only, debug):
"""Collects AirSim data"""
if debug:
logger.setLevel(logging.DEBUG)
with open(config_file) as file:
config = json.load(file)
config = update(DEFAULT_CONFIG, config)
config['collectors'] = update_collectors(config['collectors'])
# pprint(config, indent=4)
collection_points = None
try:
collection_points = validate_collect_config(config, segmentation_only)
del config['collection_points']
except Exception as e:
click.secho("Error in validating config file", fg='red')
logger.exception(e)
sys.exit()
if config.get('collection_point_names'):
with open(config['collection_point_names']) as f:
config['collection_point_names'] = json.load(f)
num_collections = collection_points.shape[0]
click.secho("Collecting {:d} data snapshots".format(num_collections))
with click.progressbar(length=num_collections, label='Collecting data') as bar: # pylint: disable=C0102,
asc = AirSimCollect(**config, collection_points=collection_points, bar=bar) # pylint: disable=E1132,
start_time = time.time()
records = asc.begin_collection()
end_time = time.time()
logger.info("%.2f seconds elapsed to take %d data snapshots", end_time - start_time, num_collections)
# logger.info("%d data snapshots taken", num_collections)
|
the-stack_106_17349
|
import torch
from collections import OrderedDict
from torch import nn
def batch_to_device(batch, device):
for key in batch[0].keys():
batch[0][key] = batch[0][key].float()
batch[0][key] = batch[0][key].to(device)
batch[1] = batch[1].to(device)
return batch
class M3EP(nn.Module):
"""
Multi-Modal Model for Energy label Prediction
"""
def __init__(self, config):
super(M3EP, self).__init__()
self.config = config
# Random noise submodule for testing purposes
submodule_name = 'random_noise'
if submodule_name in self.config['submodules']:
out_channels = config['submodules'][submodule_name]['output_size']
setattr(self, submodule_name, nn.Sequential(OrderedDict([
('y_o_c_linear1', nn.Linear(1, out_channels)),
('y_o_c_relu', nn.ReLU()),
])))
# Year of construction submodule
submodule_name = 'year_of_construction'
if submodule_name in self.config['submodules']:
out_channels = config['submodules'][submodule_name]['output_size']
setattr(self, submodule_name, nn.Sequential(OrderedDict([
('y_o_c_linear1', nn.Linear(1, out_channels)),
('y_o_c_relu', nn.ReLU()),
])))
# Registration date submodule
submodule_name = 'registration_date'
if submodule_name in self.config['submodules']:
out_channels = config['submodules'][submodule_name]['output_size']
setattr(self, submodule_name, nn.Sequential(OrderedDict([
('reg_date_linear1', nn.Linear(4, out_channels)),
('reg_date_relu', nn.ReLU()),
])))
# Recorded date submodule
submodule_name = 'recorded_date'
if submodule_name in self.config['submodules']:
out_channels = config['submodules'][submodule_name]['output_size']
setattr(self, submodule_name,
nn.Sequential(OrderedDict([
('rec_date_linear1', nn.Linear(4, out_channels)),
('rec_date_relu', nn.ReLU())])))
# House number submodule
submodule_name = 'house_number'
if submodule_name in self.config['submodules']:
out_channels = config['submodules'][submodule_name]['output_size']
setattr(self, submodule_name,
nn.Sequential(OrderedDict([
('house_number_linear1', nn.Linear(2, out_channels)),
('house_number_relu', nn.ReLU())])))
# House number addition submodule
submodule_name = 'house_number_addition'
if submodule_name in self.config['submodules']:
out_channels = config['submodules'][submodule_name]['output_size']
setattr(self, submodule_name,
nn.Sequential(OrderedDict([
('house_number_addition_linear1', nn.Linear(self.config['vocab_len'], out_channels)),
('house_number_addition_relu', nn.ReLU()),
('house_number_addition_global_average', nn.AdaptiveAvgPool1d(1))
])))
# Purposes submodule
submodule_name = 'purposes'
if submodule_name in self.config['submodules']:
out_channels = config['submodules'][submodule_name]['output_size']
setattr(self, submodule_name,
nn.Sequential(OrderedDict([
('purposes_linear1', nn.Linear(11, out_channels)),
('purposes_relu', nn.ReLU())
])))
# Postal code submodule
submodule_name = 'postal_code'
if submodule_name in self.config['submodules']:
out_channels = config['submodules'][submodule_name]['output_size']
setattr(self, submodule_name,
nn.Sequential(OrderedDict([
('postal_code_linear1', nn.Linear(6 * self.config['vocab_len'], out_channels)),
('postal_code_relu', nn.ReLU())
])))
# Geometry submodule
submodule_name = 'geometry'
if submodule_name in self.config['submodules']:
hidden_size = self.config['submodules'][submodule_name]['hidden_size']
out_channels = self.config['submodules'][submodule_name]['output_size']
output_last_dim = self.config['late_fusion']['input_size']
convnet_kernel_size = self.config['submodules'][submodule_name]['cnn_kernel_size']
maxpool_kernel_size = self.config['submodules'][submodule_name]['maxpool_kernel_size']
setattr(self, submodule_name,
nn.Sequential(OrderedDict([
('geometry_conv1d_1', nn.Conv1d(
in_channels=5,
out_channels=hidden_size,
kernel_size=convnet_kernel_size,
padding=convnet_kernel_size - 1
)),
('geometry', nn.ReLU()),
# ('geometry_conv1d_2', nn.Conv1d(
# in_channels=output_size,
# out_channels=output_size,
# kernel_size=convnet_kernel_size,
# padding=convnet_kernel_size - 1
# )),
# ('geometry', nn.ReLU()),
('geometry_maxpool', nn.MaxPool1d(kernel_size=maxpool_kernel_size)),
('geometry_conv1d_3', nn.Conv1d(
in_channels=hidden_size,
out_channels=out_channels,
kernel_size=convnet_kernel_size,
padding=convnet_kernel_size - 1
)),
('geometry', nn.ReLU()),
('geometry_avg_pooling', nn.AdaptiveAvgPool1d(output_last_dim)),
])))
# Late fusion submodule
input_size = config['late_fusion']['input_size']
late_fusion_hidden_size = config['late_fusion']['hidden_size']
self.late_fusion = nn.Sequential(OrderedDict([
('late_fusion_concatenated_linear', nn.Linear(input_size, late_fusion_hidden_size)),
('late_fusion_relu', nn.ReLU()),
('geometry_avg_pooling', nn.AdaptiveAvgPool1d(1)),
]))
# Output transformation layer
submodule_size_sum = sum([config['submodules'][m]['output_size'] for m in config['submodules']])
output_size = config['late_fusion']['output_size']
self.output_transformation_layer = nn.Sequential(OrderedDict([
('output_transformation_linear', nn.Linear(submodule_size_sum, output_size))
]))
def get_submodule_output(self, batch, module_name):
module = self._modules[module_name]
module_vec = batch[0][module_name + '_vec']
if len(module_vec.shape) == 1: # In case of scalar values, such as the year of construction
module_vec = module_vec.unsqueeze(dim=1)
if module_name == 'geometry':
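            # (batch, n_points, 5) -> (batch, 5, n_points): the geometry Conv1d layers
            # expect the five per-point features as input channels.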
module_vec = module_vec.permute(0, 2, 1)
module_output = module(module_vec)
return module_output
module_output = module(module_vec)
if len(module_output.shape) == 2:
module_output = module_output.unsqueeze(dim=1)
# module_output.retain_grad()
return module_output
def forward(self, batch):
# Create outputs for all the submodules and corresponding modal vectors
submodule_outputs = tuple(self.get_submodule_output(batch, module_name)
for module_name in self.config['submodules'])
submodule_size_sum = sum([self.config['submodules'][m]['output_size'] for m in self.config['submodules']])
for idx, sample in enumerate(submodule_outputs):
            if len(sample.shape) == 2:
raise ValueError('Wrong tensor dimension', sample.shape, 'output from submodule', idx, ':', sample)
# noinspection PyUnresolvedReferences
concatenated = torch.cat(submodule_outputs, dim=1)
output = self.late_fusion(concatenated)
output = output.view(-1, submodule_size_sum)
output = self.output_transformation_layer(output)
return output
|
the-stack_106_17353
|
import re
from semantic_version import Version
_USER_AGENT_SEARCH_REGEX = re.compile(r"docker\/([0-9]+(?:\.[0-9]+){1,2})")
_EXACT_1_5_USER_AGENT = re.compile(r"^Go 1\.1 package http$")
_ONE_FIVE_ZERO = "1.5.0"
def docker_version(user_agent_string):
""" Extract the Docker version from the user agent, taking special care to
handle the case of a 1.5 client requesting an auth token, which sends
a broken user agent. If we can not positively identify a version, return
None.
"""
# First search for a well defined semver portion in the UA header.
found_semver = _USER_AGENT_SEARCH_REGEX.search(user_agent_string)
if found_semver:
# Docker changed their versioning scheme on Feb 17, 2017 to use date-based versioning:
# https://github.com/docker/docker/pull/31075
# This scheme allows for 0s to appear as prefixes in the major or minor portions of the version,
# which violates semver. Strip them out.
portions = found_semver.group(1).split(".")
updated_portions = [(p[:-1].lstrip("0") + p[-1]) for p in portions]
return Version(".".join(updated_portions), partial=True)
# Check if we received the very specific header which represents a 1.5 request
# to the auth endpoints.
elif _EXACT_1_5_USER_AGENT.match(user_agent_string):
return Version(_ONE_FIVE_ZERO)
else:
return None
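# Illustrative behaviour (example user-agent strings are assumed, not taken from real traffic):
#   docker_version("docker/17.03.0-ce go/go1.7.5 os/linux arch/amd64")
#       -> Version("17.3.0", partial=True)  # the zero-padded "03" is stripped to "3"
#   docker_version("Go 1.1 package http")   -> Version("1.5.0")
#   docker_version("curl/7.47.0")           -> None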
|
the-stack_106_17354
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import socket
import sys
import time
from colors import bcolors
ADDR = ''
PORT = 10000
# Create a UDP socket
client_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
server_address = (ADDR, PORT)
device_id = sys.argv[1]
device_location = sys.argv[2]
odour_level = sys.argv[3]
base_temp = sys.argv[4]
base_humidity = sys.argv[5]
if not device_id:
sys.exit('The device id must be specified.')
print('Bringing up device {}'.format(device_id))
# return message received
def send_command(sock, message, log=True):
sock.sendto(message, server_address)
# Receive response
response, _ = sock.recvfrom(4096)
return response
def make_message(device_id, action, data=''):
if data:
return '{{ "device" : "{}", "action":"{}", "data" : "{}" }}'.format(
device_id, action, data)
else:
return '{{ "device" : "{}", "action":"{}" }}'.format(device_id, action)
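# e.g. make_message('device-1', 'event', 'temperature=72.5') produces
# '{ "device" : "device-1", "action":"event", "data" : "temperature=72.5" }'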
def run_action(action):
message = make_message(device_id, action)
if not message:
return
print('Sending data: {}'.format(message))
event_response = send_command(client_sock, message.encode())
print('Response {}'.format(event_response.decode("utf-8")))
def main():
try:
random.seed()
run_action('detach')
run_action('attach')
h = float(base_humidity)
t = float(base_temp)
o = float(odour_level)
while True:
h += random.uniform(-1, 1)
t += random.uniform(-1, 1)
o += random.uniform(-5,5)
temperature_f = t * 9.0/5 + 32
humidity = "{:.3f}".format(h)
temperature = "{:.3f}".format(temperature_f)
message = make_message(
device_id, 'event', 'temperature={}, humidity={}, odour={}, location= {}'.format(temperature, humidity, o, device_location)
).encode()
send_command(client_sock, message, False)
time.sleep(2)
finally:
print('Closing socket')
client_sock.close()
if __name__ == "__main__":
main()
|
the-stack_106_17356
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
from functools import partial
import contextlib
import numpy as np
import paddle
import paddle.fluid.core as core
import paddle.fluid as fluid
import paddle.fluid.framework as framework
import paddle.fluid.optimizer as optimizer
import paddle.fluid.regularizer as regularizer
from paddle.fluid.backward import append_backward
class TestL2DecayRegularizer(unittest.TestCase):
def test_l2decay_regularizer(self):
program = framework.Program()
block = program.global_block()
mul_x = block.create_parameter(
dtype="float32",
shape=[5, 10],
lod_level=0,
name="mul.x",
regularizer=regularizer.L2DecayRegularizer(0.5))
self.assertTrue(mul_x.regularizer is not None)
self.assertTrue(
isinstance(mul_x.regularizer, regularizer.L2DecayRegularizer))
mul_y = block.create_var(
dtype="float32", shape=[10, 8], lod_level=0, name="mul.y")
mul_out = block.create_var(
dtype="float32", shape=[5, 8], lod_level=0, name="mul.out")
block.append_op(
type="mul",
inputs={"X": mul_x,
"Y": mul_y},
outputs={"Out": mul_out},
attrs={"x_num_col_dims": 1})
mean_out = block.create_var(
dtype="float32", shape=[1], lod_level=0, name="mean.out")
block.append_op(
type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out})
params_grads = append_backward(mean_out)
self.assertEqual(len(params_grads), 1)
count_ops = len(block.ops)
params_grads = optimizer.append_regularization_ops(params_grads)
self.assertEqual(len(params_grads), 1)
self.assertEqual(len(block.ops), count_ops + 2)
self.assertEqual(block.ops[-1].type, 'sum')
self.assertEqual(block.ops[-2].type, 'scale')
class TestL1DecayRegularizer(unittest.TestCase):
    def test_l1decay_regularizer(self):
program = framework.Program()
block = program.global_block()
mul_x = block.create_parameter(
dtype="float32",
shape=[5, 10],
lod_level=0,
name="mul.x",
regularizer=regularizer.L1DecayRegularizer(0.5))
self.assertTrue(mul_x.regularizer is not None)
self.assertTrue(
isinstance(mul_x.regularizer, regularizer.L1DecayRegularizer))
mul_y = block.create_var(
dtype="float32", shape=[10, 8], lod_level=0, name="mul.y")
mul_out = block.create_var(
dtype="float32", shape=[5, 8], lod_level=0, name="mul.out")
block.append_op(
type="mul",
inputs={"X": mul_x,
"Y": mul_y},
outputs={"Out": mul_out},
attrs={"x_num_col_dims": 1})
mean_out = block.create_var(
dtype="float32", shape=[1], lod_level=0, name="mean.out")
block.append_op(
type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out})
params_grads = append_backward(mean_out)
self.assertEqual(len(params_grads), 1)
count_ops = len(block.ops)
params_grads = optimizer.append_regularization_ops(params_grads)
self.assertEqual(len(params_grads), 1)
self.assertEqual(len(block.ops), count_ops + 3)
self.assertEqual(block.ops[-1].type, 'sum')
self.assertEqual(block.ops[-2].type, 'scale')
self.assertEqual(block.ops[-3].type, 'sign')
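# The two tests above check the ops appended by append_regularization_ops:
# L2 decay appends a scale op followed by a sum op (+2 ops), while L1 decay
# additionally appends a sign op before the scale (+3 ops), which is exactly
# what the assertions on block.ops verify.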
def bow_net(data,
label,
dict_dim,
is_sparse=False,
emb_dim=8,
hid_dim=8,
hid_dim2=6,
class_dim=2):
"""
BOW net
This model is from https://github.com/PaddlePaddle/models:
fluid/PaddleNLP/text_classification/nets.py
"""
emb = fluid.layers.embedding(
input=data, is_sparse=is_sparse, size=[dict_dim, emb_dim])
bow = fluid.layers.sequence_pool(input=emb, pool_type='sum')
bow_tanh = fluid.layers.tanh(bow)
fc_1 = fluid.layers.fc(input=bow_tanh, size=hid_dim, act="tanh")
fc_2 = fluid.layers.fc(input=fc_1, size=hid_dim2, act="tanh")
prediction = fluid.layers.fc(input=[fc_2], size=class_dim, act="softmax")
cost = fluid.layers.cross_entropy(input=prediction, label=label)
avg_cost = fluid.layers.mean(x=cost)
return avg_cost
class TestRegularizer(unittest.TestCase):
def setUp(self):
self.word_dict = paddle.dataset.imdb.word_dict()
reader = paddle.batch(
paddle.dataset.imdb.train(self.word_dict), batch_size=1)()
self.train_data = [next(reader) for _ in range(1)]
def get_places(self):
places = [core.CPUPlace()]
if core.is_compiled_with_cuda():
places.append(core.CUDAPlace(0))
return places
@contextlib.contextmanager
def scope_prog_guard(self, main_prog, startup_prog):
scope = fluid.core.Scope()
with fluid.unique_name.guard():
with fluid.scope_guard(scope):
with fluid.program_guard(main_prog, startup_prog):
yield
def run_program(self, place, feed_list):
exe = fluid.Executor(place)
feeder = fluid.DataFeeder(feed_list=feed_list, place=place)
exe.run(fluid.default_startup_program())
main_prog = fluid.default_main_program()
param_list = [var.name for var in main_prog.block(0).all_parameters()]
param_sum = []
for data in self.train_data:
out = exe.run(main_prog,
feed=feeder.feed(data),
fetch_list=param_list)
p_sum = 0
for v in out:
p_sum += np.sum(np.abs(v))
param_sum.append(p_sum)
return param_sum
def check_l2decay_regularizer(self, place, model):
paddle.seed(1)
paddle.framework.random._manual_program_seed(1)
main_prog = fluid.framework.Program()
startup_prog = fluid.framework.Program()
with self.scope_prog_guard(
main_prog=main_prog, startup_prog=startup_prog):
data = fluid.layers.data(
name="words", shape=[1], dtype="int64", lod_level=1)
label = fluid.layers.data(name="label", shape=[1], dtype="int64")
avg_cost = model(data, label, len(self.word_dict))
optimizer = fluid.optimizer.Adagrad(
learning_rate=0.1,
regularization=fluid.regularizer.L2Decay(1.0))
optimizer.minimize(avg_cost)
param_sum = self.run_program(place, [data, label])
return param_sum
def check_l2decay(self, place, model):
paddle.seed(1)
paddle.framework.random._manual_program_seed(1)
main_prog = fluid.framework.Program()
startup_prog = fluid.framework.Program()
with self.scope_prog_guard(
main_prog=main_prog, startup_prog=startup_prog):
data = fluid.layers.data(
name="words", shape=[1], dtype="int64", lod_level=1)
label = fluid.layers.data(name="label", shape=[1], dtype="int64")
avg_cost_l2 = model(data, label, len(self.word_dict))
param_list = fluid.default_main_program().block(0).all_parameters()
para_sum = []
for para in param_list:
para_mul = fluid.layers.square(x=para)
para_sum.append(fluid.layers.reduce_sum(input=para_mul))
avg_cost_l2 += fluid.layers.sums(para_sum) * .5
optimizer = fluid.optimizer.Adagrad(learning_rate=0.1)
optimizer.minimize(avg_cost_l2)
param_sum = self.run_program(place, [data, label])
return param_sum
def test_l2(self):
for place in self.get_places():
dense_sparse_p_sum = []
for sparse in [True, False]:
model = partial(bow_net, is_sparse=sparse)
framework_l2 = self.check_l2decay_regularizer(place, model)
l2 = self.check_l2decay(place, model)
assert len(l2) == len(framework_l2)
for i in range(len(l2)):
assert np.isclose(a=framework_l2[i], b=l2[i], rtol=5e-5)
dense_sparse_p_sum.append(framework_l2)
assert len(dense_sparse_p_sum[0]) == len(dense_sparse_p_sum[1])
for i in range(len(dense_sparse_p_sum[0])):
assert np.isclose(
a=dense_sparse_p_sum[0][i],
b=dense_sparse_p_sum[1][i],
rtol=5e-5)
def test_repeated_regularization(self):
l1 = fluid.regularizer.L1Decay(regularization_coeff=0.1)
l2 = fluid.regularizer.L2Decay(regularization_coeff=0.01)
fc_param_attr = fluid.ParamAttr(regularizer=l1)
with fluid.program_guard(fluid.Program(), fluid.Program()):
x = fluid.layers.uniform_random([2, 2, 3])
out = fluid.layers.fc(x, 5, param_attr=fc_param_attr)
loss = fluid.layers.reduce_sum(out)
sgd = fluid.optimizer.SGD(learning_rate=0.1, regularization=l2)
sgd.minimize(loss)
with fluid.dygraph.guard():
input = fluid.dygraph.to_variable(
np.random.randn(3, 2).astype('float32'))
paddle.seed(1)
paddle.framework.random._manual_program_seed(1)
linear1 = fluid.dygraph.Linear(
2, 2, param_attr=fc_param_attr, bias_attr=fc_param_attr)
linear2 = fluid.dygraph.Linear(
2, 2, param_attr=fc_param_attr, bias_attr=fc_param_attr)
loss1 = linear1(input)
loss1.backward()
# set l2 regularizer in optimizer, but l1 in fluid.ParamAttr
fluid.optimizer.SGD(parameter_list=linear1.parameters(),
learning_rate=1e-2,
regularization=l2).minimize(loss1)
# only set l1 in fluid.ParamAttr
loss2 = linear2(input)
loss2.backward()
fluid.optimizer.SGD(parameter_list=linear2.parameters(),
learning_rate=1e-2).minimize(loss2)
# they should both be applied by l1, and keep the same
self.assertTrue(
np.allclose(linear1.weight.numpy(), linear2.weight.numpy()),
"weight should use the regularization in fluid.ParamAttr!")
self.assertTrue(
np.allclose(linear1.bias.numpy(), linear2.bias.numpy()),
"bias should use the regularization in fluid.ParamAttr!")
if __name__ == '__main__':
unittest.main()
|
the-stack_106_17362
|
"""Jump to where a function or class was defined on Ctrl+click or Ctrl+Enter.
For this plugin to work, you also need the langserver plugin.
"""
from __future__ import annotations
import dataclasses
import logging
import tkinter
from functools import partial
from pathlib import Path
from typing import List
from porcupine import get_tab_manager, tabs, utils
log = logging.getLogger(__name__)
@dataclasses.dataclass
class Request(utils.EventDataclass):
file_path: str # not pathlib.Path because json
location: str
@dataclasses.dataclass
class LocationRange:
file_path: str # not pathlib.Path because json
start: str
end: str
@dataclasses.dataclass
class Response(utils.EventDataclass):
location_ranges: List[LocationRange]
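# Note (an assumption based on how receive_jump parses these fields below):
# "start" and "end" are Tk text indices of the form "line.column", e.g. "12.0".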
def show_location_range(loc_range: LocationRange) -> None:
log.info(f"showing definition to user: {loc_range}")
path = Path(loc_range.file_path)
matching_tabs = [
tab
for tab in get_tab_manager().tabs()
if isinstance(tab, tabs.FileTab) and tab.path == path
]
if matching_tabs:
[tab] = matching_tabs
get_tab_manager().select(tab)
else:
log.info(f"{path} not opened yet, opening now")
tab = tabs.FileTab.open_file(get_tab_manager(), path)
get_tab_manager().add_tab(tab, select=True)
tab.textwidget.tag_remove("sel", "1.0", "end")
tab.textwidget.tag_add("sel", loc_range.start, loc_range.end)
tab.textwidget.mark_set("insert", loc_range.start)
tab.textwidget.see("insert")
# Find where cursor of text widget is, not necessarily anywhere near mouse
def find_cursor_xy(textwidget: tkinter.Text) -> tuple[int, int]:
bbox = textwidget.bbox("insert")
assert bbox is not None
left, top, width, height = bbox
# Make coords relative to top left corner of screen, not text widget
left += textwidget.winfo_rootx()
top += textwidget.winfo_rooty()
return (left, top + height)
def receive_jump(event: utils.EventWithData) -> None:
tab = event.widget
assert isinstance(tab, tabs.FileTab), repr(tab)
response = event.data_class(Response)
if not response.location_ranges:
log.warning("no possible definitions found")
elif len(response.location_ranges) == 1:
show_location_range(response.location_ranges[0])
else:
menu = tkinter.Menu(tearoff=False)
# Consistent order, first location is first within same file
sorted_ranges = sorted(
response.location_ranges,
key=(
lambda r: (
Path(r.file_path), # Case insensitive comparing on windows
int(r.start.split(".")[0]), # Line number
int(r.start.split(".")[1]), # Column number in case multiple on same line
)
),
)
for loc_range in sorted_ranges:
menu.add_command(
# TODO: better menu item text?
label=f"Line {loc_range.start.split('.')[0]} in {loc_range.file_path}",
command=partial(show_location_range, loc_range),
)
menu.tk_popup(*find_cursor_xy(tab.textwidget))
menu.bind("<Unmap>", (lambda event: menu.after_idle(menu.destroy)), add=True)
def on_new_filetab(tab: tabs.FileTab) -> None:
utils.bind_with_data(tab, "<<JumpToDefinitionResponse>>", receive_jump, add=True)
def setup() -> None:
get_tab_manager().add_filetab_callback(on_new_filetab)
|
the-stack_106_17363
|
"""Current-flow closeness centrality measures.
"""
# Copyright (C) 2010-2013 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import networkx_mod as nx
from networkx_mod.algorithms.centrality.flow_matrix import *
__author__ = """Aric Hagberg <[email protected]>"""
__all__ = ['current_flow_closeness_centrality', 'information_centrality']
def current_flow_closeness_centrality(G, weight='weight',
dtype=float, solver='lu'):
"""Compute current-flow closeness centrality for nodes.
    Current-flow closeness centrality is a variant of closeness
centrality based on effective resistance between nodes in
a network. This metric is also known as information centrality.
Parameters
----------
G : graph
A NetworkX graph
dtype: data type (float)
Default data type for internal matrices.
Set to np.float32 for lower memory consumption.
solver: string (default='lu')
Type of linear solver to use for computing the flow matrix.
Options are "full" (uses most memory), "lu" (recommended), and
"cg" (uses least memory).
Returns
-------
nodes : dictionary
Dictionary of nodes with current flow closeness centrality as the value.
See Also
--------
closeness_centrality
Notes
-----
The algorithm is from Brandes [1]_.
See also [2]_ for the original definition of information centrality.
References
----------
.. [1] Ulrik Brandes and Daniel Fleischer,
Centrality Measures Based on Current Flow.
Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).
LNCS 3404, pp. 533-544. Springer-Verlag, 2005.
http://www.inf.uni-konstanz.de/algo/publications/bf-cmbcf-05.pdf
.. [2] Karen Stephenson and Marvin Zelen:
Rethinking centrality: Methods and examples.
Social Networks 11(1):1-37, 1989.
http://dx.doi.org/10.1016/0378-8733(89)90016-6
"""
from networkx_mod.utils import reverse_cuthill_mckee_ordering
import numpy as np
import scipy
if G.is_directed():
raise nx.NetworkXError(
"current_flow_closeness_centrality() not defined for digraphs.")
if not nx.is_connected(G):
raise nx.NetworkXError("Graph not connected.")
solvername = {"full": FullInverseLaplacian,
"lu": SuperLUInverseLaplacian,
"cg": CGInverseLaplacian}
n = G.number_of_nodes()
ordering = list(reverse_cuthill_mckee_ordering(G))
# make a copy with integer labels according to rcm ordering
# this could be done without a copy if we really wanted to
H = nx.relabel_nodes(G, dict(zip(ordering, range(n))))
betweenness = dict.fromkeys(H, 0.0) # b[v]=0 for v in H
n = H.number_of_nodes()
L = laplacian_sparse_matrix(H, nodelist=range(n), weight=weight,
dtype=dtype, format='csc')
C2 = solvername[solver](L, width=1, dtype=dtype) # initialize solver
for v in H:
col = C2.get_row(v)
for w in H:
betweenness[v] += col[v]-2*col[w]
betweenness[w] += col[v]
for v in H:
betweenness[v] = 1.0 / (betweenness[v])
return dict((ordering[k], float(v)) for k, v in betweenness.items())
information_centrality = current_flow_closeness_centrality
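# Minimal usage sketch (assumes networkx_mod mirrors the standard networkx API):
#   G = nx.Graph([(0, 1), (1, 2), (2, 0), (2, 3)])
#   centrality = current_flow_closeness_centrality(G)
#   # centrality maps each node to its current-flow closeness value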
# fixture for nose tests
def setup_module(module):
from nose import SkipTest
try:
import numpy
    except ImportError:
raise SkipTest("NumPy not available")
|
the-stack_106_17365
|
from __future__ import division
import chainer
import chainer.functions as F
import numpy as np
from chainercv.experimental.links.model.fcis.utils.mask_voting \
import mask_voting
from chainercv.transforms.image.resize import resize
class FCIS(chainer.Chain):
"""Base class for FCIS.
This is a base class for FCIS links supporting instance segmentation
API [#]_. The following three stages constitute FCIS.
1. **Feature extraction**: Images are taken and their \
feature maps are calculated.
2. **Region Proposal Networks**: Given the feature maps calculated in \
the previous stage, produce set of RoIs around objects.
3. **Localization, Segmentation and Classification Heads**: Using feature \
maps that belong to the proposed RoIs, segment regions of the \
objects, classify the categories of the objects in the RoIs and \
improve localizations.
Each stage is carried out by one of the callable
:class:`chainer.Chain` objects :obj:`feature`, :obj:`rpn` and :obj:`head`.
There are two functions :meth:`predict` and :meth:`__call__` to conduct
instance segmentation.
:meth:`predict` takes images and returns masks, object labels
and their scores.
:meth:`__call__` is provided for a scnerario when intermediate outputs
are needed, for instance, for training and debugging.
Links that support instance segmentation API have method :meth:`predict`
with the same interface. Please refer to :meth:`predict` for further
details.
.. [#] Yi Li, Haozhi Qi, Jifeng Dai, Xiangyang Ji, Yichen Wei. \
Fully Convolutional Instance-aware Semantic Segmentation. CVPR 2017.
Args:
extractor (callable Chain): A callable that takes a BCHW image
array and returns feature maps.
rpn (callable Chain): A callable that has the same interface as
:class:`~chainercv.links.model.faster_rcnn.RegionProposalNetwork`.
Please refer to the documentation found there.
head (callable Chain): A callable that takes a BCHW array,
RoIs and batch indices for RoIs.
This returns class-agnostic segmentation scores, class-agnostic
localization parameters, class scores, improved RoIs and batch
indices for RoIs.
mean (numpy.ndarray): A value to be subtracted from an image
in :meth:`prepare`.
min_size (int): A preprocessing parameter for :meth:`prepare`. Please
refer to a docstring found for :meth:`prepare`.
max_size (int): A preprocessing parameter for :meth:`prepare`.
loc_normalize_mean (tuple of four floats): Mean values of
localization estimates.
        loc_normalize_std (tuple of four floats): Standard deviation
of localization estimates.
"""
def __init__(
self, extractor, rpn, head,
mean, min_size, max_size,
loc_normalize_mean, loc_normalize_std,
):
super(FCIS, self).__init__()
with self.init_scope():
self.extractor = extractor
self.rpn = rpn
self.head = head
self.mean = mean
self.min_size = min_size
self.max_size = max_size
self.loc_normalize_mean = loc_normalize_mean
self.loc_normalize_std = loc_normalize_std
self.use_preset('visualize')
@property
def n_class(self):
# Total number of classes including the background.
return self.head.n_class
def __call__(self, x, scale=1.):
"""Forward FCIS.
        Scaling parameter :obj:`scale` is used by RPN to determine the
threshold to select small objects, which are going to be
rejected irrespective of their confidence scores.
Here are notations used.
* :math:`N` is the number of batch size
* :math:`R'` is the total number of RoIs produced across batches. \
Given :math:`R_i` proposed RoIs from the :math:`i` th image, \
:math:`R' = \\sum _{i=1} ^ N R_i`.
* :math:`L` is the number of classes excluding the background.
* :math:`RH` is the height of pooled image by Position Sensitive \
ROI pooling.
        * :math:`RW` is the width of the pooled image by Position Sensitive \
ROI pooling.
Classes are ordered by the background, the first class, ..., and
the :math:`L` th class.
Args:
x (~chainer.Variable): 4D image variable.
scale (float): Amount of scaling applied to the raw image
during preprocessing.
Returns:
Variable, Variable, Variable, array, array:
Returns tuple of five values listed below.
* **roi_ag_seg_scores**: Class-agnostic clipped mask scores for \
the proposed ROIs. Its shape is :math:`(R', 2, RH, RW)`
* **ag_locs**: Class-agnostic offsets and scalings for \
the proposed RoIs. Its shape is :math:`(R', 2, 4)`.
* **roi_cls_scores**: Class predictions for the proposed RoIs. \
Its shape is :math:`(R', L + 1)`.
* **rois**: RoIs proposed by RPN. Its shape is \
:math:`(R', 4)`.
* **roi_indices**: Batch indices of RoIs. Its shape is \
:math:`(R',)`.
"""
img_size = x.shape[2:]
# Feature Extractor
rpn_features, roi_features = self.extractor(x)
rpn_locs, rpn_scores, rois, roi_indices, anchor = self.rpn(
rpn_features, img_size, scale)
roi_ag_seg_scores, roi_ag_locs, roi_cls_scores, rois, roi_indices = \
self.head(roi_features, rois, roi_indices, img_size)
return roi_ag_seg_scores, roi_ag_locs, roi_cls_scores, \
rois, roi_indices
def prepare(self, img):
"""Preprocess an image for feature extraction.
The length of the shorter edge is scaled to :obj:`self.min_size`.
After the scaling, if the length of the longer edge is longer than
:obj:`self.max_size`, the image is scaled to fit the longer edge
to :obj:`self.max_size`.
After resizing the image, the image is subtracted by a mean image value
:obj:`self.mean`.
Args:
img (~numpy.ndarray): An image. This is in CHW and RGB format.
The range of its value is :math:`[0, 255]`.
Returns:
~numpy.ndarray:
A preprocessed image.
"""
_, H, W = img.shape
scale = self.min_size / min(H, W)
if scale * max(H, W) > self.max_size:
scale = self.max_size / max(H, W)
img = resize(img, (int(H * scale), int(W * scale)))
img = (img - self.mean).astype(np.float32, copy=False)
return img
def use_preset(self, preset):
"""Use the given preset during prediction.
        This method changes values of :obj:`self.nms_thresh`,
        :obj:`self.score_thresh`, :obj:`self.mask_merge_thresh`,
        :obj:`self.binary_thresh`, :obj:`self.limit` and
        :obj:`self.min_drop_size`. These values are a threshold value
        used for non maximum suppression, a threshold value
        to discard low confidence proposals in :meth:`predict`,
        a threshold value to merge masks in :meth:`predict`,
        a threshold value to binarize segmentation scores in :meth:`predict`,
        a limit on the number of predicted masks per image and
        a threshold value to discard small bounding boxes, respectively.
If the attributes need to be changed to something
other than the values provided in the presets, please modify
them by directly accessing the public attributes.
Args:
            preset ({'visualize', 'evaluate', 'coco_evaluate'}): A string to determine the
preset to use.
"""
if preset == 'visualize':
self.nms_thresh = 0.3
self.score_thresh = 0.7
self.mask_merge_thresh = 0.5
self.binary_thresh = 0.4
self.limit = 100
self.min_drop_size = 16
elif preset == 'evaluate':
self.nms_thresh = 0.3
self.score_thresh = 1e-3
self.mask_merge_thresh = 0.5
self.binary_thresh = 0.4
self.limit = 100
self.min_drop_size = 16
elif preset == 'coco_evaluate':
self.nms_thresh = 0.3
self.score_thresh = 1e-3
self.mask_merge_thresh = 0.5
self.binary_thresh = 0.4
self.limit = 100
self.min_drop_size = 2
else:
            raise ValueError('preset must be visualize, evaluate or coco_evaluate')
def predict(self, imgs):
"""Segment object instances from images.
This method predicts instance-aware object regions for each image.
Args:
imgs (iterable of numpy.ndarray): Arrays holding images of shape
:math:`(B, C, H, W)`. All images are in CHW and RGB format
and the range of their value is :math:`[0, 255]`.
Returns:
tuple of lists:
This method returns a tuple of three lists,
:obj:`(masks, labels, scores)`.
* **masks**: A list of boolean arrays of shape :math:`(R, H, W)`, \
                where :math:`R` is the number of masks in an image. \
                Each value indicates whether the pixel is inside the object or not.
* **labels** : A list of integer arrays of shape :math:`(R,)`. \
Each value indicates the class of the masks. \
Values are in range :math:`[0, L - 1]`, where :math:`L` is the \
number of the foreground classes.
* **scores** : A list of float arrays of shape :math:`(R,)`. \
Each value indicates how confident the prediction is.
"""
prepared_imgs = []
sizes = []
for img in imgs:
size = img.shape[1:]
img = self.prepare(img.astype(np.float32))
prepared_imgs.append(img)
sizes.append(size)
masks = []
labels = []
scores = []
for img, size in zip(prepared_imgs, sizes):
with chainer.using_config('train', False), \
chainer.function.no_backprop_mode():
# inference
img_var = chainer.Variable(self.xp.array(img[None]))
scale = img_var.shape[3] / size[1]
roi_ag_seg_scores, _, roi_cls_scores, bboxes, _ = \
self.__call__(img_var, scale)
# We are assuming that batch size is 1.
roi_ag_seg_score = chainer.cuda.to_cpu(roi_ag_seg_scores.array)
roi_cls_score = chainer.cuda.to_cpu(roi_cls_scores.array)
bbox = chainer.cuda.to_cpu(bboxes)
# filter bounding boxes with min_size
height = bbox[:, 2] - bbox[:, 0]
width = bbox[:, 3] - bbox[:, 1]
keep_indices = np.where(
(height >= self.min_drop_size) &
(width >= self.min_drop_size))[0]
roi_ag_seg_score = roi_ag_seg_score[keep_indices, :, :]
roi_cls_score = roi_cls_score[keep_indices]
bbox = bbox[keep_indices, :]
# scale bbox
bbox = bbox / scale
# shape: (n_rois, 4)
bbox[:, 0::2] = self.xp.clip(bbox[:, 0::2], 0, size[0])
bbox[:, 1::2] = self.xp.clip(bbox[:, 1::2], 0, size[1])
# shape: (n_roi, roi_size, roi_size)
roi_seg_prob = F.softmax(roi_ag_seg_score).array[:, 1]
roi_cls_prob = F.softmax(roi_cls_score).array
roi_seg_prob, bbox, label, roi_cls_prob = mask_voting(
roi_seg_prob, bbox, roi_cls_prob, size,
self.score_thresh, self.nms_thresh,
self.mask_merge_thresh, self.binary_thresh,
limit=self.limit, bg_label=0)
mask = np.zeros(
                (len(roi_seg_prob), size[0], size[1]), dtype=bool)
for i, (roi_seg_pb, bb) in enumerate(zip(roi_seg_prob, bbox)):
bb = np.round(bb).astype(np.int32)
y_min, x_min, y_max, x_max = bb
roi_msk_pb = resize(
roi_seg_pb.astype(np.float32)[None],
(y_max - y_min, x_max - x_min))
roi_msk = (roi_msk_pb > self.binary_thresh)[0]
mask[i, y_min:y_max, x_min:x_max] = roi_msk
masks.append(mask)
labels.append(label)
scores.append(roi_cls_prob)
return masks, labels, scores
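# Usage sketch (illustrative; FCIS is abstract, so a concrete subclass providing
# extractor/rpn/head — e.g. one of the chainercv experimental FCIS links — is assumed):
#   model = SomeFCISSubclass(...)
#   masks, labels, scores = model.predict([img])  # img: CHW, RGB, float values in [0, 255]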
|
the-stack_106_17367
|
import os
import numpy as np
import random as rand
import pylab as py
import matplotlib.pyplot as plt
import scipy.interpolate
import gudhi as gd
import ot
from matplotlib import cm
from lib import helper as hp
from lib.tda import sim_homology
from scipy.interpolate import Rbf, interp1d, interp2d
from typing import List, Set, Dict, Tuple, Optional
from multiprocessing import Process
from scipy.spatial.distance import *
from scipy.stats import *
def top_nat_neighbors(
path: str = "",
array: np.ndarray = np.empty(1),
columns: int = 88
) -> np.ndarray:
"""
Nearest neighbor interpolation.
Returns the original data with augmented nearest neighbors.
    :param path: Path to the desired CSV-file.
    :param array: Alternative in-memory input used when no path is given.
    :param columns: Columns to be processed, beginning from the first.
    :return: Array of the original points augmented with nearest-neighbor samples.
"""
try:
if len(path) > 0:
data = hp.read_data(path, columns)
else:
data = array
    except ValueError:
        print("Oops! That was no valid number. Try again ...")
        raise
x, y = np.empty(0), np.empty(0)
for i in data:
if np.isfinite(i[0]) and np.isfinite(i[1]):
x = np.append(x, i[0])
y = np.append(y, i[1])
xx = np.linspace(np.min(x), np.max(x), len(x))
f = interp1d(x, y, kind="nearest")
new_data = []
for i in range(0, len(xx)):
new_data.append([xx[i], f(xx[i])])
new_data.append([x[i], y[i]])
return np.array(new_data)
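# Illustrative call with an in-memory array (no CSV file involved):
#   xy = np.column_stack([np.linspace(0, 1, 20), np.random.rand(20)])
#   augmented = top_nat_neighbors(array=xy)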
def proc_signatures(dir: str, delimiter: str = ",", iterations: int = 5):
"""
Processes the experiment for the signature dataset.
    Insert the directory to the MOBISIG dataset: https://ms.sapientia.ro/~manyi/mobisig.html.
    :param dir: Path to the directory.
    :param delimiter: Delimiter used to save the csv file.
    :param iterations: Number of interpolation iterations applied per file.
"""
subdirectories = os.listdir(dir)
for user_folder in subdirectories:
if "USER" in user_folder:
path = os.path.abspath(dir + "/" + user_folder)
filepaths = os.listdir(path)
for file in filepaths:
temp_data = top_nat_neighbors(
path=dir + "/" + user_folder + "/" + file, columns=2
)
for j in range(0, iterations):
temp_data = top_nat_neighbors(array=temp_data, columns=2)
np.savetxt(
dir
+ "/"
+ "natneighbor"
+ "/"
+ user_folder
+ "/"
+ "it_"
+ str(j)
+ "_"
+ file,
temp_data,
delimiter=delimiter,
)
def create_persistence_distance_file(
orig_path: str,
interpol_path: str,
savefile: bool = True,
distance_type: ["wasserstein", "bottleneck"] = "wasserstein",
filtration: ["alpha", "rips", "witness"] = "rips",
amount_of_files: int = 100
) -> np.ndarray:
"""
    Creates from two directories with correspondingly named CSV-files a persistence-distance comparison (bottleneck or Wasserstein).
This code relies on the naming of the directories.
The structure should be: MOBISIG/USERX/file.csv and MOBISIG_natneighbor/USERX/file.csv for a good naming of the .csv rows.
:param orig_path: Path to the original MOBISIG-files.
    :param interpol_path: Path to the interpolated MOBISIG-files.
:param savefile: Whether to save the bottleneck distances into a file or not (npy-format).
:param amount_of_files: Amount of files to be processed.
    :return: None; the distances are appended to a CSV file under results/.
"""
def diff(first, second):
"""
Computes the difference of two list objects.
:param first: First list.
:param second: Second list.
:return: List difference.
"""
second = set(second)
return [item for item in first if item not in second]
original_data, interpolated_data, files_to_ignore = [], [], []
for dirpath, dirnames, filenames in os.walk(orig_path):
for filename in filenames:
files_to_ignore.append(os.path.join(dirpath, filename))
break
for dirpath, dirnames, filenames in os.walk(orig_path):
for filename in filenames:
original_data.append(os.path.join(dirpath, filename))
for dirpath, dirnames, filenames in os.walk(interpol_path):
for filename in filenames:
interpolated_data.append(os.path.join(dirpath, filename))
original_data = diff(original_data, files_to_ignore)
interpolated_data = diff(interpolated_data, files_to_ignore)
for i in original_data:
matching = [s for s in interpolated_data if i[20:] in s]
matching.sort()
for j in matching:
distance = sim_homology.persistence_distance(i, j, filtration=filtration, type=distance_type)
with open("results/" + filtration + "_" + distance_type + ".csv", "a") as fd:
fd.write(
i[20 : len(i) - 4]
+ ","
+ j[32 : len(j) - 4]
+ ","
+ str(distance)
+ "\n"
)
print(
"File with name "
+ j
+ " has been compared to "
+ i
+ ". The " + distance_type + "distance is "
+ str(distance)
+ "."
)
def compute_mean_distance(path1, path2):
def diff(first, second):
"""
Computes the difference of two list objects.
:param first: First list.
:param second: Second list.
:return: List difference.
"""
second = set(second)
return [item for item in first if item not in second]
original_data, interpolated_data, files_to_ignore = [], [], []
for dirpath, dirnames, filenames in os.walk(path1):
for filename in filenames:
files_to_ignore.append(os.path.join(dirpath, filename))
break
for dirpath, dirnames, filenames in os.walk(path1):
for filename in filenames:
original_data.append(os.path.join(dirpath, filename))
for dirpath, dirnames, filenames in os.walk(path2):
for filename in filenames:
interpolated_data.append(os.path.join(dirpath, filename))
original_data = diff(original_data, files_to_ignore)
interpolated_data = diff(interpolated_data, files_to_ignore)
for i in range(0, len(original_data)):
for j in range(0, len(original_data)):
data1 = np.genfromtxt(original_data[i], delimiter=",")
data2 = np.genfromtxt(interpolated_data[j], delimiter=",")
            # Flatten the arrays and drop NaN entries before computing the statistics.
            data1 = data1.flatten()
            data2 = data2.flatten()
            data1 = data1[~np.isnan(data1)]
            data2 = data2[~np.isnan(data2)]
mean1 = np.mean(data1)
mean2 = np.mean(data2)
std1 = np.std(data1)
std2 = np.std(data2)
varia1 = variation(data1)
varia2 = variation(data2)
w1 = wasserstein_distance(data1, data2)
with open("results/measurement_it.csv", "a") as fd:
fd.write(
original_data[i][20 : len(original_data[i]) - 4]
+ ","
+ interpolated_data[j][32 : len(interpolated_data[j]) - 4]
+ ","
+ str(mean1)
+ ","
+ str(mean2)
+ ","
+ str(std1)
+ ","
+ str(std2)
+ ","
+ str(varia1)
+ ","
+ str(varia2)
+ ","
+ str(w1)
+ "\n"
)
def run_in_parallel(*fns):
"""
Runs several functions in parallel.
:param fns: Several functions.
:return: A nice message.
"""
proc = []
for fn in fns:
p = Process(target=fn)
p.start()
proc.append(p)
for p in proc:
p.join()
return print("Processing finished!")
########################################################################################################################
""" RUN THE DISTANCES
run_in_parallel(
create_persistence_distance_file("data/MOBISIG/", "data/MOBISIG_natneighbor/", filtration="rips", distance_type="wasserstein"),
create_persistence_distance_file("data/MOBISIG/", "data/MOBISIG_natneighbor/", filtration="alpha", distance_type="wasserstein"),
create_persistence_distance_file("data/MOBISIG/", "data/MOBISIG_natneighbor/", filtration="witness", distance_type="wasserstein"),
create_persistence_distance_file("data/MOBISIG/", "data/MOBISIG_natneighbor/", filtration="rips", distance_type="bottleneck"),
create_persistence_distance_file("data/MOBISIG/", "data/MOBISIG_natneighbor/", filtration="alpha", distance_type="bottleneck"),
create_persistence_distance_file("data/MOBISIG/", "data/MOBISIG_natneighbor/", filtration="witness", distance_type="bottleneck")
)
"""
########################################################################################################################
|
the-stack_106_17370
|
#!/usr/bin/python3
from pypine import *
import pypinex_pack as pack
tasks = Tasks()
tasks.add("demo", "A demonstration PyPine script for packing with gzip.", [
core.src(".", "hello_world.txt"),
pack.gzip(),
core.cat(),
pack.gunzip(),
core.echo(),
]
)
tasks.run("demo")
|
the-stack_106_17371
|
import math
import numpy as np
from PIL import Image
def get_size_from_input(input_parameters: str, img_width: int, img_height: int):
input_parameters = input_parameters.split()
if len(input_parameters) == 1 and input_parameters[0].endswith('px'):
pixels_count = int(input_parameters[0][0:-2])
if pixels_count < 1:
            raise ValueError('Pixel count is less than 1.')
pixel_side = int(math.sqrt((img_width * img_height) / pixels_count))
return [pixel_side, pixel_side]
if len(input_parameters) > 0:
if input_parameters[0].endswith('%'):
first_side = int(img_width / 100 * int(input_parameters[0][0:-1]))
else:
first_side = int(input_parameters[0])
if first_side < 1:
            raise ValueError('Pixel width is less than 1.')
second_side = first_side
        if len(input_parameters) > 1:
            if input_parameters[1].endswith('%'):
                second_side = int(img_height / 100 * int(input_parameters[1][0:-1]))
            else:
                second_side = int(input_parameters[1])
            if second_side < 1:
                raise ValueError('Pixel height is less than 1.')
return [first_side, second_side]
    raise ValueError('Input error')
def get_filtered_array(array: np.ndarray, arr_height: int, arr_width: int, pixel_height, pixel_width, gray_step):
for i in range(0, arr_height, pixel_height):
for j in range(0, arr_width, pixel_width):
if j + pixel_width > arr_width:
dx = arr_width - j
else:
dx = pixel_width
if i + pixel_height > arr_height:
dy = arr_height - i
else:
dy = pixel_height
array[i:i + dy, j:j + dx] = int(array[i:i + dy, j:j + dx].sum() / 3 // (dy * dx)) // gray_step * gray_step
return array
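# What get_filtered_array does, block by block: each pixel_height x pixel_width
# block is replaced by the block's mean grayscale value (RGB channels summed and
# divided by 3), which is then snapped down to the nearest multiple of gray_step.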
print("Введите путь до файла: ")
img = Image.open(input())
arr = np.array(img)
arr_height = len(arr)
arr_width = len(arr[1])
pixel_height, pixel_width = get_size_from_input(input("Введите ширину и высоту пикселей: "), arr_width, arr_height)
grad_step = 256 // int(input("Введите шаг : "))
extension = input("Введите расширение выходного файла: ")
res = Image.fromarray(get_filtered_array(arr, arr_height, arr_width, pixel_height, pixel_width, grad_step))
res.save(f'res.{extension}')
|
the-stack_106_17374
|
import gws.tools.net
import gws.tools.xml2
from . import error
_ows_error_strings = '<ServiceException', '<ServerException', '<ows:ExceptionReport'
def raw_get(url, **kwargs):
# the reason to use lax is that we want an exception text from the server
# even if the status != 200
kwargs['lax'] = True
try:
resp = gws.tools.net.http_request(url, **kwargs)
except gws.tools.net.Error as e:
raise error.Error('http error') from e
status = resp.status_code
# check for an ows error (no matter what status code says)
# we can get big image responses here, so be careful and don't blindly decode them
if resp.content.startswith(b'<') or 'xml' in resp.content_type:
text = str(resp.content[:1024], encoding='utf8', errors='ignore').lower()
for e in _ows_error_strings:
if e.lower() in text:
raise error.Error(resp.text[:1024])
if status != 200:
raise error.Error(f'HTTP error: {resp.status_code!r}')
return resp
def get(url, service, request, **kwargs):
"""Get a raw service response"""
params = kwargs.get('params') or {}
params['SERVICE'] = service.upper()
params['REQUEST'] = request
# some guys accept only uppercase params
params = {k.upper(): v for k, v in params.items()}
kwargs['params'] = params
return raw_get(url, **kwargs)
def get_text(url, service, request, **kwargs):
resp = get(url, service, request, **kwargs)
return resp.text
|
the-stack_106_17375
|
import tkinter
import serial
from threading import Thread, Condition
import sys
import glob
class SliderGUIWindow:
def __init__(self, serial_port):
self.root = tkinter.Tk()
self.root.protocol("WM_DELETE_WINDOW", self.closeRequested)
self.slider_value = tkinter.DoubleVar()
self.scale = tkinter.Scale(self.root, to=1.0, variable=self.slider_value, command=self.valueUpdated, resolution=0)
self.scale.pack()
self.label = tkinter.Label(self.root)
self.label.pack()
self.current_motor_speed = 0
self.serial_thread = Thread(target=self.serialThread)
self.serial_thread.daemon = True
self.serial_port = serial_port
self.running = True
self.change_condition = Condition()
def closeRequested(self):
self.change_condition.acquire()
self.running = False
self.change_condition.notify_all()
self.change_condition.release()
self.serial_thread.join()
self.root.destroy()
def start(self):
self.serial_thread.start()
self.root.mainloop()
def valueUpdated(self, event):
self.change_condition.acquire()
self.current_motor_speed = self.slider_value.get()
self.change_condition.notify_all()
self.change_condition.release()
def serialThread(self):
ser = serial.Serial(self.serial_port, 9600, timeout=5)
self.change_condition.acquire()
while(self.running):
write_byte = bytearray([int(self.current_motor_speed * 255)])
ser.write(write_byte)
self.change_condition.wait()
self.change_condition.release()
ser.write(bytearray([0]))
ser.close()
print("I was able to close!")
def listSerialPorts():
if sys.platform.startswith('win'):
ports = ['COM%s' % (i + 1) for i in range(256)]
elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
# this excludes your current terminal "/dev/tty"
ports = glob.glob('/dev/tty[A-Za-z]*')
elif sys.platform.startswith('darwin'):
ports = glob.glob('/dev/tty.*')
else:
ports = []
result = []
for port in ports:
try:
s = serial.Serial(port)
s.close()
result.append(port)
except (OSError, serial.SerialException):
pass
return result
def main():
if(len(sys.argv) < 2):
print("Usage: python3 motor_controller.py <serial_port>")
print("Available ports:")
print(', '.join(listSerialPorts()))
return
serial_port = sys.argv[1]
gui = SliderGUIWindow(serial_port)
gui.start()
if __name__ == "__main__":
main()
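# Protocol sketch (derived from serialThread above): each time the slider moves,
# its value in [0.0, 1.0] is scaled to a single byte 0-255 and written to the
# serial port at 9600 baud; a final 0 byte is sent on shutdown to stop the motor.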
|
the-stack_106_17382
|
import asyncio
import enum
import logging
import os
from pathlib import Path
from typing import Optional, Union
from async_generator import asynccontextmanager
from meltano.core.logging.utils import SubprocessOutputWriter
from .error import Error
from .plugin import PluginRef
from .plugin.config_service import PluginConfigService
from .plugin.project_plugin import ProjectPlugin
from .plugin.settings_service import PluginSettingsService
from .project import Project
from .project_plugins_service import ProjectPluginsService
from .venv_service import VenvService, VirtualEnv
def invoker_factory(project, plugin: ProjectPlugin, *args, **kwargs):
cls = PluginInvoker
if hasattr(plugin, "invoker_class"):
cls = plugin.invoker_class
invoker = cls(project, plugin, *args, **kwargs)
return invoker
class InvokerError(Error):
pass
class ExecutableNotFoundError(InvokerError):
"""Occurs when the executable could not be found"""
def __init__(self, plugin: PluginRef, executable):
super().__init__(
f"Executable '{executable}' could not be found. "
f"{plugin.type.descriptor.capitalize()} '{plugin.name}' may not have been installed yet using `meltano install {plugin.type.singular} {plugin.name}`, or the executable name may be incorrect."
)
class InvokerNotPreparedError(InvokerError):
"""Occurs when `invoke` is called before `prepare`"""
pass
class UnknownCommandError(InvokerError):
"""Occurs when `invoke` is called in command mode with an undefined command."""
def __init__(self, plugin: PluginRef, command):
"""Initialize UnknownCommandError."""
self.plugin = plugin
self.command = command
def __str__(self):
"""Return error message."""
if self.plugin.supported_commands:
supported_commands = ", ".join(self.plugin.supported_commands)
desc = f"supports the following commands: {supported_commands}"
else:
desc = "does not define any commands."
return " ".join(
[
f"Command '{self.command}' could not be found.",
f"{self.plugin.type.descriptor.capitalize()} '{self.plugin.name}'",
desc,
]
)
class PluginInvoker:
"""This class handles the invocation of a `ProjectPlugin` instance."""
class StdioSource(str, enum.Enum): # noqa: WPS431
"""Describes the available unix style std io sources."""
STDIN = "stdin"
STDOUT = "stdout"
STDERR = "stderr"
def __init__(
self,
project: Project,
plugin: ProjectPlugin,
context: Optional[object] = None,
output_handlers: Optional[dict] = None,
run_dir=None,
config_dir=None,
venv_service: VenvService = None,
plugins_service: ProjectPluginsService = None,
plugin_config_service: PluginConfigService = None,
plugin_settings_service: PluginSettingsService = None,
):
self.project = project
self.plugin = plugin
self.context = context
self.output_handlers = output_handlers
self.venv_service: Optional[VenvService] = None
if plugin.pip_url or venv_service:
self.venv_service = venv_service or VenvService(
project,
name=plugin.name,
namespace=plugin.type,
)
self.plugin_config_service = plugin_config_service or PluginConfigService(
plugin,
config_dir or self.project.plugin_dir(plugin),
run_dir or self.project.run_dir(plugin.name),
)
self.plugins_service = plugins_service or ProjectPluginsService(project)
self.settings_service = plugin_settings_service or PluginSettingsService(
project, plugin, plugins_service=self.plugins_service
)
self._prepared = False
self.plugin_config = {}
self.plugin_config_processed = {}
self.plugin_config_extras = {}
self.plugin_config_env = {}
@property
def capabilities(self):
        # we want to make sure the capabilities are immutable from the `PluginInvoker` interface
return frozenset(self.plugin.capabilities)
@property
def files(self):
plugin_files = {**self.plugin.config_files, **self.plugin.output_files}
return {
_key: self.plugin_config_service.run_dir.joinpath(filename)
for _key, filename in plugin_files.items()
}
async def prepare(self, session):
"""Prepare plugin config."""
self.plugin_config = self.settings_service.as_dict(
extras=False, session=session
)
self.plugin_config_processed = self.settings_service.as_dict(
extras=False, process=True, session=session
)
self.plugin_config_extras = self.settings_service.as_dict(
extras=True, session=session
)
self.plugin_config_env = self.settings_service.as_env(session=session)
async with self.plugin.trigger_hooks("configure", self, session):
self.plugin_config_service.configure()
self._prepared = True
async def cleanup(self):
"""Reset the plugin config."""
self.plugin_config = {}
self.plugin_config_processed = {}
self.plugin_config_extras = {}
self.plugin_config_env = {}
async with self.plugin.trigger_hooks("cleanup", self):
self._prepared = False
@asynccontextmanager
async def prepared(self, session):
"""Context manager that prepares plugin config , yielding to the caller, and then resetting the config."""
try:
await self.prepare(session)
yield
finally:
await self.cleanup()
def exec_path(self, executable: Optional[str] = None) -> Union[str, Path]:
"""
Return the absolute path to the executable.
Uses the plugin executable if none is specified.
"""
executable = executable or self.plugin.executable
if not self.venv_service:
if "/" not in executable.replace("\\", "/"):
# Expect executable on path
return executable
# Return executable relative to project directory
return self.project.root.joinpath(executable)
# Return executable within venv
return self.venv_service.exec_path(executable)
def exec_args(self, *args, command=None, env=None):
"""Materialize the arguments to be passed to the executable.
Raises `UnknownCommandError` if requested command is not defined.
"""
env = env or {}
executable = self.exec_path()
if command:
command_config = self.find_command(command)
plugin_args = command_config.expanded_args(command, env)
if command_config.executable:
executable = self.exec_path(command_config.executable)
else:
plugin_args = self.plugin.exec_args(self)
return [str(arg) for arg in (executable, *plugin_args, *args)]
def find_command(self, name):
"""Find a Command by name. Raises `UnknownCommandError` if not defined."""
try:
return self.plugin.all_commands[name]
except KeyError as err:
raise UnknownCommandError(self.plugin, name) from err
def env(self):
env = {
**self.project.dotenv_env,
**self.settings_service.env,
**self.plugin_config_env,
}
# Ensure Meltano venv is not inherited
env.pop("VIRTUAL_ENV", None)
env.pop("PYTHONPATH", None)
if self.venv_service:
# Switch to plugin-specific venv
venv = VirtualEnv(
self.project.venvs_dir(self.plugin.type, self.plugin.name)
)
venv_dir = str(venv.bin_dir)
env["VIRTUAL_ENV"] = str(venv.root)
env["PATH"] = os.pathsep.join([venv_dir, env["PATH"]])
return env
def Popen_options(self):
return {}
@asynccontextmanager
async def _invoke(
self,
*args,
require_preparation=True,
env=None,
command=None,
**kwargs,
):
env = env or {}
if require_preparation and not self._prepared:
raise InvokerNotPreparedError()
async with self.plugin.trigger_hooks("invoke", self, args):
popen_options = {**self.Popen_options(), **kwargs}
popen_env = {**self.env(), **env}
popen_args = self.exec_args(*args, command=command, env=popen_env)
logging.debug(f"Invoking: {popen_args}")
logging.debug(f"Env: {popen_env}")
try:
yield (popen_args, popen_options, popen_env)
except FileNotFoundError as err:
raise ExecutableNotFoundError(
self.plugin, self.plugin.executable
) from err
async def invoke_async(self, *args, **kwargs):
async with self._invoke(*args, **kwargs) as (
popen_args,
popen_options,
popen_env,
):
return await asyncio.create_subprocess_exec(
*popen_args,
**popen_options,
env=popen_env,
)
async def dump(self, file_id):
"""Dump a given file id."""
try:
async with self._invoke():
return self.files[file_id].read_text()
except ExecutableNotFoundError as err:
# Unwrap FileNotFoundError
raise err.__cause__
def add_output_handler(self, src: str, handler: SubprocessOutputWriter):
"""Append an output handler for a given stdio stream.
Args:
src: stdio source you'd like to subscribe, likely either 'stdout' or 'stderr'
handler: either a StreamWriter or an object matching the utils.SubprocessOutputWriter proto
"""
if self.output_handlers:
self.output_handlers[src].append(handler)
else:
self.output_handlers = {src: [handler]}
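# Usage sketch (assumes an existing Project, ProjectPlugin and database session;
# the names follow the signatures defined above, not a documented public recipe):
#   invoker = invoker_factory(project, plugin)
#   async with invoker.prepared(session):
#       proc = await invoker.invoke_async("--help")
#       await proc.wait()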
|
the-stack_106_17384
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import collections
import itertools
import warnings
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
from oslo_utils import strutils
import six
from heat.common import exception
from heat.common.i18n import _
from heat.common import param_utils
from heat.engine import constraints as constr
PARAMETER_KEYS = (
TYPE, DEFAULT, NO_ECHO, ALLOWED_VALUES, ALLOWED_PATTERN,
MAX_LENGTH, MIN_LENGTH, MAX_VALUE, MIN_VALUE,
DESCRIPTION, CONSTRAINT_DESCRIPTION, LABEL
) = (
'Type', 'Default', 'NoEcho', 'AllowedValues', 'AllowedPattern',
'MaxLength', 'MinLength', 'MaxValue', 'MinValue',
'Description', 'ConstraintDescription', 'Label'
)
class Schema(constr.Schema):
"""Parameter schema."""
KEYS = (
TYPE, DESCRIPTION, DEFAULT, SCHEMA, CONSTRAINTS, HIDDEN,
LABEL, IMMUTABLE
) = (
'Type', 'Description', 'Default', 'Schema', 'Constraints', 'NoEcho',
'Label', 'Immutable'
)
PARAMETER_KEYS = PARAMETER_KEYS
# For Parameters the type name for Schema.LIST is CommaDelimitedList
# and the type name for Schema.MAP is Json
TYPES = (
STRING, NUMBER, LIST, MAP, BOOLEAN,
) = (
'String', 'Number', 'CommaDelimitedList', 'Json', 'Boolean',
)
def __init__(self, data_type, description=None, default=None, schema=None,
constraints=None, hidden=False, label=None, immutable=False):
super(Schema, self).__init__(data_type=data_type,
description=description,
default=default,
schema=schema,
required=default is None,
constraints=constraints,
label=label,
immutable=immutable)
self.hidden = hidden
# Schema class validates default value for lists assuming list type. For
# comma delimited list string supported in parameters Schema class, the
# default value has to be parsed into a list if necessary so that
# validation works.
def _validate_default(self, context):
if self.default is not None:
default_value = self.default
if self.type == self.LIST and not isinstance(self.default, list):
try:
default_value = self.default.split(',')
except (KeyError, AttributeError) as err:
raise exception.InvalidSchemaError(
message=_('Default must be a comma-delimited list '
'string: %s') % err)
elif self.type == self.LIST and isinstance(self.default, list):
default_value = [(six.text_type(x))
for x in self.default]
try:
self.validate_constraints(default_value, context,
[constr.CustomConstraint])
except (ValueError, TypeError,
exception.StackValidationFailed) as exc:
raise exception.InvalidSchemaError(
message=_('Invalid default %(default)s (%(exc)s)') %
dict(default=self.default, exc=exc))
def set_default(self, default=None):
super(Schema, self).set_default(default)
self.required = default is None
@staticmethod
def get_num(key, context):
val = context.get(key)
if val is not None:
val = Schema.str_to_num(val)
return val
@staticmethod
def _check_dict(schema_dict, allowed_keys, entity):
if not isinstance(schema_dict, dict):
raise exception.InvalidSchemaError(
message=_("Invalid %s, expected a mapping") % entity)
for key in schema_dict:
if key not in allowed_keys:
raise exception.InvalidSchemaError(
message=_("Invalid key '%(key)s' for %(entity)s") % {
"key": key, "entity": entity})
@classmethod
def _validate_dict(cls, param_name, schema_dict):
cls._check_dict(schema_dict,
cls.PARAMETER_KEYS,
"parameter (%s)" % param_name)
if cls.TYPE not in schema_dict:
raise exception.InvalidSchemaError(
message=_("Missing parameter type for parameter: %s") %
param_name)
@classmethod
def from_dict(cls, param_name, schema_dict):
"""Return a Parameter Schema object from a legacy schema dictionary.
:param param_name: name of the parameter owning the schema; used
for more verbose logging
:type param_name: str
"""
cls._validate_dict(param_name, schema_dict)
def constraints():
desc = schema_dict.get(CONSTRAINT_DESCRIPTION)
if MIN_VALUE in schema_dict or MAX_VALUE in schema_dict:
yield constr.Range(Schema.get_num(MIN_VALUE, schema_dict),
Schema.get_num(MAX_VALUE, schema_dict),
desc)
if MIN_LENGTH in schema_dict or MAX_LENGTH in schema_dict:
yield constr.Length(Schema.get_num(MIN_LENGTH, schema_dict),
Schema.get_num(MAX_LENGTH, schema_dict),
desc)
if ALLOWED_VALUES in schema_dict:
yield constr.AllowedValues(schema_dict[ALLOWED_VALUES], desc)
if ALLOWED_PATTERN in schema_dict:
yield constr.AllowedPattern(schema_dict[ALLOWED_PATTERN], desc)
# make update_allowed true by default on TemplateResources
# as the template should deal with this.
return cls(schema_dict[TYPE],
description=schema_dict.get(DESCRIPTION),
default=schema_dict.get(DEFAULT),
constraints=list(constraints()),
hidden=str(schema_dict.get(NO_ECHO,
'false')).lower() == 'true',
label=schema_dict.get(LABEL))
def validate_value(self, value, context=None, template=None):
super(Schema, self).validate_constraints(value, context=context,
template=template)
def __getitem__(self, key):
if key == self.TYPE:
return self.type
if key == self.HIDDEN:
return self.hidden
else:
return super(Schema, self).__getitem__(key)
@six.python_2_unicode_compatible
class Parameter(object):
"""A template parameter."""
def __new__(cls, name, schema, value=None):
"""Create a new Parameter of the appropriate type."""
if cls is not Parameter:
return super(Parameter, cls).__new__(cls)
# Check for fully-fledged Schema objects
if not isinstance(schema, Schema):
schema = Schema.from_dict(name, schema)
if schema.type == schema.STRING:
ParamClass = StringParam
elif schema.type == schema.NUMBER:
ParamClass = NumberParam
elif schema.type == schema.LIST:
ParamClass = CommaDelimitedListParam
elif schema.type == schema.MAP:
ParamClass = JsonParam
elif schema.type == schema.BOOLEAN:
ParamClass = BooleanParam
else:
raise ValueError(_('Invalid Parameter type "%s"') % schema.type)
return super(Parameter, cls).__new__(ParamClass)
__slots__ = ('name', 'schema', 'user_value', 'user_default')
def __init__(self, name, schema, value=None):
"""Initialise the parameter.
Initialise the Parameter with a name, schema and optional user-supplied
value.
"""
self.name = name
self.schema = schema
self.user_value = value
self.user_default = None
def validate(self, validate_value=True, context=None, template=None):
"""Validates the parameter.
This method validates if the parameter's schema is valid,
and if the default value - if present - or the user-provided
value for the parameter comply with the schema.
"""
err_msg = _("Parameter '%(name)s' is invalid: %(exp)s")
try:
self.schema.validate(context)
if not validate_value:
return
if self.user_value is not None:
self._validate(self.user_value, context, template)
elif self.has_default():
self._validate(self.default(), context, template)
else:
raise exception.UserParameterMissing(key=self.name)
except exception.StackValidationFailed as ex:
msg = err_msg % dict(name=self.name, exp=six.text_type(ex))
raise exception.StackValidationFailed(message=msg)
except exception.InvalidSchemaError as ex:
msg = err_msg % dict(name=self.name, exp=six.text_type(ex))
raise exception.InvalidSchemaError(message=msg)
def value(self):
"""Get the parameter value, optionally sanitising it for output."""
if self.user_value is not None:
return self.user_value
if self.has_default():
return self.default()
raise exception.UserParameterMissing(key=self.name)
def has_value(self):
"""Parameter has a user or default value."""
return self.user_value is not None or self.has_default()
def hidden(self):
"""Return whether the parameter is hidden.
Hidden parameters should be sanitised in any output to the user.
"""
return self.schema.hidden
def description(self):
"""Return the description of the parameter."""
return self.schema.description or ''
def label(self):
"""Return the label or param name."""
return self.schema.label or self.name
def has_default(self):
"""Return whether the parameter has a default value."""
return (self.schema.default is not None or
self.user_default is not None)
def default(self):
"""Return the default value of the parameter."""
if self.user_default is not None:
return self.user_default
return self.schema.default
def set_default(self, value):
self.user_default = value
def __str__(self):
"""Return a string representation of the parameter."""
value = self.value()
if self.hidden():
return six.text_type('******')
else:
return six.text_type(value)
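# Illustrative example of the type dispatch in Parameter.__new__ above:
#   schema = Schema.from_dict('count', {'Type': 'Number', 'Default': '1'})
#   param = Parameter('count', schema)   # -> NumberParam instance
#   param.value()                        # -> 1 (string default coerced via str_to_num)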
class NumberParam(Parameter):
"""A template parameter of type "Number"."""
__slots__ = tuple()
def __int__(self):
"""Return an integer representation of the parameter."""
return int(super(NumberParam, self).value())
def __float__(self):
"""Return a float representation of the parameter."""
return float(super(NumberParam, self).value())
def _validate(self, val, context, template=None):
try:
Schema.str_to_num(val)
except ValueError as ex:
raise exception.StackValidationFailed(message=six.text_type(ex))
self.schema.validate_value(val, context=context, template=template)
def value(self):
return Schema.str_to_num(super(NumberParam, self).value())
class BooleanParam(Parameter):
"""A template parameter of type "Boolean"."""
__slots__ = tuple()
def _validate(self, val, context, template=None):
try:
strutils.bool_from_string(val, strict=True)
except ValueError as ex:
raise exception.StackValidationFailed(message=six.text_type(ex))
self.schema.validate_value(val, context=context, template=template)
def value(self):
if self.user_value is not None:
raw_value = self.user_value
else:
raw_value = self.default()
return strutils.bool_from_string(str(raw_value), strict=True)
class StringParam(Parameter):
"""A template parameter of type "String"."""
__slots__ = tuple()
def _validate(self, val, context, template=None):
self.schema.validate_value(val, context=context, template=template)
def value(self):
return self.schema.to_schema_type(super(StringParam, self).value())
class ParsedParameter(Parameter):
"""A template parameter with cached parsed value."""
__slots__ = ('parsed',)
def __init__(self, name, schema, value=None):
super(ParsedParameter, self).__init__(name, schema, value)
self._update_parsed()
def set_default(self, value):
super(ParsedParameter, self).set_default(value)
self._update_parsed()
def _update_parsed(self):
if self.has_value():
if self.user_value is not None:
self.parsed = self.parse(self.user_value)
else:
self.parsed = self.parse(self.default())
class CommaDelimitedListParam(ParsedParameter, collections.Sequence):
"""A template parameter of type "CommaDelimitedList"."""
__slots__ = ('parsed',)
def __init__(self, name, schema, value=None):
self.parsed = []
super(CommaDelimitedListParam, self).__init__(name, schema, value)
def parse(self, value):
# only parse when value is not already a list
if isinstance(value, list):
return [(six.text_type(x)) for x in value]
try:
return param_utils.delim_string_to_list(value)
except (KeyError, AttributeError) as err:
message = _('Value must be a comma-delimited list string: %s')
raise ValueError(message % six.text_type(err))
return value
def value(self):
if self.has_value():
return self.parsed
raise exception.UserParameterMissing(key=self.name)
def __len__(self):
"""Return the length of the list."""
return len(self.parsed)
def __getitem__(self, index):
"""Return an item from the list."""
return self.parsed[index]
def __str__(self):
if self.hidden():
return super(CommaDelimitedListParam, self).__str__()
return ",".join(self.value())
def _validate(self, val, context, template=None):
try:
parsed = self.parse(val)
except ValueError as ex:
raise exception.StackValidationFailed(message=six.text_type(ex))
self.schema.validate_value(parsed, context=context, template=template)
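# Illustrative sketch (not part of the original module; assumes
# param_utils.delim_string_to_list() simply splits on commas and that the schema
# accepts the resulting items):
#
#     param = CommaDelimitedListParam('azs', Schema(Schema.LIST), 'zone1,zone2')
#     param.value()   # -> ['zone1', 'zone2']
#     len(param)      # -> 2 (the class behaves as a Sequence over the parsed items)
#     str(param)      # -> 'zone1,zone2'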
class JsonParam(ParsedParameter):
"""A template parameter who's value is map or list."""
__slots__ = ('parsed',)
def __init__(self, name, schema, value=None):
self.parsed = {}
super(JsonParam, self).__init__(name, schema, value)
def parse(self, value):
try:
val = value
if not isinstance(val, six.string_types):
# turn off oslo_serialization's clever to_primitive()
val = jsonutils.dumps(val, default=None)
if val:
return jsonutils.loads(val)
except (ValueError, TypeError) as err:
message = _('Value must be valid JSON: %s') % err
raise ValueError(message)
return value
def value(self):
if self.has_value():
return self.parsed
raise exception.UserParameterMissing(key=self.name)
def __getitem__(self, key):
return self.parsed[key]
def __iter__(self):
return iter(self.parsed)
def __len__(self):
return len(self.parsed)
def __str__(self):
if self.hidden():
return super(JsonParam, self).__str__()
return encodeutils.safe_decode(jsonutils.dumps(self.value()))
def _validate(self, val, context, template=None):
try:
parsed = self.parse(val)
except ValueError as ex:
raise exception.StackValidationFailed(message=six.text_type(ex))
self.schema.validate_value(parsed, context=context, template=template)
@six.add_metaclass(abc.ABCMeta)
class Parameters(collections.Mapping):
"""Parameters of a stack.
The parameters of a stack, with type checking, defaults, etc. specified by
the stack's template.
"""
def __init__(self, stack_identifier, tmpl, user_params=None,
param_defaults=None):
"""Initialisation of the parameter.
Create the parameter container for a stack from the stack name and
template, optionally setting the user-supplied parameter values.
"""
user_params = user_params or {}
param_defaults = param_defaults or {}
def user_parameter(schema_item):
name, schema = schema_item
return Parameter(name, schema,
user_params.get(name))
self.tmpl = tmpl
self.user_params = user_params
schemata = self.tmpl.param_schemata()
user_parameters = (user_parameter(si) for si in
six.iteritems(schemata))
pseudo_parameters = self._pseudo_parameters(stack_identifier)
self.params = dict((p.name,
p) for p in itertools.chain(pseudo_parameters,
user_parameters))
self.non_pseudo_param_keys = [p for p in self.params if p not in
self.PSEUDO_PARAMETERS]
for pd_name, param_default in param_defaults.items():
if pd_name in self.params:
self.params[pd_name].set_default(param_default)
def validate(self, validate_value=True, context=None):
"""Validates all parameters.
This method validates if all user-provided parameters are actually
defined in the template, and if all parameters are valid.
"""
self._validate_user_parameters()
for param in six.itervalues(self.params):
param.validate(validate_value, context, self.tmpl)
def __contains__(self, key):
"""Return whether the specified parameter exists."""
return key in self.params
def __iter__(self):
"""Return an iterator over the parameter names."""
return iter(self.params)
def __len__(self):
"""Return the number of parameters defined."""
return len(self.params)
def __getitem__(self, key):
"""Get a parameter value."""
return self.params[key].value()
def map(self, func, filter_func=lambda p: True):
"""Map the supplied function onto each Parameter.
Map the supplied function onto each Parameter (with an optional filter
function) and return the resulting dictionary.
"""
return dict((n, func(p))
for n, p in six.iteritems(self.params) if filter_func(p))
def set_stack_id(self, stack_identifier):
"""Set the StackId pseudo parameter value."""
if stack_identifier is not None:
self.params[self.PARAM_STACK_ID].schema.set_default(
stack_identifier.arn())
return True
return False
def _validate_user_parameters(self):
schemata = self.tmpl.param_schemata()
for param in self.user_params:
if param not in schemata:
raise exception.UnknownUserParameter(key=param)
def _pseudo_parameters(self, stack_identifier):
warnings.warn("Parameters._pseudo_parameters() is deprecated and "
"will become an abstract method in future. Subclasses "
"should override it to provide their own pseudo "
"parameters.", DeprecationWarning)
stack_id = (stack_identifier.arn()
if stack_identifier is not None else 'None')
stack_name = stack_identifier and stack_identifier.stack_name
yield Parameter('AWS::StackId',
Schema(Schema.STRING, _('Stack ID'),
default=str(stack_id)))
if stack_name:
yield Parameter('AWS::StackName',
Schema(Schema.STRING, _('Stack Name'),
default=stack_name))
yield Parameter('AWS::Region',
Schema(Schema.STRING,
default='ap-southeast-1',
constraints=[
constr.AllowedValues(['us-east-1',
'us-west-1',
'us-west-2',
'sa-east-1',
'eu-west-1',
'ap-southeast-1',
'ap-northeast-1']
)]))
def immutable_params_modified(self, new_parameters, input_params):
# A parameter must have been present in the old stack for its
# immutability to be enforced
common_params = list(set(new_parameters.non_pseudo_param_keys)
& set(self.non_pseudo_param_keys))
invalid_params = []
for param in common_params:
old_value = self.params[param]
if param in input_params:
new_value = input_params[param]
else:
new_value = new_parameters[param]
immutable = new_parameters.params[param].schema.immutable
if immutable and old_value.value() != new_value:
invalid_params.append(param)
if invalid_params:
return invalid_params
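# Illustrative sketch (not part of the original module): typical use of
# immutable_params_modified() during a stack update, where 'existing_params' and
# 'new_params' are Parameters built from the old and new templates and 'env_params'
# is the user-supplied parameter dict for the update. The exception name is the one
# Heat's engine uses for this case; treat it as an assumption here.
#
#     modified = existing_params.immutable_params_modified(new_params, env_params)
#     if modified:
#         raise exception.ImmutableParameterModified(*modified)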
|
the-stack_106_17386
|
from typing import List
import hikari
import lightbulb
import tweepy
import os
import asyncio
with open("./secrets/twitter") as t:
_twitter = t.read().splitlines()
_twitter_api = _twitter[0]
_twitter_secret_api = _twitter[1]
_twitter_access = _twitter[2]
_twitter_access_secret = _twitter[3]
authenticator = tweepy.OAuth1UserHandler(os.environ['TWITTER_API'], os.environ['TWITTER_API_SECRET'])
authenticator.set_access_token(os.environ['TWITTER_ACCESS'], os.environ['TWITTER_ACCESS_SECRET'])
api = tweepy.API(authenticator, wait_on_rate_limit=True)
#client = tweepy.Client(TWITTER_BEARER, wait_on_rate_limit=True)
plugin = lightbulb.Plugin("Twitter")
my_followers = []
streams = []
user_ids = []
tweets = []
class Listener(tweepy.Stream):
def on_status(self, status):
tweets.append(status)
print(status.user.screen_name + ": " + status.text)
#async def get_tweets():
@plugin.command
@lightbulb.command("twitter", "Twitter Command Group")
@lightbulb.implements(lightbulb.PrefixCommandGroup, lightbulb.SlashCommandGroup)
async def twitter(ctx: lightbulb.Context) -> None:
pass
@plugin.command
@lightbulb.command("say", "Twitter Command Group")
@lightbulb.implements(lightbulb.PrefixCommand, lightbulb.SlashCommand)
async def say(ctx: lightbulb.Context) -> None:
await ctx.respond('what?')
@twitter.child
@lightbulb.option('username', 'Twitter Handle', type=str)
@lightbulb.command("bind", "[Work in progress] Streams user's tweets in the channel.")
@lightbulb.implements(lightbulb.PrefixSubCommand, lightbulb.SlashSubCommand)
async def bind(ctx: lightbulb.Context) -> None:
if (ctx.options.username != "Iwus237"):
streams.append(ctx.options.username)
stream = Listener(
os.environ['TWITTER_API'], os.environ['TWITTER_API_SECRET'],
os.environ['TWITTER_ACCESS'], os.environ['TWITTER_ACCESS_SECRET']
)
        user_ids.clear()
        for user in streams:
            user_ids.append(api.get_user(screen_name = user).id)
await ctx.respond("current users tracked (id): " + str(user_ids))
await ctx.respond("starting stream")
stream.filter(follow=user_ids, threaded=True)
        while streams:
            while tweets:
                tweet = tweets.pop(0)
                await ctx.respond(tweet.user.screen_name + " tweeted: " + tweet.text)
            await asyncio.sleep(1)  # yield to the event loop while waiting for new tweets
else:
ctx.respond("The bot cannot follow itself!")
@twitter.child
@lightbulb.option('username', 'Twitter Handle', type=str)
@lightbulb.command("unbind", "[Work in progress] Stop streaming user tweets")
@lightbulb.implements(lightbulb.PrefixSubCommand, lightbulb.SlashSubCommand)
async def unbind(ctx: lightbulb.Context) -> None:
    streams.remove(ctx.options.username)
    user_ids.remove(api.get_user(screen_name=ctx.options.username).id)
@twitter.child
@lightbulb.option('username', 'Twitter Handle', type=str)
@lightbulb.command('follow', 'Follows twitter user as bot')
@lightbulb.implements(lightbulb.PrefixSubCommand, lightbulb.SlashSubCommand)
async def follow(ctx: lightbulb.Context) -> None:
api.create_friendship(screen_name = ctx.options.username)
await ctx.respond("followed " + ctx.options.username)
@twitter.child
@lightbulb.option('username', 'Twitter Handle', type=str)
@lightbulb.command('unfollow', 'Unfollows twitter user as bot')
@lightbulb.implements(lightbulb.PrefixSubCommand, lightbulb.SlashSubCommand)
async def unfollow(ctx: lightbulb.Context) -> None:
api.destroy_friendship(screen_name = ctx.options.username)
await ctx.respond("unfollowed " + ctx.options.username)
@twitter.child
@lightbulb.command('list', 'Lists users the bot is following on Twitter.')
@lightbulb.implements(lightbulb.PrefixSubCommand, lightbulb.SlashSubCommand)
async def list(ctx: lightbulb.Context) -> None:
    my_followers.clear()
    for follower in api.get_friends(screen_name = 'Iwus237'):
        my_followers.append(follower.screen_name)
    await ctx.respond("I'm currently following: " + ", ".join(str(e) for e in my_followers))
def load(bot: lightbulb.BotApp) -> None:
bot.add_plugin(plugin)
def unload(bot: lightbulb.BotApp) -> None:
    bot.remove_plugin(plugin)
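# Illustrative wiring sketch (assumption: this module lives at extensions/twitter.py
# inside the bot project; the token and prefix are placeholders). The load()/unload()
# hooks above are picked up when the extension is loaded:
#
#     bot = lightbulb.BotApp(token="...", prefix="!")
#     bot.load_extensions("extensions.twitter")
#     bot.run()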
|
the-stack_106_17387
|
# Copyright (c) 2016 Ansible, Inc.
# All Rights Reserved.
# Python
import base64
import binascii
import re
# Django
from django.utils.translation import ugettext_lazy as _
# Tower
from awx.conf import fields
class PendoTrackingStateField(fields.ChoiceField):
def to_internal_value(self, data):
# Any false/null values get converted to 'off'.
if data in fields.NullBooleanField.FALSE_VALUES or data in fields.NullBooleanField.NULL_VALUES:
return 'off'
return super(PendoTrackingStateField, self).to_internal_value(data)
class CustomLogoField(fields.CharField):
CUSTOM_LOGO_RE = re.compile(r'^data:image/(?:png|jpeg|gif);base64,([A-Za-z0-9+/=]+?)$')
default_error_messages = {
'invalid_format': _('Invalid format for custom logo. Must be a data URL with a base64-encoded GIF, PNG or JPEG image.'),
'invalid_data': _('Invalid base64-encoded data in data URL.'),
}
def to_internal_value(self, data):
data = super(CustomLogoField, self).to_internal_value(data)
match = self.CUSTOM_LOGO_RE.match(data)
if not match:
self.fail('invalid_format')
b64data = match.group(1)
try:
base64.b64decode(b64data)
except (TypeError, binascii.Error):
self.fail('invalid_data')
return data
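# Illustrative sketch (not part of the original module; the base64 payloads are
# arbitrary example data): what the CUSTOM_LOGO_RE pattern accepts and rejects.
#
#     CustomLogoField.CUSTOM_LOGO_RE.match('data:image/png;base64,iVBORw0KGgo=')   # match
#     CustomLogoField.CUSTOM_LOGO_RE.match('data:image/svg+xml;base64,PHN2Zz4=')   # None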
|
the-stack_106_17390
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 6 22:50:35 2019
@author: sarashashaani
pipeT dataset
most recent last_working_withuf
max level = 3
min node size = 100
split quantile = 20
crps quantile = N/A
"""
import numpy as np
from scipy import random as sr
from random import sample
import ast
import time
from csv import writer
from joblib import Parallel, delayed
from collections import Counter
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
import matplotlib.pyplot as plt
def evaluate_algorithm(dataset):
global methods, leaves
evals_dict = {}
train_set = dataset[0]
""" TEST SET """
test_set = list()
for row in dataset[1]:
row_copy = list(row)
test_set.append(row_copy)
row_copy[-1] = None
self_test = list()
for row in dataset[0]:
row_copy = list(row)
self_test.append(row_copy)
row_copy[-1] = None
for tree_method in methods:
evals_dict[tree_method] = {}
tree = decision_tree(train_set, tree_method)
leaves = []
leaves = leaves_list(tree, 0)
predicted = tree_preds(tree, test_set)
predicted_in = tree_preds(tree, self_test)
actual = [row[-1] for row in dataset[1]]
actual_in = [row[-1] for row in dataset[0]]
leaf_dict = dict((str(l),[]) for l in leaves)
leaf_dict_in = dict((str(l),[]) for l in leaves)
for l in range(len(leaves)):
leaf_dict[str(leaves[l])] = [actual[i] for i in range(len(actual)) if predicted[i] == l]
leaf_dict_in[str(leaves[l])] = [actual_in[i] for i in range(len(actual_in)) if predicted_in[i] == l]
for eval_method in methods:
eval_new = [accuracy_funcs(eval_method, leaf_dict)]
eval_new += [accuracy_funcs(eval_method, leaf_dict_in)]
evals_dict[tree_method][eval_method] = eval_new
# print(eval_method+' eval: '+str(eval_new))
return evals_dict
# List of data points in all leaves
def leaves_list(node, depth=0):
global leaves
if isinstance(node, dict):
leaves_list(node['left'], depth+1)
leaves_list(node['right'], depth+1)
else:
leaves.append(node)
return leaves
# Classification and Regression Tree Algorithm; Output: predictions as the
# whole data in the leaves for each test data point
def decision_tree(train, tree_method):
global max_depth
tree = build_tree(train, tree_method)
return tree
def tree_preds(tree, test_set):
global leaves
predictions = list()
for row in test_set:
prediction = predict(tree, row)
predictions.append(leaves.index(prediction))
return predictions
def accuracy_funcs(method, leaf_dict):
if method == 'sse':
return accuracy_sse(leaf_dict)
if method == 'crps':
return accuracy_crps(leaf_dict)
if method == 'dss':
return accuracy_dss(leaf_dict)
if method == 'is1':
return accuracy_is1(leaf_dict)
# Evaluation metric: SSE; Input is the actual data and the all the observations
# of the leaf each data point falls in (predicted)
def accuracy_sse(leaf_dict):
total_sse = 0
for key, val in leaf_dict.items():
leaf = ast.literal_eval(key)
avg = np.mean(leaf)
xv = list(Counter(val).keys()) # equals to list(set(targets))
rv = list(Counter(val).values())
for j, point in enumerate(xv):
total_sse += pow(point - avg, 2)*rv[j]
return total_sse
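# Illustrative worked example (comment only): for a leaf whose training targets are
# [1, 3] (mean 2) and whose test targets are [2, 4],
#
#     accuracy_sse({'[1, 3]': [2, 4]})   # -> (2-2)**2 * 1 + (4-2)**2 * 1 = 4.0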
def accuracy_crps(leaf_dict):
total_crps = 0 ## crps old with freq -- this is correct
for key, val in leaf_dict.items(): # key is X and val is y
leaf = ast.literal_eval(key)
x = list(Counter(leaf).keys())
r = list(Counter(leaf).values())
crps_2 = 0.0
for j, leaf_point_q in enumerate(x):
s = 0.0
for i, leaf_point in enumerate(x):
s += abs(leaf_point_q-leaf_point)/(pow(len(leaf),2)*2)*r[i]
crps_2 += s*r[j]
xv = list(Counter(val).keys())
rv = list(Counter(val).values())
crps_1 = 0.0
for j, leaf_point_q in enumerate(xv):
s = 0.0
for i, leaf_point in enumerate(x):
s += abs(leaf_point_q-leaf_point)*r[i]
crps_1 += s*rv[j]
total_crps += crps_1/len(leaf) - crps_2*len(val)
return total_crps
def accuracy_dss(leaf_dict):
total_dss = 0
for key, val in leaf_dict.items():
leaf = ast.literal_eval(key)
mhat = np.mean(leaf)
vhat = max(np.var(leaf),.1)
xv = list(Counter(val).keys()) # equals to list(set(targets))
rv = list(Counter(val).values())
for j, point in enumerate(xv):
total_dss += (pow(point - mhat,2)/vhat+np.log(vhat))*rv[j]
return total_dss
def accuracy_is1(leaf_dict):
global alpha
total_is = 0
for key, val in leaf_dict.items():
leaf = sorted(ast.literal_eval(key))
u = leaf[int(np.ceil((1-alpha)*len(leaf)))-1]
xv = list(Counter(val).keys()) # equals to list(set(targets))
rv = list(Counter(val).values())
for j, point in enumerate(xv):
total_is += (u+(point-u)*(point>=u)/alpha)*rv[j]
return total_is
# Split a dataset based on an attribute and an attribute value
# This is candidate split, so all we do here is to devide the dataset into
# left (attribute <= attribute value) and right (o.w.)
# left (attribute == attribute value) and right (o.w.) if equal (for categorical vars)
def test_split(index, value, train_set, equal):
left, right = list(), list()
if equal:
for row in train_set:
if row[index] == value:
left.append(row)
else:
right.append(row)
else:
for row in train_set:
if row[index] < value:
left.append(row)
else:
right.append(row)
return left, right
def new_split_funcs(method, groups, notparent):
if method == 'sse':
return sse_for_new_split(groups,notparent)
if method == 'crps':
return crps_for_new_split(groups,notparent)
if method == 'dss':
return dss_for_new_split(groups,notparent)
if method == 'is1':
return is1_for_new_split(groups,notparent)
# SSE of a a new splitted point; if nonparent it is two nodes (left, right)
# if parent, only one node
def sse_for_new_split(groups,notparent):
sse = 0.0
if notparent:
for group in groups:
mean_target = sum([row[-1] for row in group])/float(len(group))
sse += sum([pow(row[-1]-mean_target,2) for row in group])
else:
mean_target = sum([row[-1] for row in groups])/float(len(groups))
sse = sum([pow(row[-1]-mean_target,2) for row in groups])
return sse
# Find the empirical cdf of a sample, Outcome: quantiles and cumulative probabilities
def ecdf(sample):
sample = np.atleast_1d(sample)
quantiles, counts = np.unique(sample, return_counts=True)
cumprob = np.cumsum(counts).astype(np.double) / sample.size
return quantiles, cumprob
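# Illustrative worked example (comment only): for the sample [1, 1, 2, 3] the unique
# quantiles are [1, 2, 3] with counts [2, 1, 1], so
#
#     ecdf([1, 1, 2, 3])   # -> (array([1, 2, 3]), array([0.5 , 0.75, 1.  ]))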
def crps_for_new_split(groups,notparent):
total_crps = 0
if notparent:
for group in groups:
targets = np.asarray([row[-1] for row in group])
x = list(Counter(targets).keys())
r = list(Counter(targets).values())
crps_2 = 0.0
for j, leaf_point_q in enumerate(x):
s = 0.0
for i, leaf_point in enumerate(x):
s += abs(leaf_point_q-leaf_point)*r[i]
crps_2 += s*r[j]
total_crps += crps_2/(2*len(targets))
else:
targets = np.asarray([row[-1] for row in groups])
x = list(Counter(targets).keys())
r = list(Counter(targets).values())
crps_2 = 0.0
for j, leaf_point_q in enumerate(x):
s = 0.0
for i, leaf_point in enumerate(x):
s += abs(leaf_point_q-leaf_point)*r[i]
crps_2 += s*r[j]
total_crps += crps_2/(2*len(targets))
return total_crps
def dss_for_new_split(groups,notparent):
dss = 0.0
if notparent:
for group in groups:
targets = np.asarray([row[-1] for row in group])
mhat = np.mean(targets)
vhat = max(np.var(targets),.1)
dss += (np.log(vhat)*len(targets)+ sum([pow(x-mhat,2) for x in targets])/vhat)
else:
targets = np.asarray([row[-1] for row in groups])
mhat = np.mean(targets)
vhat = max(np.var(targets),.1)
dss += (np.log(vhat)*len(targets)+ sum([pow(x-mhat,2) for x in targets])/vhat)
return dss
def is1_for_new_split(groups,notparent):
global alpha
is1 = 0.0
if notparent:
for group in groups:
targets = sorted(np.asarray([row[-1] for row in group]))
u = targets[int(np.ceil((1-alpha)*len(targets)))-1]
is1 += (u*len(targets)+sum([(x-u)*(x>=u) for x in targets])/alpha)
else:
targets = sorted(np.asarray([row[-1] for row in groups]))
u = targets[int(np.ceil((1-alpha)*len(targets)))-1]
is1 += (u*len(targets)+sum([(x-u)*(x>=u) for x in targets])/alpha)
return is1
# Select the best split point for a dataset
# based on tree_method: crps or sse; start by b_score before split and
# search for lowest score across all candidate splits
def get_split(train_set, tree_method):
global min_node_size, num_quantiles, x_dim, tol, is_cat, cov_uniqvals
b_index, b_value, b_groups = 999, 999, None
b_score = new_split_funcs(tree_method, train_set, 0)
first_val = 0
split_occurs = 0
for index in range(x_dim):
qe, pe = ecdf(column(train_set,index))
if is_cat[index]:# and len(unique_vals) <= 25:
tocheck_val = qe
equal = 1
elif len(qe) < num_quantiles:
tocheck_val = qe
equal = 0
else:
inc_p = 1/(num_quantiles+1)
inds = [next(x[0] for x in enumerate(pe) if x[1] > i*inc_p) for i in range(1,(num_quantiles+1))]
tocheck_val = list(sorted(set([qe[i] for i in inds])))
equal = 0
for val in tocheck_val:
groups = test_split(index, val, train_set, equal)
if len(groups[0]) >= min_node_size and len(groups[1]) >= min_node_size:
measure = new_split_funcs(tree_method, groups, 1)
if not first_val:
first_val = 1
if b_score < measure:
print("monotonicity violated - "+str(tree_method)+" - variable "+str(index))
log_file.write("monotonicity violated - "+str(tree_method)+" - variable "+str(val))
b_score = max(b_score,measure)
if split_occurs:
check_tol = 0
else:
check_tol = tol
if measure <= b_score*(1-check_tol):
split_occurs = 1
b_index, b_value, b_score, b_groups = index, val, measure, groups
if not split_occurs:
print("no improvement - "+str(tree_method))
log_file.write("no improvement - "+str(tree_method))
return {'index':b_index, 'value':b_value, 'groups':b_groups}
# Return the observaions in the leaf
def to_terminal(group):
outcomes = [row[-1] for row in group]
return outcomes
# Create child splits for a node
# or make terminal (leaf) if (1) no split improves the current node,
# or (2) the depth of the tree is maxed or (3) the volume of the node before split
# is less than twice of the min_node_size (minimum data points in any node)
def split(node, depth, tree_method):
global min_node_size, max_depth
    if node['groups']:
        left, right = node['groups']
        del(node['groups'])
    else:
        # no viable split was found for this node; leave it unchanged
        print('NOTHING')
        return
# check for a no split
if not left or not right:
node['left'] = node['right'] = to_terminal(left + right)
return
# check for max depth
if depth >= max_depth:
node['left'], node['right'] = to_terminal(left), to_terminal(right)
return
# process left child
if len(left) < 3*min_node_size:
node['left'] = to_terminal(left)
else:
node['left'] = get_split(left, tree_method)
split(node['left'], depth+1, tree_method)
# process right child
if len(right) < 3*min_node_size:
node['right'] = to_terminal(right)
else:
node['right'] = get_split(right, tree_method)
split(node['right'], depth+1, tree_method)
# Build a decision tree
# Start with the root to get the first split, then call the recursice Split function
def build_tree(train_set, tree_method):
global max_depth
root = get_split(train_set, tree_method)
split(root, 1, tree_method)
print("tree_method "+tree_method+"\n###########################")
print_tree(root, depth=0)
return root
# Print a decision tree
def print_tree(node, depth=0):
global is_cat
if isinstance(node, dict):
if is_cat[node['index']]:
print('%s[X%d = %d]' % ((depth*' ', (node['index']+1), int(node['value']))))
else:
print('%s[X%d < %.4f]' % ((depth*' ', (node['index']+1), node['value'])))
print_tree(node['left'], depth+1)
print_tree(node['right'], depth+1)
else:
print('%s[%s]' % ((depth*' ', len(node))))
# Make a prediction with a decision tree
# Return the node (the entire leaf, not just a summary)
def predict(node, row):
if row[node['index']] < node['value']:
if isinstance(node['left'], dict):
return predict(node['left'], row)
else:
return node['left']
else:
if isinstance(node['right'], dict):
return predict(node['right'], row)
else:
return node['right']
def column(matrix, i):
return [row[i] for row in matrix]
""" evaluate algorithm """
def OneRep(k):
sr.seed(k+2010)
holdout_size = int(len(rows)/2)
train_index = list(sample(range(len(rows)),holdout_size))
test_index = list(set(range(len(rows))) - set(train_index))
train_set = [rows[index] for index in train_index]
test_set = [rows[index] for index in test_index]
dataset = [train_set,test_set]
total_time = time.time()
scores = evaluate_algorithm(dataset)
total_time = time.time() - total_time
print("Rep "+str(k)+" completed in "+str(round(total_time,2))+" sec.")
log_file.write("\nRep "+str(k)+" completed in "+str(round(total_time,2))+" sec.")
return scores
def set_box_colors(bp):
colors = ['red', 'purple', 'blue', 'green']
elements_1 = ['boxes','fliers']
elements_2= ['caps','whiskers']
for elem in elements_1:
for idx in range(len(bp[elem])):
plt.setp(bp[elem][idx], color=colors[idx])
for elem in elements_2:
for idx in range(int(len(bp[elem])/2)):
plt.setp(bp[elem][2*idx], color=colors[idx])
plt.setp(bp[elem][2*idx+1], color=colors[idx])
def plot(e_method):
global params
dataset1 = np.transpose(np.array(csv_dict_in[e_method]))
dataset2 = np.transpose(np.array(csv_dict_out[e_method]))
#
fig = plt.figure()
fig.suptitle(data_title)
print(e_method+"-evaluated")
for j, t_method in enumerate(methods):
print(t_method+"-built tree, in-sample mean: "+str(round(np.mean(dataset1[j,:]),2)))
log_file.write("\n"+t_method+"-built tree, in-sample mean: "+str(round(np.mean(dataset1[j,:]),2)))
plt.subplot(1, 2, 1)
plt.title('in-sample '+str(e_method), fontsize=20)
plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
bp = plt.boxplot(dataset1.tolist(),positions = [1, 2, 3, 4], widths = 0.9)
set_box_colors(bp)
frame1 = plt.gca()
frame1.axes.set_xticklabels(methods, fontsize=14, rotation = 90)
for j, t_method in enumerate(methods):
print(t_method+"-built tree, out-of-sample mean: "+str(round(np.mean(dataset2[j,:]),2)))
log_file.write("\n"+t_method+"-built tree, out-sample mean: "+str(round(np.mean(dataset2[j,:]),2)))
plt.subplot(1, 2, 2)
plt.title('out-of-sample '+str(e_method), fontsize=20)
plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
bp = plt.boxplot(dataset2.tolist(),positions = [1, 2, 3, 4], widths = 0.9)
set_box_colors(bp)
frame1 = plt.gca()
frame1.axes.set_xticklabels(methods, fontsize=14, rotation = 90)
fig.subplots_adjust(hspace=0)
fig.tight_layout()
plt.savefig(directory+"results/"+data_title+"_"+e_method+"_out_"+params+".png".format(1))
global min_node_size, max_depth, methods, num_quantiles, alpha, tol, x_dim, is_cat, cov_uniqvals, leaves, params
leaves = []
tol = 0
# inputs
max_depth = 3
min_node_size = 100
num_quantiles = 20
total_reps = 10
alpha = .2
data_title = "pipeT"
methods = ["crps", "dss", "is1", "sse"]
params = str(max_depth)+str(min_node_size)+str(num_quantiles)+str(total_reps)+str(alpha)
total_time = time.time()
directory = "/home/sshasha2/"
datafile = directory+"data/test_"+data_title+".txt"
log_file = open(directory+"log_"+data_title+"_"+params+".txt", 'a+')
with open (datafile, 'r') as f: # use with to open your files, it close them automatically
rows = [x.split() for x in f]
rows = rows[1:]
for i in range(len(rows)):
rows[i] = [float(x) for x in rows[i]]
x_dim = len(rows[0])-1
is_cat = []
cov_uniqvals = []
for i in range(x_dim):
unique_vals = list(sorted(set(column(rows,i))))
cov_uniqvals += [unique_vals]
if len(unique_vals) <= 2:#len(rows)/len(unique_vals) > 100:
is_cat += [1]
else:
is_cat += [0]
total_time = time.time()
results = Parallel(n_jobs=min(total_reps,20))(delayed(OneRep)(rep_no) for rep_no in range(total_reps))
#print(results)
csv_dict_out = {method: [] for method in methods}
csv_dict_in = {method: [] for method in methods}
for e_method in methods:
reps_list = []
reps_list_in = []
for rep in range(total_reps):
rep_list = []
rep_list_in = []
for t_method in methods:
rep_list += [round(results[rep][t_method][e_method][0],2)]
rep_list_in += [round(results[rep][t_method][e_method][1],2)]
reps_list += [rep_list]
reps_list_in += [rep_list_in]
csv_dict_out[e_method] += reps_list
csv_dict_in[e_method] += reps_list_in
with open(directory+"results/"+data_title+"_"+e_method+"_out_"+params+".csv", "w") as f:
w = writer(f)
w.writerows(csv_dict_out[e_method])
with open(directory+"results/"+data_title+"_"+e_method+"_in_"+params+".csv", "w") as f:
w = writer(f)
w.writerows(csv_dict_in[e_method])
total_time = time.time() - total_time
print(data_title+" completed in "+str(round(total_time,2))+" sec.")
log_file.write("\n"+data_title+" completed in "+str(round(total_time,2))+" sec.")
for e_method in methods:
plot(e_method)
|
the-stack_106_17397
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 23 11:31:21 2021
@author: ghiggi
"""
import numpy as np
from cycler import cycler
import matplotlib.pyplot as plt
##----------------------------------------------------------------------------.
### Check AR weights
def check_ar_weights(ar_weights):
"""Check AR weights validity."""
if isinstance(ar_weights, (int, float)):
ar_weights = [ar_weights]
if isinstance(ar_weights, list):
ar_weights = np.array(ar_weights)
if not isinstance(ar_weights, np.ndarray):
raise TypeError("Specify AR weights with a list or a numpy array.")
    # Check that no AR weight is negative
if any(ar_weights < 0):
raise ValueError("AR weights must not contain negative weights.")
# Check that the last AR weight is not zero !
if ar_weights[-1] == 0:
raise ValueError("The last weight of ar_weights must not be 0.")
return ar_weights
#----------------------------------------------------------------------------.
# No AR weights update when .step()
def _ConstantStep(self):
return self.ar_weights
def _DiracDeltaStep(self):
return self.ar_weights
##----------------------------------------------------------------------------.
## Discrete weight update functions
def _StepwiseDecayStep(self):
weights = self.ar_absolute_weights[:-1]
weights = weights - self.factor
weights[weights < 0] = 0
self.ar_absolute_weights[:-1] = weights
def _StepwiseGrowthStep(self):
weight = self.ar_absolute_weights[-1]
weight = weight + self.factor
if weight > 1:
weight = 1
self.ar_absolute_weights[-1] = weight
def _StepwiseStep(self):
if self.temporary_step_count >= self.step_interval:
_StepwiseDecayStep(self)
if self.smooth_growth:
_StepwiseGrowthStep(self)
# Reset temporary_step_count
self.temporary_step_count = 0
def _HalfDecayStep(self):
weights = self.ar_absolute_weights[:-1]
weights = weights/2
self.ar_absolute_weights[:-1] = weights
def _HalfGrowthStep(self):
weight = self.ar_absolute_weights[-1]
if weight == 0:
weight = self.factor
weight = weight*2
if weight > 1:
weight = 1
self.ar_absolute_weights[-1] = weight
def _HalfStep(self):
if self.temporary_step_count >= self.step_interval:
_HalfDecayStep(self)
if self.smooth_growth:
_HalfGrowthStep(self)
# Reset temporary_step_count
self.temporary_step_count = 0
##----------------------------------------------------------------------------.
### Continous weight update functions
def _LinearDecayStep(self):
initial_weights = self.ar_absolute_initial_weights[:-1]
weights = initial_weights - self.factor*self.global_step_count_arr[:-1]
weights[weights < 0] = 0
self.ar_absolute_weights[:-1] = weights
def _LinearGrowthStep(self):
initial_weight = self.ar_absolute_initial_weights[-1]
weight = initial_weight + self.factor*self.global_step_count_arr[-1]
if weight > 1:
weight = 1
self.ar_absolute_weights[-1] = weight
def _LinearStep(self):
_LinearDecayStep(self)
if self.smooth_growth:
_LinearGrowthStep(self)
def _ExponentialDecayStep(self):
initial_weights = self.ar_absolute_initial_weights[:-1]
weights = initial_weights * np.exp(-self.factor*self.global_step_count_arr[:-1])
self.ar_absolute_weights[:-1] = weights
def _ExponentialGrowthStep(self):
weight = self.factor * np.exp(self.factor*self.global_step_count_arr[-1])
if weight > 1:
weight = 1
self.ar_absolute_weights[-1] = weight
def _ExponentialStep(self):
_ExponentialDecayStep(self)
if self.smooth_growth:
_ExponentialGrowthStep(self)
#-----------------------------------------------------------------------------.
class AR_Scheduler():
"""Autoregressive (AR) weights scheduler."""
def __init__(self,
method = "LinearStep",
factor = 0.001,
step_interval = None,
smooth_growth = True,
fixed_ar_weights = None,
initial_ar_absolute_weights = None,
initial_ar_weights = None):
"""Autoregressive (AR) weights scheduler.
Parameters
----------
smooth_growth : bool, optional
            Whether to start the new AR weight at 0 and grow it smoothly, to
            avoid destabilizing training.
            Does not apply to the 'Constant' and 'DiracDelta' methods.
The default is True.
method : str, optional
            Available methods: 'Constant', 'DiracDelta', 'StepwiseStep', 'HalfStep', 'LinearStep', 'ExponentialStep'
            The default method is "LinearStep".
Methods explanation:
                Constant: Add an AR weight (with absolute value 1) when .update() is called.
                DiracDelta: Add an AR weight when .update() is called and
                    reset the other AR weights to 0.
                StepwiseStep: When a new AR weight is added with .update(), start subtracting
                    'factor' from the other AR absolute weights every 'step_interval' .step() calls.
                    If smooth_growth=True, the new AR weight grows stepwise from 0 every
                    'step_interval' .step() calls.
                HalfStep: When a new AR weight is added with .update(), start halving
                    the other AR absolute weights every 'step_interval' .step() calls.
                    If smooth_growth=True, the new AR weight grows by doubling from 'factor'
                    every 'step_interval' .step() calls.
                LinearStep: When a new AR weight is added with .update(), start decreasing
                    the other AR absolute weights linearly (with slope '-factor') at every
                    .step() call.
                    If smooth_growth=True, the new AR weight grows linearly starting from 0.
                ExponentialStep: When a new AR weight is added with .update(), start decreasing
                    the other AR absolute weights exponentially (with decay rate 'factor') at
                    every .step() call.
                    If smooth_growth=True, the new AR weight grows exponentially starting
                    from 'factor'.
factor : float, optional
Argument required by the following methods: 'StepwiseStep','HalfStep','LinearStep','ExponentialStep'.
            Regulates the decay and growth of the AR absolute weights when .step() is called.
            For HalfStep and ExponentialStep, it is also used as the first value of the new AR weight when smooth_growth=True.
step_interval : int, optional
Argument required by the following methods: 'StepwiseStep','HalfStep'.
Specify the frequency with which the AR weights are updated with methods 'StepwiseStep' and 'HalfStep'.
            step_interval = 1 causes a weight update at every .step() call.
fixed_ar_weights : list, optional
            List of AR iterations whose AR weights must not be
            modified by the step functions.
            The default is None (no AR weight is fixed).
        initial_ar_absolute_weights : list, optional
            Specify the initial absolute AR weights.
            They will be rescaled so that the largest value is 1.
If specified, initial_ar_weights must not be specified.
The default is ar_weights = [1].
initial_ar_weights : list, optional
Specify the initial normalized AR weights. (must sum up to 1).
            If specified, initial_ar_absolute_weights must not be specified.
The default is ar_weights = [1].
"""
        # 'StepwiseStep' and 'HalfStep': factor is applied to the ar_absolute weights (not the normalized ar_weights)
        # 'LinearStep' and 'ExponentialStep': factor is applied from the initial ar_absolute_weights
# TODO:
# - Implement a min_ar_weight_option? (instead of decaying to 0)
# - Increasing-Decreasing Decay ... "
# Check smooth_growth
##--------------------------------------------------------------------.
if not isinstance(smooth_growth, bool):
raise TypeError("'smooth_growth' must be either True or False.")
##--------------------------------------------------------------------.
# Check valid method
valid_method = ['Constant','DiracDelta','StepwiseStep','HalfStep','LinearStep','ExponentialStep']
if method not in valid_method:
raise ValueError("Provide a valid 'method'.")
##--------------------------------------------------------------------.
# Check fixed_ar_weights
if not isinstance(fixed_ar_weights, (type(None), np.ndarray, list)):
raise TypeError("'fixed_ar_weights' must be specified as list.")
if isinstance(fixed_ar_weights, list):
fixed_ar_weights = np.array(fixed_ar_weights)
if fixed_ar_weights is not None:
if len(fixed_ar_weights) == 0:
fixed_ar_weights = None
##---------------------------------------------------------------------.
# Check initial_ar_weights and initial_ar_absolute_weights are not both specified.
if initial_ar_weights is not None and initial_ar_absolute_weights is not None:
raise ValueError("Specify either 'initial_ar_weights' or 'initial_ar_absolute_weights'.")
# Set default ar_weights if not specified
if initial_ar_weights is None and initial_ar_absolute_weights is None:
initial_ar_weights = [1]
# Check initial_ar_weights
if initial_ar_weights is not None:
# Check AR weights validity
initial_ar_weights = check_ar_weights(initial_ar_weights)
# Check ar_weights sum up to 1
if np.sum(initial_ar_weights) != 1:
raise ValueError("'initial_ar_weights' must sum up to 1.")
# Compute AR absolute weights
# - Force the largest values to be 1
initial_ar_absolute_weights = initial_ar_weights/initial_ar_weights.max()
# Check initial_ar_absolute_weights
elif initial_ar_absolute_weights is not None:
# Check AR weights validity
initial_ar_absolute_weights = check_ar_weights(initial_ar_absolute_weights)
# - Force the maximum values to be 1
initial_ar_absolute_weights = initial_ar_absolute_weights/initial_ar_absolute_weights.max()
# Compute the normalized AR weights
initial_ar_weights = initial_ar_absolute_weights/initial_ar_absolute_weights.sum()
else:
raise NotImplementedError("This option has been not considered.")
##--------------------------------------------------------------------.
# Check that factor and step_interval are not negative
if factor is not None:
if factor < 0:
raise ValueError("Provide a factor between 0 and 1.")
if step_interval is not None:
if step_interval <= 0:
raise ValueError("'step_interval' must be an integer value equal or larger than 1.")
##---------------------------------------------------------------------.
# Check required method arguments are specified
if method in ['StepwiseStep','HalfStep']:
if step_interval is None:
raise ValueError("'{}' method requires specification of the 'step_interval' argument".format(method))
if method in ['HalfStep','StepwiseStep','LinearStep','ExponentialStep']:
if factor is None:
raise ValueError("'{}' method requires specification of the 'factor' argument".format(method))
if method in ['Constant', 'DiracDelta']:
smooth_growth = False
##---------------------------------------------------------------------.
# Count the number of AR iteration (at start)
current_ar_iterations = len(initial_ar_weights) - 1
self.current_ar_iterations = current_ar_iterations
# Set absolute AR weights
self.ar_absolute_weights = initial_ar_absolute_weights
# Set ar_weights (normalized AR weights)
self.ar_weights = initial_ar_weights
# Set initial AR absolute weights (for fixed weights) and 'LinearDecay' and 'ExponentialDecay'
self.ar_absolute_initial_weights = self.ar_absolute_weights.copy()
##--------------------------------------------------------------------.
# Add method arguments
self.method = method
self.step_interval = step_interval
self.factor = factor
self.smooth_growth = smooth_growth
self.fixed_ar_weights = fixed_ar_weights
##--------------------------------------------------------------------.
# Initialize temporary step counter
# - For 'StepwiseDecay' and 'HalfDecay' method --> step_interval
self.temporary_step_count = 0
##--------------------------------------------------------------------.
# - Initialize global step counter
# - For 'LinearDecay' and 'ExponentialDecay'
self.global_step_count_arr = np.zeros(current_ar_iterations+1)
##--------------------------------------------------------------------.
### Define the update_weights function
fun_dict = {'Constant': _ConstantStep,
'DiracDelta': _DiracDeltaStep,
'StepwiseStep': _StepwiseStep,
'HalfStep': _HalfStep,
'LinearStep': _LinearStep,
'ExponentialStep': _ExponentialStep,
}
self.update_weights = fun_dict[method]
##--------------------------------------------------------------------.
def step(self):
"""Update AR weights."""
# Update step count
self.temporary_step_count = self.temporary_step_count + 1 # for 'StepwiseDecay' and 'HalfDecay'
self.global_step_count_arr = self.global_step_count_arr + 1 # for 'LinearDecay' and 'ExponentialDecay'
##---------------------------------------------------------------------.
if self.current_ar_iterations > 0:
# - Update weights
self.update_weights(self)
# - Refix the value of fixed AR weights
if self.fixed_ar_weights is not None:
tmp_fixed_ar_weights = self.fixed_ar_weights[self.fixed_ar_weights < self.current_ar_iterations]
self.ar_absolute_weights[tmp_fixed_ar_weights] = self.ar_absolute_initial_weights[tmp_fixed_ar_weights]
##---------------------------------------------------------------------.
# Retrieve normalized AR weights (summing up to 1)
self.ar_weights = np.array(self.ar_absolute_weights)/np.sum(self.ar_absolute_weights)
def update(self):
"""Add an ar_absolute_weight with value 1."""
# Update the number of AR iterations
self.current_ar_iterations = self.current_ar_iterations + 1
# Add a new AR weight
if not self.smooth_growth: # ... with (absolute) value 1
self.ar_absolute_weights = np.append(self.ar_absolute_weights, 1)
self.ar_absolute_initial_weights = np.append(self.ar_absolute_initial_weights, 1)
else: # start at 0 (or factor for ExponentialStep, HalfStep)
# Update current last weight value (for ExponentialStep and LInearStep)
self.ar_absolute_initial_weights[-1] = self.ar_absolute_weights[-1]
# Add new weight
self.ar_absolute_initial_weights = np.append(self.ar_absolute_initial_weights, 0)
self.ar_absolute_weights = np.append(self.ar_absolute_weights, 0)
##---------------------------------------------------------------------.
# If DiracDelta weight update method is choosen, set to 0 the other weights
if self.method == "DiracDelta":
self.ar_absolute_weights[:-1] = 0
##---------------------------------------------------------------------.
# Update normalization of AR weights
self.ar_weights = np.array(self.ar_absolute_weights)/np.sum(self.ar_absolute_weights)
##---------------------------------------------------------------------.
# Update the step count array (--> For LinearDecay and ExponentialDecay)
self.global_step_count_arr[-1] = 0 # Reset the last (because will start to decay)
self.global_step_count_arr = np.append(self.global_step_count_arr, 0)
#----------------------------------------------------------------------------.
def plot_AR_scheduler(ar_scheduler,
n_updates=4,
update_every=15,
plot_absolute_ar_weights=True,
plot_normalized_ar_weights=True):
n_initial_ar_weights = len(ar_scheduler.ar_weights)
n_final_ar_weights = n_initial_ar_weights + n_updates
### Initialize dictionary
ar_weights_per_ar_iteration = {}
for i in range(n_final_ar_weights + 1):
ar_weights_per_ar_iteration[i] = {}
ar_weights_per_ar_iteration[i]['iteration'] = []
ar_weights_per_ar_iteration[i]['ar_absolute_weights'] = []
ar_weights_per_ar_iteration[i]['ar_weights'] = []
# Simulate AR weights step() and update()
iteration = 0
for u in range(n_updates+1):
for i in range(update_every+1):
current_ar_iterations = len(ar_scheduler.ar_weights) - 1
for ar_iteration in range(current_ar_iterations+1):
ar_weights_per_ar_iteration[ar_iteration]['iteration'].append(iteration)
ar_weights_per_ar_iteration[ar_iteration]['ar_absolute_weights'].append(ar_scheduler.ar_absolute_weights[ar_iteration])
ar_weights_per_ar_iteration[ar_iteration]['ar_weights'].append(ar_scheduler.ar_weights[ar_iteration])
ar_scheduler.step()
iteration = iteration + 1
ar_scheduler.update()
##------------------------------------------------------------------------.
### Visualize AR weights
method = ar_scheduler.method
prop_cycle = plt.rcParams['axes.prop_cycle']
colors = prop_cycle.by_key()['color']
custom_cycler = cycler(linestyle=['-', '--', ':', '-.','-', '--', ':', '-.','-', '--'],
color=colors)
if plot_absolute_ar_weights:
fig, ax = plt.subplots()
ax.set_prop_cycle(custom_cycler)
for ar_iteration in range(n_final_ar_weights+1):
plt.plot(ar_weights_per_ar_iteration[ar_iteration]['iteration'],
ar_weights_per_ar_iteration[ar_iteration]['ar_absolute_weights'],
antialiased = True)
ax.set_xlabel("Iteration")
plt.title("Absolute AR weights ({})".format(method))
ax.legend(labels=list(range(n_final_ar_weights+1)), loc='upper right')
plt.show()
if plot_normalized_ar_weights:
fig, ax = plt.subplots()
ax.set_prop_cycle(custom_cycler)
for ar_iteration in range(n_final_ar_weights+1):
plt.plot(ar_weights_per_ar_iteration[ar_iteration]['iteration'],
ar_weights_per_ar_iteration[ar_iteration]['ar_weights'],
antialiased = True)
ax.set_xlabel("Iteration")
plt.title("Normalized AR weights ({})".format(method))
ax.legend(labels=list(range(n_final_ar_weights+1)), loc='upper right')
plt.show()
##----------------------------------------------------------------------------.
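# Minimal usage sketch (runs only when this module is executed directly; the factor
# and counts below are arbitrary illustration values). With the 'LinearStep' method,
# .update() appends a new AR weight, .step() linearly decays the older absolute
# weights by 'factor' per call, and ar_weights stays normalized to sum to 1.
if __name__ == "__main__":
    scheduler = AR_Scheduler(method="LinearStep", factor=0.01, smooth_growth=False)
    print(scheduler.ar_weights)             # [1]: a single AR iteration at start
    scheduler.update()                      # enable a second AR iteration (absolute weight 1)
    for _ in range(10):
        scheduler.step()
    print(scheduler.ar_absolute_weights)    # [0.9, 1.0]: older weight decayed by 10 * 0.01
    print(scheduler.ar_weights)             # normalized version of the above
    # Visual check of longer weight trajectories
    plot_AR_scheduler(AR_Scheduler(method="LinearStep", factor=0.01),
                      n_updates=3, update_every=25)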
|
the-stack_106_17398
|
# Authors: Yousra Bekhti <[email protected]>
# Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
import numpy as np
from scipy import linalg
from numpy.testing import assert_array_equal, assert_equal
import mne
from mne.datasets import testing
from mne.beamformer import rap_music
from mne.cov import regularize
from mne.utils import run_tests_if_main
data_path = testing.data_path(download=False)
fname_ave = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif')
fname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-cov.fif')
fname_fwd = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
def _get_data(ch_decim=1):
"""Read in data used in tests."""
# Read evoked
evoked = mne.read_evokeds(fname_ave, 0, baseline=(None, 0))
evoked.info['bads'] = ['MEG 2443']
evoked.info['lowpass'] = 20 # fake for decim
evoked.decimate(12)
evoked.crop(0.0, 0.3)
picks = mne.pick_types(evoked.info, meg=True, eeg=False)
picks = picks[::ch_decim]
evoked.pick_channels([evoked.ch_names[pick] for pick in picks])
evoked.info.normalize_proj()
noise_cov = mne.read_cov(fname_cov)
noise_cov['projs'] = []
noise_cov = regularize(noise_cov, evoked.info, rank='full', proj=False)
return evoked, noise_cov
def simu_data(evoked, forward, noise_cov, n_dipoles, times, nave=1):
"""Simulate an evoked dataset with 2 sources.
One source is put in each hemisphere.
"""
# Generate the two dipoles data
mu, sigma = 0.1, 0.005
s1 = 1 / (sigma * np.sqrt(2 * np.pi)) * np.exp(-(times - mu) ** 2 /
(2 * sigma ** 2))
mu, sigma = 0.075, 0.008
s2 = -1 / (sigma * np.sqrt(2 * np.pi)) * np.exp(-(times - mu) ** 2 /
(2 * sigma ** 2))
data = np.array([s1, s2]) * 1e-9
src = forward['src']
rng = np.random.RandomState(42)
rndi = rng.randint(len(src[0]['vertno']))
lh_vertno = src[0]['vertno'][[rndi]]
rndi = rng.randint(len(src[1]['vertno']))
rh_vertno = src[1]['vertno'][[rndi]]
vertices = [lh_vertno, rh_vertno]
tmin, tstep = times.min(), 1 / evoked.info['sfreq']
stc = mne.SourceEstimate(data, vertices=vertices, tmin=tmin, tstep=tstep)
sim_evoked = mne.simulation.simulate_evoked(forward, stc, evoked.info,
noise_cov, nave=nave,
random_state=rng)
return sim_evoked, stc
def _check_dipoles(dipoles, fwd, stc, evoked, residual=None):
src = fwd['src']
pos1 = fwd['source_rr'][np.where(src[0]['vertno'] ==
stc.vertices[0])]
pos2 = fwd['source_rr'][np.where(src[1]['vertno'] ==
stc.vertices[1])[0] +
len(src[0]['vertno'])]
# Check the position of the two dipoles
assert (dipoles[0].pos[0] in np.array([pos1, pos2]))
assert (dipoles[1].pos[0] in np.array([pos1, pos2]))
ori1 = fwd['source_nn'][np.where(src[0]['vertno'] ==
stc.vertices[0])[0]][0]
ori2 = fwd['source_nn'][np.where(src[1]['vertno'] ==
stc.vertices[1])[0] +
len(src[0]['vertno'])][0]
# Check the orientation of the dipoles
assert (np.max(np.abs(np.dot(dipoles[0].ori[0],
np.array([ori1, ori2]).T))) > 0.99)
assert (np.max(np.abs(np.dot(dipoles[1].ori[0],
np.array([ori1, ori2]).T))) > 0.99)
if residual is not None:
picks_grad = mne.pick_types(residual.info, meg='grad')
picks_mag = mne.pick_types(residual.info, meg='mag')
rel_tol = 0.02
for picks in [picks_grad, picks_mag]:
assert (linalg.norm(residual.data[picks], ord='fro') <
rel_tol * linalg.norm(evoked.data[picks], ord='fro'))
@testing.requires_testing_data
def test_rap_music_simulated():
"""Test RAP-MUSIC with simulated evoked."""
evoked, noise_cov = _get_data(ch_decim=16)
forward = mne.read_forward_solution(fname_fwd)
forward = mne.pick_channels_forward(forward, evoked.ch_names)
forward_surf_ori = mne.convert_forward_solution(forward, surf_ori=True)
forward_fixed = mne.convert_forward_solution(forward, force_fixed=True,
surf_ori=True, use_cps=True)
n_dipoles = 2
sim_evoked, stc = simu_data(evoked, forward_fixed, noise_cov,
n_dipoles, evoked.times, nave=evoked.nave)
# Check dipoles for fixed ori
dipoles = rap_music(sim_evoked, forward_fixed, noise_cov,
n_dipoles=n_dipoles)
_check_dipoles(dipoles, forward_fixed, stc, sim_evoked)
assert (0.97 < dipoles[0].gof.max() < 1.)
assert (dipoles[0].gof.min() >= 0.)
assert_array_equal(dipoles[0].gof, dipoles[1].gof)
nave = 100000 # add a tiny amount of noise to the simulated evokeds
sim_evoked, stc = simu_data(evoked, forward_fixed, noise_cov,
n_dipoles, evoked.times, nave=nave)
dipoles, residual = rap_music(sim_evoked, forward_fixed, noise_cov,
n_dipoles=n_dipoles, return_residual=True)
_check_dipoles(dipoles, forward_fixed, stc, sim_evoked, residual)
# Check dipoles for free ori
dipoles, residual = rap_music(sim_evoked, forward, noise_cov,
n_dipoles=n_dipoles, return_residual=True)
_check_dipoles(dipoles, forward_fixed, stc, sim_evoked, residual)
# Check dipoles for free surface ori
dipoles, residual = rap_music(sim_evoked, forward_surf_ori, noise_cov,
n_dipoles=n_dipoles, return_residual=True)
_check_dipoles(dipoles, forward_fixed, stc, sim_evoked, residual)
@testing.requires_testing_data
def test_rap_music_sphere():
"""Test RAP-MUSIC with real data, sphere model, MEG only."""
evoked, noise_cov = _get_data(ch_decim=8)
sphere = mne.make_sphere_model(r0=(0., 0., 0.04))
src = mne.setup_volume_source_space(subject=None, pos=10.,
sphere=(0.0, 0.0, 40, 65.0),
mindist=5.0, exclude=0.0)
forward = mne.make_forward_solution(evoked.info, trans=None, src=src,
bem=sphere)
dipoles = rap_music(evoked, forward, noise_cov, n_dipoles=2)
# Test that there is one dipole on each hemisphere
pos = np.array([dip.pos[0] for dip in dipoles])
assert_equal(pos.shape, (2, 3))
assert_equal((pos[:, 0] < 0).sum(), 1)
assert_equal((pos[:, 0] > 0).sum(), 1)
# Check the amplitude scale
assert (1e-10 < dipoles[0].amplitude[0] < 1e-7)
# Check the orientation
dip_fit = mne.fit_dipole(evoked, noise_cov, sphere)[0]
assert (np.max(np.abs(np.dot(dip_fit.ori, dipoles[0].ori[0]))) > 0.99)
assert (np.max(np.abs(np.dot(dip_fit.ori, dipoles[1].ori[0]))) > 0.99)
@testing.requires_testing_data
def test_rap_music_picks():
"""Test RAP-MUSIC with picking."""
evoked = mne.read_evokeds(fname_ave, condition='Right Auditory',
baseline=(None, 0))
evoked.crop(tmin=0.05, tmax=0.15) # select N100
evoked.pick_types(meg=True, eeg=False)
forward = mne.read_forward_solution(fname_fwd)
noise_cov = mne.read_cov(fname_cov)
dipoles = rap_music(evoked, forward, noise_cov, n_dipoles=2)
assert len(dipoles) == 2
run_tests_if_main()
|
the-stack_106_17401
|
#!/usr/bin/env python
"""
Hillslope model with block uplift.
"""
import sys
import time
from matplotlib.pyplot import axis
from numpy import amax, arange, count_nonzero, logical_and, where, zeros
from landlab.ca.boundaries.hex_lattice_tectonicizer import LatticeUplifter
from landlab.ca.celllab_cts import Transition
from .cts_model import CTSModel
from .lattice_grain import lattice_grain_node_states, lattice_grain_transition_list
_DEBUG = False
def plot_hill(grid, filename=None, array=None, cmap=None, show=True):
"""Generate a plot of the modeled hillslope."""
import matplotlib as mpl
import matplotlib.pyplot as plt
# Set color map
if cmap is None:
rock = "#5F594D"
sed = "#A4874B"
sky = "#D0E4F2"
mob = "#D98859"
clist = [sky, mob, mob, mob, mob, mob, mob, sed, rock]
cmap = mpl.colors.ListedColormap(clist)
if array is None:
array = grid.at_node["node_state"]
# Generate the plot
ax = grid.hexplot(array, color_map=cmap)
ax.set_aspect("equal")
# If applicable, save to file. Otherwise display the figure.
# (Note: the latter option freezes execution until user dismisses window)
if filename is not None:
plt.savefig(filename, bbox_inches="tight")
plt.clf()
print("Figure saved to " + filename)
elif show:
plt.show()
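# Illustrative usage sketch (comment only; 'gh' stands for a hypothetical GrainHill
# instance and the filename is arbitrary): save the current node states to a PNG
# instead of opening an interactive window.
#
#     plot_hill(gh.grid, filename='grain_hill_0001.png')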
class GrainHill(CTSModel):
"""
Model hillslope evolution with block uplift.
"""
def __init__(
self,
grid_size,
report_interval=1.0e8,
run_duration=1.0,
output_interval=1.0e99,
settling_rate=2.2e8,
disturbance_rate=1.0,
weathering_rate=1.0,
dissolution_rate=0.0,
uplift_interval=1.0,
plot_interval=1.0e99,
friction_coef=0.3,
rock_state_for_uplift=7,
opt_rock_collapse=False,
show_plots=True,
initial_state_grid=None,
opt_track_grains=False,
prop_data=None,
prop_reset_value=None,
callback_fn=None,
closed_boundaries=(False, False, False, False),
**kwds
):
"""Call the initialize() method."""
self.initializer(
grid_size,
report_interval,
run_duration,
output_interval,
settling_rate,
disturbance_rate,
weathering_rate,
dissolution_rate,
uplift_interval,
plot_interval,
friction_coef,
rock_state_for_uplift,
opt_rock_collapse,
show_plots,
initial_state_grid,
opt_track_grains,
prop_data,
prop_reset_value,
callback_fn,
closed_boundaries,
**kwds
)
def initializer(
self,
grid_size,
report_interval,
run_duration,
output_interval,
settling_rate,
disturbance_rate,
weathering_rate,
dissolution_rate,
uplift_interval,
plot_interval,
friction_coef,
rock_state_for_uplift,
opt_rock_collapse,
show_plots,
initial_state_grid,
opt_track_grains,
prop_data,
prop_reset_value,
callback_fn,
closed_boundaries,
**kwds
):
"""Initialize the grain hill model."""
self.settling_rate = settling_rate
self.disturbance_rate = disturbance_rate
self.weathering_rate = weathering_rate
self.dissolution_rate = dissolution_rate
self.uplift_interval = uplift_interval
self.plot_interval = plot_interval
self.friction_coef = friction_coef
self.rock_state = rock_state_for_uplift # 7 (resting sed) or 8 (rock)
self.opt_track_grains = opt_track_grains
self.callback_fn = callback_fn
if opt_rock_collapse:
self.collapse_rate = self.settling_rate
else:
self.collapse_rate = 0.0
# Call base class init
super().initialize(
grid_size=grid_size,
report_interval=report_interval,
grid_orientation="vertical",
grid_shape="rect",
show_plots=show_plots,
cts_type="oriented_hex",
run_duration=run_duration,
output_interval=output_interval,
initial_state_grid=initial_state_grid,
prop_data=prop_data,
prop_reset_value=prop_reset_value,
closed_boundaries=closed_boundaries,
**kwds
)
# Set some things related to property-swapping and/or callback fn
# if the user wants to track grain motion.
# if opt_track_grains:
# propid = self.ca.propid
# else:
# propid = None
self.uplifter = LatticeUplifter(
self.grid,
self.grid.at_node["node_state"],
propid=self.ca.propid,
prop_data=self.ca.prop_data,
prop_reset_value=self.ca.prop_reset_value,
)
self.initialize_timing(
output_interval, plot_interval, uplift_interval, report_interval
)
def initialize_timing(
self, output_interval, plot_interval, uplift_interval, report_interval
):
"""Set up variables related to timing of uplift, output, reporting"""
self.current_time = 0.0
# Next time for output to file
self.next_output = output_interval
# Next time for a plot
if self._show_plots:
self.next_plot = plot_interval
else:
self.next_plot = self.run_duration + 1
# Next time for a progress report to user
self.next_report = report_interval
# Next time to add baselevel adjustment
self.next_uplift = uplift_interval
# Iteration numbers, for output files
self.output_iteration = 1
def node_state_dictionary(self):
"""
Create and return dict of node states.
Overrides base-class method. Here, we simply call on a function in
the lattice_grain module.
"""
return lattice_grain_node_states()
def transition_list(self):
"""
Make and return list of Transition object.
"""
xn_list = lattice_grain_transition_list(
g=self.settling_rate,
f=self.friction_coef,
motion=self.settling_rate,
swap=self.opt_track_grains,
callback=self.callback_fn,
)
xn_list = self.add_weathering_and_disturbance_transitions(
xn_list,
self.disturbance_rate,
self.weathering_rate,
self.dissolution_rate,
collapse_rate=self.collapse_rate,
)
return xn_list
def add_weathering_and_disturbance_transitions(
self,
xn_list,
d=0.0,
w=0.0,
diss=0.0,
collapse_rate=0.0,
swap=False,
callback=None,
):
"""
Add transition rules representing weathering and/or grain disturbance
to the list, and return the list.
Parameters
----------
xn_list : list of Transition objects
List of objects that encode information about the link-state
transitions. Normally should first be initialized with lattice-grain
transition rules, then passed to this function to add rules for
weathering and disturbance.
d : float (optional)
Rate of transition (1/time) from fluid / resting grain pair to
mobile-grain / fluid pair, representing grain disturbance.
w : float (optional)
Rate of transition (1/time) from fluid / rock pair to
fluid / resting-grain pair, representing weathering.
diss : float (optional)
Dissolution rate: transition rate from fluid / rock pair to
fluid / fluid pair.
Returns
-------
xn_list : list of Transition objects
Modified transition list.
"""
# Disturbance rule
if d > 0.0:
xn_list.append(
Transition((7, 0, 0), (0, 1, 0), d, "disturbance", swap, callback)
)
xn_list.append(
Transition((7, 0, 1), (0, 2, 1), d, "disturbance", swap, callback)
)
xn_list.append(
Transition((7, 0, 2), (0, 3, 2), d, "disturbance", swap, callback)
)
xn_list.append(
Transition((0, 7, 0), (4, 0, 0), d, "disturbance", swap, callback)
)
xn_list.append(
Transition((0, 7, 1), (5, 0, 1), d, "disturbance", swap, callback)
)
xn_list.append(
Transition((0, 7, 2), (6, 0, 2), d, "disturbance", swap, callback)
)
# Weathering rule
if w > 0.0:
xn_list.append(Transition((8, 0, 0), (7, 0, 0), w, "weathering"))
xn_list.append(Transition((8, 0, 1), (7, 0, 1), w, "weathering"))
xn_list.append(Transition((8, 0, 2), (7, 0, 2), w, "weathering"))
xn_list.append(Transition((0, 8, 0), (0, 7, 0), w, "weathering"))
xn_list.append(Transition((0, 8, 1), (0, 7, 1), w, "weathering"))
xn_list.append(Transition((0, 8, 2), (0, 7, 2), w, "weathering"))
# "Vertical rock collapse" rule: a rock particle overlying air
# will collapse, transitioning to a downward-moving grain
if collapse_rate > 0.0:
xn_list.append(
Transition(
(0, 8, 0),
(4, 0, 0),
collapse_rate,
"rock collapse",
swap,
callback,
)
)
# Dissolution rule
if diss > 0.0:
xn_list.append(Transition((8, 0, 0), (0, 0, 0), diss, "dissolution"))
xn_list.append(Transition((8, 0, 1), (0, 0, 1), diss, "dissolution"))
xn_list.append(Transition((8, 0, 2), (0, 0, 2), diss, "dissolution"))
xn_list.append(Transition((0, 8, 0), (0, 0, 0), diss, "dissolution"))
xn_list.append(Transition((0, 8, 1), (0, 0, 1), diss, "dissolution"))
xn_list.append(Transition((0, 8, 2), (0, 0, 2), diss, "dissolution"))
if _DEBUG:
print()
print(
"setup_transition_list(): list has "
+ str(len(xn_list))
+ " transitions:"
)
for t in xn_list:
print(
" From state "
+ str(t.from_state)
+ " to state "
+ str(t.to_state)
+ " at rate "
+ str(t.rate)
+ " called "
+ str(t.name)
)
return xn_list
def initialize_node_state_grid(self):
"""Set up initial node states.
Examples
--------
>>> gh = GrainHill((5, 7), show_plots=False)
>>> gh.grid.at_node['node_state'] # doctest: +NORMALIZE_WHITESPACE
array([8, 7, 7, 8, 7, 7, 7, 0, 7, 7, 0, 7, 7, 7, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
"""
# For shorthand, get a reference to the node-state grid and to x coord
nsg = self.grid.at_node["node_state"]
nodex = self.grid.node_x
# Fill the bottom two rows with grains
right_side_x = 0.866025403784 * (self.grid.number_of_node_columns - 1)
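        # 0.866025... is sqrt(3)/2, the horizontal spacing between node
        # columns in this hex grid, so right_side_x is the x coordinate of
        # the rightmost column (which is excluded from grain filling below).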
for i in range(self.grid.number_of_nodes):
if self.grid.node_y[i] < 2.0:
if nodex[i] > 0.0 and nodex[i] < right_side_x:
nsg[i] = 7
# Place "wall" particles in the lower-left and lower-right corners
if self.grid.number_of_node_columns % 2 == 0:
bottom_right = self.grid.number_of_node_columns - 1
else:
bottom_right = self.grid.number_of_node_columns // 2
nsg[0] = 8 # bottom left
nsg[bottom_right] = 8
return nsg
def run(self, to=None):
"""Run the model."""
if to is None:
run_to = self.run_duration
else:
run_to = to
while self.current_time < run_to:
# Figure out what time to run to this iteration
next_pause = min(self.next_output, self.next_plot)
next_pause = min(next_pause, self.next_uplift)
next_pause = min(next_pause, run_to)
# Once in a while, print out simulation and real time to let the
# user know that the sim is running ok
current_real_time = time.time()
if current_real_time >= self.next_report:
print(
"Current sim time "
+ str(self.current_time)
+ " ("
+ str(100 * self.current_time / self.run_duration)
+ "%)"
)
self.next_report = current_real_time + self.report_interval
# Run until next pause
self.ca.run(next_pause, self.ca.node_state)
self.current_time = next_pause
# Handle output to file
if self.current_time >= self.next_output:
self.write_output(self.grid, "grain_hill_model", self.output_iteration)
self.output_iteration += 1
self.next_output += self.output_interval
# Handle plotting on display
if self._show_plots and self.current_time >= self.next_plot:
self.ca_plotter.update_plot()
axis("off")
self.next_plot += self.plot_interval
# Handle uplift
if self.current_time >= self.next_uplift:
self.uplifter.uplift_interior_nodes(
self.ca, self.current_time, rock_state=self.rock_state
)
self.next_uplift += self.uplift_interval
def get_profile_and_soil_thickness(self, grid, data):
"""Calculate and return profiles of elevation and soil thickness.
Examples
--------
>>> from landlab import HexModelGrid
>>> hg = HexModelGrid((4, 5), node_layout='rect', orientation='vertical')
>>> ns = hg.add_zeros("node_state", at="node", dtype=int)
>>> ns[[0, 3, 1, 6, 4, 9, 2]] = 8
>>> ns[[8, 13, 11, 16, 14]] = 7
>>> gh = GrainHill((3, 7), show_plots=False) # grid size arbitrary here
>>> (elev, thickness) = gh.get_profile_and_soil_thickness(hg, ns)
>>> list(elev)
[0.0, 2.5, 3.0, 2.5, 0.0]
>>> list(thickness)
[0.0, 2.0, 2.0, 1.0, 0.0]
"""
nc = grid.number_of_node_columns
elev = zeros(nc)
soil = zeros(nc)
for col in range(nc):
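            # Map the profile column to its bottom-row node id: with this
            # layout, even-numbered columns occupy the first (nc + 1) // 2
            # node ids of each row and odd-numbered columns follow them.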
base_id = (col // 2) + (col % 2) * ((nc + 1) // 2)
node_ids = arange(base_id, grid.number_of_nodes, nc)
states = data[node_ids]
(rows_with_rock_or_sed,) = where(states > 0)
if len(rows_with_rock_or_sed) == 0:
elev[col] = 0.0
else:
elev[col] = amax(rows_with_rock_or_sed) + 0.5 * (col % 2)
soil[col] = count_nonzero(logical_and(states > 0, states < 8))
return elev, soil
def get_params_from_input_file(filename):
"""Fetch parameter values from input file."""
from landlab.core import load_params
mpd_params = load_params(filename)
return mpd_params
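# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). The parameter names
# below are inferred from main() and initialize_timing(); whether GrainHill
# accepts all of them as keyword arguments is an assumption.
# ---------------------------------------------------------------------------
def example_params():
    """Return an illustrative parameter dict that could be passed to main()."""
    return {
        "number_of_node_rows": 50,       # grid height (rows)
        "number_of_node_columns": 80,    # grid width (columns)
        "run_duration": 1.0e5,           # assumed keyword; referenced as self.run_duration
        "output_interval": 1.0e4,        # passed to initialize_timing()
        "plot_interval": 1.0e4,
        "uplift_interval": 1.0e3,
        "report_interval": 10.0,         # wall-clock seconds between progress reports
        "show_plots": False,             # matches the doctest usage of GrainHill
    }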
def main(params):
"""Initialize model with dict of params then run it."""
grid_size = (
int(params["number_of_node_rows"]),
int(params["number_of_node_columns"]),
)
grain_hill_model = GrainHill(grid_size, **params)
grain_hill_model.run()
# Temporary: save last image to file
import matplotlib.pyplot as plt
plt.savefig("grain_hill_final.png")
if __name__ == "__main__":
"""Executes model."""
try:
infile = sys.argv[1]
except IndexError:
print("Must include input file name on command line")
sys.exit(1)
params = get_params_from_input_file(infile)
main(params)
|
the-stack_106_17403
|
# coding: utf-8
# pylint: disable = invalid-name, W0105
"""Training Library containing training routines of LightGBM."""
from __future__ import absolute_import
import collections
from operator import attrgetter
import numpy as np
from . import callback
from .basic import Booster, Dataset, LightGBMError, _InnerPredictor
from .compat import (SKLEARN_INSTALLED, LGBMStratifiedKFold, integer_types,
range_, string_type)
def train(params, train_set, num_boost_round=100,
valid_sets=None, valid_names=None,
fobj=None, feval=None, init_model=None,
feature_name='auto', categorical_feature='auto',
early_stopping_rounds=None, evals_result=None,
verbose_eval=True, learning_rates=None, callbacks=None):
"""
Train with given parameters.
Parameters
----------
params : dict
Parameters for training.
train_set : Dataset
Data to be trained.
num_boost_round: int
Number of boosting iterations.
valid_sets: list of Datasets
List of data to be evaluated during training
valid_names: list of string
Names of valid_sets
fobj : function
Customized objective function.
feval : function
Customized evaluation function.
        Note: should return (eval_name, eval_result, is_higher_better) or a list of such tuples
    init_model : file name of lightgbm model or 'Booster' instance
        Model used for continued training.
feature_name : list of str, or 'auto'
Feature names
If 'auto' and data is pandas DataFrame, use data columns name
categorical_feature : list of str or int, or 'auto'
Categorical features,
type int represents index,
type str represents feature names (need to specify feature_name as well)
If 'auto' and data is pandas DataFrame, use pandas categorical columns
    early_stopping_rounds: int
        Activates early stopping.
        Requires at least one validation set and one metric.
        If there is more than one, all of them will be checked.
        The returned model will have (best_iter + early_stopping_rounds) boosting rounds.
        If early stopping occurs, the model will add a 'best_iteration' field.
    evals_result: dict or None
        Dictionary used to store all evaluation results of the items in valid_sets.
        Example: with valid_sets containing [valid_set, train_set],
        valid_names containing ['eval', 'train']
        and params containing 'metric': 'logloss',
        evals_result will be filled with {'train': {'logloss': ['0.48253', '0.35953', ...]},
        'eval': {'logloss': ['0.480385', '0.357756', ...]}}.
        Pass None to disable storing evaluation results.
    verbose_eval : bool or int
        Requires at least one item in valid_sets.
        If `verbose_eval` is True,
        the eval metric on the valid set is printed at each boosting stage.
        If `verbose_eval` is an int,
        the eval metric on the valid set is printed at every `verbose_eval` boosting stages.
        The last boosting stage
        or the boosting stage found by using `early_stopping_rounds` is also printed.
        Example: with verbose_eval=4 and at least one item in valid_sets,
        an evaluation metric is printed every 4 (instead of 1) boosting stages.
learning_rates: list or function
List of learning rate for each boosting round
or a customized function that calculates learning_rate
in terms of current number of round (e.g. yields learning rate decay)
- list l: learning_rate = l[current_round]
- function f: learning_rate = f(current_round)
callbacks : list of callback functions
List of callback functions that are applied at each iteration.
See Callbacks in Python-API.md for more information.
Returns
-------
    booster : Booster
        The trained Booster model.
"""
"""create predictor first"""
if isinstance(init_model, string_type):
predictor = _InnerPredictor(model_file=init_model)
elif isinstance(init_model, Booster):
predictor = init_model._to_predictor()
else:
predictor = None
init_iteration = predictor.num_total_iteration if predictor is not None else 0
"""check dataset"""
if not isinstance(train_set, Dataset):
raise TypeError("Training only accepts Dataset object")
train_set._update_params(params)
train_set._set_predictor(predictor)
train_set.set_feature_name(feature_name)
train_set.set_categorical_feature(categorical_feature)
is_valid_contain_train = False
train_data_name = "training"
reduced_valid_sets = []
name_valid_sets = []
if valid_sets is not None:
if isinstance(valid_sets, Dataset):
valid_sets = [valid_sets]
if isinstance(valid_names, string_type):
valid_names = [valid_names]
for i, valid_data in enumerate(valid_sets):
"""reduce cost for prediction training data"""
if valid_data is train_set:
is_valid_contain_train = True
if valid_names is not None:
train_data_name = valid_names[i]
continue
if not isinstance(valid_data, Dataset):
                raise TypeError("Training only accepts Dataset object")
valid_data.set_reference(train_set)
reduced_valid_sets.append(valid_data)
if valid_names is not None and len(valid_names) > i:
name_valid_sets.append(valid_names[i])
else:
name_valid_sets.append('valid_' + str(i))
for valid_data in valid_sets:
valid_data._update_params(params)
"""process callbacks"""
if callbacks is None:
callbacks = set()
else:
for i, cb in enumerate(callbacks):
cb.__dict__.setdefault('order', i - len(callbacks))
callbacks = set(callbacks)
    # Most legacy advanced options become callbacks
if verbose_eval is True:
callbacks.add(callback.print_evaluation())
elif isinstance(verbose_eval, integer_types):
callbacks.add(callback.print_evaluation(verbose_eval))
if early_stopping_rounds is not None:
callbacks.add(callback.early_stopping(early_stopping_rounds, verbose=bool(verbose_eval)))
if learning_rates is not None:
callbacks.add(callback.reset_parameter(learning_rate=learning_rates))
if evals_result is not None:
callbacks.add(callback.record_evaluation(evals_result))
callbacks_before_iter = {cb for cb in callbacks if getattr(cb, 'before_iteration', False)}
callbacks_after_iter = callbacks - callbacks_before_iter
callbacks_before_iter = sorted(callbacks_before_iter, key=attrgetter('order'))
callbacks_after_iter = sorted(callbacks_after_iter, key=attrgetter('order'))
"""construct booster"""
booster = Booster(params=params, train_set=train_set)
if is_valid_contain_train:
booster.set_train_data_name(train_data_name)
for valid_set, name_valid_set in zip(reduced_valid_sets, name_valid_sets):
booster.add_valid(valid_set, name_valid_set)
booster.best_iteration = -1
"""start training"""
for i in range_(init_iteration, init_iteration + num_boost_round):
for cb in callbacks_before_iter:
cb(callback.CallbackEnv(model=booster,
params=params,
iteration=i,
begin_iteration=init_iteration,
end_iteration=init_iteration + num_boost_round,
evaluation_result_list=None))
booster.update(fobj=fobj)
evaluation_result_list = []
# check evaluation result.
if valid_sets is not None:
if is_valid_contain_train:
evaluation_result_list.extend(booster.eval_train(feval))
evaluation_result_list.extend(booster.eval_valid(feval))
try:
for cb in callbacks_after_iter:
cb(callback.CallbackEnv(model=booster,
params=params,
iteration=i,
begin_iteration=init_iteration,
end_iteration=init_iteration + num_boost_round,
evaluation_result_list=evaluation_result_list))
except callback.EarlyStopException as earlyStopException:
booster.best_iteration = earlyStopException.best_iteration + 1
break
return booster
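# ---------------------------------------------------------------------------
# Hedged usage sketch of train() (illustrative only; the synthetic data and
# parameter values below are made up for demonstration).
# ---------------------------------------------------------------------------
def _example_train_usage():
    """Minimal sketch: train a binary classifier with one validation set."""
    X = np.random.rand(500, 10)
    y = np.random.randint(0, 2, size=500)
    dtrain = Dataset(X[:400], label=y[:400])
    dvalid = Dataset(X[400:], label=y[400:], reference=dtrain)
    evals_result = {}
    booster = train(
        {'objective': 'binary', 'metric': 'binary_logloss'},
        dtrain,
        num_boost_round=50,
        valid_sets=[dvalid],
        valid_names=['valid'],
        early_stopping_rounds=5,
        evals_result=evals_result,
    )
    # evals_result now maps 'valid' -> {'binary_logloss': [...]}
    return booster, evals_result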
class CVBooster(object):
""""Auxiliary data struct to hold all boosters of CV."""
def __init__(self):
self.boosters = []
self.best_iteration = -1
def append(self, booster):
"""add a booster to CVBooster"""
self.boosters.append(booster)
def __getattr__(self, name):
"""redirect methods call of CVBooster"""
def handlerFunction(*args, **kwargs):
"""call methods with each booster, and concatenate their results"""
ret = []
for booster in self.boosters:
ret.append(getattr(booster, name)(*args, **kwargs))
return ret
return handlerFunction
def _make_n_folds(full_data, data_splitter, nfold, params, seed, fpreproc=None, stratified=False, shuffle=True):
"""
Make an n-fold list of Booster from random indices.
"""
num_data = full_data.construct().num_data()
if data_splitter is not None:
if not hasattr(data_splitter, 'split'):
raise AttributeError("data_splitter has no method 'split'")
folds = data_splitter.split(np.arange(num_data))
elif stratified:
if not SKLEARN_INSTALLED:
raise LightGBMError('Scikit-learn is required for stratified cv')
sfk = LGBMStratifiedKFold(n_splits=nfold, shuffle=shuffle, random_state=seed)
folds = sfk.split(X=np.zeros(num_data), y=full_data.get_label())
else:
if shuffle:
randidx = np.random.RandomState(seed).permutation(num_data)
else:
randidx = np.arange(num_data)
kstep = int(num_data / nfold)
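        # Manual k-fold: slice the (optionally shuffled) indices into nfold
        # contiguous test chunks and use the complement of each chunk as the
        # corresponding training fold.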
test_id = [randidx[i: i + kstep] for i in range_(0, num_data, kstep)]
train_id = [np.concatenate([test_id[i] for i in range_(nfold) if k != i]) for k in range_(nfold)]
folds = zip(train_id, test_id)
ret = CVBooster()
for train_idx, test_idx in folds:
train_set = full_data.subset(train_idx)
valid_set = full_data.subset(test_idx)
# run preprocessing on the data set if needed
if fpreproc is not None:
train_set, valid_set, tparam = fpreproc(train_set, valid_set, params.copy())
else:
tparam = params
cvbooster = Booster(tparam, train_set)
cvbooster.add_valid(valid_set, 'valid')
ret.append(cvbooster)
return ret
def _agg_cv_result(raw_results):
"""
Aggregate cross-validation results.
"""
cvmap = collections.defaultdict(list)
metric_type = {}
for one_result in raw_results:
for one_line in one_result:
metric_type[one_line[1]] = one_line[3]
cvmap[one_line[1]].append(one_line[2])
return [('cv_agg', k, np.mean(v), metric_type[k], np.std(v)) for k, v in cvmap.items()]
def cv(params, train_set, num_boost_round=10,
data_splitter=None, nfold=5, stratified=False, shuffle=True,
metrics=None, fobj=None, feval=None, init_model=None,
feature_name='auto', categorical_feature='auto',
early_stopping_rounds=None, fpreproc=None,
verbose_eval=None, show_stdv=True, seed=0,
callbacks=None):
"""
    Cross-validation with given parameters.
Parameters
----------
params : dict
Booster params.
train_set : Dataset
Data to be trained.
num_boost_round : int
Number of boosting iterations.
    data_splitter : object or None
        Instance with a split(X) method used to generate custom folds.
nfold : int
Number of folds in CV.
stratified : bool
Perform stratified sampling.
shuffle: bool
        Whether to shuffle the data before splitting.
metrics : string or list of strings
Evaluation metrics to be watched in CV.
fobj : function
Custom objective function.
feval : function
Custom evaluation function.
    init_model : file name of lightgbm model or 'Booster' instance
        Model used for continued training.
feature_name : list of str, or 'auto'
Feature names
If 'auto' and data is pandas DataFrame, use data columns name
categorical_feature : list of str or int, or 'auto'
Categorical features,
type int represents index,
type str represents feature names (need to specify feature_name as well)
If 'auto' and data is pandas DataFrame, use pandas categorical columns
early_stopping_rounds: int
Activates early stopping. CV error needs to decrease at least
every <early_stopping_rounds> round(s) to continue.
Last entry in evaluation history is the one from best iteration.
fpreproc : function
Preprocessing function that takes (dtrain, dtest, param)
and returns transformed versions of those.
verbose_eval : bool, int, or None, default None
Whether to display the progress.
If None, progress will be displayed when np.ndarray is returned.
        If True, progress will be displayed at every boosting stage.
If an integer is given,
progress will be displayed at every given `verbose_eval` boosting stage.
show_stdv : bool, default True
Whether to display the standard deviation in progress.
        Results are not affected; they always contain the standard deviation.
seed : int
Seed used to generate the folds (passed to numpy.random.seed).
callbacks : list of callback functions
List of callback functions that are applied at each iteration.
See Callbacks in Python-API.md for more information.
Returns
-------
    eval_hist : dict
        Evaluation history: maps '<metric>-mean' and '<metric>-stdv' keys to
        lists of per-round aggregated values.
"""
if not isinstance(train_set, Dataset):
        raise TypeError("Training only accepts Dataset object")
if isinstance(init_model, string_type):
predictor = _InnerPredictor(model_file=init_model)
elif isinstance(init_model, Booster):
predictor = init_model._to_predictor()
else:
predictor = None
train_set._update_params(params)
train_set._set_predictor(predictor)
train_set.set_feature_name(feature_name)
train_set.set_categorical_feature(categorical_feature)
if metrics:
params.setdefault('metric', [])
if isinstance(metrics, string_type):
params['metric'].append(metrics)
else:
params['metric'].extend(metrics)
results = collections.defaultdict(list)
cvfolds = _make_n_folds(train_set, data_splitter=data_splitter,
nfold=nfold, params=params, seed=seed,
fpreproc=fpreproc, stratified=stratified,
shuffle=shuffle)
# setup callbacks
if callbacks is None:
callbacks = set()
else:
for i, cb in enumerate(callbacks):
cb.__dict__.setdefault('order', i - len(callbacks))
callbacks = set(callbacks)
if early_stopping_rounds is not None:
callbacks.add(callback.early_stopping(early_stopping_rounds, verbose=False))
if verbose_eval is True:
callbacks.add(callback.print_evaluation(show_stdv=show_stdv))
elif isinstance(verbose_eval, integer_types):
callbacks.add(callback.print_evaluation(verbose_eval, show_stdv=show_stdv))
callbacks_before_iter = {cb for cb in callbacks if getattr(cb, 'before_iteration', False)}
callbacks_after_iter = callbacks - callbacks_before_iter
callbacks_before_iter = sorted(callbacks_before_iter, key=attrgetter('order'))
callbacks_after_iter = sorted(callbacks_after_iter, key=attrgetter('order'))
for i in range_(num_boost_round):
for cb in callbacks_before_iter:
cb(callback.CallbackEnv(model=cvfolds,
params=params,
iteration=i,
begin_iteration=0,
end_iteration=num_boost_round,
evaluation_result_list=None))
cvfolds.update(fobj=fobj)
res = _agg_cv_result(cvfolds.eval_valid(feval))
for _, key, mean, _, std in res:
results[key + '-mean'].append(mean)
results[key + '-stdv'].append(std)
try:
for cb in callbacks_after_iter:
cb(callback.CallbackEnv(model=cvfolds,
params=params,
iteration=i,
begin_iteration=0,
end_iteration=num_boost_round,
evaluation_result_list=res))
except callback.EarlyStopException as earlyStopException:
cvfolds.best_iteration = earlyStopException.best_iteration + 1
for k in results:
results[k] = results[k][:cvfolds.best_iteration]
break
return dict(results)
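# ---------------------------------------------------------------------------
# Hedged usage sketch of cv() (illustrative only; the data and parameter
# values below are made up for demonstration).
# ---------------------------------------------------------------------------
def _example_cv_usage():
    """Minimal sketch: 3-fold stratified CV for a binary objective."""
    X = np.random.rand(300, 5)
    y = np.random.randint(0, 2, size=300)
    dtrain = Dataset(X, label=y)
    hist = cv(
        {'objective': 'binary', 'metric': 'binary_logloss'},
        dtrain,
        num_boost_round=20,
        nfold=3,
        stratified=True,   # requires scikit-learn, as checked in _make_n_folds
        seed=42,
    )
    # hist maps 'binary_logloss-mean' and 'binary_logloss-stdv' to per-round lists
    return hist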
|
the-stack_106_17404
|
import torch
import torch.nn as nn
def MAEAUC_approx(x, x_hat, y, lambda_auc):
# Computing error for each row
err = torch.abs(x - x_hat).mean(axis = (1, 2))
    # Split the per-sample errors by label: samples labelled 1 are treated as
    # normal (err_n), samples labelled greater than 1 as anomalous (err_a)
    err_n = err[y == 1]
    err_a = err[y > 1]
n_a = (err_a.shape)[0]
n_n = (err_n.shape)[0]
    # If there are anomalous examples, compute the AUC penalty
if n_a > 0:
diff = err_a.view(-1, 1).unsqueeze(1) - err_n.view(-1, 1)
exp = torch.sigmoid(diff).sum()
auc = lambda_auc * exp / (n_a * n_n)
mean_loss = err.mean()
penalized_loss = err.mean() - auc
return penalized_loss, mean_loss
    else:
        # No anomalous samples, so no AUC penalty can be computed. Note that
        # this branch returns a single tensor, unlike the (penalized_loss,
        # mean_loss) tuple returned above.
        mean_loss = err.mean()
        return mean_loss
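# Note on the penalty above: it is a differentiable surrogate for AUC. For
# every (anomalous, normal) pair it computes sigmoid(err_a - err_n), roughly
# the probability that the anomalous sample has the larger reconstruction
# error, and averages over the n_a * n_n pairs; subtracting lambda_auc times
# this term from the mean loss therefore pushes anomalous errors above
# normal ones.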
class MAEAUCLoss(nn.Module):
def __init__(self):
super().__init__()
self.loss = MAEAUC_approx
def forward(self, x_hat, x_true, y, lambda_auc):
loss = self.loss(x_hat, x_true, y, lambda_auc)
return loss
if __name__ == '__main__':
# lambda_auc in {0,0.1,1,10,100,1000,10000} with MSE error
x = torch.rand([10,2,3])
x_hat = torch.rand([10,2,3]) +.2
y = torch.tensor([0,0,0,0,0,0,1,1,1,1])
loss = MAEAUCLoss()
print(loss(x, x_hat, y, 10))
print(MAEAUC_approx(x, x_hat, y, 10))
|
the-stack_106_17406
|
""" Module for common functions """
import io
import logging
import json
import os
from json_validator.validator import JsonValidator
from sap.cf_logging.formatters.json_formatter import JsonFormatter
from sap.cf_logging.core.constants import \
LOG_SENSITIVE_CONNECTION_DATA, LOG_REMOTE_USER, LOG_REFERER
from tests.schema_util import extend
def check_log_record(stream, schema, expected):
""" Using the JsonValidator check that the data in the stream
matches the expected output
"""
log_json = stream.getvalue()
log_object = json.JSONDecoder().decode(log_json)
expected_json = extend(schema, expected)
_, error = JsonValidator(expected_json).validate(log_object)
print('----------------------------------------------->')
print(log_json)
print('<-----------------------------------------------')
return error
def config_logger(logger_name):
""" Function to configure a JSONLogger and print the output into a stream"""
stream = io.StringIO()
stream_handler = logging.StreamHandler(stream)
stream_handler.setFormatter(JsonFormatter())
logger = logging.getLogger(logger_name)
logger.addHandler(stream_handler)
return logger, stream
def enable_sensitive_fields_logging():
""" sets a few logging related env vars """
os.environ[LOG_SENSITIVE_CONNECTION_DATA] = 'true'
os.environ[LOG_REMOTE_USER] = 'true'
os.environ[LOG_REFERER] = 'true'
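# Hedged usage sketch (illustrative; SCHEMA and the expected dict are
# placeholders, and whether check_log_record returns None on a match depends
# on JsonValidator's behaviour):
#
#     logger, stream = config_logger('test.logger')
#     logger.info('hello')
#     error = check_log_record(stream, SCHEMA, {'msg': 'hello'})
#     assert error is None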
|
the-stack_106_17407
|
#!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# for py2/py3 compatibility
import sys
action = sys.argv[1]
if action in ["help", "-h", "--help"] or len(sys.argv) != 3:
print(("Usage: %s <action> <inputfile>, where action can be: \n"
"help Print this message\n"
"plain Print ASCII tree to stdout\n"
"dot Print dot file to stdout\n"
"count Count most frequent transition reasons\n" % sys.argv[0]))
sys.exit(0)
filename = sys.argv[2]
maps = {}
root_maps = []
transitions = {}
annotations = {}
class Map(object):
def __init__(self, pointer, origin):
self.pointer = pointer
self.origin = origin
def __str__(self):
return "%s (%s)" % (self.pointer, self.origin)
class Transition(object):
def __init__(self, from_map, to_map, reason):
self.from_map = from_map
self.to_map = to_map
self.reason = reason
def RegisterNewMap(raw_map):
if raw_map in annotations:
annotations[raw_map] += 1
else:
annotations[raw_map] = 0
return AnnotateExistingMap(raw_map)
def AnnotateExistingMap(raw_map):
return "%s_%d" % (raw_map, annotations[raw_map])
def AddMap(pointer, origin):
pointer = RegisterNewMap(pointer)
maps[pointer] = Map(pointer, origin)
return pointer
def AddTransition(from_map, to_map, reason):
from_map = AnnotateExistingMap(from_map)
to_map = AnnotateExistingMap(to_map)
if from_map not in transitions:
transitions[from_map] = {}
targets = transitions[from_map]
if to_map in targets:
# Some events get printed twice, that's OK. In some cases, ignore the
# second output...
old_reason = targets[to_map].reason
if old_reason.startswith("ReplaceDescriptors"):
return
# ...and in others use it for additional detail.
if reason in []:
targets[to_map].reason = reason
return
# Unexpected duplicate events? Warn.
print(("// warning: already have a transition from %s to %s, reason: %s" %
(from_map, to_map, targets[to_map].reason)))
return
targets[to_map] = Transition(from_map, to_map, reason)
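# Hedged note on the expected input (the addresses and values below are
# illustrative, not real V8 output). The parser assumes the log was produced
# with --trace-maps and reads lines such as:
#   [TraceMaps: InitialMap map= 0x1234 SFI= 42 ]
#   [TraceMaps: Transition from= 0x1234 to= 0x5678 name= foo ]
# splitting each line on single spaces, so the field layout must match exactly.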
with open(filename, "r") as f:
last_to_map = ""
for line in f:
if not line.startswith("[TraceMaps: "): continue
words = line.split(" ")
event = words[1]
if event == "InitialMap":
assert words[2] == "map="
assert words[4] == "SFI="
new_map = AddMap(words[3], "SFI#%s" % words[5])
root_maps.append(new_map)
continue
if words[2] == "from=" and words[4] == "to=":
from_map = words[3]
to_map = words[5]
if from_map not in annotations:
print(("// warning: unknown from_map %s" % from_map))
new_map = AddMap(from_map, "<unknown>")
root_maps.append(new_map)
if to_map != last_to_map:
AddMap(to_map, "<transition> (%s)" % event)
last_to_map = to_map
if event in ["Transition", "NoTransition"]:
assert words[6] == "name=", line
reason = "%s: %s" % (event, words[7])
elif event in ["Normalize", "ReplaceDescriptors", "SlowToFast"]:
assert words[6] == "reason=", line
reason = "%s: %s" % (event, words[7])
if words[8].strip() != "]":
reason = "%s_%s" % (reason, words[8])
else:
reason = event
AddTransition(from_map, to_map, reason)
continue
def PlainPrint(m, indent, label):
print(("%s%s (%s)" % (indent, m, label)))
if m in transitions:
for t in transitions[m]:
PlainPrint(t, indent + " ", transitions[m][t].reason)
def CountTransitions(m):
if m not in transitions: return 0
return len(transitions[m])
def DotPrint(m, label):
print(("m%s [label=\"%s\"]" % (m[2:], label)))
if m in transitions:
for t in transitions[m]:
# GraphViz doesn't like node labels looking like numbers, so use
# "m..." instead of "0x...".
print(("m%s -> m%s" % (m[2:], t[2:])))
reason = transitions[m][t].reason
reason = reason.replace("\\", "BACKSLASH")
reason = reason.replace("\"", "\\\"")
DotPrint(t, reason)
if action == "plain":
root_maps = sorted(root_maps, key=CountTransitions, reverse=True)
for m in root_maps:
PlainPrint(m, "", maps[m].origin)
elif action == "dot":
print("digraph g {")
for m in root_maps:
DotPrint(m, maps[m].origin)
print("}")
elif action == "count":
reasons = {}
for s in transitions:
for t in transitions[s]:
reason = transitions[s][t].reason
if reason not in reasons:
reasons[reason] = 1
else:
reasons[reason] += 1
reasons_list = []
for r in reasons:
reasons_list.append("%8d %s" % (reasons[r], r))
reasons_list.sort(reverse=True)
for r in reasons_list[:20]:
print(r)
|
the-stack_106_17409
|
# -*-coding:utf-8-*-
"""
Author: yinshunyao
Date: 2019/7/31 9:33 PM
Tests for bbox
"""
from ai_tool.bbox import BBox, BBoxes
import unittest
class BBoxTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.bbox1 = BBox([0, 0, 100, 100])
cls.bbox2 = BBox([0, 0, 120, 120])
def test_iou(self):
from ai_tool.bbox import BBox
bbox1 = BBox([1, 2, 101, 102])
bbox2 = BBox([11, 12, 121, 122])
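        # BBox overloads the division operator: bbox1 / bbox2 yields their
        # intersection-over-union (IoU) score.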
iou = bbox1 / bbox2
print("iou", iou)
assert iou > 0.5
print('box1 S is', bbox1.S)
print('box1 & box2', bbox1 & bbox2)
print('box1 == box2', bbox1 == bbox2)
print('merge box1 + box2', bbox1 + bbox2)
print('merge box1 | box2', bbox1 | bbox2)
class BBoxesTest(unittest.TestCase):
def test_bboxes(self):
from ai_tool.bbox import BBoxes, BBox
bb1 = BBoxes(iou_thresh=0.6)
bb2 = BBoxes()
bb1.append([1,2, 101, 102])
bb1.append([1000, 2, 1101, 102])
bb2.append([11, 12, 111, 112])
bb2.append([1, 1002, 101, 1102])
        # check whether each bbox is contained in bb1
print("[5, 5, 100, 100] in bb1", BBox([5, 5, 100, 100]) in bb1)
print("[100, 5, 200, 100] in bb1", BBox([100, 5, 200, 100]) in bb1)
# bb1 & bb2
print("bb1 & bb2", bb1 & bb2)
print("bb1 - bb2", bb1 - bb2)
print("bb2 - bb1", bb2 - bb1)
|
the-stack_106_17416
|
import datetime
import re
import unittest
import pytest
from bson import ObjectId
from mongoengine import *
from mongoengine.errors import InvalidQueryError
from mongoengine.queryset import Q
class TestQ(unittest.TestCase):
def setUp(self):
connect(db="mongoenginetest")
class Person(Document):
name = StringField()
age = IntField()
meta = {"allow_inheritance": True}
Person.drop_collection()
self.Person = Person
def test_empty_q(self):
"""Ensure that empty Q objects won't hurt."""
q1 = Q()
q2 = Q(age__gte=18)
q3 = Q()
q4 = Q(name="test")
q5 = Q()
class Person(Document):
name = StringField()
age = IntField()
query = {"$or": [{"age": {"$gte": 18}}, {"name": "test"}]}
assert (q1 | q2 | q3 | q4 | q5).to_query(Person) == query
query = {"age": {"$gte": 18}, "name": "test"}
assert (q1 & q2 & q3 & q4 & q5).to_query(Person) == query
def test_q_with_dbref(self):
"""Ensure Q objects handle DBRefs correctly"""
connect(db="mongoenginetest")
class User(Document):
pass
class Post(Document):
created_user = ReferenceField(User)
user = User.objects.create()
Post.objects.create(created_user=user)
assert Post.objects.filter(created_user=user).count() == 1
assert Post.objects.filter(Q(created_user=user)).count() == 1
def test_and_combination(self):
"""Ensure that Q-objects correctly AND together."""
class TestDoc(Document):
x = IntField()
y = StringField()
query = (Q(x__lt=7) & Q(x__lt=3)).to_query(TestDoc)
assert query == {"$and": [{"x": {"$lt": 7}}, {"x": {"$lt": 3}}]}
query = (Q(y="a") & Q(x__lt=7) & Q(x__lt=3)).to_query(TestDoc)
assert query == {"$and": [{"y": "a"}, {"x": {"$lt": 7}}, {"x": {"$lt": 3}}]}
# Check normal cases work without an error
query = Q(x__lt=7) & Q(x__gt=3)
q1 = Q(x__lt=7)
q2 = Q(x__gt=3)
query = (q1 & q2).to_query(TestDoc)
assert query == {"x": {"$lt": 7, "$gt": 3}}
# More complex nested example
query = Q(x__lt=100) & Q(y__ne="NotMyString")
query &= Q(y__in=["a", "b", "c"]) & Q(x__gt=-100)
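        # Conditions on the same fields ('x' and 'y') are merged into single
        # dicts rather than being wrapped in an explicit $and, as asserted below.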
mongo_query = {
"x": {"$lt": 100, "$gt": -100},
"y": {"$ne": "NotMyString", "$in": ["a", "b", "c"]},
}
assert query.to_query(TestDoc) == mongo_query
def test_or_combination(self):
"""Ensure that Q-objects correctly OR together."""
class TestDoc(Document):
x = IntField()
q1 = Q(x__lt=3)
q2 = Q(x__gt=7)
query = (q1 | q2).to_query(TestDoc)
assert query == {"$or": [{"x": {"$lt": 3}}, {"x": {"$gt": 7}}]}
def test_and_or_combination(self):
"""Ensure that Q-objects handle ANDing ORed components."""
class TestDoc(Document):
x = IntField()
y = BooleanField()
TestDoc.drop_collection()
query = Q(x__gt=0) | Q(x__exists=False)
query &= Q(x__lt=100)
assert query.to_query(TestDoc) == {
"$and": [
{"$or": [{"x": {"$gt": 0}}, {"x": {"$exists": False}}]},
{"x": {"$lt": 100}},
]
}
q1 = Q(x__gt=0) | Q(x__exists=False)
q2 = Q(x__lt=100) | Q(y=True)
query = (q1 & q2).to_query(TestDoc)
TestDoc(x=101).save()
TestDoc(x=10).save()
TestDoc(y=True).save()
assert query == {
"$and": [
{"$or": [{"x": {"$gt": 0}}, {"x": {"$exists": False}}]},
{"$or": [{"x": {"$lt": 100}}, {"y": True}]},
]
}
assert 2 == TestDoc.objects(q1 & q2).count()
def test_or_and_or_combination(self):
"""Ensure that Q-objects handle ORing ANDed ORed components. :)"""
class TestDoc(Document):
x = IntField()
y = BooleanField()
TestDoc.drop_collection()
TestDoc(x=-1, y=True).save()
TestDoc(x=101, y=True).save()
TestDoc(x=99, y=False).save()
TestDoc(x=101, y=False).save()
q1 = Q(x__gt=0) & (Q(y=True) | Q(y__exists=False))
q2 = Q(x__lt=100) & (Q(y=False) | Q(y__exists=False))
query = (q1 | q2).to_query(TestDoc)
assert query == {
"$or": [
{
"$and": [
{"x": {"$gt": 0}},
{"$or": [{"y": True}, {"y": {"$exists": False}}]},
]
},
{
"$and": [
{"x": {"$lt": 100}},
{"$or": [{"y": False}, {"y": {"$exists": False}}]},
]
},
]
}
assert 2 == TestDoc.objects(q1 | q2).count()
    def test_multiple_occurrence_in_field(self):
class Test(Document):
name = StringField(max_length=40)
title = StringField(max_length=40)
q1 = Q(name__contains="te") | Q(title__contains="te")
q2 = Q(name__contains="12") | Q(title__contains="12")
q3 = q1 & q2
query = q3.to_query(Test)
assert query["$and"][0] == q1.to_query(Test)
assert query["$and"][1] == q2.to_query(Test)
def test_q_clone(self):
class TestDoc(Document):
x = IntField()
TestDoc.drop_collection()
for i in range(1, 101):
t = TestDoc(x=i)
t.save()
# Check normal cases work without an error
test = TestDoc.objects(Q(x__lt=7) & Q(x__gt=3))
assert test.count() == 3
test2 = test.clone()
assert test2.count() == 3
assert test2 != test
test3 = test2.filter(x=6)
assert test3.count() == 1
assert test.count() == 3
def test_q(self):
"""Ensure that Q objects may be used to query for documents."""
class BlogPost(Document):
title = StringField()
publish_date = DateTimeField()
published = BooleanField()
BlogPost.drop_collection()
post1 = BlogPost(
title="Test 1", publish_date=datetime.datetime(2010, 1, 8), published=False
)
post1.save()
post2 = BlogPost(
title="Test 2", publish_date=datetime.datetime(2010, 1, 15), published=True
)
post2.save()
post3 = BlogPost(title="Test 3", published=True)
post3.save()
post4 = BlogPost(title="Test 4", publish_date=datetime.datetime(2010, 1, 8))
post4.save()
post5 = BlogPost(title="Test 1", publish_date=datetime.datetime(2010, 1, 15))
post5.save()
post6 = BlogPost(title="Test 1", published=False)
post6.save()
# Check ObjectId lookup works
obj = BlogPost.objects(id=post1.id).first()
assert obj == post1
        # Check Q object combination where one of the conditions matches nothing
q = BlogPost.objects(Q(title="Test 5") | Q(published=True))
posts = [post.id for post in q]
published_posts = (post2, post3)
assert all(obj.id in posts for obj in published_posts)
q = BlogPost.objects(Q(title="Test 1") | Q(published=True))
posts = [post.id for post in q]
published_posts = (post1, post2, post3, post5, post6)
assert all(obj.id in posts for obj in published_posts)
# Check Q object combination
date = datetime.datetime(2010, 1, 10)
q = BlogPost.objects(Q(publish_date__lte=date) | Q(published=True))
posts = [post.id for post in q]
published_posts = (post1, post2, post3, post4)
assert all(obj.id in posts for obj in published_posts)
assert not any(obj.id in posts for obj in [post5, post6])
BlogPost.drop_collection()
# Check the 'in' operator
self.Person(name="user1", age=20).save()
self.Person(name="user2", age=20).save()
self.Person(name="user3", age=30).save()
self.Person(name="user4", age=40).save()
assert self.Person.objects(Q(age__in=[20])).count() == 2
assert self.Person.objects(Q(age__in=[20, 30])).count() == 3
# Test invalid query objs
with pytest.raises(InvalidQueryError):
self.Person.objects("user1")
# filter should fail, too
with pytest.raises(InvalidQueryError):
self.Person.objects.filter("user1")
def test_q_regex(self):
"""Ensure that Q objects can be queried using regexes."""
person = self.Person(name="Guido van Rossum")
person.save()
obj = self.Person.objects(Q(name=re.compile("^Gui"))).first()
assert obj == person
obj = self.Person.objects(Q(name=re.compile("^gui"))).first()
assert obj is None
obj = self.Person.objects(Q(name=re.compile("^gui", re.I))).first()
assert obj == person
obj = self.Person.objects(Q(name__not=re.compile("^bob"))).first()
assert obj == person
obj = self.Person.objects(Q(name__not=re.compile("^Gui"))).first()
assert obj is None
def test_q_repr(self):
assert repr(Q()) == "Q(**{})"
assert repr(Q(name="test")) == "Q(**{'name': 'test'})"
assert (
repr(Q(name="test") & Q(age__gte=18))
== "(Q(**{'name': 'test'}) & Q(**{'age__gte': 18}))"
)
assert (
repr(Q(name="test") | Q(age__gte=18))
== "(Q(**{'name': 'test'}) | Q(**{'age__gte': 18}))"
)
def test_q_lists(self):
"""Ensure that Q objects query ListFields correctly."""
class BlogPost(Document):
tags = ListField(StringField())
BlogPost.drop_collection()
BlogPost(tags=["python", "mongo"]).save()
BlogPost(tags=["python"]).save()
assert BlogPost.objects(Q(tags="mongo")).count() == 1
assert BlogPost.objects(Q(tags="python")).count() == 2
BlogPost.drop_collection()
def test_q_merge_queries_edge_case(self):
class User(Document):
email = EmailField(required=False)
name = StringField()
User.drop_collection()
pk = ObjectId()
User(email="[email protected]", pk=pk).save()
assert (
1
== User.objects.filter(Q(email="[email protected]") | Q(name="John Doe"))
.limit(2)
.filter(pk=pk)
.count()
)
def test_chained_q_or_filtering(self):
class Post(EmbeddedDocument):
name = StringField(required=True)
class Item(Document):
postables = ListField(EmbeddedDocumentField(Post))
Item.drop_collection()
Item(postables=[Post(name="a"), Post(name="b")]).save()
Item(postables=[Post(name="a"), Post(name="c")]).save()
Item(postables=[Post(name="a"), Post(name="b"), Post(name="c")]).save()
assert (
Item.objects(Q(postables__name="a") & Q(postables__name="b")).count() == 2
)
assert (
Item.objects.filter(postables__name="a").filter(postables__name="b").count()
== 2
)
def test_equality(self):
assert Q(name="John") == Q(name="John")
assert Q() == Q()
def test_inequality(self):
assert Q(name="John") != Q(name="Ralph")
def test_operation_equality(self):
q1 = Q(name="John") | Q(title="Sir") & Q(surname="Paul")
q2 = Q(name="John") | Q(title="Sir") & Q(surname="Paul")
assert q1 == q2
def test_operation_inequality(self):
q1 = Q(name="John") | Q(title="Sir")
q2 = Q(title="Sir") | Q(name="John")
assert q1 != q2
def test_combine_and_empty(self):
q = Q(x=1)
assert q & Q() == q
assert Q() & q == q
def test_combine_and_both_empty(self):
assert Q() & Q() == Q()
def test_combine_or_empty(self):
q = Q(x=1)
assert q | Q() == q
assert Q() | q == q
def test_combine_or_both_empty(self):
assert Q() | Q() == Q()
def test_q_bool(self):
assert Q(name="John")
assert not Q()
def test_combine_bool(self):
assert not Q() & Q()
assert Q() & Q(name="John")
assert Q(name="John") & Q()
assert Q() | Q(name="John")
assert Q(name="John") | Q()
if __name__ == "__main__":
unittest.main()
|
the-stack_106_17417
|
"""Tests for the Ambiclimate config flow."""
import ambiclimate
from homeassistant import data_entry_flow
from homeassistant.components.ambiclimate import config_flow
from homeassistant.const import CONF_CLIENT_ID, CONF_CLIENT_SECRET
from homeassistant.setup import async_setup_component
from homeassistant.util import aiohttp
from tests.async_mock import AsyncMock, patch
async def init_config_flow(hass):
"""Init a configuration flow."""
await async_setup_component(
hass, "http", {"http": {"base_url": "https://hass.com"}}
)
config_flow.register_flow_implementation(hass, "id", "secret")
flow = config_flow.AmbiclimateFlowHandler()
flow.hass = hass
return flow
async def test_abort_if_no_implementation_registered(hass):
"""Test we abort if no implementation is registered."""
flow = config_flow.AmbiclimateFlowHandler()
flow.hass = hass
result = await flow.async_step_user()
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "missing_configuration"
async def test_abort_if_already_setup(hass):
"""Test we abort if Ambiclimate is already setup."""
flow = await init_config_flow(hass)
with patch.object(hass.config_entries, "async_entries", return_value=[{}]):
result = await flow.async_step_user()
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
with patch.object(hass.config_entries, "async_entries", return_value=[{}]):
result = await flow.async_step_code()
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_full_flow_implementation(hass):
"""Test registering an implementation and finishing flow works."""
config_flow.register_flow_implementation(hass, None, None)
flow = await init_config_flow(hass)
result = await flow.async_step_user()
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "auth"
assert (
result["description_placeholders"]["cb_url"]
== "https://hass.com/api/ambiclimate"
)
url = result["description_placeholders"]["authorization_url"]
assert "https://api.ambiclimate.com/oauth2/authorize" in url
assert "client_id=id" in url
assert "response_type=code" in url
assert "redirect_uri=https%3A%2F%2Fhass.com%2Fapi%2Fambiclimate" in url
with patch("ambiclimate.AmbiclimateOAuth.get_access_token", return_value="test"):
result = await flow.async_step_code("123ABC")
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "Ambiclimate"
assert result["data"]["callback_url"] == "https://hass.com/api/ambiclimate"
assert result["data"][CONF_CLIENT_SECRET] == "secret"
assert result["data"][CONF_CLIENT_ID] == "id"
with patch("ambiclimate.AmbiclimateOAuth.get_access_token", return_value=None):
result = await flow.async_step_code("123ABC")
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
with patch(
"ambiclimate.AmbiclimateOAuth.get_access_token",
side_effect=ambiclimate.AmbiclimateOauthError(),
):
result = await flow.async_step_code("123ABC")
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
async def test_abort_invalid_code(hass):
"""Test if no code is given to step_code."""
config_flow.register_flow_implementation(hass, None, None)
flow = await init_config_flow(hass)
with patch("ambiclimate.AmbiclimateOAuth.get_access_token", return_value=None):
result = await flow.async_step_code("invalid")
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "access_token"
async def test_already_setup(hass):
"""Test when already setup."""
config_flow.register_flow_implementation(hass, None, None)
flow = await init_config_flow(hass)
with patch.object(hass.config_entries, "async_entries", return_value=True):
result = await flow.async_step_user()
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_view(hass):
"""Test view."""
hass.config_entries.flow.async_init = AsyncMock()
request = aiohttp.MockRequest(
b"", query_string="code=test_code", mock_source="test"
)
request.app = {"hass": hass}
view = config_flow.AmbiclimateAuthCallbackView()
assert await view.get(request) == "OK!"
request = aiohttp.MockRequest(b"", query_string="", mock_source="test")
request.app = {"hass": hass}
view = config_flow.AmbiclimateAuthCallbackView()
assert await view.get(request) == "No code"
|
the-stack_106_17418
|
from typing import Any, Dict, Optional, Type, Union
import numpy as np
import torch as th
from gym import spaces
from torch.nn import functional as F
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from stable_baselines3.common import logger
from stable_baselines3.common.on_policy_algorithm import OnPolicyAlgorithmCustom
from stable_baselines3.common.policies import ActorCriticPolicy
from stable_baselines3.common.masked_mse_loss import MaskedMSELoss
from stable_baselines3.common.masked_mse_loss import MaskedABSLoss
from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule
from stable_baselines3.common.utils import explained_variance, get_schedule_fn
def plot_grad_flow(named_parameters):
    '''Plot the gradients flowing through the different layers of the net during training.
    Can be used to check for possible vanishing / exploding gradient problems.
    Usage: call this function in the training loop after loss.backward(), e.g.
    "plot_grad_flow(self.model.named_parameters())", to visualize the gradient flow.'''
ave_grads = []
max_grads= []
layers = []
for n, p in named_parameters:
if(p.requires_grad) and ("bias" not in n):
layers.append(n)
ave_grads.append(p.grad.abs().mean())
max_grads.append(p.grad.abs().max())
plt.bar(np.arange(len(max_grads)), max_grads, alpha=0.1, lw=1, color="c")
plt.bar(np.arange(len(max_grads)), ave_grads, alpha=0.1, lw=1, color="b")
plt.hlines(0, 0, len(ave_grads)+1, lw=2, color="k" )
plt.xticks(range(0,len(ave_grads), 1), layers, rotation="vertical")
plt.xlim(left=0, right=len(ave_grads))
plt.ylim(bottom = -0.001, top=0.02) # zoom in on the lower gradient regions
plt.xlabel("Layers")
plt.ylabel("average gradient")
plt.title("Gradient flow")
plt.grid(True)
plt.legend([Line2D([0], [0], color="c", lw=4),
Line2D([0], [0], color="b", lw=4),
Line2D([0], [0], color="k", lw=4)], ['max-gradient', 'mean-gradient', 'zero-gradient'])
class PPO(OnPolicyAlgorithmCustom):
"""
Proximal Policy Optimization algorithm (PPO) (clip version)
Paper: https://arxiv.org/abs/1707.06347
    Code: This implementation borrows code from OpenAI Spinning Up (https://github.com/openai/spinningup/),
    https://github.com/ikostrikov/pytorch-a2c-ppo-acktr-gail and
    Stable Baselines (PPO2 from https://github.com/hill-a/stable-baselines)
Introduction to PPO: https://spinningup.openai.com/en/latest/algorithms/ppo.html
:param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)
:param env: The environment to learn from (if registered in Gym, can be str)
:param learning_rate: The learning rate, it can be a function
of the current progress remaining (from 1 to 0)
:param n_steps: The number of steps to run for each environment per update
(i.e. batch size is n_steps * n_env where n_env is number of environment copies running in parallel)
:param batch_size: Minibatch size
:param n_epochs: Number of epoch when optimizing the surrogate loss
:param gamma: Discount factor
:param gae_lambda: Factor for trade-off of bias vs variance for Generalized Advantage Estimator
:param clip_range: Clipping parameter, it can be a function of the current progress
remaining (from 1 to 0).
:param clip_range_vf: Clipping parameter for the value function,
it can be a function of the current progress remaining (from 1 to 0).
This is a parameter specific to the OpenAI implementation. If None is passed (default),
no clipping will be done on the value function.
IMPORTANT: this clipping depends on the reward scaling.
:param ent_coef: Entropy coefficient for the loss calculation
:param vf_coef: Value function coefficient for the loss calculation
:param max_grad_norm: The maximum value for the gradient clipping
:param use_sde: Whether to use generalized State Dependent Exploration (gSDE)
instead of action noise exploration (default: False)
:param sde_sample_freq: Sample a new noise matrix every n steps when using gSDE
Default: -1 (only sample at the beginning of the rollout)
:param target_kl: Limit the KL divergence between updates,
because the clipping is not enough to prevent large update
see issue #213 (cf https://github.com/hill-a/stable-baselines/issues/213)
By default, there is no limit on the kl div.
:param tensorboard_log: the log location for tensorboard (if None, no logging)
:param create_eval_env: Whether to create a second environment that will be
used for evaluating the agent periodically. (Only available when passing string for the environment)
:param policy_kwargs: additional arguments to be passed to the policy on creation
:param verbose: the verbosity level: 0 no output, 1 info, 2 debug
:param seed: Seed for the pseudo random generators
:param device: Device (cpu, cuda, ...) on which the code should be run.
Setting it to auto, the code will be run on the GPU if possible.
:param _init_setup_model: Whether or not to build the network at the creation of the instance
"""
def __init__(
self,
policy: Union[str, Type[ActorCriticPolicy]],
env: Union[GymEnv, str],
learning_rate: Union[float, Schedule] = 3e-4,
n_steps: int = 2048,
batch_size: Optional[int] = 64,
n_epochs: int = 10,
gamma: float = 0.99,
gae_lambda: float = 0.95,
clip_range: Union[float, Schedule] = 0.2,
clip_range_vf: Union[None, float, Schedule] = None,
ent_coef: float = 0.0,
vf_coef: float = 0.5,
max_grad_norm: float = 0.5,
use_sde: bool = False,
sde_sample_freq: int = -1,
target_kl: Optional[float] = None,
tensorboard_log: Optional[str] = None,
create_eval_env: bool = False,
policy_kwargs: Optional[Dict[str, Any]] = None,
verbose: int = 0,
seed: Optional[int] = None,
device: Union[th.device, str] = "auto",
_init_setup_model: bool = True,
):
super(PPO, self).__init__(
policy,
env,
learning_rate=learning_rate,
n_steps=n_steps,
gamma=gamma,
gae_lambda=gae_lambda,
ent_coef=ent_coef,
vf_coef=vf_coef,
max_grad_norm=max_grad_norm,
use_sde=use_sde,
sde_sample_freq=sde_sample_freq,
tensorboard_log=tensorboard_log,
policy_kwargs=policy_kwargs,
verbose=verbose,
device=device,
create_eval_env=create_eval_env,
seed=seed,
_init_setup_model=False,
)
self.batch_size = batch_size
self.n_epochs = n_epochs
self.clip_range = clip_range
self.clip_range_vf = clip_range_vf
self.target_kl = target_kl
self.reconstruction_loss = MaskedABSLoss()
self.save_collected = 1
if _init_setup_model:
self._setup_model()
def _setup_model(self) -> None:
super(PPO, self)._setup_model()
# Initialize schedules for policy/value clipping
self.clip_range = get_schedule_fn(self.clip_range)
if self.clip_range_vf is not None:
if isinstance(self.clip_range_vf, (float, int)):
assert self.clip_range_vf > 0, "`clip_range_vf` must be positive, " "pass `None` to deactivate vf clipping"
self.clip_range_vf = get_schedule_fn(self.clip_range_vf)
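        # A separate Adam optimizer and plateau scheduler are used for the
        # observation-reconstruction decoder, which is trained in its own loop
        # after the PPO update (see train()).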
self.decoder_optimizer = th.optim.Adam(self.policy.parameters(),lr=self.learning_rate(1))
self.decoder_scheduler = th.optim.lr_scheduler.ReduceLROnPlateau(self.decoder_optimizer,'min', patience=50, min_lr=1e-6)
def train(self) -> None:
"""
Update policy using the currently gathered rollout buffer.
"""
# Update optimizer learning rate
self._update_learning_rate(self.policy.optimizer)
# Compute current clip range
clip_range = self.clip_range(self._current_progress_remaining)
# Optional: clip range for the value function
if self.clip_range_vf is not None:
clip_range_vf = self.clip_range_vf(self._current_progress_remaining)
entropy_losses, all_kl_divs = [], []
decoder_losses = []
decoder_present_losses = []
decoder_future_losses = []
decoder_future2_losses = []
decoder_regularization_losses = []
pg_losses, value_losses = [], []
clip_fractions = []
# train for n_epochs epochs
for epoch in range(self.n_epochs):
approx_kl_divs = []
# Do a complete pass on the rollout buffer
for rollout_data in self.rollout_buffer.get(self.batch_size):
actions = rollout_data.actions
if isinstance(self.action_space, spaces.Discrete):
# Convert discrete action from float to long
actions = rollout_data.actions.long().flatten()
# Re-sample the noise matrix because the log_std has changed
# TODO: investigate why there is no issue with the gradient
# if that line is commented (as in SAC)
if self.use_sde:
self.policy.reset_noise(self.batch_size)
values, log_prob, entropy = self.policy.evaluate_actions(rollout_data.observations, actions)
# print("LogProb is ",rollout_data.old_log_prob)
values = values.flatten()
# Normalize advantage
advantages = rollout_data.advantages
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
# ratio between old and new policy, should be one at the first iteration
ratio = th.exp(log_prob - rollout_data.old_log_prob)
# print("Ratio is ",ratio)
# clipped surrogate loss
policy_loss_1 = advantages * ratio
policy_loss_2 = advantages * th.clamp(ratio, 1 - clip_range, 1 + clip_range)
policy_loss = -th.min(policy_loss_1, policy_loss_2).mean()
# Logging
pg_losses.append(policy_loss.item())
clip_fraction = th.mean((th.abs(ratio - 1) > clip_range).float()).item()
clip_fractions.append(clip_fraction)
if self.clip_range_vf is None:
# No clipping
values_pred = values
else:
# Clip the different between old and new value
# NOTE: this depends on the reward scaling
values_pred = rollout_data.old_values + th.clamp(
values - rollout_data.old_values, -clip_range_vf, clip_range_vf
)
# Value loss using the TD(gae_lambda) target
value_loss = F.mse_loss(rollout_data.returns, values_pred)
value_losses.append(value_loss.item())
# Entropy loss favor exploration
if entropy is None:
# Approximate entropy when no analytical form
entropy_loss = -th.mean(-log_prob)
else:
entropy_loss = -th.mean(entropy)
entropy_losses.append(entropy_loss.item())
# decoder_loss = self.reconstruction_loss(decoding, rollout_data.shifted_obs[:,18:,0], rollout_data.mask.unsqueeze(1).repeat(1,512,1))
# decoder_losses.append(decoder_loss.item())
loss = policy_loss + self.ent_coef * entropy_loss + self.vf_coef * value_loss
# Optimization step
self.policy.optimizer.zero_grad()
loss.backward()
# Clip grad norm
th.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm)
# for param in self.policy.parameters():
# print(type(param), param.size())
# tensor_size = param.size()
# size_tup = (512, 512, 2)
# size_tup_mlp = (128,256)
# if tensor_size==size_tup:
# print(param.grad)
# if tensor_size==size_tup_mlp:
# print(param.grad)
self.policy.optimizer.step()
# print("Approx KL ",th.mean(rollout_data.old_log_prob - log_prob).detach().cpu().numpy())
approx_kl_divs.append(th.mean(rollout_data.old_log_prob - log_prob).detach().cpu().numpy())
all_kl_divs.append(np.mean(approx_kl_divs))
if self.target_kl is not None and np.mean(approx_kl_divs) > 1.5 * self.target_kl:
print(f"Early stopping at step {epoch} due to reaching max kl: {np.mean(approx_kl_divs):.2f}")
break
for epoch in range(self.n_epochs):
# Do a complete pass on the rollout buffer
for rollout_data in self.rollout_buffer.get(int((self.batch_size+1)/2)):
encoding, decoding_present, decoding_future1, decoding_future2 = self.policy.decode(rollout_data.observations)
decoder_loss_present = self.reconstruction_loss(decoding_present, rollout_data.observations[:,18:,0], th.ones(rollout_data.observations[:,18:,0].size()).to(self.device))
decoder_present_losses.append(decoder_loss_present.item())
decoder_loss_future1 = self.reconstruction_loss(decoding_future1, rollout_data.shifted_obs[:,18:,0], rollout_data.mask.unsqueeze(1).repeat(1,decoding_future1.size()[1],1))
decoder_future_losses.append(decoder_loss_future1.item())
decoder_loss_future2 = self.reconstruction_loss(decoding_future2, rollout_data.double_shifted_obs[:,18:,0], rollout_data.mask2.unsqueeze(1).repeat(1,decoding_future2.size()[1],1))
decoder_future2_losses.append(decoder_loss_future2.item())
decoder_loss = decoder_loss_present + decoder_loss_future1 + decoder_loss_future2
decoder_losses.append(decoder_loss.item())
self.decoder_optimizer.zero_grad()
decoder_loss.backward()
self.decoder_optimizer.step()
self.decoder_scheduler.step(decoder_loss)
self._n_updates += self.n_epochs
explained_var = explained_variance(self.rollout_buffer.values.flatten(), self.rollout_buffer.returns.flatten())
# Logs
logger.record("train/entropy_loss", np.mean(entropy_losses))
logger.record("train/policy_gradient_loss", np.mean(pg_losses))
logger.record("train/decoder_loss", np.mean(decoder_losses))
logger.record("train/decoder_loss_present", np.mean(decoder_present_losses))
logger.record("train/decoder_loss_future1", np.mean(decoder_future_losses))
logger.record("train/decoder_loss_future2", np.mean(decoder_future2_losses))
# logger.record("train/decoder_loss_regularization", np.mean(decoder_regularization_losses)*1e-4)
logger.record("train/value_loss", np.mean(value_losses))
logger.record("train/approx_kl", np.mean(approx_kl_divs))
logger.record("train/clip_fraction", np.mean(clip_fractions))
logger.record("train/loss", loss.item())
logger.record("train/explained_variance", explained_var)
if hasattr(self.policy, "log_std"):
logger.record("train/std", th.exp(self.policy.log_std).mean().item())
logger.record("train/n_updates", self._n_updates, exclude="tensorboard")
logger.record("train/clip_range", clip_range)
if self.clip_range_vf is not None:
logger.record("train/clip_range_vf", clip_range_vf)
def learn(
self,
total_timesteps: int,
callback: MaybeCallback = None,
log_interval: int = 1,
eval_env: Optional[GymEnv] = None,
eval_freq: int = -1,
n_eval_episodes: int = 5,
tb_log_name: str = "PPO",
eval_log_path: Optional[str] = None,
reset_num_timesteps: bool = True,
) -> "PPO":
return super(PPO, self).learn(
total_timesteps=total_timesteps,
callback=callback,
log_interval=log_interval,
eval_env=eval_env,
eval_freq=eval_freq,
n_eval_episodes=n_eval_episodes,
tb_log_name=tb_log_name,
eval_log_path=eval_log_path,
reset_num_timesteps=reset_num_timesteps,
)
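# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative only): this PPO variant expects a policy
# with decode()/evaluate_actions() heads and a rollout buffer providing
# shifted_obs / mask fields, so the environment and policy names below are
# assumptions.
#
#     model = PPO("CustomReconstructionPolicy", env, n_steps=2048,
#                 batch_size=64, n_epochs=10, verbose=1)
#     model.learn(total_timesteps=1_000_000)
# ---------------------------------------------------------------------------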
|
the-stack_106_17419
|
#!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
def read_dump(file_name, addrs, hd_master_addr_old):
"""
Read the given dump, count the addrs that match, count change and reserve.
Also check that the old hd_master is inactive
"""
with open(file_name, encoding='utf8') as inputfile:
found_addr = 0
found_addr_chg = 0
found_addr_rsv = 0
hd_master_addr_ret = None
for line in inputfile:
# only read non comment lines
if line[0] != "#" and len(line) > 10:
# split out some data
key_label, comment = line.split("#")
# key = key_label.split(" ")[0]
keytype = key_label.split(" ")[2]
if len(comment) > 1:
addr_keypath = comment.split(" addr=")[1]
addr = addr_keypath.split(" ")[0]
keypath = None
if keytype == "inactivehdmaster=1":
# ensure the old master is still available
assert(hd_master_addr_old == addr)
elif keytype == "hdmaster=1":
# ensure we have generated a new hd master key
assert(hd_master_addr_old != addr)
hd_master_addr_ret = addr
else:
keypath = addr_keypath.rstrip().split("hdkeypath=")[1]
# count key types
for addrObj in addrs:
if addrObj['address'] == addr and addrObj['hdkeypath'] == keypath and keytype == "label=":
found_addr += 1
break
elif keytype == "change=1":
found_addr_chg += 1
break
elif keytype == "reserve=1":
found_addr_rsv += 1
break
return found_addr, found_addr_chg, found_addr_rsv, hd_master_addr_ret
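# Hedged example of the dump-file lines parsed above (the key, timestamp and
# address are illustrative placeholders, not real values):
#
#   <wif-private-key> <timestamp> label= # addr=<address> hdkeypath=m/44'/1'/0'/0/0
#
# i.e. the key type ("label=", "change=1", "reserve=1", "hdmaster=1", ...) is
# the third space-separated token before the '#', and the address and hd
# keypath appear in the trailing comment.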
class WalletDumpTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = False
self.num_nodes = 1
self.extra_args = [["-keypool=90", "-usehd=1"]]
def setup_chain(self):
# TODO remove this when usehd=1 becomes the default
# use our own cache and -usehd=1 as extra arg as the default cache is run with -usehd=0
initialize_chain(self.options.tmpdir, self.num_nodes, self.options.cachedir + "/hd", ["-usehd=1"], redirect_stderr=True)
set_cache_mocktime()
def setup_network(self, split=False):
# Use 1 minute timeout because the initial getnewaddress RPC can take
# longer than the default 30 seconds due to an expensive
# CWallet::TopUpKeyPool call, and the encryptwallet RPC made later in
# the test often takes even longer.
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, self.extra_args, timewait=60, redirect_stderr=True)
    def run_test(self):
tmpdir = self.options.tmpdir
# generate 20 addresses to compare against the dump
test_addr_count = 20
addrs = []
for i in range(0,test_addr_count):
addr = self.nodes[0].getnewaddress()
        vaddr = self.nodes[0].validateaddress(addr)  # required to get hd keypath
addrs.append(vaddr)
# Should be a no-op:
self.nodes[0].keypoolrefill()
# dump unencrypted wallet
self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.unencrypted.dump")
found_addr, found_addr_chg, found_addr_rsv, hd_master_addr_unenc = \
read_dump(tmpdir + "/node0/wallet.unencrypted.dump", addrs, None)
assert_equal(found_addr, test_addr_count) # all keys must be in the dump
        assert_equal(found_addr_chg, 50) # 50 blocks were mined
assert_equal(found_addr_rsv, 180) # keypool size (external+internal)
#encrypt wallet, restart, unlock and dump
self.nodes[0].encryptwallet('test')
bitcoind_processes[0].wait()
self.nodes[0] = start_node(0, self.options.tmpdir, self.extra_args[0])
self.nodes[0].walletpassphrase('test', 10)
# Should be a no-op:
self.nodes[0].keypoolrefill()
self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.encrypted.dump")
found_addr, found_addr_chg, found_addr_rsv, hd_master_addr_enc = \
read_dump(tmpdir + "/node0/wallet.encrypted.dump", addrs, hd_master_addr_unenc)
assert_equal(found_addr, test_addr_count)
# TODO clarify if we want the behavior that is tested below in Levocoin (only when HD seed was generated and not user-provided)
# assert_equal(found_addr_chg, 180 + 50) # old reserve keys are marked as change now
assert_equal(found_addr_rsv, 180) # keypool size
if __name__ == '__main__':
    WalletDumpTest().main()
|
the-stack_106_17420
|
"""Support for information from HP iLO sensors."""
from datetime import timedelta
import logging
import hpilo
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_HOST,
CONF_MONITORED_VARIABLES,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_SENSOR_TYPE,
CONF_UNIT_OF_MEASUREMENT,
CONF_USERNAME,
CONF_VALUE_TEMPLATE,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "HP ILO"
DEFAULT_PORT = 443
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=300)
SENSOR_TYPES = {
"server_name": ["Server Name", "get_server_name"],
"server_fqdn": ["Server FQDN", "get_server_fqdn"],
"server_host_data": ["Server Host Data", "get_host_data"],
"server_oa_info": ["Server Onboard Administrator Info", "get_oa_info"],
"server_power_status": ["Server Power state", "get_host_power_status"],
"server_power_readings": ["Server Power readings", "get_power_readings"],
"server_power_on_time": ["Server Power On time", "get_server_power_on_time"],
"server_asset_tag": ["Server Asset Tag", "get_asset_tag"],
"server_uid_status": ["Server UID light", "get_uid_status"],
"server_health": ["Server Health", "get_embedded_health"],
"network_settings": ["Network Settings", "get_network_settings"],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_MONITORED_VARIABLES, default=[]): vol.All(
cv.ensure_list,
[
vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_SENSOR_TYPE): vol.All(
cv.string, vol.In(SENSOR_TYPES)
),
vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
}
)
],
),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the HP iLO sensors."""
hostname = config.get(CONF_HOST)
port = config.get(CONF_PORT)
login = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
monitored_variables = config.get(CONF_MONITORED_VARIABLES)
# Create a data fetcher to support all of the configured sensors. Then make
# the first call to init the data and confirm we can connect.
try:
hp_ilo_data = HpIloData(hostname, port, login, password)
except ValueError as error:
_LOGGER.error(error)
return
# Initialize and add all of the sensors.
devices = []
for monitored_variable in monitored_variables:
new_device = HpIloSensor(
hass=hass,
hp_ilo_data=hp_ilo_data,
sensor_name="{} {}".format(
config.get(CONF_NAME), monitored_variable[CONF_NAME]
),
sensor_type=monitored_variable[CONF_SENSOR_TYPE],
sensor_value_template=monitored_variable.get(CONF_VALUE_TEMPLATE),
unit_of_measurement=monitored_variable.get(CONF_UNIT_OF_MEASUREMENT),
)
devices.append(new_device)
add_entities(devices, True)
class HpIloSensor(Entity):
"""Representation of a HP iLO sensor."""
def __init__(
self,
hass,
hp_ilo_data,
sensor_type,
sensor_name,
sensor_value_template,
unit_of_measurement,
):
"""Initialize the HP iLO sensor."""
self._hass = hass
self._name = sensor_name
self._unit_of_measurement = unit_of_measurement
self._ilo_function = SENSOR_TYPES[sensor_type][1]
self.hp_ilo_data = hp_ilo_data
if sensor_value_template is not None:
sensor_value_template.hass = hass
self._sensor_value_template = sensor_value_template
self._state = None
self._state_attributes = None
_LOGGER.debug("Created HP iLO sensor %r", self)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def unit_of_measurement(self):
"""Return the unit of measurement of the sensor."""
return self._unit_of_measurement
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def device_state_attributes(self):
"""Return the device state attributes."""
return self._state_attributes
def update(self):
"""Get the latest data from HP iLO and updates the states."""
# Call the API for new data. Each sensor will re-trigger this
# same exact call, but that's fine. Results should be cached for
# a short period of time to prevent hitting API limits.
self.hp_ilo_data.update()
ilo_data = getattr(self.hp_ilo_data.data, self._ilo_function)()
if self._sensor_value_template is not None:
ilo_data = self._sensor_value_template.render(ilo_data=ilo_data)
self._state = ilo_data
class HpIloData:
"""Gets the latest data from HP iLO."""
def __init__(self, host, port, login, password):
"""Initialize the data object."""
self._host = host
self._port = port
self._login = login
self._password = password
self.data = None
self.update()
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the latest data from HP iLO."""
try:
self.data = hpilo.Ilo(
hostname=self._host,
login=self._login,
password=self._password,
port=self._port,
)
except (
hpilo.IloError,
hpilo.IloCommunicationError,
hpilo.IloLoginFailed,
) as error:
raise ValueError(f"Unable to init HP ILO, {error}")
|
the-stack_106_17422
|
# Copyright 2019 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This package contains the modules that make up the
Strawberry Fields backends. This includes photonic simulators,
shared numerical operations, and states objects returned by
statevector simulators.
Local statevector simulators
----------------------------
Below are all available local statevector backends currently
provided by Strawberry Fields. These simulators all run locally,
provide access to the state after simulation, and the state is
preserved between engine runs.
.. currentmodule:: strawberryfields.backends
.. autosummary::
:toctree: api
FockBackend
GaussianBackend
~tfbackend.TFBackend
BosonicBackend
.. raw:: html
<div style="display: none;">
.. currentmodule:: strawberryfields.backends
.. autosummary::
:toctree: api
BaseFockState
BaseGaussianState
~tfbackend.states.FockStateTF
BaseBosonicState
.. raw:: html
</div>
Backend API
-----------
A list of the abstract base classes that define the
statevector backend API
.. currentmodule:: strawberryfields.backends
.. autosummary::
:toctree: api
BaseState
BaseBackend
BaseFock
BaseGaussian
BaseBosonic
Utility modules
---------------
The following utility modules are provided for
backend development.
.. currentmodule:: strawberryfields.backends
.. autosummary::
:toctree: api
shared_ops
"""
from .base import BaseBackend, BaseFock, BaseGaussian, BaseBosonic, ModeMap
from .gaussianbackend import GaussianBackend
from .fockbackend import FockBackend
from .bosonicbackend import BosonicBackend
from .states import BaseState, BaseGaussianState, BaseFockState, BaseBosonicState
# There is no import for the TFBackend to avoid TensorFlow being a direct
# requirement of SF through a chain of imports
__all__ = [
"BaseBackend",
"BaseFock",
"BaseGaussian",
"BaseBosonic",
"FockBackend",
"GaussianBackend",
"BosonicBackend",
"TFBackend",
"BaseState",
"BaseFockState",
"BaseGaussianState",
"BaseBosonicState",
]
virtual_backends = ["X8_01"]
local_backends = {
b.short_name: b for b in (BaseBackend, GaussianBackend, FockBackend, BosonicBackend)
}
def load_backend(name):
"""Loads the specified backend by mapping a string
to the backend type, via the ``local_backends``
dictionary. Note that this function is used by the
frontend only, and should not be user-facing.
"""
if name == "tf":
# treat the tensorflow backend differently, to
# isolate the import of TensorFlow
from .tfbackend import TFBackend # pylint: disable=import-outside-toplevel
return TFBackend()
if name in virtual_backends:
# Backend is a remote device/simulator, that has a
# defined circuit spec, but no local backend class.
# By convention, the short name and corresponding
# circuit spec are the same.
backend_attrs = {"short_name": name, "circuit_spec": name}
backend_class = type(name, (BaseBackend,), backend_attrs)
return backend_class()
if name in local_backends:
backend = local_backends[name]()
return backend
raise ValueError("Backend '{}' is not supported.".format(name))
|
the-stack_106_17424
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from torch import nn
from torch.nn import functional as F
from ..box_head.roi_box_feature_extractors import ResNet50Conv5ROIFeatureExtractor
from paa_core.modeling import registry
from paa_core.modeling.poolers import Pooler
from paa_core.modeling.make_layers import make_conv3x3
registry.ROI_MASK_FEATURE_EXTRACTORS.register(
"ResNet50Conv5ROIFeatureExtractor", ResNet50Conv5ROIFeatureExtractor
)
@registry.ROI_MASK_FEATURE_EXTRACTORS.register("MaskRCNNFPNFeatureExtractor")
class MaskRCNNFPNFeatureExtractor(nn.Module):
"""
    Feature extractor for FPN-based mask heads
"""
def __init__(self, cfg, in_channels):
"""
Arguments:
num_classes (int): number of output classes
input_size (int): number of channels of the input once it's flattened
representation_size (int): size of the intermediate representation
"""
super(MaskRCNNFPNFeatureExtractor, self).__init__()
resolution = cfg.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION
scales = cfg.MODEL.ROI_MASK_HEAD.POOLER_SCALES
sampling_ratio = cfg.MODEL.ROI_MASK_HEAD.POOLER_SAMPLING_RATIO
pooler = Pooler(
output_size=(resolution, resolution),
scales=scales,
sampling_ratio=sampling_ratio,
)
input_size = in_channels
self.pooler = pooler
use_gn = cfg.MODEL.ROI_MASK_HEAD.USE_GN
layers = cfg.MODEL.ROI_MASK_HEAD.CONV_LAYERS
dilation = cfg.MODEL.ROI_MASK_HEAD.DILATION
next_feature = input_size
self.blocks = []
for layer_idx, layer_features in enumerate(layers, 1):
layer_name = "mask_fcn{}".format(layer_idx)
module = make_conv3x3(
next_feature, layer_features,
dilation=dilation, stride=1, use_gn=use_gn
)
self.add_module(layer_name, module)
next_feature = layer_features
self.blocks.append(layer_name)
self.out_channels = layer_features
def forward(self, x, proposals):
x = self.pooler(x, proposals)
for layer_name in self.blocks:
x = F.relu(getattr(self, layer_name)(x))
return x
def make_roi_mask_feature_extractor(cfg, in_channels):
func = registry.ROI_MASK_FEATURE_EXTRACTORS[
cfg.MODEL.ROI_MASK_HEAD.FEATURE_EXTRACTOR
]
return func(cfg, in_channels)
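# Illustrative sketch of the registry-dispatch pattern used above, written
# standalone so it does not depend on paa_core or a real config object; the
# registry dict and names below are hypothetical.
_EXAMPLE_REGISTRY = {}
def _register_example(name):
    def _wrap(cls):
        _EXAMPLE_REGISTRY[name] = cls
        return cls
    return _wrap
@_register_example("ToyExtractor")
class _ToyExtractor:
    def __init__(self, in_channels):
        self.in_channels = in_channels
def _make_example_extractor(name, in_channels):
    # Same lookup-then-construct flow as make_roi_mask_feature_extractor above.
    return _EXAMPLE_REGISTRY[name](in_channels)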
|
the-stack_106_17425
|
"""Pydantic loader using TOML serialization."""
import logging
from os import PathLike
from pathlib import Path
from typing import Union, Optional
import toml
from pydantic import BaseSettings
from toml.decoder import TomlDecodeError
import pydantic_loader
from pydantic_loader.encode import encode_pydantic_obj
_LOGGER = logging.getLogger(__name__)
def _load_toml(config_file: Union[Path, PathLike]) -> dict:
"""Load a toml file and return a dict.
Returns:
CfgError when something (anything) is wrong.
"""
try:
with open(config_file) as toml_tile:
dct = toml.load(toml_tile)
return dct
except TomlDecodeError as err:
raise pydantic_loader.CfgError(str(err))
def save_toml(config: BaseSettings, config_file: Path, make_path=False):
"""Serialize the config class and save it as a toml file.
Args:
config: Pydantic config instance
config_file: The output file
make_path: If True the path will be created.
Raises:
        FileNotFoundError: If make_path=False and the parent folder does not exist.
"""
if make_path:
config_file.parent.mkdir(exist_ok=True)
dct = encode_pydantic_obj(config)
try:
val = toml.dumps(dct)
except TomlDecodeError as err:
raise pydantic_loader.CfgError(err)
except Exception as err:
raise pydantic_loader.CfgError(err)
with open(config_file, "w") as toml_file:
toml_file.write(val)
def load_toml(pydantic_obj, config_file: Optional[Path], on_error_return_default=False):
"""Load a config file and merge it into the config class.
Args:
pydantic_obj: A pydantic class to instantiate
config_file: An optional config file location.
        on_error_return_default: If True, loading is forgiving: on failure it
            will return default settings. Otherwise it will raise CfgError.
Returns:
A config instance
raises:
CfgError when loading fails and on_error_return_default is False.
"""
return pydantic_loader.config.load_config(
pydantic_obj,
config_file,
loader=_load_toml,
on_error_return_default=on_error_return_default,
)
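# Usage sketch (the settings class below is hypothetical and defined only for
# illustration; it exercises save_toml/load_toml from this module).
def _toml_round_trip_sketch(tmp_dir: Path):
    class _Settings(BaseSettings):
        host: str = "localhost"
        port: int = 8080
    config_file = tmp_dir / "settings.toml"
    save_toml(_Settings(), config_file, make_path=True)
    return load_toml(_Settings, config_file, on_error_return_default=True)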
|
the-stack_106_17426
|
# -*- coding: utf-8 -*-
## Copyright 2014 Cognitect. All Rights Reserved.
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS-IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
import unittest
# import transit reader/writer components and supporting types
from transit.reader import Reader, JsonUnmarshaler, MsgPackUnmarshaler
from transit.writer import Writer
from transit.transit_types import Keyword, Symbol, URI, frozendict, TaggedValue, Link, true, false
from io import StringIO, BytesIO
from transit.helpers import mapcat
from helpers import ints_centered_on, hash_of_size, array_of_symbools
from uuid import UUID
from datetime import datetime
import dateutil.tz
from math import isnan
class ExemplarBaseTest(unittest.TestCase):
pass
def exemplar(name, val):
class ExemplarTest(ExemplarBaseTest):
def test_json(self):
with open("../transit-format/examples/0.8/simple/" + name + ".json") as stream:
data = Reader(protocol="json").read(stream)
self.assertEqual(val, data)
def test_msgpack(self):
with open("../transit-format/examples/0.8/simple/" + name + ".mp", 'rb') as stream:
data = Reader(protocol="msgpack").read(stream)
self.assertEqual(val, data)
def test_json_verbose(self):
with open("../transit-format/examples/0.8/simple/" + name + ".verbose.json") as stream:
data = Reader(protocol="json_verbose").read(stream)
self.assertEqual(val, data)
def test_reencode_msgpack(self):
io = BytesIO()
writer = Writer(io, protocol="msgpack")
writer.write(val)
s = io.getvalue()
io = BytesIO(s)
reader = Reader(protocol="msgpack")
newval = reader.read(io)
self.assertEqual(val, newval)
def test_reencode_json(self):
io = StringIO()
writer = Writer(io, protocol="json")
writer.write(val)
s = io.getvalue()
# Uncomment when debugging to see what payloads fail
# print(s)
io = StringIO(s)
reader = Reader(protocol="json")
newval = reader.read(io)
self.assertEqual(val, newval)
# test json verbose
def test_reencode_json_verbose(self):
io = StringIO()
writer = Writer(io, protocol="json_verbose")
writer.write(val)
s = io.getvalue()
io = StringIO(s)
reader = Reader(protocol="json_verbose")
newval = reader.read(io)
self.assertEqual(val, newval)
def assertEqual(self, val, data):
if type(val) is float or type(data) is float:
if type(val) is float and type(data) is float and isnan(val) and isnan(data):
return true
else:
unittest.TestCase.assertAlmostEqual(self, val, data, places=7, msg=name)
elif type(val) in [list, tuple]:
for v, d in zip(val, data):
self.assertEqual(v, d)
else:
unittest.TestCase.assertEqual(self, val, data, name + " " + str(val) + " vs " + str(data))
globals()["test_" + name + "_json"] = ExemplarTest
ARRAY_SIMPLE = (1, 2, 3)
ARRAY_MIXED = (0, 1, 2.0, true, false, 'five', Keyword("six"), Symbol("seven"), '~eight', None)
ARRAY_NESTED = (ARRAY_SIMPLE, ARRAY_MIXED)
SMALL_STRINGS = ("", "a", "ab", "abc", "abcd", "abcde", "abcdef")
POWERS_OF_TWO = tuple(map(lambda x: pow(2, x), range(66)))
INTERESTING_INTS = tuple(mapcat(lambda x: ints_centered_on(x, 2), POWERS_OF_TWO))
SYM_STRS = ["a", "ab", "abc", "abcd", "abcde", "a1", "b2", "c3", "a_b"]
SYMBOLS = tuple(map(Symbol, SYM_STRS))
KEYWORDS = tuple(map(Keyword, SYM_STRS))
UUIDS = (UUID('5a2cbea3-e8c6-428b-b525-21239370dd55'),
UUID('d1dc64fa-da79-444b-9fa4-d4412f427289'),
UUID('501a978e-3a3e-4060-b3be-1cf2bd4b1a38'),
UUID('b3ba141a-a776-48e4-9fae-a28ea8571f58'))
URIS = (
URI(u'http://example.com'),
URI(u'ftp://example.com'),
URI(u'file:///path/to/file.txt'),
URI(u'http://www.詹姆斯.com/'))
DATES = tuple(map(lambda x: datetime.fromtimestamp(x/1000.0, tz=dateutil.tz.tzutc()),
[-6106017600000, 0, 946728000000, 1396909037000]))
SET_SIMPLE = frozenset(ARRAY_SIMPLE)
SET_MIXED = frozenset(ARRAY_MIXED)
SET_NESTED = frozenset([SET_SIMPLE, SET_MIXED])
MAP_SIMPLE = frozendict({Keyword("a"): 1,
Keyword("b"): 2,
Keyword("c"): 3})
MAP_MIXED = frozendict({Keyword("a"): 1,
Keyword("b"): u"a string",
Keyword("c"): true})
MAP_NESTED = frozendict({Keyword("simple"): MAP_SIMPLE,
Keyword("mixed"): MAP_MIXED})
exemplar("uris", URIS)
exemplar("nil", None)
exemplar("true", true)
exemplar("false", false)
exemplar("zero", 0)
exemplar("one", 1)
exemplar("one_string", "hello")
exemplar("one_keyword", Keyword("hello"))
exemplar("one_symbol", Symbol("hello"))
exemplar("one_date", datetime.fromtimestamp(946728000000/1000.0, dateutil.tz.tzutc()))
exemplar("vector_simple", ARRAY_SIMPLE)
exemplar("vector_empty", ())
exemplar("vector_mixed", ARRAY_MIXED)
exemplar("vector_nested", ARRAY_NESTED)
exemplar("small_strings", SMALL_STRINGS)
exemplar("strings_tilde", tuple(map(lambda x: "~" + x, SMALL_STRINGS)))
exemplar("strings_hash", tuple(map(lambda x: "#" + x, SMALL_STRINGS)))
exemplar("strings_hat", tuple(map(lambda x: "^" + x, SMALL_STRINGS)))
exemplar("ints", tuple(range(128)))
exemplar("small_ints", ints_centered_on(0))
exemplar("ints_interesting", INTERESTING_INTS)
exemplar("ints_interesting_neg", tuple(map(lambda x: -x, INTERESTING_INTS)))
exemplar("doubles_small", tuple(map(float, ints_centered_on(0))))
exemplar("doubles_interesting", (-3.14159, 3.14159, 4E11, 2.998E8, 6.626E-34))
exemplar("one_uuid", UUIDS[0])
exemplar("uuids", UUIDS)
exemplar("one_uri", URIS[0])
exemplar("uris", URIS)
exemplar("dates_interesting", DATES)
exemplar("symbols", SYMBOLS)
exemplar("keywords", KEYWORDS)
exemplar("list_simple", ARRAY_SIMPLE)
exemplar("list_empty", ())
exemplar("list_mixed", ARRAY_MIXED)
exemplar("list_nested", ARRAY_NESTED)
exemplar("set_simple", SET_SIMPLE)
exemplar("set_empty", set())
exemplar("set_mixed", SET_MIXED)
exemplar("set_nested", SET_NESTED)
exemplar("map_simple", MAP_SIMPLE)
exemplar("map_mixed", MAP_MIXED)
exemplar("map_nested", MAP_NESTED)
exemplar("map_string_keys", {"first": 1, "second": 2, "third": 3})
exemplar("map_numeric_keys", {1: "one", 2: "two"})
exemplar("map_vector_keys", frozendict([[(1, 1), "one"],
[(2, 2), "two"]]))
exemplar("map_unrecognized_vals", {Keyword("key"): "~Unrecognized"})
#exemplar("map_unrecognized_keys", )
exemplar("vector_unrecognized_vals", ("~Unrecognized",))
exemplar("vector_1935_keywords_repeated_twice", tuple(array_of_symbools(1935, 1935*2)))
exemplar("vector_1936_keywords_repeated_twice", tuple(array_of_symbools(1936, 1936*2)))
exemplar("vector_1937_keywords_repeated_twice", tuple(array_of_symbools(1937, 1937*2)))
exemplar("map_10_items", hash_of_size(10))
exemplar("maps_two_char_sym_keys", ({Symbol("aa"): 1, Symbol("bb"): 2},
{Symbol("aa"): 3, Symbol("bb"): 4},
{Symbol("aa"): 5, Symbol("bb"): 6}))
exemplar("maps_three_char_sym_keys", ({Symbol("aaa"): 1, Symbol("bbb"): 2},
{Symbol("aaa"): 3, Symbol("bbb"): 4},
{Symbol("aaa"): 5, Symbol("bbb"): 6}))
exemplar("maps_four_char_sym_keys", ({Symbol("aaaa"): 1, Symbol("bbbb"): 2},
{Symbol("aaaa"): 3, Symbol("bbbb"): 4},
{Symbol("aaaa"): 5, Symbol("bbbb"): 6}))
exemplar("maps_two_char_string_keys", ({"aa": 1, "bb": 2},
{"aa": 3, "bb": 4},
{"aa": 5, "bb": 6}))
exemplar("maps_three_char_string_keys", ({"aaa": 1, "bbb": 2},
{"aaa": 3, "bbb": 4},
{"aaa": 5, "bbb": 6}))
exemplar("maps_four_char_string_keys", ({"aaaa": 1, "bbbb": 2},
{"aaaa": 3, "bbbb": 4},
{"aaaa": 5, "bbbb": 6}))
exemplar("maps_unrecognized_keys", (TaggedValue("abcde", Keyword("anything")),
TaggedValue("fghij", Keyword("anything-else")),))
exemplar("vector_special_numbers", (float("nan"), float("inf"), float("-inf")))
# Doesn't exist in simple examples but gave me tests to verify Link.
#exemplar("link", Link("http://www.blah.com", "test", "test", "link", "test"))
def make_hash_exemplar(n):
exemplar("map_%s_nested" % (n,), {Keyword("f"): hash_of_size(n),
Keyword("s"): hash_of_size(n)})
for n in [10, 1935, 1936, 1937]:
make_hash_exemplar(n)
if __name__=='__main__':
unittest.main()
#import cProfile
#import pstats
#cProfile.run('unittest.main()', 'exemptests')
#p = pstats.Stats('exemptests')
#p.sort_stats('time')
#p.print_stats()
|
the-stack_106_17427
|
# -*- coding: utf-8 -*-
"""
app.providers.aws
~~~~~~~~~~~~~~~~~
Provides AWS API related functions
"""
from datetime import datetime as dt
import pygogo as gogo
from app.helpers import flask_formatter as formatter
from app.routes.auth import Resource
logger = gogo.Gogo(
__name__, low_formatter=formatter, high_formatter=formatter, monolog=True
).logger
logger.propagate = False
class AWS(Resource):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._awsc = None
@property
def awsc(self):
if self.client and self._awsc is None:
self._awsc = self.client.session.client(self.resource)
return self._awsc
@property
def invalidation_batch(self):
return {
"Paths": {"Quantity": 4, "Items": self.items},
"CallerReference": dt.utcnow().isoformat(),
}
|
the-stack_106_17428
|
from copy import deepcopy
from django.contrib import admin
from django.contrib.admin import site
from django.contrib.auth import get_user_model
from django.contrib.sites.models import Site
from django.utils.translation import gettext
from cms.admin.forms import PageUserChangeForm, PageUserGroupForm
from cms.exceptions import NoPermissionsException
from cms.models import Page, PagePermission, PageUser, PageUserGroup
from cms.utils.compat.forms import UserAdmin
from cms.utils.conf import get_cms_setting
from cms.utils.permissions import (
get_model_permission_codename,
get_subordinate_groups,
get_subordinate_users,
get_user_permission_level,
)
user_model = get_user_model()
admin_class = UserAdmin
for model, admin_instance in site._registry.items():
if model == user_model:
admin_class = admin_instance.__class__
class GenericCmsPermissionAdmin:
def get_subordinates(self, user, site):
raise NotImplementedError
def _has_change_permissions_permission(self, request):
"""
        User is able to add/change objects only if they have the change
        permission on some page.
"""
site = Site.objects.get_current(request)
try:
get_user_permission_level(request.user, site)
except NoPermissionsException:
return False
return True
def get_form(self, request, obj=None, **kwargs):
form_class = super().get_form(request, obj, **kwargs)
form_class._current_user = request.user
return form_class
def get_queryset(self, request):
queryset = super().get_queryset(request)
site = Site.objects.get_current(request)
user_ids = self.get_subordinates(request.user, site).values_list('pk', flat=True)
return queryset.filter(pk__in=user_ids)
def has_add_permission(self, request):
has_model_perm = super().has_add_permission(request)
if not has_model_perm:
return False
return self._has_change_permissions_permission(request)
def has_change_permission(self, request, obj=None):
has_model_perm = super().has_change_permission(request, obj)
if not has_model_perm:
return False
return self._has_change_permissions_permission(request)
def has_delete_permission(self, request, obj=None):
has_model_perm = super().has_delete_permission(request, obj)
if not has_model_perm:
return False
return self._has_change_permissions_permission(request)
def has_view_permission(self, request, obj=None):
# For django 2.1
# Default is to return True if user got `change` perm, but we have to
# get in consideration also cms permission system
return self.has_change_permission(request, obj)
class PageUserAdmin(GenericCmsPermissionAdmin, admin_class):
form = PageUserChangeForm
model = PageUser
def get_subordinates(self, user, site):
return get_subordinate_users(user, site).values_list('pk', flat=True)
def get_readonly_fields(self, request, obj=None):
fields = super().get_readonly_fields(request, obj)
if not request.user.is_superuser:
# Non superusers can't set superuser status on
# their subordinates.
fields = list(fields) + ['is_superuser']
return fields
def save_model(self, request, obj, form, change):
if not change:
# By default set the staff flag to True
# when a PageUser is first created
obj.is_staff = True
# Set the created_by field to the current user
obj.created_by = request.user
super().save_model(request, obj, form, change)
class PageUserGroupAdmin(GenericCmsPermissionAdmin, admin.ModelAdmin):
form = PageUserGroupForm
list_display = ('name', 'created_by')
fieldsets = [
(None, {'fields': ('name',)}),
]
def get_fieldsets(self, request, obj=None):
"""
        Nobody can grant more than they have, so check for user permissions
to Page and User model and render fieldset depending on them.
"""
fieldsets = deepcopy(self.fieldsets)
perm_models = (
(Page, gettext('Page permissions')),
(PageUser, gettext('User & Group permissions')),
(PagePermission, gettext('Page permissions management')),
)
for i, perm_model in enumerate(perm_models):
fields = []
model, title = perm_model
name = model.__name__.lower()
for key in ('add', 'change', 'delete'):
perm_code = get_model_permission_codename(model, action=key)
if request.user.has_perm(perm_code):
fields.append('can_%s_%s' % (key, name))
if fields:
fieldsets.insert(2 + i, (title, {'fields': (fields,)}))
return fieldsets
def get_subordinates(self, user, site):
return get_subordinate_groups(user, site).values_list('pk', flat=True)
if get_cms_setting('PERMISSION'):
admin.site.register(PageUser, PageUserAdmin)
admin.site.register(PageUserGroup, PageUserGroupAdmin)
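# Illustrative sketch of how the fieldset field names above are composed from
# model names and permission actions (standalone; not part of django CMS).
def _permission_field_names(model, actions=('add', 'change', 'delete')):
    name = model.__name__.lower()
    return ['can_%s_%s' % (action, name) for action in actions]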
|
the-stack_106_17429
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import argparse
import torch
from torch import nn
import torch.distributed as dist
import torch.backends.cudnn as cudnn
from torchvision import datasets
from torchvision import transforms as pth_transforms
import utils
import vision_transformer as vits
def extract_feature_pipeline(args):
# ============ preparing data ... ============
transform = pth_transforms.Compose([
pth_transforms.CenterCrop(96)
])
#dataset_train = ReturnIndexDataset(os.path.join(args.data_path, "train"), transform=transform)
#dataset_val = ReturnIndexDataset(os.path.join(args.data_path, "val"), transform=transform)
dataset_train = ReturnIndexDataset(args.data_path, "train", transform=transform, tansform_coord=None,
classes=None, seasons=None, split_by_region=True, download=False)
dataset_val = ReturnIndexDataset(args.data_path, "val", transform=transform, tansform_coord=None,
classes=None, seasons=None, split_by_region=True, download=False)
"""
args.data_path, "train", transform=transform, tansform_coord=None,
classes=None, seasons=None, split_by_region=True, download=False
"""
sampler = torch.utils.data.DistributedSampler(dataset_train, shuffle=False)
data_loader_train = torch.utils.data.DataLoader(
dataset_train,
sampler=sampler,
batch_size=args.batch_size_per_gpu,
num_workers=args.num_workers,
pin_memory=True,
drop_last=False,
)
data_loader_val = torch.utils.data.DataLoader(
dataset_val,
batch_size=args.batch_size_per_gpu,
num_workers=args.num_workers,
pin_memory=True,
drop_last=False,
)
print(f"Data loaded with {len(dataset_train)} train and {len(dataset_val)} val imgs.")
# ============ building network ... ============
model = vits.__dict__[args.arch](patch_size=args.patch_size, num_classes=0)
utils.replace_input_layer(model, inchannels=13)
print(f"Model {args.arch} {args.patch_size}x{args.patch_size} built.")
model.cuda()
utils.load_pretrained_weights(model, args.pretrained_weights, args.checkpoint_key, args.arch, args.patch_size)
model.eval()
# ============ extract features ... ============
print("Extracting features for train set...")
train_features = extract_features(model, data_loader_train)
print("Extracting features for val set...")
test_features = extract_features(model, data_loader_val)
if utils.get_rank() == 0:
train_features = nn.functional.normalize(train_features, dim=1, p=2)
test_features = nn.functional.normalize(test_features, dim=1, p=2)
train_labels = torch.tensor([s[-1] for s in dataset_train.samples]).long()
test_labels = torch.tensor([s[-1] for s in dataset_val.samples]).long()
# save features and labels
if args.dump_features and dist.get_rank() == 0:
torch.save(train_features.cpu(), os.path.join(args.dump_features, "trainfeat.pth"))
torch.save(test_features.cpu(), os.path.join(args.dump_features, "testfeat.pth"))
torch.save(train_labels.cpu(), os.path.join(args.dump_features, "trainlabels.pth"))
torch.save(test_labels.cpu(), os.path.join(args.dump_features, "testlabels.pth"))
return train_features, test_features, train_labels, test_labels
@torch.no_grad()
def extract_features(model, data_loader):
metric_logger = utils.MetricLogger(delimiter=" ")
features = None
for samples, index in metric_logger.log_every(data_loader, 10):
samples = samples.cuda(non_blocking=True)
index = index.cuda(non_blocking=True)
feats = model(samples.float()).clone()
# init storage feature matrix
if dist.get_rank() == 0 and features is None:
features = torch.zeros(len(data_loader.dataset), feats.shape[-1])
if args.use_cuda:
features = features.cuda(non_blocking=True)
print(f"Storing features into tensor of shape {features.shape}")
# get indexes from all processes
y_all = torch.empty(dist.get_world_size(), index.size(0), dtype=index.dtype, device=index.device)
y_l = list(y_all.unbind(0))
y_all_reduce = torch.distributed.all_gather(y_l, index, async_op=True)
y_all_reduce.wait()
index_all = torch.cat(y_l)
# share features between processes
feats_all = torch.empty(
dist.get_world_size(),
feats.size(0),
feats.size(1),
dtype=feats.dtype,
device=feats.device,
)
output_l = list(feats_all.unbind(0))
output_all_reduce = torch.distributed.all_gather(output_l, feats, async_op=True)
output_all_reduce.wait()
# update storage feature matrix
if dist.get_rank() == 0:
if args.use_cuda:
features.index_copy_(0, index_all, torch.cat(output_l))
else:
features.index_copy_(0, index_all.cpu(), torch.cat(output_l).cpu())
return features
@torch.no_grad()
def knn_classifier(train_features, train_labels, test_features, test_labels, k, T, num_classes=1000):
top1, top5, total = 0.0, 0.0, 0
train_features = train_features.t()
num_test_images, num_chunks = test_labels.shape[0], 100
imgs_per_chunk = num_test_images // num_chunks
retrieval_one_hot = torch.zeros(k, num_classes).cuda()
for idx in range(0, num_test_images, imgs_per_chunk):
# get the features for test images
features = test_features[
idx : min((idx + imgs_per_chunk), num_test_images), :
]
targets = test_labels[idx : min((idx + imgs_per_chunk), num_test_images)]
batch_size = targets.shape[0]
# calculate the dot product and compute top-k neighbors
similarity = torch.mm(features, train_features)
distances, indices = similarity.topk(k, largest=True, sorted=True)
candidates = train_labels.view(1, -1).expand(batch_size, -1)
retrieved_neighbors = torch.gather(candidates, 1, indices)
retrieval_one_hot.resize_(batch_size * k, num_classes).zero_()
retrieval_one_hot.scatter_(1, retrieved_neighbors.view(-1, 1), 1)
distances_transform = distances.clone().div_(T).exp_()
probs = torch.sum(
torch.mul(
retrieval_one_hot.view(batch_size, -1, num_classes),
distances_transform.view(batch_size, -1, 1),
),
1,
)
_, predictions = probs.sort(1, True)
# find the predictions that match the target
correct = predictions.eq(targets.data.view(-1, 1))
top1 = top1 + correct.narrow(1, 0, 1).sum().item()
top5 = top5 + correct.narrow(1, 0, 5).sum().item()
total += targets.size(0)
top1 = top1 * 100.0 / total
top5 = top5 * 100.0 / total
return top1, top5
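# Illustrative shape check (not part of the original script): feeds random,
# L2-normalized features through knn_classifier() to show the tensor shapes it
# expects. Guarded on CUDA availability because the function allocates its
# scratch tensors on the GPU.
def _knn_classifier_shape_check():
    if not torch.cuda.is_available():
        return None
    num_train, num_test, dim, num_classes = 200, 100, 32, 10
    train_feats = nn.functional.normalize(torch.randn(num_train, dim), dim=1, p=2).cuda()
    test_feats = nn.functional.normalize(torch.randn(num_test, dim), dim=1, p=2).cuda()
    train_labels = torch.randint(0, num_classes, (num_train,)).cuda()
    test_labels = torch.randint(0, num_classes, (num_test,)).cuda()
    # With random features this is only a chance-level baseline.
    return knn_classifier(train_feats, train_labels, test_feats, test_labels,
                          k=5, T=0.07, num_classes=num_classes)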
#class ReturnIndexDataset(datasets.ImageFolder):
from sen12ms import AllSen12MSDataset
class ReturnIndexDataset(AllSen12MSDataset):
def __getitem__(self, idx):
img, lab = super(ReturnIndexDataset, self).__getitem__(idx)
return img, idx
if __name__ == '__main__':
parser = argparse.ArgumentParser('Evaluation with weighted k-NN on ImageNet')
parser.add_argument('--batch_size_per_gpu', default=128, type=int, help='Per-GPU batch-size')
parser.add_argument('--nb_knn', default=[10, 20, 100, 200], nargs='+', type=int,
help='Number of NN to use. 20 is usually working the best.')
parser.add_argument('--temperature', default=0.07, type=float,
help='Temperature used in the voting coefficient')
parser.add_argument('--pretrained_weights', default='', type=str, help="Path to pretrained weights to evaluate.")
parser.add_argument('--use_cuda', default=True, type=utils.bool_flag,
help="Should we store the features on GPU? We recommend setting this to False if you encounter OOM")
parser.add_argument('--arch', default='deit_small', type=str,
choices=['deit_tiny', 'deit_small', 'vit_base'], help='Architecture (support only ViT atm).')
parser.add_argument('--patch_size', default=16, type=int, help='Patch resolution of the model.')
parser.add_argument("--checkpoint_key", default="teacher", type=str,
help='Key to use in the checkpoint (example: "teacher")')
parser.add_argument('--dump_features', default=None,
help='Path where to save computed features, empty for no saving')
parser.add_argument('--load_features', default=None, help="""If the features have
already been computed, where to find them.""")
parser.add_argument('--num_workers', default=10, type=int, help='Number of data loading workers per GPU.')
parser.add_argument("--dist_url", default="env://", type=str, help="""url used to set up
distributed training; see https://pytorch.org/docs/stable/distributed.html""")
parser.add_argument("--local_rank", default=0, type=int, help="Please ignore and do not set this argument.")
parser.add_argument('--data_path', default='/path/to/imagenet/', type=str)
args = parser.parse_args()
utils.init_distributed_mode(args)
print("git:\n {}\n".format(utils.get_sha()))
print("\n".join("%s: %s" % (k, str(v)) for k, v in sorted(dict(vars(args)).items())))
cudnn.benchmark = True
if args.load_features:
train_features = torch.load(os.path.join(args.load_features, "trainfeat.pth"))
test_features = torch.load(os.path.join(args.load_features, "testfeat.pth"))
train_labels = torch.load(os.path.join(args.load_features, "trainlabels.pth"))
test_labels = torch.load(os.path.join(args.load_features, "testlabels.pth"))
else:
# need to extract features !
train_features, test_features, train_labels, test_labels = extract_feature_pipeline(args)
if utils.get_rank() == 0:
if args.use_cuda:
train_features = train_features.cuda()
test_features = test_features.cuda()
train_labels = train_labels.cuda()
test_labels = test_labels.cuda()
print("Features are ready!\nStart the k-NN classification.")
for k in args.nb_knn:
top1, top5 = knn_classifier(train_features, train_labels,
test_features, test_labels, k, args.temperature)
print(f"{k}-NN classifier result: Top1: {top1}, Top5: {top5}")
dist.barrier()
|
the-stack_106_17430
|
#!/usr/bin/python3
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Usage: inventory.py ip1 [ip2 ...]
# Examples: inventory.py 10.10.1.3 10.10.1.4 10.10.1.5
#
# Advanced usage:
# Add another host after initial creation: inventory.py 10.10.1.5
# Delete a host: inventory.py -10.10.1.3
# Delete a host by id: inventory.py -node1
#
# Load a YAML or JSON file with inventory data: inventory.py load hosts.yaml
# YAML file should be in the following format:
# group1:
# host1:
# ip: X.X.X.X
# var: val
# group2:
# host2:
# ip: X.X.X.X
from collections import OrderedDict
try:
import configparser
except ImportError:
import ConfigParser as configparser
import os
import re
import sys
ROLES = ['all', 'kube-master', 'kube-node', 'etcd', 'k8s-cluster:children',
'calico-rr']
PROTECTED_NAMES = ROLES
AVAILABLE_COMMANDS = ['help', 'print_cfg', 'print_ips', 'load']
_boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
'0': False, 'no': False, 'false': False, 'off': False}
def get_var_as_bool(name, default):
value = os.environ.get(name, '')
return _boolean_states.get(value.lower(), default)
# Configurable as shell vars start
CONFIG_FILE = os.environ.get("CONFIG_FILE", "./inventory.cfg")
# Reconfigures cluster distribution at scale
SCALE_THRESHOLD = int(os.environ.get("SCALE_THRESHOLD", 50))
MASSIVE_SCALE_THRESHOLD = int(os.environ.get("MASSIVE_SCALE_THRESHOLD", 200))
DEBUG = get_var_as_bool("DEBUG", True)
HOST_PREFIX = os.environ.get("HOST_PREFIX", "node")
# Configurable as shell vars end
class KubesprayInventory(object):
def __init__(self, changed_hosts=None, config_file=None):
self.config = configparser.ConfigParser(allow_no_value=True,
delimiters=('\t', ' '))
self.config_file = config_file
if self.config_file:
self.config.read(self.config_file)
if changed_hosts and changed_hosts[0] in AVAILABLE_COMMANDS:
self.parse_command(changed_hosts[0], changed_hosts[1:])
sys.exit(0)
self.ensure_required_groups(ROLES)
if changed_hosts:
self.hosts = self.build_hostnames(changed_hosts)
self.purge_invalid_hosts(self.hosts.keys(), PROTECTED_NAMES)
self.set_all(self.hosts)
self.set_k8s_cluster()
self.set_etcd(list(self.hosts.keys())[:3])
if len(self.hosts) >= SCALE_THRESHOLD:
self.set_kube_master(list(self.hosts.keys())[3:5])
else:
self.set_kube_master(list(self.hosts.keys())[:2])
self.set_kube_node(self.hosts.keys())
if len(self.hosts) >= SCALE_THRESHOLD:
self.set_calico_rr(list(self.hosts.keys())[:3])
else: # Show help if no options
self.show_help()
sys.exit(0)
self.write_config(self.config_file)
def write_config(self, config_file):
if config_file:
with open(config_file, 'w') as f:
self.config.write(f)
else:
print("WARNING: Unable to save config. Make sure you set "
"CONFIG_FILE env var.")
def debug(self, msg):
if DEBUG:
print("DEBUG: {0}".format(msg))
def get_ip_from_opts(self, optstring):
opts = optstring.split(' ')
for opt in opts:
if '=' not in opt:
continue
k, v = opt.split('=')
if k == "ip":
return v
raise ValueError("IP parameter not found in options")
def ensure_required_groups(self, groups):
for group in groups:
try:
self.debug("Adding group {0}".format(group))
self.config.add_section(group)
except configparser.DuplicateSectionError:
pass
def get_host_id(self, host):
'''Returns integer host ID (without padding) from a given hostname.'''
try:
short_hostname = host.split('.')[0]
return int(re.findall("\d+$", short_hostname)[-1])
except IndexError:
raise ValueError("Host name must end in an integer")
def build_hostnames(self, changed_hosts):
existing_hosts = OrderedDict()
highest_host_id = 0
try:
for host, opts in self.config.items('all'):
existing_hosts[host] = opts
host_id = self.get_host_id(host)
if host_id > highest_host_id:
highest_host_id = host_id
except configparser.NoSectionError:
pass
# FIXME(mattymo): Fix condition where delete then add reuses highest id
next_host_id = highest_host_id + 1
all_hosts = existing_hosts.copy()
for host in changed_hosts:
if host[0] == "-":
realhost = host[1:]
if self.exists_hostname(all_hosts, realhost):
self.debug("Marked {0} for deletion.".format(realhost))
all_hosts.pop(realhost)
elif self.exists_ip(all_hosts, realhost):
self.debug("Marked {0} for deletion.".format(realhost))
self.delete_host_by_ip(all_hosts, realhost)
elif host[0].isdigit():
if self.exists_hostname(all_hosts, host):
self.debug("Skipping existing host {0}.".format(host))
continue
elif self.exists_ip(all_hosts, host):
self.debug("Skipping existing host {0}.".format(host))
continue
next_host = "{0}{1}".format(HOST_PREFIX, next_host_id)
next_host_id += 1
all_hosts[next_host] = "ansible_host={0} ip={1}".format(
host, host)
elif host[0].isalpha():
raise Exception("Adding hosts by hostname is not supported.")
return all_hosts
def exists_hostname(self, existing_hosts, hostname):
return hostname in existing_hosts.keys()
def exists_ip(self, existing_hosts, ip):
for host_opts in existing_hosts.values():
if ip == self.get_ip_from_opts(host_opts):
return True
return False
def delete_host_by_ip(self, existing_hosts, ip):
for hostname, host_opts in existing_hosts.items():
if ip == self.get_ip_from_opts(host_opts):
del existing_hosts[hostname]
return
raise ValueError("Unable to find host by IP: {0}".format(ip))
def purge_invalid_hosts(self, hostnames, protected_names=[]):
for role in self.config.sections():
for host, _ in self.config.items(role):
if host not in hostnames and host not in protected_names:
self.debug("Host {0} removed from role {1}".format(host,
role))
self.config.remove_option(role, host)
def add_host_to_group(self, group, host, opts=""):
self.debug("adding host {0} to group {1}".format(host, group))
self.config.set(group, host, opts)
def set_kube_master(self, hosts):
for host in hosts:
self.add_host_to_group('kube-master', host)
def set_all(self, hosts):
for host, opts in hosts.items():
self.add_host_to_group('all', host, opts)
def set_k8s_cluster(self):
self.add_host_to_group('k8s-cluster:children', 'kube-node')
self.add_host_to_group('k8s-cluster:children', 'kube-master')
def set_calico_rr(self, hosts):
for host in hosts:
if host in self.config.items('kube-master'):
self.debug("Not adding {0} to calico-rr group because it "
"conflicts with kube-master group".format(host))
continue
if host in self.config.items('kube-node'):
self.debug("Not adding {0} to calico-rr group because it "
"conflicts with kube-node group".format(host))
continue
self.add_host_to_group('calico-rr', host)
def set_kube_node(self, hosts):
for host in hosts:
if len(self.config['all']) >= SCALE_THRESHOLD:
if self.config.has_option('etcd', host):
self.debug("Not adding {0} to kube-node group because of "
"scale deployment and host is in etcd "
"group.".format(host))
continue
if len(self.config['all']) >= MASSIVE_SCALE_THRESHOLD:
if self.config.has_option('kube-master', host):
self.debug("Not adding {0} to kube-node group because of "
"scale deployment and host is in kube-master "
"group.".format(host))
continue
self.add_host_to_group('kube-node', host)
def set_etcd(self, hosts):
for host in hosts:
self.add_host_to_group('etcd', host)
def load_file(self, files=None):
'''Directly loads JSON, or YAML file to inventory.'''
if not files:
raise Exception("No input file specified.")
import json
import yaml
for filename in list(files):
# Try JSON, then YAML
try:
with open(filename, 'r') as f:
data = json.load(f)
except ValueError:
try:
with open(filename, 'r') as f:
                        data = yaml.load(f, Loader=yaml.SafeLoader)
print("yaml")
except ValueError:
raise Exception("Cannot read %s as JSON, YAML, or CSV",
filename)
self.ensure_required_groups(ROLES)
self.set_k8s_cluster()
for group, hosts in data.items():
self.ensure_required_groups([group])
for host, opts in hosts.items():
optstring = "ansible_host={0} ip={0}".format(opts['ip'])
for key, val in opts.items():
if key == "ip":
continue
optstring += " {0}={1}".format(key, val)
self.add_host_to_group('all', host, optstring)
self.add_host_to_group(group, host)
self.write_config(self.config_file)
def parse_command(self, command, args=None):
if command == 'help':
self.show_help()
elif command == 'print_cfg':
self.print_config()
elif command == 'print_ips':
self.print_ips()
elif command == 'load':
self.load_file(args)
else:
raise Exception("Invalid command specified.")
def show_help(self):
help_text = '''Usage: inventory.py ip1 [ip2 ...]
Examples: inventory.py 10.10.1.3 10.10.1.4 10.10.1.5
Available commands:
help - Display this message
print_cfg - Write inventory file to stdout
print_ips - Write a space-delimited list of IPs from "all" group
Advanced usage:
Add another host after initial creation: inventory.py 10.10.1.5
Delete a host: inventory.py -10.10.1.3
Delete a host by id: inventory.py -node1
Configurable env vars:
DEBUG Enable debug printing. Default: True
CONFIG_FILE File to write config to Default: ./inventory.cfg
HOST_PREFIX Host prefix for generated hosts. Default: node
SCALE_THRESHOLD Separate ETCD role if # of nodes >= 50
MASSIVE_SCALE_THRESHOLD Separate K8s master and ETCD if # of nodes >= 200
'''
print(help_text)
def print_config(self):
self.config.write(sys.stdout)
def print_ips(self):
ips = []
for host, opts in self.config.items('all'):
ips.append(self.get_ip_from_opts(opts))
print(' '.join(ips))
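# Programmatic usage sketch (illustrative only; the IPs are placeholders and
# the config is written to a throwaway file instead of CONFIG_FILE).
def _build_inventory_sketch():
    KubesprayInventory(['10.10.1.3', '10.10.1.4', '10.10.1.5'],
                       config_file='/tmp/inventory-example.cfg')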
def main(argv=None):
if not argv:
argv = sys.argv[1:]
KubesprayInventory(argv, CONFIG_FILE)
if __name__ == "__main__":
sys.exit(main())
|
the-stack_106_17432
|
#!/usr/bin/env python3
# Copyright (c) 2017-2019 The Tokyocoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test HD Wallet keypool restore function.
Two nodes. Node1 is under test. Node0 is providing transactions and generating blocks.
- Start node1, shutdown and backup wallet.
- Generate 110 keys (enough to drain the keypool). Store key 90 (in the initial keypool) and key 110 (beyond the initial keypool). Send funds to key 90 and key 110.
- Stop node1, clear the datadir, move wallet file back into the datadir and restart node1.
- connect node1 to node0. Verify that they sync and node1 receives its funds."""
import os
import shutil
from test_framework.test_framework import TokyocoinTestFramework
from test_framework.util import (
assert_equal,
)
class KeypoolRestoreTest(TokyocoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 4
self.extra_args = [[], ['-keypool=100'], ['-keypool=100'], ['-keypool=100']]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
wallet_path = os.path.join(self.nodes[1].datadir, self.chain, "wallets", self.default_wallet_name, self.wallet_data_filename)
wallet_backup_path = os.path.join(self.nodes[1].datadir, "wallet.bak")
self.nodes[0].generate(101)
self.log.info("Make backup of wallet")
self.stop_node(1)
shutil.copyfile(wallet_path, wallet_backup_path)
self.start_node(1, self.extra_args[1])
self.connect_nodes(0, 1)
self.connect_nodes(0, 2)
self.connect_nodes(0, 3)
for i, output_type in enumerate(["legacy", "p2sh-segwit", "bech32"]):
self.log.info("Generate keys for wallet with address type: {}".format(output_type))
idx = i+1
for _ in range(90):
addr_oldpool = self.nodes[idx].getnewaddress(address_type=output_type)
for _ in range(20):
addr_extpool = self.nodes[idx].getnewaddress(address_type=output_type)
# Make sure we're creating the outputs we expect
address_details = self.nodes[idx].validateaddress(addr_extpool)
if i == 0:
assert not address_details["isscript"] and not address_details["iswitness"]
elif i == 1:
assert address_details["isscript"] and not address_details["iswitness"]
else:
assert not address_details["isscript"] and address_details["iswitness"]
self.log.info("Send funds to wallet")
self.nodes[0].sendtoaddress(addr_oldpool, 10)
self.nodes[0].generate(1)
self.nodes[0].sendtoaddress(addr_extpool, 5)
self.nodes[0].generate(1)
self.sync_blocks()
self.log.info("Restart node with wallet backup")
self.stop_node(idx)
shutil.copyfile(wallet_backup_path, wallet_path)
self.start_node(idx, self.extra_args[idx])
self.connect_nodes(0, idx)
self.sync_all()
self.log.info("Verify keypool is restored and balance is correct")
assert_equal(self.nodes[idx].getbalance(), 15)
assert_equal(self.nodes[idx].listtransactions()[0]['category'], "receive")
# Check that we have marked all keys up to the used keypool key as used
if self.options.descriptors:
if output_type == 'legacy':
assert_equal(self.nodes[idx].getaddressinfo(self.nodes[idx].getnewaddress(address_type=output_type))['hdkeypath'], "m/44'/1'/0'/0/110")
elif output_type == 'p2sh-segwit':
assert_equal(self.nodes[idx].getaddressinfo(self.nodes[idx].getnewaddress(address_type=output_type))['hdkeypath'], "m/49'/1'/0'/0/110")
elif output_type == 'bech32':
assert_equal(self.nodes[idx].getaddressinfo(self.nodes[idx].getnewaddress(address_type=output_type))['hdkeypath'], "m/84'/1'/0'/0/110")
else:
assert_equal(self.nodes[idx].getaddressinfo(self.nodes[idx].getnewaddress(address_type=output_type))['hdkeypath'], "m/0'/0'/110'")
if __name__ == '__main__':
KeypoolRestoreTest().main()
|
the-stack_106_17433
|
from __future__ import unicode_literals
from oauthlib.oauth2 import WebApplicationClient, InsecureTransportError
from oauthlib.oauth2 import is_secure_transport
from requests.auth import AuthBase
class OAuth2(AuthBase):
"""Adds proof of authorization (OAuth2 token) to the request."""
def __init__(self, client_id=None, client=None, token=None):
"""Construct a new OAuth 2 authorization object.
:param client_id: Client id obtained during registration
:param client: :class:`oauthlib.oauth2.Client` to be used. Default is
WebApplicationClient which is useful for any
hosted application but not mobile or desktop.
:param token: Token dictionary, must include access_token
and token_type.
"""
self._client = client or WebApplicationClient(client_id, token=token)
if token:
for k, v in token.items():
setattr(self._client, k, v)
def __call__(self, r):
"""Append an OAuth 2 token to the request.
Note that currently HTTPS is required for all requests. There may be
a token type that allows for plain HTTP in the future and then this
should be updated to allow plain HTTP on a white list basis.
"""
if not is_secure_transport(r.url):
raise InsecureTransportError()
r.url, r.headers, r.body = self._client.add_token(
r.url, http_method=r.method, body=r.body, headers=r.headers
)
return r
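# Usage sketch (the client id, token values and URL below are placeholders).
# requests invokes OAuth2.__call__ on the outgoing request, which appends the
# token; the URL must be HTTPS or InsecureTransportError is raised.
def _oauth2_request_sketch():
    import requests
    token = {"access_token": "example-access-token", "token_type": "Bearer"}
    auth = OAuth2(client_id="example-client-id", token=token)
    return requests.get("https://api.example.com/resource", auth=auth)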
|
the-stack_106_17434
|
import logging
from flexget import plugin
from flexget.event import event
log = logging.getLogger('est_released')
class EstimateRelease(object):
"""
Front-end for estimator plugins that estimate release times
for various things (series, movies).
"""
def estimate(self, entry):
"""
Estimate release schedule for Entry
:param entry:
        :return: estimated release date for the entry, or None if it cannot be determined
"""
log.debug(entry['title'])
estimators = [
e.instance.estimate for e in plugin.get_plugins(interface='estimate_release')
]
for estimator in sorted(
estimators, key=lambda e: getattr(e, 'priority', plugin.PRIORITY_DEFAULT), reverse=True
):
estimate = estimator(entry)
# return first successful estimation
if estimate is not None:
return estimate
@event('plugin.register')
def register_plugin():
plugin.register(EstimateRelease, 'estimate_release', api_ver=2, interfaces=[])
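# Generic sketch of the dispatch pattern used above (standalone, not tied to
# FlexGet's plugin registry): estimators are sorted by an optional `priority`
# attribute and the first non-None estimate wins.
def _first_estimate(estimators, entry, default_priority=128):
    for estimator in sorted(estimators,
                            key=lambda e: getattr(e, 'priority', default_priority),
                            reverse=True):
        estimate = estimator(entry)
        if estimate is not None:
            return estimate
    return None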
|
the-stack_106_17435
|
import asyncio
import time
import aiohttp
from aiohttp import ClientSession
tasks = []
async def fetch_html(url: str, session: ClientSession, **kwargs) -> str:
resp = await session.request(method="GET", url=url, **kwargs)
resp.raise_for_status()
return await resp.text()
async def make_requests(url: str, **kwargs) -> None:
async with ClientSession() as session:
for i in range(1,3):
tasks.append(
fetch_html(url=url, session=session, **kwargs)
)
results = await asyncio.gather(*tasks)
print(results)
# do something with results
if __name__ == "__main__":
start = time.time()
try:
asyncio.run(make_requests(url='http://google.in/'))
    except Exception as exc:
        print(f"Request batch failed: {exc}")
end = time.time()
print("Took {} seconds to pull {} websites.".format(end - start,len(tasks)))
|
the-stack_106_17438
|
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
config = {
'description': 'My Project',
'author': 'Lei Fan',
'url': 'http://www.example.com',
'download_url': 'http://www.example.com',
'author_email': '[email protected]',
'version': '0.1',
'install_requires': ['nose'],
'packages': ['ex47'],
'scripts': [],
'name': 'projectname'
}
setup(**config)
|
the-stack_106_17444
|
"""
Custom Gender Settings is licensed under the Creative Commons Attribution 4.0 International public license (CC BY 4.0).
https://creativecommons.org/licenses/by/4.0/
https://creativecommons.org/licenses/by/4.0/legalcode
Copyright (c) COLONOLNUTTY
"""
from typing import Callable, Any
from customgendersettings.enums.trait_ids import CGSTraitId
from customgendersettings.logging.has_cgs_log import HasCGSLog
from customgendersettings.settings.dialog import CGSGlobalSettingsDialog
from sims.sim_info import SimInfo
from sims4communitylib.dialogs.common_choice_outcome import CommonChoiceOutcome
from sims4communitylib.dialogs.common_ok_dialog import CommonOkDialog
from sims4communitylib.dialogs.ok_cancel_dialog import CommonOkCancelDialog
from sims4communitylib.dialogs.option_dialogs.common_choose_object_option_dialog import CommonChooseObjectOptionDialog
from sims4communitylib.dialogs.option_dialogs.options.common_dialog_option_context import CommonDialogOptionContext
from sims4communitylib.dialogs.option_dialogs.options.objects.common_dialog_action_option import \
CommonDialogActionOption
from sims4communitylib.dialogs.option_dialogs.options.objects.common_dialog_input_option import \
CommonDialogInputFloatOption
from sims4communitylib.dialogs.option_dialogs.options.objects.common_dialog_select_option import \
CommonDialogSelectOption
from sims4communitylib.dialogs.option_dialogs.options.objects.common_dialog_toggle_option import \
CommonDialogToggleOption
from customgendersettings.modinfo import ModInfo
from customgendersettings.enums.strings_enum import CGSStringId
from sims4communitylib.enums.common_voice_actor_type import CommonVoiceActorType
from sims4communitylib.enums.strings_enum import CommonStringId
from sims4communitylib.enums.traits_enum import CommonTraitId
from sims4communitylib.exceptions.common_exceptions_handler import CommonExceptionHandler
from sims4communitylib.utils.common_function_utils import CommonFunctionUtils
from sims4communitylib.utils.common_icon_utils import CommonIconUtils
from sims4communitylib.utils.sims.common_gender_utils import CommonGenderUtils
from sims4communitylib.utils.sims.common_sim_gender_option_utils import CommonSimGenderOptionUtils
from sims4communitylib.utils.sims.common_sim_voice_utils import CommonSimVoiceUtils
from sims4communitylib.utils.sims.common_species_utils import CommonSpeciesUtils
from sims4communitylib.utils.sims.common_trait_utils import CommonTraitUtils
from ui.ui_dialog import UiDialogOkCancel
class CustomGenderSettingsDialog(HasCGSLog):
""" A Dialog that opens custom gender settings. """
def __init__(self, sim_info: SimInfo, on_close: Callable[[], None]=CommonFunctionUtils.noop):
super().__init__()
self._sim_info = sim_info
self._on_close = on_close
# noinspection PyMissingOrEmptyDocstring
@property
def log_identifier(self) -> str:
return 'cgs_dialog'
def open(self) -> None:
""" Open the dialog. """
try:
def _on_close() -> None:
if self._on_close is not None:
self._on_close()
self._settings(on_close=_on_close)
except Exception as ex:
self.log.error('Error occurred while opening custom gender settings dialog.', exception=ex)
def _settings(self, on_close: Callable[[], Any]=None) -> None:
def _on_close() -> None:
if on_close is not None:
on_close()
def _reopen() -> None:
self._settings(on_close=on_close)
option_dialog = CommonChooseObjectOptionDialog(
CommonStringId.CUSTOM_GENDER_SETTINGS,
CGSStringId.CGS_CUSTOM_GENDER_SETTINGS_DESCRIPTION,
mod_identity=self.mod_identity,
on_close=_on_close
)
option_dialog.add_option(
CommonDialogActionOption(
CommonDialogOptionContext(
CGSStringId.GLOBAL_SETTINGS_NAME,
CGSStringId.GLOBAL_SETTINGS_DESCRIPTION
),
on_chosen=CGSGlobalSettingsDialog(self._sim_info, on_close=_reopen).open
)
)
def _on_toggle_global_exclude_chosen(option_identifier: str, has_trait: bool):
self.log.format(option_identifier=option_identifier, has_trait=has_trait)
if has_trait:
self.log.format_with_message('Adding the trait to the Sim.', sim=self._sim_info, has_trait=has_trait)
CommonTraitUtils.add_trait(self._sim_info, CGSTraitId.CGS_EXCLUDE_FROM_GLOBAL_OVERRIDES)
else:
self.log.format_with_message('Removing the trait from the Sim.', sim=self._sim_info, has_trait=has_trait)
CommonTraitUtils.remove_trait(self._sim_info, CGSTraitId.CGS_EXCLUDE_FROM_GLOBAL_OVERRIDES)
_reopen()
option_dialog.add_option(
CommonDialogToggleOption(
'ToggleGlobalExclude',
CommonTraitUtils.has_trait(self._sim_info, CGSTraitId.CGS_EXCLUDE_FROM_GLOBAL_OVERRIDES),
CommonDialogOptionContext(
CGSStringId.EXCLUDE_THIS_SIM_FROM_GLOBAL_OVERRIDES_NAME,
CGSStringId.EXCLUDE_THIS_SIM_FROM_GLOBAL_OVERRIDES_DESCRIPTION
),
on_chosen=_on_toggle_global_exclude_chosen
)
)
def _set_to_vanilla_gender_chosen() -> None:
if CommonGenderUtils.is_male(self._sim_info):
CommonSimGenderOptionUtils.update_gender_options_to_vanilla_male(self._sim_info)
else:
CommonSimGenderOptionUtils.update_gender_options_to_vanilla_female(self._sim_info)
_reopen()
option_dialog.add_option(
CommonDialogActionOption(
CommonDialogOptionContext(
CGSStringId.CGS_SET_TO_VANILLA_GENDER_OPTIONS_NAME,
CGSStringId.CGS_SET_TO_VANILLA_GENDER_OPTIONS_DESCRIPTION,
icon=CommonIconUtils.load_arrow_right_icon()
),
on_chosen=_set_to_vanilla_gender_chosen
)
)
def _on_gender_chosen() -> None:
@CommonExceptionHandler.catch_exceptions(ModInfo.get_identity(), fallback_return=False)
def _on_ok(_: UiDialogOkCancel):
CommonGenderUtils.swap_gender(self._sim_info, update_gender_options=True, update_voice=True, update_outfits=True)
_reopen()
@CommonExceptionHandler.catch_exceptions(self.mod_identity, fallback_return=False)
def _on_cancel(_: UiDialogOkCancel):
CommonGenderUtils.swap_gender(self._sim_info, update_gender_options=False, update_voice=False, update_outfits=False)
_reopen()
CommonOkCancelDialog(
CGSStringId.UPDATE_GENDER_OPTIONS_TOO_QUESTION,
CGSStringId.DO_YOU_WANT_GENDER_OPTIONS_UPDATED_TOO,
ok_text_identifier=CommonStringId.S4CL_YES,
cancel_text_identifier=CommonStringId.S4CL_NO,
mod_identity=self.mod_identity
).show(on_ok_selected=_on_ok, on_cancel_selected=_on_cancel)
current_gender_string = CGSStringId.MALE
if CommonGenderUtils.is_female(self._sim_info):
current_gender_string = CGSStringId.FEMALE
option_dialog.add_option(
CommonDialogActionOption(
CommonDialogOptionContext(
CGSStringId.CGS_SWAP_GENDER_NAME,
CGSStringId.CGS_SWAP_GENDER_DESCRIPTION,
title_tokens=(current_gender_string,),
icon=CommonIconUtils.load_arrow_right_icon()
),
on_chosen=_on_gender_chosen
)
)
if CommonSpeciesUtils.is_human(self._sim_info):
def _on_physical_frame_chosen() -> None:
value = not CommonSimGenderOptionUtils.has_masculine_frame(self._sim_info)
CommonSimGenderOptionUtils.update_body_frame(self._sim_info, value)
_reopen()
current_body_frame = CommonStringId.FEMININE
if CommonSimGenderOptionUtils.has_masculine_frame(self._sim_info):
current_body_frame = CommonStringId.MASCULINE
option_dialog.add_option(
CommonDialogActionOption(
CommonDialogOptionContext(
CommonStringId.PHYSICAL_FRAME,
CGSStringId.CGS_CURRENT,
title_tokens=(current_body_frame,),
description_tokens=(current_body_frame,),
icon=CommonIconUtils.load_arrow_right_icon()
),
on_chosen=_on_physical_frame_chosen
)
)
current_clothing = CommonStringId.FEMININE
if CommonSimGenderOptionUtils.prefers_menswear(self._sim_info):
current_clothing = CommonStringId.MASCULINE
def _on_clothing_preference_chosen() -> None:
value = not CommonSimGenderOptionUtils.prefers_menswear(self._sim_info)
CommonSimGenderOptionUtils.update_clothing_preference(self._sim_info, value)
_reopen()
option_dialog.add_option(
CommonDialogActionOption(
CommonDialogOptionContext(
CommonStringId.CLOTHING_PREFERENCE,
CGSStringId.CGS_CURRENT,
title_tokens=(current_clothing,),
description_tokens=(current_clothing,),
icon=CommonIconUtils.load_arrow_right_icon()
),
on_chosen=_on_clothing_preference_chosen
)
)
def _on_toggle_breasts_chosen(option_identifier: str, has_breasts: bool):
self.log.format(option_identifier=option_identifier, has_breasts=has_breasts)
def _on_acknowledged(_) -> None:
_reopen()
CommonSimGenderOptionUtils.update_has_breasts(self._sim_info, has_breasts)
CommonOkDialog(
CGSStringId.CGS_SETTING_SAVE_RELOAD_ALERT_NAME,
CGSStringId.CGS_SETTING_SAVE_RELOAD_ALERT_DESCRIPTION
).show(on_acknowledged=_on_acknowledged)
has_vanilla_breasts = False
if CommonGenderUtils.is_female(self._sim_info):
has_vanilla_breasts = not CommonTraitUtils.has_trait(self._sim_info, CommonTraitId.BREASTS_FORCE_OFF)
option_dialog.add_option(
CommonDialogToggleOption(
'ToggleBreasts',
CommonTraitUtils.has_trait(self._sim_info, CommonTraitId.BREASTS_FORCE_ON) or has_vanilla_breasts,
CommonDialogOptionContext(
CGSStringId.CGS_TOGGLE_BREASTS_NAME,
CGSStringId.CGS_TOGGLE_BREASTS_DESCRIPTION
),
on_chosen=_on_toggle_breasts_chosen
)
)
option_dialog.add_option(
CommonDialogActionOption(
CommonDialogOptionContext(
CGSStringId.CGS_PREGNANCY_OPTIONS_NAME,
CGSStringId.CGS_PREGNANCY_OPTIONS_DESCRIPTION,
icon=CommonIconUtils.load_arrow_navigate_into_icon()
),
on_chosen=lambda *_, **__: self._pregnancy_options(on_close=_reopen)
)
)
def _on_can_use_toilet_standing_chosen(_: str, can_use_toilet_standing: bool):
CommonSimGenderOptionUtils.set_can_use_toilet_standing(self._sim_info, can_use_toilet_standing)
_reopen()
option_dialog.add_option(
CommonDialogToggleOption(
'CanUseToiletStanding',
CommonSimGenderOptionUtils.uses_toilet_standing(self._sim_info),
CommonDialogOptionContext(
CGSStringId.CGS_CAN_USE_TOILET_STANDING_NAME,
CGSStringId.CGS_CAN_USE_TOILET_STANDING_DESCRIPTION,
title_tokens=(
CommonStringId.S4CL_YES if CommonSimGenderOptionUtils.uses_toilet_standing(self._sim_info) else CommonStringId.S4CL_NO,
)
),
on_chosen=_on_can_use_toilet_standing_chosen
)
)
def _on_can_use_toilet_sitting_chosen(_: str, can_use_toilet_sitting: bool):
CommonSimGenderOptionUtils.set_can_use_toilet_sitting(self._sim_info, can_use_toilet_sitting)
_reopen()
option_dialog.add_option(
CommonDialogToggleOption(
'CanUseToiletSitting',
CommonSimGenderOptionUtils.uses_toilet_sitting(self._sim_info),
CommonDialogOptionContext(
CGSStringId.CGS_CAN_USE_TOILET_SITTING_NAME,
CGSStringId.CGS_CAN_USE_TOILET_SITTING_DESCRIPTION,
title_tokens=(
CommonStringId.S4CL_YES if CommonSimGenderOptionUtils.uses_toilet_sitting(self._sim_info) else CommonStringId.S4CL_NO,
)
),
on_chosen=_on_can_use_toilet_sitting_chosen
)
)
def _on_voice_pitch_changed(_: str, setting_value: float, outcome: CommonChoiceOutcome):
if setting_value is None or CommonChoiceOutcome.is_error_or_cancel(outcome):
_reopen()
return
CommonSimVoiceUtils.set_voice_pitch(self._sim_info, setting_value)
_reopen()
voice_pitch = CommonSimVoiceUtils.get_voice_pitch(self._sim_info)
option_dialog.add_option(
CommonDialogInputFloatOption(
'VoicePitch',
voice_pitch,
CommonDialogOptionContext(
CGSStringId.SET_VOICE_PITCH_TITLE,
CGSStringId.SET_VOICE_PITCH_DESCRIPTION,
title_tokens=(
str(voice_pitch),
),
description_tokens=(
'-1.0',
'1.0'
)
),
min_value=-1.0,
max_value=1.0,
on_chosen=_on_voice_pitch_changed
)
)
voice_actor = CommonSimVoiceUtils.get_voice_actor(self._sim_info)
option_dialog.add_option(
CommonDialogActionOption(
CommonDialogOptionContext(
CGSStringId.SET_VOICE_ACTOR_TITLE,
CGSStringId.SET_VOICE_ACTOR_DESCRIPTION,
title_tokens=(
str(voice_actor.name if isinstance(voice_actor, CommonVoiceActorType) else voice_actor),
),
icon=CommonIconUtils.load_arrow_navigate_into_icon()
),
on_chosen=lambda *_, **__: self._set_voice_actor(on_close=_reopen)
)
)
option_dialog.show(sim_info=self._sim_info)
def _pregnancy_options(self, on_close: Callable[[], None]=None) -> None:
def _on_close() -> None:
if on_close is not None:
on_close()
def _reopen() -> None:
self._pregnancy_options(on_close=on_close)
option_dialog = CommonChooseObjectOptionDialog(
CGSStringId.CGS_PREGNANCY_OPTIONS_NAME,
CGSStringId.CGS_PREGNANCY_OPTIONS_DESCRIPTION,
mod_identity=self.mod_identity,
on_close=_on_close
)
if CommonSpeciesUtils.is_animal(self._sim_info):
def _on_reproductive_chosen(_: str, picked_option: bool):
if picked_option is None:
_on_close()
return
value = not CommonSimGenderOptionUtils.can_reproduce(self._sim_info)
CommonSimGenderOptionUtils.update_can_reproduce(self._sim_info, value)
_reopen()
current_selected = CGSStringId.NATURAL
can_reproduce = CommonSimGenderOptionUtils.can_reproduce(self._sim_info)
if not can_reproduce:
current_selected = CGSStringId.FIXED
option_dialog.add_option(
CommonDialogToggleOption(
'Reproductive Settings',
can_reproduce,
CommonDialogOptionContext(
CGSStringId.REPRODUCTIVE_SETTINGS,
CGSStringId.CGS_CURRENT,
description_tokens=(current_selected,),
icon=CommonIconUtils.load_question_mark_icon()
),
on_chosen=_on_reproductive_chosen
)
)
def _can_impregnate_chosen(option_identifier: str, can_get_others_pregnant: bool):
self.log.format(option_identifier=option_identifier, can_get_others_pregnant=can_get_others_pregnant)
value = not CommonSimGenderOptionUtils.can_impregnate(self._sim_info)
CommonSimGenderOptionUtils.update_can_impregnate(self._sim_info, value)
_reopen()
option_dialog.add_option(
CommonDialogToggleOption(
'CanImpregnate',
CommonSimGenderOptionUtils.can_impregnate(self._sim_info),
CommonDialogOptionContext(
CGSStringId.CGS_CAN_GET_OTHERS_PREGNANT_NAME,
CGSStringId.CGS_CAN_GET_OTHERS_PREGNANT_DESCRIPTION
),
on_chosen=_can_impregnate_chosen
)
)
def _can_be_impregnated_chosen(option_identifier: str, can_get_pregnant: bool):
self.log.format(option_identifier=option_identifier, can_get_pregnant=can_get_pregnant)
value = not CommonSimGenderOptionUtils.can_be_impregnated(self._sim_info)
CommonSimGenderOptionUtils.update_can_be_impregnated(self._sim_info, value)
_reopen()
option_dialog.add_option(
CommonDialogToggleOption(
'CanBeImpregnated',
CommonSimGenderOptionUtils.can_be_impregnated(self._sim_info),
CommonDialogOptionContext(
CGSStringId.CGS_CAN_BECOME_PREGNANT_NAME,
CGSStringId.CGS_CAN_BECOME_PREGNANT_DESCRIPTION
),
on_chosen=_can_be_impregnated_chosen
)
)
option_dialog.show(sim_info=self._sim_info)
def _set_voice_actor(self, on_close: Callable[[], None]=None) -> None:
def _on_close() -> None:
if on_close is not None:
on_close()
voice_actor = CommonSimVoiceUtils.get_voice_actor(self._sim_info)
option_dialog = CommonChooseObjectOptionDialog(
CGSStringId.SET_VOICE_ACTOR_TITLE,
CGSStringId.SET_VOICE_ACTOR_DESCRIPTION,
title_tokens=(
str(voice_actor.name if isinstance(voice_actor, CommonVoiceActorType) else voice_actor),
),
mod_identity=self.mod_identity,
on_close=_on_close
)
@CommonExceptionHandler.catch_exceptions(self.mod_identity, fallback_return=False)
def _on_chosen(_: str, chosen: CommonVoiceActorType) -> None:
if chosen is None:
self.log.format_with_message('No chosen', chosen=chosen)
_on_close()
return
CommonSimVoiceUtils.set_voice_actor(self._sim_info, chosen)
_on_close()
voice_actor_types = CommonSimVoiceUtils.determine_available_voice_types(self._sim_info)
for voice_actor_type in voice_actor_types:
option_dialog.add_option(
CommonDialogSelectOption(
voice_actor_type.name,
voice_actor_type,
CommonDialogOptionContext(
voice_actor_type.name,
0,
icon=CommonIconUtils.load_checked_square_icon() if voice_actor_type == voice_actor else CommonIconUtils.load_unchecked_square_icon()
),
on_chosen=_on_chosen
)
)
option_dialog.show(sim_info=self._sim_info)
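# Usage sketch (illustrative; `sim_info` would come from an in-game interaction
# or event supplied by Sims 4 Community Library):
#   dialog = CustomGenderSettingsDialog(sim_info, on_close=lambda: None)
#   dialog.open()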
|
the-stack_106_17445
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model based on combination of depthwise and 1x1 convolutions."""
from kws_streaming.layers import speech_features
from kws_streaming.layers.compat import tf
from kws_streaming.layers.stream import Stream
from kws_streaming.models.utils import parse
def model_parameters(parser_nn):
"""Depthwise Convolutional(DS CNN) model parameters.
In more details parameters are described at:
https://www.tensorflow.org/api_docs/python/tf/keras/layers/BatchNormalization
https://www.tensorflow.org/api_docs/python/tf/keras/layers/DepthwiseConv2D
https://www.tensorflow.org/api_docs/python/tf/keras/layers/Conv2D
Args:
parser_nn: global command line args parser
Returns: parser with updated arguments
"""
parser_nn.add_argument(
'--cnn1_kernel_size',
type=str,
default='3,3',
help='Heights and widths of the first 2D convolution',
)
parser_nn.add_argument(
'--cnn1_dilation_rate',
type=str,
default='2,1',
help='Dilation rate of the first 2D convolution',
)
parser_nn.add_argument(
'--cnn1_strides',
type=str,
default='1,1',
help='Strides of the first 2D convolution along the height and width',
)
parser_nn.add_argument(
'--cnn1_padding',
type=str,
default='valid',
help="one of 'valid' or 'same'",
)
parser_nn.add_argument(
'--cnn1_filters',
type=int,
default=300,
help='Number of output filters in the first 2D convolution layers',
)
parser_nn.add_argument(
'--cnn1_act',
type=str,
default='relu',
help='Activation function in the first 2D convolution layers',
)
parser_nn.add_argument(
'--bn_momentum',
type=float,
default=0.98,
help='Momentum for the moving average',
)
parser_nn.add_argument(
'--bn_center',
type=int,
default=1,
help='If True, add offset of beta to normalized tensor. '
'If False, beta is ignored.',
)
parser_nn.add_argument(
'--bn_scale',
type=int,
default=0,
help='If True, multiply by gamma. If False, gamma is not used. '
'When the next layer is linear (also e.g. nn.relu), this can be disabled '
'since the scaling will be done by the next layer.',
)
parser_nn.add_argument(
'--bn_renorm',
type=int,
default=0,
help='Whether to use Batch Renormalization',
)
parser_nn.add_argument(
'--dw2_kernel_size',
type=str,
default='(3,3),(3,3),(10,3),(5,3),(10,3)',
help='Height and width of the 2D Depthwise convolutions',
)
parser_nn.add_argument(
'--dw2_dilation_rate',
type=str,
default='(1,1),(2,2),(1,1),(2,2),(1,1)',
help='Dilation rate of the 2D Depthwise convolutions',
)
parser_nn.add_argument(
'--dw2_strides',
type=str,
default='(1,1),(1,1),(1,1),(1,1),(1,1)',
help='Strides of the 2D Depthwise convolutions',
)
parser_nn.add_argument(
'--dw2_padding',
type=str,
default='valid',
help="one of 'valid' or 'same'",
)
parser_nn.add_argument(
'--dw2_act',
type=str,
default="'relu','relu','relu','relu','relu'",
help='Activation functions in the Depthwise convolution layers',
)
parser_nn.add_argument(
'--cnn2_filters',
type=str,
default='300,300,300,300,300',
help='Number of output filters in 1x1 convolution layers',
)
parser_nn.add_argument(
'--cnn2_act',
type=str,
default="'relu','relu','relu','relu','relu'",
help='Activation functions in 1x1 convolution layers',
)
parser_nn.add_argument(
'--dropout1',
type=float,
default=0.2,
help='Percentage of data dropped',
)
def model(flags):
"""Depthwise convolutional model.
It is based on paper:
MobileNets: Efficient Convolutional Neural Networks for
Mobile Vision Applications https://arxiv.org/abs/1704.04861
Hello Edge: Keyword Spotting on Microcontrollers
https://arxiv.org/pdf/1711.07128.pdf
Args:
flags: data/model parameters
Returns:
Keras model for training
"""
input_audio = tf.keras.layers.Input(
shape=(flags.desired_samples,), batch_size=flags.batch_size)
net = speech_features.SpeechFeatures(
frame_size_ms=flags.window_size_ms,
frame_step_ms=flags.window_stride_ms,
sample_rate=flags.sample_rate,
use_tf_fft=flags.use_tf_fft,
preemph=flags.preemph,
window_type=flags.window_type,
feature_type=flags.feature_type,
mel_num_bins=flags.mel_num_bins,
mel_lower_edge_hertz=flags.mel_lower_edge_hertz,
mel_upper_edge_hertz=flags.mel_upper_edge_hertz,
mel_non_zero_only=flags.mel_non_zero_only,
fft_magnitude_squared=flags.fft_magnitude_squared,
dct_num_features=flags.dct_num_features)(
input_audio)
net = tf.keras.backend.expand_dims(net)
net = Stream(
cell=tf.keras.layers.Conv2D(
kernel_size=parse(flags.cnn1_kernel_size),
dilation_rate=parse(flags.cnn1_dilation_rate),
filters=flags.cnn1_filters,
padding=flags.cnn1_padding,
strides=parse(flags.cnn1_strides)))(
net)
net = tf.keras.layers.BatchNormalization(
momentum=flags.bn_momentum,
center=flags.bn_center,
scale=flags.bn_scale,
renorm=flags.bn_renorm)(
net)
net = tf.keras.layers.Activation('relu')(net)
for kernel_size, dw2_act, dilation_rate, strides, filters, cnn2_act in zip(
parse(flags.dw2_kernel_size), parse(flags.dw2_act),
parse(flags.dw2_dilation_rate), parse(flags.dw2_strides),
parse(flags.cnn2_filters), parse(flags.cnn2_act)):
net = Stream(
cell=tf.keras.layers.DepthwiseConv2D(
kernel_size=kernel_size,
dilation_rate=dilation_rate,
padding=flags.dw2_padding,
strides=strides))(
net)
net = tf.keras.layers.BatchNormalization(
momentum=flags.bn_momentum,
center=flags.bn_center,
scale=flags.bn_scale,
renorm=flags.bn_renorm)(
net)
net = tf.keras.layers.Activation(dw2_act)(net)
net = tf.keras.layers.Conv2D(kernel_size=(1, 1), filters=filters)(net)
net = tf.keras.layers.BatchNormalization(
momentum=flags.bn_momentum,
center=flags.bn_center,
scale=flags.bn_scale,
renorm=flags.bn_renorm)(
net)
net = tf.keras.layers.Activation(cnn2_act)(net)
net = Stream(
cell=tf.keras.layers.AveragePooling2D(
pool_size=(int(net.shape[1]), int(net.shape[2]))))(
net)
net = Stream(cell=tf.keras.layers.Flatten())(net)
net = tf.keras.layers.Dropout(rate=flags.dropout1)(net)
net = tf.keras.layers.Dense(units=flags.label_count)(net)
return tf.keras.Model(input_audio, net)
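# Usage sketch (illustrative; `flags` must also carry the speech-feature and
# training flags defined elsewhere in kws_streaming, e.g. desired_samples,
# batch_size, label_count):
#   import argparse
#   parser = argparse.ArgumentParser()
#   model_parameters(parser)
#   flags = parser.parse_args([])   # extend with the base flags before calling model()
#   keras_model = model(flags)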
|
the-stack_106_17447
|
# Copyright 2018 CNRS - Airbus SAS
# Author: Joseph Mirabel
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from dynamic_graph import plug
from dynamic_graph.sot.core.integrator_euler import IntegratorEulerVectorMatrix, IntegratorEulerVectorDouble
from dynamic_graph.sot.core.operator import Add_of_vector
class Controller:
def __init__ (self, name, nums, denoms, period, initialValue):
"""
- nums: coeffs of the numerator in increasing order of derivatives
- denoms: coeffs of the denominator in increasing order of derivatives
"""
self.name = name
if isinstance(nums[0], (float, int)):
self.function = IntegratorEulerVectorDouble (name + "_H")
else:
self.function = IntegratorEulerVectorMatrix (name + "_H")
for n in nums : self.function.pushNumCoef (n)
for n in denoms: self.function.pushDenomCoef (n)
self.function.sin.value = initialValue
self.function.initialize()
self.function.setSamplingPeriod(period)
self.ref_m_meas = None
def addFeedback (self):
if self.ref_m_meas is None:
self.ref_m_meas = Add_of_vector(self.name + "_ref_m_meas")
self.ref_m_meas.setCoeff1( 1)
self.ref_m_meas.setCoeff2(-1)
plug(self.ref_m_meas.sout, self.function.sin)
@property
def hasFeedback (self):
return self.ref_m_meas is not None
@property
def reference (self):
""" input signal """
if self.ref_m_meas is not None:
return self.ref_m_meas.sin1
else:
return self.function.sin
@property
def referenceName (self):
""" input signal """
if self.ref_m_meas is not None:
return self.ref_m_meas.name + ".sin1"
else:
return self.function.name + ".sin"
@property
def measurement (self):
""" input signal """
assert self.ref_m_meas is not None
return self.ref_m_meas.sin2
@property
def measurementName (self):
""" input signal """
assert self.ref_m_meas is not None
return self.ref_m_meas.name + ".sin2"
@property
def output (self): return self.function.sout
@property
def outputName (self): return self.function.name + ".sout"
@property
def outputDerivative (self): return self.function.derivativesout
def secondOrderOpenLoop (name, wn, z, period, initialValue):
"""
Transfer function:
wn**2
H(s) = -------------------------
s**2 + 2*z*wn * s + wn**2
- wn: corner frequency
- z : damping
"""
nums = ( wn**2 ,)
denoms = ( wn**2, 2*z*wn, 1. )
return Controller (name, nums, denoms, period, initialValue)
def secondOrderClosedLoop (name, wn, z, period, initialValue):
"""
Create a 2nd-order transfer function with loop closure so that
the overall input-output relation is as below.
Unit feedback is assumed.
wn**2
H(s) = -------------------------
s**2 + 2*z*wn * s + wn**2
- wn: corner frequency
- z : damping
"""
nums = ( wn**2 ,)
denoms = ( 0, 2*z*wn, 1. )
control = Controller (name, nums, denoms, period, initialValue)
control.addFeedback()
return control
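# Usage sketch (illustrative values; signal wiring depends on the wider SoT graph):
#   ctrl = secondOrderClosedLoop("vel_filter", wn=10., z=0.7, period=0.001,
#                                initialValue=(0.,))
#   plug(reference_entity.sout, ctrl.reference)      # hypothetical source signals
#   plug(measurement_entity.sout, ctrl.measurement)  # feedback is already enabled
#   ctrl.output                                      # filtered output signal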
|
the-stack_106_17449
|
"""
This file handles converting callables with numpy docstrings into config classes
by parsing their docstrings to find their default values, finding the help text
for each value, and then calling ``make_config`` to create a config class
representing the arguments to that callable.
"""
import inspect
import dataclasses
from typing import Dict, Optional, Tuple, Type, Any, Callable
from ...base import make_config, field
from .exceptions import ParameterNotInDocString
# Things people name their types, mapped to their real Python types.
NUMPY_DOCS_TYPE_MAP = {
"int": int,
"integer": int,
"str": str,
"string": str,
"float": float,
"dict": dict,
"bool": bool,
}
def numpy_get_default(type_str):
if not "default" in type_str:
return dataclasses.MISSING
type_str = type_str[type_str.index("default") :]
type_str = type_str.replace("default", "")
type_str = type_str.replace(")", "")
type_str = type_str.replace("=", "")
type_str = type_str.replace('"', "")
type_str = type_str.replace("'", "")
type_str = type_str.strip()
if type_str == "None":
return None
return type_str
def numpy_doc_to_field(type_str, description, param):
default = param.default
if default is inspect.Parameter.empty:
default = numpy_get_default(type_str)
type_cls = Any
# Set of choices
if "{'" in type_str and "'}" in type_str:
type_cls = str
elif "{" in type_str and "}" in type_str:
type_cls = int
if "." in type_str:
type_cls = float
else:
type_split = list(
map(lambda x: x.lower(), type_str.replace(",", "").split())
)
for numpy_type_name, python_type in NUMPY_DOCS_TYPE_MAP.items():
if numpy_type_name in type_split:
type_cls = python_type
if type_cls == Any and default != None:
type_cls = type(default)
return type_cls, field(description, default=default)
def numpy_cleanup_description(dtypes, description_lines, last: bool = False):
if description_lines:
# Remove the section header if we're on the last argument (since we will
# have the title of it in the body of the last argument's description
# currently).
if last:
description_lines = description_lines[:-1]
# Get rid of any leading blank lines
while description_lines and description_lines[0] == "":
description_lines = description_lines[1:]
# Get rid of any trailing blank lines
while description_lines and description_lines[-1] == "":
description_lines = description_lines[:-1]
# Set the description to be the joined lines
return " ".join(description_lines)
return dtypes
def numpy_docstring_args(cls: Callable):
parameters = inspect.signature(cls).parameters
docstring = inspect.getdoc(cls)
docparams = {}
# Parse parameters and their datatypes from docstring
last_param_name = None
for line in docstring.split("\n"):
if not ":" in line:
if last_param_name:
if line.startswith("--"):
docparams[last_param_name][1] = numpy_cleanup_description(
dtypes, docparams[last_param_name][1], last=True
)
break
# Append description lines
docparams[last_param_name][1].append(line.strip())
continue
param_name, dtypes = line.split(":", maxsplit=1)
param_name = param_name.strip()
dtypes = dtypes.strip()
if not param_name in parameters or param_name in docparams:
continue
docparams[param_name] = [dtypes, []]
if last_param_name:
docparams[last_param_name][1] = numpy_cleanup_description(
dtypes, docparams[last_param_name][1]
)
last_param_name = param_name
# Ensure all required parameters are present in docstring
for param_name, param in parameters.items():
if param_name in ["args", "kwargs"]:
continue
if not param_name in docparams:
raise ParameterNotInDocString(
f"{param_name} for {cls.__qualname__}"
)
docparams[param_name] = numpy_doc_to_field(
*docparams[param_name], param
)
return docparams
def make_config_numpy(
name: str,
cls: Type,
properties: Optional[Dict[str, Tuple[Type, field]]] = None,
):
"""
Given a numpy class, read its docstring and ``__init__`` parameters to
generate a config class with properties containing the correct types,
and default values.
"""
if properties is None:
properties = {}
properties.update(numpy_docstring_args(cls))
return make_config(
name, [tuple([key] + list(value)) for key, value in properties.items()]
)
|
the-stack_106_17450
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .array import SparseArray, SparseNDArray
from .core import get_array_module, get_sparse_module, naked, issparse, np, is_cupy
class SparseVector(SparseArray):
__slots__ = 'spmatrix',
def __init__(self, spvector, shape=()):
if shape and len(shape) != 1:
raise ValueError('Only accept 1-d array')
if isinstance(spvector, SparseVector):
self.spmatrix = spvector.spmatrix
else:
spvector = spvector.reshape(1, shape[0])
self.spmatrix = spvector.tocsr()
@property
def shape(self):
return self.spmatrix.shape[1],
def transpose(self, axes=None):
assert axes is None or tuple(axes) == (0,)
return self
@property
def T(self):
return self
def __truediv__(self, other):
try:
other = naked(other)
except TypeError:
return NotImplemented
x = self.spmatrix / other
if issparse(x):
return SparseNDArray(x, shape=self.shape)
if x.shape != self.shape:
x = np.asarray(x).reshape(self.shape)
return get_array_module(x).asarray(x)
def __rtruediv__(self, other):
try:
other = naked(other)
except TypeError:
return NotImplemented
try:
x = other / self.spmatrix
except TypeError:
x = other / self.spmatrix.toarray()
if issparse(x):
return SparseNDArray(x, shape=self.shape)
if x.shape != self.shape:
x = np.asarray(x).reshape(self.shape)
return get_array_module(x).asarray(x)
def dot(self, other, sparse=True):
other_shape = other.shape
try:
other = naked(other)
except TypeError:
return NotImplemented
if not sparse:
a = self.toarray()
if issparse(other):
other = other.toarray().reshape(other_shape)
x = a.dot(other)
else:
if len(other_shape) == 1:
x = self.spmatrix.dot(other.T)
else:
x = self.spmatrix.dot(other)
if issparse(x):
if x.shape == (1, 1):
# return scalar
return x.toarray()[0, 0]
shape = (x.shape[1],)
return SparseNDArray(x, shape=shape)
return get_array_module(x).asarray(x)
def concatenate(self, other, axis=0):
if other.ndim != 1:
raise ValueError('all the input arrays must have same number of dimensions')
try:
other = naked(other)
except TypeError:
return NotImplemented
if issparse(other):
xps = get_sparse_module(self.spmatrix)
if axis != 0:
raise ValueError('axis can only be 0')
other = other.reshape(1, other.shape[0]) if other.shape[0] != 1 else other
x = xps.hstack((self.spmatrix.reshape(1, self.shape[0]), other))
else:
xp = get_array_module(self.spmatrix)
x = xp.concatenate((self.spmatrix.toarray().reshape(self.shape), other), axis=axis)
if issparse(x):
return SparseNDArray(x, shape=(x.shape[1],))
return get_array_module(x).asarray(x)
def _reduction(self, method_name, axis=None, dtype=None, keepdims=None, todense=False, **kw):
if not todense:
assert keepdims is None or keepdims is False
if isinstance(axis, tuple):
assert axis == (0, )
axis = None
if todense:
x = self.spmatrix.toarray()
x = getattr(get_array_module(x), method_name)(x, axis=axis, **kw)
else:
x = getattr(self.spmatrix, method_name)(axis=axis, **kw)
m = get_array_module(x)
return m.array([x])[0]
def __setitem__(self, key, value):
if is_cupy(self.spmatrix):
return NotImplemented
else:
x = self.spmatrix.tolil()
key = (0,) + (key, )
x[key] = value
x = x.tocsr()
self.spmatrix = x
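# Usage sketch (illustrative; assumes scipy is installed and that `naked` unwraps
# to the underlying scipy matrix, as the helpers imported above suggest):
#   import scipy.sparse as sps
#   v = SparseVector(sps.csr_matrix([[0., 1., 0., 2.]]), shape=(4,))
#   v.shape       # -> (4,)
#   v.dot(v)      # -> 5.0, a scalar because the 1x1 sparse result is densified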
|
the-stack_106_17454
|
from __future__ import annotations
from collections import defaultdict
from struct import pack, Struct
from typing import List, TYPE_CHECKING
from pyNastran.bdf import MAX_INT
from pyNastran.op2.errors import SixtyFourBitError
if TYPE_CHECKING: # pragma: no cover
from pyNastran.op2.op2 import OP2
def write_geom1(op2_file, op2_ascii, obj: OP2, endian: bytes=b'<'):
#if not hasattr(obj, 'nodes'):
#return
if not hasattr(obj, 'nodes'):
return
nnodes = len(obj.nodes)
ncoords = len(obj.coords)
ngeom1 = nnodes or ncoords
if not ngeom1:
return
cards_written = {}
write_geom_header(b'GEOM1', op2_file, op2_ascii)
itable = -3
if nnodes:
max_nid = max(obj.nodes)
if max_nid > MAX_INT: # is the max 2147483647? 2^31-1
raise SixtyFourBitError(f'64-bit OP2 writing is not supported; max GRID nid={max_nid}')
#nvalues = nnodes * 8
#nbytes = nvalues * 4
#assert nnodes == 72, nnodes
nfields = 8 # nid, cp, x, y, z, cd, ps, seid
nvalues = nfields * nnodes + 3 # 3 comes from the keys
cards_written['GRID'] = nnodes
#assert nbytes == 2316, nbytes
#op2_file.write(pack('6i', *[4, 0, 4, 4, 1, 4]))
key = (4501, 45, 1)
nbytes = write_block(op2_file, op2_ascii, nvalues, key)
spack = Struct('ii 3f 3i')
for unused_nid, node in sorted(obj.nodes.items()):
xyz = node.xyz
ps = node.ps
if ps == '':
psi = 0
else:
psi = int(ps)
seid = node.seid
if seid == '':
seidi = 0
else:
seidi = int(seid)
data = [node.nid, node.Cp(), xyz[0], xyz[1], xyz[2], node.Cd(), psi, seidi]
op2_file.write(spack.pack(*data))
op2_ascii.write(' nid=%s cp=%s xyz=(%s, %s, %s) cd=%s ps=%s seid=%s\n' % tuple(data))
op2_file.write(pack('i', nbytes))
itable -= 1
data = [
4, itable, 4,
4, 1, 4,
4, 0, 4]
op2_file.write(pack('9i', *data))
op2_ascii.write(str(data) + '\n')
#-------------------------------------
if ncoords:
out = defaultdict(list)
for cid, coord in obj.coords.items():
if coord.type == 'GMCORD':
obj.log.warning(f'skipping {coord.type}')
continue
out[coord.type].append(cid)
coord_type_key_map = {
'CORD1C' : (1701, 17, 6),
'CORD1R' : (1801, 18, 5),
'CORD1S' : (1901, 19, 7),
'CORD2C' : (2001, 20, 9),
'CORD2R' : (2101, 21, 8),
'CORD2S' : (2201, 22, 10),
'CORD3G' : (14301, 143, 651),
}
for coord_type, cids in sorted(out.items()):
max_cid = max(cids)
if max_cid > 99999999: # is the max 2147483647? 2^31-1
raise SixtyFourBitError(f'64-bit OP2 writing is not supported; max {coord_type}={max_cid}')
key = coord_type_key_map[coord_type]
ncards = len(cids)
cards_written[coord_type] = ncards
if '2' in coord_type:
coord_int = 2
elif '1' in coord_type:
coord_int = 1
else: # pragma: no cover
raise NotImplementedError(coord_type)
if coord_type[-1] == 'R':
coord_rcs_int = 1
elif coord_type[-1] == 'C':
coord_rcs_int = 2
elif coord_type[-1] == 'S':
coord_rcs_int = 3
else: # pragma: no cover
raise NotImplementedError(coord_type)
if coord_type in ['CORD2R', 'CORD2C', 'CORD2S']:
nvalues = 13 * ncards + 3
spack = Struct(b'4i 9f')
nbytes = write_block(op2_file, op2_ascii, nvalues, key)
for cid in sorted(cids):
coord = obj.coords[cid]
data = ([cid, coord_rcs_int, coord_int, coord.Rid(), ] +
list(coord.e1) + list(coord.e2) + list(coord.e3))
op2_file.write(spack.pack(*data))
op2_ascii.write(' cid=%s data=%s' % (cid, str(data[1:])))
elif coord_type in ['CORD1R', 'CORD1C', 'CORD1S']:
nvalues = 6 * ncards + 3
spack = Struct(b'6i')
nbytes = write_block(op2_file, op2_ascii, nvalues, key)
nids = []
for cid in cids:
coord = obj.coords[cid]
nids.extend([coord.G1(), coord.G2(), coord.G3()])
max_nid = max(nids)
if max_nid > 99999999:
raise SixtyFourBitError(f'64-bit OP2 writing is not supported; {coord_type}: max nid={max_nid}')
del nids
for cid in sorted(cids):
coord = obj.coords[cid]
data = [cid, coord_rcs_int, coord_int, coord.G1(), coord.G2(), coord.G3()]
op2_file.write(spack.pack(*data))
op2_ascii.write(' cid=%s data=%s' % (cid, str(data[1:])))
else:
raise NotImplementedError(coord_type)
op2_file.write(pack('i', nbytes))
itable -= 1
data = [
4, itable, 4,
4, 1, 4,
4, 0, 4]
op2_file.write(pack('9i', *data))
op2_ascii.write(str(data) + '\n')
#_write_markers(op2_file, op2_ascii, [2, 4])
#-------------------------------------
close_geom_table(op2_file, op2_ascii, itable)
obj.log.debug(str(cards_written))
def write_block(op2_file, op2_ascii, nvalues, key):
nbytes = nvalues * 4
op2_file.write(pack('3i', *[4, nvalues, 4]))
op2_file.write(pack('i', nbytes)) #values, nbtyes))
op2_file.write(pack('3i', *key))
op2_ascii.write(str(key) + '\n')
return nbytes
def init_table(table_name: bytes) -> List[int]:
data = [
4, 2, 4,
#4, 2,4,
8, b'%8s' % table_name, 8,
4, -1, 4,
#4, 1, 4,
#4, 0, 4,
]
return data
def write_geom_header(table_name: bytes, op2_file, op2_ascii, endian: bytes=b'<'):
op2_ascii.write('----------\n')
data = init_table(table_name)
op2_file.write(pack('4i 8s i 3i', *data))
op2_ascii.write(str(data) + '\n')
data = [
4, 7, 4,
28, 1, 2, 3, 4, 5, 6, 7, 28,
]
#struct_3i = Struct(endian + b'3i')
op2_file.write(pack('3i 9i', *data))
op2_ascii.write(str(data) + '\n')
#-------------------------------------
data = [
4, -2, 4,
4, 1, 4,
4, 0, 4]
op2_file.write(pack('9i', *data))
op2_ascii.write(str(data) + '\n')
data = [
#4, 0, 4,
4, 2, 4,
8, 1, 2, 8,
]
op2_file.write(pack('3i 4i', *data))
op2_ascii.write(str(data) + '\n')
#data = [8, 1, 2, 8]
#op2_file.write(pack('4i', *data))
#-------------------------------------
data = [
4, -3, 4,
4, 1, 4,
4, 0, 4]
op2_file.write(pack('9i', *data))
op2_ascii.write(str(data) + '\n')
def close_geom_table(op2_file, op2_ascii, itable):
data = [
4, 3, 4,
12, 1, 2, 3, 12]
op2_file.write(pack('3i 5i', *data))
op2_ascii.write(str(data) + '\n')
itable -= 1
#-------------------------------------
data = [
4, itable, 4,
4, 1, 4,
4, 0, 4]
op2_file.write(pack('9i', *data))
op2_ascii.write(str(data) + '\n')
data = [
4, 0, 4,
#4, 2, 4
]
op2_file.write(pack('3i', *data))
op2_ascii.write(str(data) + '\n')
itable -= 1
def fill_defaultdict(typed_dict, direct_dict=None): # pragma: no cover
if not isinstance(typed_dict, tuple):
typed_dict = (typed_dict, )
out = defaultdict(list)
for dicti in typed_dict:
for idi, obj in dicti.items():
out[obj.type].append(idi)
if direct_dict:
if not isinstance(direct_dict, tuple):
direct_dict = (direct_dict, )
for dicti in direct_dict:
values = list(dicti.keys())
value0 = values[0]
obj0 = dicti[value0]
out[obj0.type] = values
return out
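# Usage sketch (illustrative; `model` is an OP2/BDF-like object exposing .nodes
# and .coords, and op2_ascii is any text stream used for the debug trace):
#   with open('model.op2', 'wb') as op2_file, open('model.op2.txt', 'w') as op2_ascii:
#       write_geom1(op2_file, op2_ascii, model)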
|
the-stack_106_17457
|
r"""
Evaluate match expressions, as used by `-k` and `-m`.
The grammar is:
expression: expr? EOF
expr: and_expr ('or' and_expr)*
and_expr: not_expr ('and' not_expr)*
not_expr: 'not' not_expr | '(' expr ')' | ident
ident: (\w|:|\+|-|\.|\[|\])+
The semantics are:
- Empty expression evaluates to False.
- ident evaluates to True or False according to a provided matcher function.
- or/and/not evaluate according to the usual boolean semantics.
"""
import ast
import enum
import re
import types
from typing import Callable
from typing import Iterator
from typing import Mapping
from typing import Optional
from typing import Sequence
import attr
from _pytest.compat import TYPE_CHECKING
if TYPE_CHECKING:
from typing import NoReturn
__all__ = [
"Expression",
"ParseError",
]
class TokenType(enum.Enum):
LPAREN = "left parenthesis"
RPAREN = "right parenthesis"
OR = "or"
AND = "and"
NOT = "not"
IDENT = "identifier"
EOF = "end of input"
@attr.s(frozen=True, slots=True)
class Token:
type = attr.ib(type=TokenType)
value = attr.ib(type=str)
pos = attr.ib(type=int)
class ParseError(Exception):
"""The expression contains invalid syntax.
:param column: The column in the line where the error occurred (1-based).
:param message: A description of the error.
"""
def __init__(self, column: int, message: str) -> None:
self.column = column
self.message = message
def __str__(self) -> str:
return "at column {}: {}".format(self.column, self.message)
class Scanner:
__slots__ = ("tokens", "current")
def __init__(self, input: str) -> None:
self.tokens = self.lex(input)
self.current = next(self.tokens)
def lex(self, input: str) -> Iterator[Token]:
pos = 0
while pos < len(input):
if input[pos] in (" ", "\t"):
pos += 1
elif input[pos] == "(":
yield Token(TokenType.LPAREN, "(", pos)
pos += 1
elif input[pos] == ")":
yield Token(TokenType.RPAREN, ")", pos)
pos += 1
else:
match = re.match(r"(:?\w|:|\+|-|\.|\[|\])+", input[pos:])
if match:
value = match.group(0)
if value == "or":
yield Token(TokenType.OR, value, pos)
elif value == "and":
yield Token(TokenType.AND, value, pos)
elif value == "not":
yield Token(TokenType.NOT, value, pos)
else:
yield Token(TokenType.IDENT, value, pos)
pos += len(value)
else:
raise ParseError(
pos + 1, 'unexpected character "{}"'.format(input[pos]),
)
yield Token(TokenType.EOF, "", pos)
def accept(self, type: TokenType, *, reject: bool = False) -> Optional[Token]:
if self.current.type is type:
token = self.current
if token.type is not TokenType.EOF:
self.current = next(self.tokens)
return token
if reject:
self.reject((type,))
return None
def reject(self, expected: Sequence[TokenType]) -> "NoReturn":
raise ParseError(
self.current.pos + 1,
"expected {}; got {}".format(
" OR ".join(type.value for type in expected), self.current.type.value,
),
)
def expression(s: Scanner) -> ast.Expression:
if s.accept(TokenType.EOF):
ret = ast.NameConstant(False) # type: ast.expr
else:
ret = expr(s)
s.accept(TokenType.EOF, reject=True)
return ast.fix_missing_locations(ast.Expression(ret))
def expr(s: Scanner) -> ast.expr:
ret = and_expr(s)
while s.accept(TokenType.OR):
rhs = and_expr(s)
ret = ast.BoolOp(ast.Or(), [ret, rhs])
return ret
def and_expr(s: Scanner) -> ast.expr:
ret = not_expr(s)
while s.accept(TokenType.AND):
rhs = not_expr(s)
ret = ast.BoolOp(ast.And(), [ret, rhs])
return ret
def not_expr(s: Scanner) -> ast.expr:
if s.accept(TokenType.NOT):
return ast.UnaryOp(ast.Not(), not_expr(s))
if s.accept(TokenType.LPAREN):
ret = expr(s)
s.accept(TokenType.RPAREN, reject=True)
return ret
ident = s.accept(TokenType.IDENT)
if ident:
return ast.Name(ident.value, ast.Load())
s.reject((TokenType.NOT, TokenType.LPAREN, TokenType.IDENT))
class MatcherAdapter(Mapping[str, bool]):
"""Adapts a matcher function to a locals mapping as required by eval()."""
def __init__(self, matcher: Callable[[str], bool]) -> None:
self.matcher = matcher
def __getitem__(self, key: str) -> bool:
return self.matcher(key)
def __iter__(self) -> Iterator[str]:
raise NotImplementedError()
def __len__(self) -> int:
raise NotImplementedError()
class Expression:
"""A compiled match expression as used by -k and -m.
The expression can be evaulated against different matchers.
"""
__slots__ = ("code",)
def __init__(self, code: types.CodeType) -> None:
self.code = code
@classmethod
def compile(cls, input: str) -> "Expression":
"""Compile a match expression.
:param input: The input expression - one line.
"""
astexpr = expression(Scanner(input))
code = compile(
astexpr, filename="<pytest match expression>", mode="eval",
) # type: types.CodeType
return Expression(code)
def evaluate(self, matcher: Callable[[str], bool]) -> bool:
"""Evaluate the match expression.
:param matcher: Given an identifier, should return whether it matches or not.
Should be prepared to handle arbitrary strings as input.
Returns whether the expression matches or not.
"""
ret = eval(
self.code, {"__builtins__": {}}, MatcherAdapter(matcher)
) # type: bool
return ret
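# Minimal usage sketch (not part of the original module): a -k/-m style
# expression is compiled once and then evaluated against a matcher callable.
if __name__ == "__main__":
    compiled = Expression.compile("foo and not bar")
    # The matcher decides whether a bare identifier counts as a match.
    assert compiled.evaluate(lambda ident: ident == "foo") is True
    assert compiled.evaluate(lambda ident: ident in ("foo", "bar")) is False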
|
the-stack_106_17458
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from http.server import BaseHTTPRequestHandler, HTTPServer
import json
import os
import base64
kfp_version = os.environ["KFP_VERSION"]
disable_istio_sidecar = os.environ.get("DISABLE_ISTIO_SIDECAR") == "true"
mlpipeline_minio_access_key = base64.b64encode(
bytes(os.environ.get("MINIO_ACCESS_KEY"), 'utf-8')).decode('utf-8')
mlpipeline_minio_secret_key = base64.b64encode(
bytes(os.environ.get("MINIO_SECRET_KEY"), 'utf-8')).decode('utf-8')
class Controller(BaseHTTPRequestHandler):
def sync(self, parent, children):
# HACK: Currently using serving.kubeflow.org/inferenceservice to identify
# kubeflow user namespaces.
# TODO: let Kubeflow profile controller add a pipeline specific label to
# user namespaces and use that label instead.
pipeline_enabled = parent.get("metadata", {}).get(
"labels", {}).get("serving.kubeflow.org/inferenceservice")
if not pipeline_enabled:
return {"status": {}, "children": []}
# Compute status based on observed state.
desired_status = {
"kubeflow-pipelines-ready": \
len(children["Secret.v1"]) == 1 and \
len(children["ConfigMap.v1"]) == 1 and \
len(children["Deployment.apps/v1"]) == 2 and \
len(children["Service.v1"]) == 2 and \
len(children["DestinationRule.networking.istio.io/v1alpha3"]) == 1 and \
len(children["ServiceRole.rbac.istio.io/v1alpha1"]) == 1 and \
len(children["ServiceRoleBinding.rbac.istio.io/v1alpha1"]) == 1 and \
"True" or "False"
}
# Generate the desired child object(s).
# parent is a namespace
namespace = parent.get("metadata", {}).get("name")
desired_resources = [
{
"apiVersion": "v1",
"kind": "ConfigMap",
"metadata": {
"name": "metadata-grpc-configmap",
"namespace": namespace,
},
"data": {
"METADATA_GRPC_SERVICE_HOST":
"metadata-grpc-service.kubeflow",
"METADATA_GRPC_SERVICE_PORT": "8080",
},
},
# Visualization server related manifests below
{
"apiVersion": "apps/v1",
"kind": "Deployment",
"metadata": {
"labels": {
"app": "ml-pipeline-visualizationserver"
},
"name": "ml-pipeline-visualizationserver",
"namespace": namespace,
},
"spec": {
"selector": {
"matchLabels": {
"app": "ml-pipeline-visualizationserver"
},
},
"template": {
"metadata": {
"labels": {
"app": "ml-pipeline-visualizationserver"
},
"annotations": disable_istio_sidecar and {
"sidecar.istio.io/inject": "false"
} or {},
},
"spec": {
"containers": [{
"image":
"uhub.service.ucloud.cn/a4x-kubeflow/ml-pipeline/visualization-server:" +
kfp_version,
"imagePullPolicy":
"IfNotPresent",
"name":
"ml-pipeline-visualizationserver",
"ports": [{
"containerPort": 8888
}],
}],
"serviceAccountName":
"default-editor",
},
},
},
},
{
"apiVersion": "networking.istio.io/v1alpha3",
"kind": "DestinationRule",
"metadata": {
"name": "ml-pipeline-visualizationserver",
"namespace": namespace,
},
"spec": {
"host": "ml-pipeline-visualizationserver",
"trafficPolicy": {
"tls": {
"mode": "ISTIO_MUTUAL"
}
}
}
},
{
"apiVersion": "rbac.istio.io/v1alpha1",
"kind": "ServiceRole",
"metadata": {
"name": "ml-pipeline-visualizationserver",
"namespace": namespace,
},
"spec": {
"rules": [{
"services": ["ml-pipeline-visualizationserver.*"]
}]
}
},
{
"apiVersion": "rbac.istio.io/v1alpha1",
"kind": "ServiceRoleBinding",
"metadata": {
"name": "ml-pipeline-visualizationserver",
"namespace": namespace,
},
"spec": {
"subjects": [{
"properties": {
"source.principal":
"cluster.local/ns/kubeflow/sa/ml-pipeline"
}
}],
"roleRef": {
"kind": "ServiceRole",
"name": "ml-pipeline-visualizationserver"
}
}
},
{
"apiVersion": "v1",
"kind": "Service",
"metadata": {
"name": "ml-pipeline-visualizationserver",
"namespace": namespace,
},
"spec": {
"ports": [{
"name": "http",
"port": 8888,
"protocol": "TCP",
"targetPort": 8888,
}],
"selector": {
"app": "ml-pipeline-visualizationserver",
},
},
},
# Artifact fetcher related resources below.
{
"apiVersion": "apps/v1",
"kind": "Deployment",
"metadata": {
"labels": {
"app": "ml-pipeline-ui-artifact"
},
"name": "ml-pipeline-ui-artifact",
"namespace": namespace,
},
"spec": {
"selector": {
"matchLabels": {
"app": "ml-pipeline-ui-artifact"
}
},
"template": {
"metadata": {
"labels": {
"app": "ml-pipeline-ui-artifact"
},
"annotations": disable_istio_sidecar and {
"sidecar.istio.io/inject": "false"
} or {},
},
"spec": {
"containers": [{
"name":
"ml-pipeline-ui-artifact",
"image":
"uhub.service.ucloud.cn/a4x-kubeflow/ml-pipeline/frontend:" + kfp_version,
"imagePullPolicy":
"IfNotPresent",
"ports": [{
"containerPort": 3000
}]
}],
"serviceAccountName":
"default-editor"
}
}
}
},
{
"apiVersion": "v1",
"kind": "Service",
"metadata": {
"name": "ml-pipeline-ui-artifact",
"namespace": namespace,
"labels": {
"app": "ml-pipeline-ui-artifact"
}
},
"spec": {
"ports": [{
"name":
"http", # name is required to let istio understand request protocol
"port": 80,
"protocol": "TCP",
"targetPort": 3000
}],
"selector": {
"app": "ml-pipeline-ui-artifact"
}
}
},
]
print('Received request:', parent)
print('Desired resources except secrets:', desired_resources)
# Moved after the print argument because this is sensitive data.
desired_resources.append({
"apiVersion": "v1",
"kind": "Secret",
"metadata": {
"name": "mlpipeline-minio-artifact",
"namespace": namespace,
},
"data": {
"accesskey": mlpipeline_minio_access_key,
"secretkey": mlpipeline_minio_secret_key,
},
})
return {"status": desired_status, "children": desired_resources}
def do_POST(self):
# Serve the sync() function as a JSON webhook.
observed = json.loads(
self.rfile.read(int(self.headers.get("content-length"))))
desired = self.sync(observed["parent"], observed["children"])
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(bytes(json.dumps(desired), 'utf-8'))
HTTPServer(("", 8080), Controller).serve_forever()
|
the-stack_106_17459
|
import datetime
import unittest
import uuid
from flask import current_app
from sqlalchemy import create_engine
from werkzeug.exceptions import InternalServerError
from secure_message import constants
from secure_message.application import create_app
from secure_message.services.service_toggles import internal_user_service
from secure_message.common.eventsapi import EventsApi
from secure_message.repository import database
from secure_message.repository.database import SecureMessage
from secure_message.repository.modifier import Modifier
from secure_message.repository.retriever import Retriever
from secure_message.validation.user import User
class ModifyTestCaseHelper:
"""Helper class for Modify Tests"""
BRES_SURVEY = "33333333-22222-3333-4444-88dc018a1a4c"
def populate_database(self, record_count=0, mark_as_read=True):
"""Adds a specified number of Messages to the db in a single thread"""
thread_id = str(uuid.uuid4())
with self.engine.connect() as con:
for i in range(record_count):
msg_id = str(uuid.uuid4())
# Only the first message in a thread needs an entry in the conversation table
if i == 0:
query = f'''INSERT INTO securemessage.conversation(id, is_closed, closed_by, closed_by_uuid) VALUES('{thread_id}', false, '', '')'''
con.execute(query)
query = f'''INSERT INTO securemessage.secure_message(id, msg_id, subject, body, thread_id,
collection_case, business_id, collection_exercise, survey) VALUES ({i}, '{msg_id}', 'test','test','{thread_id}',
'ACollectionCase', 'f1a5e99c-8edf-489a-9c72-6cabe6c387fc', 'ACollectionExercise',
'{constants.NON_SPECIFIC_INTERNAL_USER}')'''
con.execute(query)
query = f'''INSERT INTO securemessage.status(label, msg_id, actor) VALUES('SENT', '{msg_id}',
'0a7ad740-10d5-4ecb-b7ca-3c0384afb882')'''
con.execute(query)
query = f"INSERT INTO securemessage.status(label, msg_id, actor) VALUES('INBOX', '{msg_id}', " \
f"'{constants.NON_SPECIFIC_INTERNAL_USER}')"
con.execute(query)
query = f"INSERT INTO securemessage.status(label, msg_id, actor) VALUES('UNREAD', '{msg_id}'," \
f"'{constants.NON_SPECIFIC_INTERNAL_USER}')"
con.execute(query)
query = f'''INSERT INTO securemessage.events(event, msg_id, date_time)
VALUES('{EventsApi.SENT.value}', '{msg_id}', '2017-02-03 00:00:00')'''
con.execute(query)
if mark_as_read:
query = f'''INSERT INTO securemessage.events(event, msg_id, date_time)
VALUES('{EventsApi.READ.value}', '{msg_id}', '2017-02-03 00:00:00')'''
con.execute(query)
return thread_id
def create_conversation_with_respondent_as_unread(self, user, message_count=0):
"""Adds a specified number of Messages to the db in a single thread"""
# we should not be inserting records into the db for a unit test but, sadly, without a greater rework it's the only way
thread_id = str(uuid.uuid4())
with self.engine.connect() as con:
for i in range(message_count):
msg_id = str(uuid.uuid4())
# Only the first message in a thread needs an entry in the conversation table
if i == 0:
query = f'''INSERT INTO securemessage.conversation(id, is_closed, closed_by, closed_by_uuid) VALUES('{thread_id}', false, '', '')'''
con.execute(query)
query = f'''INSERT INTO securemessage.secure_message(id, msg_id, subject, body, thread_id,
collection_case, business_id, collection_exercise, survey) VALUES ({i}, '{msg_id}', 'test','test','{thread_id}',
'ACollectionCase', 'f1a5e99c-8edf-489a-9c72-6cabe6c387fc', 'ACollectionExercise',
'{user.user_uuid}')'''
con.execute(query)
query = f'''INSERT INTO securemessage.status(label, msg_id, actor) VALUES('SENT', '{msg_id}',
'{constants.NON_SPECIFIC_INTERNAL_USER}')'''
con.execute(query)
query = f"INSERT INTO securemessage.status(label, msg_id, actor) VALUES('INBOX', '{msg_id}', " \
f"'{user.user_uuid}')"
con.execute(query)
query = f"INSERT INTO securemessage.status(label, msg_id, actor) VALUES('UNREAD', '{msg_id}'," \
f" '{user.user_uuid}')"
con.execute(query)
query = f'''INSERT INTO securemessage.events(event, msg_id, date_time)
VALUES('{EventsApi.SENT.value}', '{msg_id}', '2020-11-20 00:00:00')'''
con.execute(query)
return thread_id
def add_conversation(self, conversation_id=str(uuid.uuid4()), is_closed=False, closed_by='', closed_by_uuid='', closed_at=None):
""" Populate the conversation table"""
# If the conversation being created needs to be closed, values are generated for you. These can be overridden by passing them
# into the function (i.e., if you need a specific name but don't care about anything else, then only pass in 'closed_by')
if is_closed:
if not closed_by:
closed_by = "Some person"
if not closed_by_uuid:
closed_by_uuid = str(uuid.uuid4())
if not closed_at:
closed_at = datetime.datetime.utcnow()
with self.engine.connect() as con:
query = f'''INSERT INTO securemessage.conversation(id, is_closed, closed_by, closed_by_uuid, closed_at) VALUES('{conversation_id}',
'{is_closed}', '{closed_by}', '{closed_by_uuid}', '{closed_at}')'''
con.execute(query)
return conversation_id
class ModifyTestCase(unittest.TestCase, ModifyTestCaseHelper):
"""Test case for message retrieval"""
def setUp(self):
"""setup test environment"""
self.app = create_app()
self.app.testing = True
self.engine = create_engine(self.app.config['SQLALCHEMY_DATABASE_URI'])
with self.app.app_context():
database.db.init_app(current_app)
database.db.drop_all()
database.db.create_all()
self.db = database.db
self.user_internal = User('ce12b958-2a5f-44f4-a6da-861e59070a31', 'internal')
self.user_respondent = User('0a7ad740-10d5-4ecb-b7ca-3c0384afb882', 'respondent')
def tearDown(self):
self.engine.dispose()
def test_all_messages_in_conversation_marked_unread(self):
# create a thread with two messages
conversation_id = self.create_conversation_with_respondent_as_unread(user=self.user_respondent, message_count=2)
with self.app.app_context():
conversation = Retriever.retrieve_thread(conversation_id, self.user_respondent)
for msg in conversation.all():
# as there are two ways that a message can be unread, first check the `read at` time isn't set
self.assertIsNone(msg.read_at)
# now collect all the message labels
labels = []
for status in msg.statuses:
labels.append(status.label)
# and check the unread is present
self.assertTrue("UNREAD" in labels)
# now mark the first message as read and check the whole conversation is now read
Modifier.mark_message_as_read(conversation[0].serialize(self.user_respondent), self.user_respondent)
con = Retriever.retrieve_thread(conversation_id, self.user_respondent)
for msg in con.all():
# message `read at` should now be set
self.assertIsNotNone(msg.read_at)
# collect the labels again
labels = []
for status in msg.statuses:
labels.append(status.label)
# and there should be no unread
self.assertFalse("UNREAD" in labels)
def test_close_conversation(self):
"""Test close conversation works"""
conversation_id = self.populate_database(1)
with self.app.app_context():
internal_user_service.use_mock_service()
# msg_id is the same as thread id for a conversation of 1
metadata = Retriever.retrieve_conversation_metadata(conversation_id)
Modifier.close_conversation(metadata, self.user_internal)
metadata = Retriever.retrieve_conversation_metadata(conversation_id)
self.assertTrue(metadata.is_closed)
self.assertEqual(metadata.closed_by, "Selphie Tilmitt")
self.assertEqual(metadata.closed_by_uuid, "ce12b958-2a5f-44f4-a6da-861e59070a31")
self.assertTrue(isinstance(metadata.closed_at, datetime.datetime))
# Test that timestamp on read message is less than 3 seconds old to prove it
# was only just created
delta = datetime.datetime.utcnow() - metadata.closed_at
self.assertTrue(delta.total_seconds() < 3)
def test_open_conversation(self):
"""Test re-opening conversation works"""
conversation_id = self.add_conversation(is_closed=True)
with self.app.app_context():
# msg_id is the same as thread id for a conversation of 1
metadata = Retriever.retrieve_conversation_metadata(conversation_id)
Modifier.open_conversation(metadata, self.user_internal)
metadata = Retriever.retrieve_conversation_metadata(conversation_id)
self.assertFalse(metadata.is_closed)
self.assertIsNone(metadata.closed_by)
self.assertIsNone(metadata.closed_by_uuid)
self.assertIsNone(metadata.closed_at)
def test_two_unread_labels_are_added_to_message(self):
"""testing duplicate message labels are not added to the database"""
self.populate_database(1)
with self.engine.connect() as con:
query = con.execute('SELECT msg_id FROM securemessage.secure_message LIMIT 1')
msg_id = query.first()[0]
with self.app.app_context():
with current_app.test_request_context():
message = Retriever.retrieve_message(msg_id, self.user_internal)
Modifier.add_unread(message, self.user_internal)
Modifier.add_unread(message, self.user_internal)
with self.engine.connect() as con:
query = f"SELECT count(label) FROM securemessage.status WHERE msg_id = '{msg_id}' AND label = 'UNREAD'"
query_x = con.execute(query)
unread_label_total = []
for row in query_x:
unread_label_total.append(row[0])
self.assertTrue(unread_label_total[0] == 1)
def test_read_date_is_set(self):
"""testing message read_date is set when unread label is removed"""
thread_id = self.populate_database(1, mark_as_read=False)
with self.app.app_context():
thread = Retriever.retrieve_thread(thread_id, self.user_respondent).all()
serialised_message = Retriever.retrieve_message(thread[0].msg_id, self.user_internal)
Modifier.mark_message_as_read(serialised_message, self.user_internal)
serialised_message = Retriever.retrieve_message(thread[0].msg_id, self.user_internal)
db_message = SecureMessage.query.filter(SecureMessage.msg_id == serialised_message['msg_id']).one()
self.assertIsNotNone(serialised_message['read_date'])
self.assertTrue(isinstance(db_message.read_at, datetime.datetime))
# Test that timestamp on read message is less than 3 seconds old to prove it
# was only just created
delta = datetime.datetime.utcnow() - db_message.read_at
self.assertTrue(delta.total_seconds() < 3)
def test_read_date_is_not_reset(self):
"""testing message read_date is not reset when unread label is removed again"""
self.populate_database(1)
with self.engine.connect() as con:
query = con.execute('SELECT msg_id FROM securemessage.secure_message LIMIT 1')
msg_id = query.first()[0]
with self.app.app_context():
with current_app.test_request_context():
message = Retriever.retrieve_message(msg_id, self.user_internal)
Modifier.mark_message_as_read(message, self.user_internal)
message = Retriever.retrieve_message(msg_id, self.user_internal)
read_date_set = message['read_date']
Modifier.add_unread(message, self.user_internal)
message = Retriever.retrieve_message(msg_id, self.user_internal)
Modifier.mark_message_as_read(message, self.user_internal)
message = Retriever.retrieve_message(msg_id, self.user_internal)
self.assertEqual(message['read_date'], read_date_set)
def test_exception_for_add_label_raises(self):
with self.app.app_context():
database.db.drop_all()
with current_app.test_request_context():
with self.assertRaises(InternalServerError):
Modifier.add_label('UNREAD', {'survey': 'survey'}, self.user_internal)
def test_exception_for_remove_label_raises(self):
with self.app.app_context():
database.db.drop_all()
with current_app.test_request_context():
with self.assertRaises(InternalServerError):
Modifier.remove_label('UNREAD', {'survey': 'survey'}, self.user_internal)
def test_get_label_actor_to_respondent(self):
message_to_respondent = {'msg_id': 'test1',
'msg_to': ['0a7ad740-10d5-4ecb-b7ca-3c0384afb882'],
'msg_from': 'ce12b958-2a5f-44f4-a6da-861e59070a31',
'subject': 'MyMessage',
'body': 'hello',
'thread_id': '',
'collection_case': 'ACollectionCase',
'collection_exercise': 'ACollectionExercise',
'business_id': 'f1a5e99c-8edf-489a-9c72-6cabe6c387fc',
'survey': self.BRES_SURVEY,
'from_internal': True}
self.assertEqual(Modifier._get_label_actor(user=self.user_internal, message=message_to_respondent),
'ce12b958-2a5f-44f4-a6da-861e59070a31')
self.assertEqual(Modifier._get_label_actor(user=self.user_respondent, message=message_to_respondent),
'0a7ad740-10d5-4ecb-b7ca-3c0384afb882')
def test_get_label_actor_to_group(self):
message_to_internal_group = {'msg_id': 'test3',
'msg_to': [constants.NON_SPECIFIC_INTERNAL_USER],
'msg_from': '0a7ad740-10d5-4ecb-b7ca-3c0384afb882',
'subject': 'MyMessage',
'body': 'hello',
'thread_id': '',
'collection_case': 'ACollectionCase',
'collection_exercise': 'ACollectionExercise',
'business_id': 'f1a5e99c-8edf-489a-9c72-6cabe6c387fc',
'survey': self.BRES_SURVEY,
'from_internal': False}
self.assertEqual(Modifier._get_label_actor(user=self.user_internal, message=message_to_internal_group),
constants.NON_SPECIFIC_INTERNAL_USER)
self.assertEqual(Modifier._get_label_actor(user=self.user_respondent, message=message_to_internal_group),
'0a7ad740-10d5-4ecb-b7ca-3c0384afb882')
def test_get_label_actor_to_internal_user(self):
message_to_internal_user = {'msg_id': 'test4',
'msg_to': ['ce12b958-2a5f-44f4-a6da-861e59070a31'],
'msg_from': '0a7ad740-10d5-4ecb-b7ca-3c0384afb882',
'subject': 'MyMessage',
'body': 'hello',
'thread_id': '',
'collection_case': 'ACollectionCase',
'collection_exercise': 'ACollectionExercise',
'business_id': 'f1a5e99c-8edf-489a-9c72-6cabe6c387fc',
'survey': self.BRES_SURVEY,
'from_internal': False}
self.assertEqual(Modifier._get_label_actor(user=self.user_internal, message=message_to_internal_user),
'ce12b958-2a5f-44f4-a6da-861e59070a31')
self.assertEqual(Modifier._get_label_actor(user=self.user_respondent, message=message_to_internal_user),
'0a7ad740-10d5-4ecb-b7ca-3c0384afb882')
def test_get_label_actor_raises_exception_for_missing_fields(self):
message_missing_fields = {'msg_id': 'test5',
'subject': 'MyMessage',
'body': 'hello',
'thread_id': '',
'collection_case': 'ACollectionCase',
'collection_exercise': 'ACollectionExercise',
'business_id': 'f1a5e99c-8edf-489a-9c72-6cabe6c387fc',
'survey': self.BRES_SURVEY,
'from_internal': False}
with self.assertRaises(InternalServerError):
Modifier._get_label_actor(user=self.user_internal, message=message_missing_fields)
if __name__ == '__main__':
unittest.main()
|
the-stack_106_17461
|
import argparse
parser = argparse.ArgumentParser(description="Download and transform EEG dataset")
parser.add_argument("action",
type=str,
choices=["download", "transform", "compress"],
help="Action can be download/transform/compress the dataset")
parser.add_argument("dataset",
type=str,
help="If the action is download, specifies the dataset size (small, large, full), otherwise specifies dataset path (.tar.gz)")
parser.add_argument("output-path",
type=str,
help="Output dataset path. The resulting file will be in .tar.gz if the action is download and .csv or parquet format if compression is enabled")
# early parsing arguments
args = None
if __name__ == "__main__":
# parse arguments
args = vars(parser.parse_args())
# imports
import os
import csv
import tempfile
from tqdm import tqdm
from termcolor import colored
from sibyl import dataset
from sibyl import transformer
from sibyl.util import filesystem as fs
def download_dataset(dataset_type: str, output_path: str):
print(colored("Starting to download {} dataset".format(dataset_type), "cyan"))
url = dataset.get_download_url(dataset_type)
print(colored("Downloading {}".format(url), "cyan"))
dataset.async_download(url, output_path)
print(colored("Download finished.", "cyan"))
def process_dataset(dataset_path: str, output_path: str):
print(colored("Transforming dataset...", "cyan"))
temp_dataset_path = tempfile.mkdtemp(prefix="sibyl_eeg_temp")
# if the passed path is a file, extract it first
print(colored("Decompressing main dataset archive...", "cyan"))
fs.decompress_tar(dataset_path, temp_dataset_path)
# decompress all tar files
for data_file in tqdm(fs.find_files(temp_dataset_path, ".tar.gz"), desc="Decompressing files (step 1)"):
fs.decompress_tar(data_file, temp_dataset_path)
fs.delete_file(data_file)
# decompress all gz files
for data_file in tqdm(fs.find_files(temp_dataset_path, ".gz"), desc="Decompressing files (step 2)"):
sample_extract_path = os.path.join(temp_dataset_path, os.path.basename(data_file) + ".txt")
fs.decompress_gz(data_file, sample_extract_path)
fs.delete_file(data_file)
# process all files
with open(output_path, 'w', newline='', encoding='utf-8') as file_stream:
csv_writer = csv.writer(file_stream)
transformer.write_csv_header(csv_writer)
for record_file in tqdm(fs.find_files(temp_dataset_path, ".txt"), desc="Parsing dataset"):
rows = transformer.parse_file(record_file)
if rows is None:
continue
for row in rows:
csv_writer.writerow(row)
fs.delete_file(record_file)
# delete temporary directory
print(colored("Deleting temporary files...", "cyan"))
fs.delete_dir(temp_dataset_path)
print(colored("Transform complete!", "cyan"))
def compress_dataset(dataset_path: str, output_path: str):
print(colored("Saving as parquet file with gzip compression, might take a while!", "cyan"))
print(colored("Compressing...", "cyan"))
transformer.save_as_parquet(dataset_path, output_path)
print(colored("Saved as {}".format(output_path), "cyan"))
# main app entry point
if __name__ == "__main__":
# download dataset from UCI server (.tar.gz)
if args["action"] == "download":
if (args["dataset"] not in ["small", "large", "full"]):
print(colored("Unknown dataset type, valid values are: small, large, full", "red"))
exit()
download_dataset(args["dataset"], args["output-path"])
# transform dataset (.tar.gz to .csv)
elif args["action"] == "transform":
if not fs.is_file_exists(args["dataset"]):
print(colored("Dataset file does not exists", "red"))
exit()
if not fs.is_file_extension(args["dataset"], [".tar.gz", ".tar"]):
print(colored("Dataset is not in .tar or .tar.gz extension", "red"))
exit()
process_dataset(args["dataset"], args["output-path"])
# compress dataset (.csv to .parquet)
elif args["action"] == "compress":
if not fs.is_file_exists(args["dataset"]):
print(colored("Dataset file does not exists", "red"))
exit()
if not fs.is_file_extension(args["dataset"], [".csv"]):
print(colored("Dataset is not in .csv extension", "red"))
exit()
compress_dataset(args["dataset"], args["output-path"])
# out of range
else:
print("Unknown action, valid values are: download, transform")
|
the-stack_106_17465
|
import requests
import sys
import math
import os
import dataclasses
import typing
import time
from interpreter import Interpreter
import operations as op
def get(index: int, node: op.Cons):
if index == 0:
return op.Ap(op.Car(), node).evaluate(op.Environment())
return get(index - 1, op.Ap(op.Cdr(), node))
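# Rough sketch of what get() does (an assumption based on the Cons/Car/Cdr
# operators used above, not verified against the operations module): for a cons
# list equivalent to (10, 20, 30), get(0, node) evaluates Car(node) and yields
# the number 10, while get(2, node) peels two Cdr applications before taking
# Car and yields 30.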
@dataclasses.dataclass
class StaticGameInfo:
x0: int = None
role: int = None
x2: op.Node = None
x3: op.Node = None
x4: op.Node = None
def __init__(self, node: op.Node):
self.x0 = get(0, node).n
self.role = get(1, node).n
self.x2 = get(2, node)
self.x3 = get(3, node)
self.x4 = get(4, node)
@dataclasses.dataclass
class Ship:
role: int = None
shipId: int = None
position: typing.Tuple[int, int] = None
velocity: typing.Tuple[int, int] = None
x4: op.Node = None
x5: op.Node = None
x6: op.Node = None
x7: op.Node = None
def __init__(self, node: op.Node):
self.role = get(0, node).n
self.shipId = get(1, node).n
pos = get(2, node)
self.position = (pos.args[0].n, pos.args[1].n)
vel = get(3, node)
self.velocity = (vel.args[0].n, vel.args[1].n)
self.x4 = get(4, node)
self.x5 = get(5, node)
self.x6 = get(6, node)
self.x7 = get(7, node)
@dataclasses.dataclass
class GameState:
gameTick: op.Node = None
x1: op.Node = None
shipAndCommands: op.Node = None
def __init__(self, node: op.Node):
if isinstance(node, op.Nil):
return
self.gameTick = get(0, node)
self.x1 = get(1, node)
self.shipAndCommands = self._parse_shipAndCommands(get(2, node))
def _parse_shipAndCommands(self, node: op.Node):
if isinstance(node, op.Nil):
return []
shipAndCommand = get(0, node)
ship = Ship(get(0, shipAndCommand))
appliedCommands = get(1, shipAndCommand)
return [(ship, appliedCommands)]
@dataclasses.dataclass
class GameResponse:
success: int = None
gameStage: int = None
staticGameInfo: StaticGameInfo = None
gameState: GameState = None
def __init__(self, node: op.Node):
self.success = get(0, node).n
if self.success != 1:
return
self.gameStage = get(1, node).n
self.staticGameInfo = StaticGameInfo(get(2, node))
self.gameState = GameState(get(3, node))
def accelerateCommand(shipId: int, vector: typing.Tuple[int, int]) -> str:
return f"( 0 , {shipId} , ap ap vec {vector[0]} {vector[1]} )"
def detonateCommand(shipId: int) -> str:
return f"( 1 , {shipId} )"
def shootCommand(shipId: int, target: typing.Tuple[int, int], x3) -> str:
return f"( 2 , {shipId} , ap ap vec {target[0]} {target[1]} , {x3} )"
def print_game_response(response) -> GameResponse:
gresponse = GameResponse(response)
print(gresponse)
return gresponse
def main():
server_url = sys.argv[1]
player_key = sys.argv[2]
dev_mode = sys.argv[3] if len(sys.argv) == 4 else False
print('ServerUrl: %s; PlayerKey: %s' % (server_url, player_key))
op.set_parameter(
server_url, '' if os.environ.get("API_KEY", "") == "" else
("?apiKey=" + os.environ.get("API_KEY", "")))
interpreter = Interpreter()
if dev_mode:
res = interpreter.evaluate_expression("ap send ( 1 , 0 )")
print(res)
if not isinstance(res, op.Cons) or res.args[0] != op.Number(1):
raise Exception("failed to CREATE player_key")
attacker_player_key = str(get(1, get(0, get(1, res))).n)
defender_player_key = str(get(1, get(1, get(1, res))).n)
print('attacker', attacker_player_key)
print('defender', defender_player_key)
if dev_mode == "attack":
player_key = attacker_player_key
print('attacker mode')
else:
player_key = defender_player_key
print('defender mode')
print_game_response(
interpreter.evaluate_expression(f"ap send ( 2 , {player_key} , ( ) )"))
print_game_response(
interpreter.evaluate_expression(
f"ap send ( 3 , {player_key} , ( 1 , 2 , 3 , 4 ) )"))
for i in range(10000):
res = print_game_response(
interpreter.evaluate_expression(
f"ap send ( 4 , {player_key} , ( {accelerateCommand(0, (1, -1))} , {shootCommand(0, (1, 1), 1) } ) )"
))
if res.success == 0:
break
if res.gameStage == 2:
break
time.sleep(0.1)
# print('accelarate')
# print_game_response(
# interpreter.evaluate_expression(
# f"ap send ( 4 , {player_key} , {accelerateCommand(0, (1, 1))} )"))
# print('detonate')
# print_game_response(
# interpreter.evaluate_expression(
# f"ap send ( 4 , {player_key} , {detonateCommand(0)} )"))
# print('shoot')
# print_game_response(
# interpreter.evaluate_expression(
# f"ap send ( 4 , {player_key} , {shootCommand(0, (1, 1), 1)} )"))
if __name__ == '__main__':
main()
|
the-stack_106_17466
|
import os
from typing import Any, List, Optional
from rebulk.match import MatchesDict
from organizer import config
from organizer.api import tmdb_api
from organizer.processor.abstract_processor import AbstractProcessor
from organizer.util.translation import translate
class EpisodeProcessor(AbstractProcessor):
"""
The processor dedicated to Episodes type medias
"""
def process(self, filename: str, guessit_data: MatchesDict):
tvdb_data = self._get_tvdb_data(guessit_data['title'])
if tvdb_data is None:
return None
self._update_genres(tvdb_data)
return tvdb_data
@staticmethod
def _get_tvdb_data(title: str) -> Optional[Any]:
results = tmdb_api.tv_endpoint.search(title)
if len(results) == 1:
return results[0]
for result in results:
if result.name == title:
return result
if result.original_language != tmdb_api.tmdb.language:
if translate(tmdb_api.tmdb.language, result.original_language, title) == result.original_name:
return result
return None
@staticmethod
def _update_genres(data) -> Optional[Any]:
data.genres = []
for genre_id in data.genre_ids:
genre = [tg for tg in tmdb_api.genre_tv_db if tg.id == genre_id]
if len(genre) == 1:
data.genres.append(genre[0].name)
@staticmethod
def get_output_dirs() -> List[dict]:
dirs = [dir for dir in config['output'] if dir['type'] == 'episode']
dirs.sort(key=lambda dir: len(dir['filters'] if dir['filters'] is not None else []), reverse=True)
return dirs
def get_output_dir(self, tvdb_data, guessit_data: MatchesDict = None):
output_dir = AbstractProcessor.get_output_dir(self, tvdb_data)
return os.path.join(output_dir, guessit_data['title'], "Saison " + str(guessit_data['season']).zfill(2))
def get_output_filename(self, guessit_data: MatchesDict, tvdb_data) -> str:
return '%s - s%02de%02d.%s' % (
guessit_data['title'],
guessit_data['season'],
guessit_data['episode'],
guessit_data['container']
)
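# Illustrative result with hypothetical guessit data (title='Show', season=1,
# episode=2, container='mkv'): get_output_filename() yields 'Show - s01e02.mkv'
# and get_output_dir() appends 'Show/Saison 01' to the configured output directory.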
|
the-stack_106_17469
|
import logging
import json
import importlib
import binascii
from typing import List
from types import SimpleNamespace
from neo3.core import cryptography
version = '0.6'
core_logger = logging.getLogger('neo3.core')
network_logger = logging.getLogger('neo3.network')
storage_logger = logging.getLogger('neo3.storage')
def load_class_from_path(path_and_class: str):
"""
Dynamically load a class from a module at the specified path
Args:
path_and_class: relative path where to find the module and its class name
i.e. 'neo3.<package>.<package>.<module>.<class name>'
Raises:
ValueError: if the Module or Class is not found.
Returns:
class object
"""
try:
module_path = '.'.join(path_and_class.split('.')[:-1])
module = importlib.import_module(module_path)
except ImportError as err:
raise ValueError(f"Failed to import module {module_path} with error: {err}")
try:
class_name = path_and_class.split('.')[-1]
class_obj = getattr(module, class_name)
return class_obj
except AttributeError as err:
raise ValueError(f"Failed to get class {class_name} with error: {err}")
class IndexableNamespace(SimpleNamespace):
def __len__(self):
return len(self.__dict__)
def __getitem__(self, key):
return self.__dict__[key]
def get(self, key, default=None):
try:
return self.__dict__[key]
except KeyError:
return default
class Settings(IndexableNamespace):
db = None
_cached_standby_committee = None
default_settings = {
'network': {
'magic': 5195086,
'seedlist': [],
'validators_count': 1,
'standby_committee': ['02158c4a4810fa2a6a12f7d33d835680429e1a68ae61161c5b3fbc98c7f1f17765']
},
'storage': {
'use_default': True,
'default_provider': 'memory',
"providers": {
"memory": {
'class_path': 'neo3.storage.implementations.MemoryDB',
'options': {}
},
"leveldb": {
'class_path': 'neo3.storage.implementations.LevelDB',
'options': {
'path': '/tmp/neo3/'
}
},
"postgresql": {
'class_path': 'neo3.storage.implementations.PostgresDB',
'options': {
'host': '127.0.0.1',
'port': 5432
}
}
}
},
'policy': {
'max_tx_per_block': 512
},
'native_contract_activation': {}
}
@classmethod
def from_json(cls, json: dict):
o = cls(**json)
o._convert(o.__dict__, o.__dict__)
return o
@classmethod
def from_file(cls, path_to_json: str):
with open(path_to_json, 'r') as f:
data = json.load(f)
return cls.from_json(data)
def register(self, json: dict):
self.__dict__.update(json)
self._convert(self.__dict__, self.__dict__)
def _convert(self, what: dict, where: dict):
# turn every nested dict in `what` into an IndexableNamespace
to_update = []
for k, v in what.items():
if isinstance(v, dict):
to_update.append((k, IndexableNamespace(**v)))
for k, v in to_update:
if isinstance(where, dict):
where.update({k: v})
else:
where.__dict__.update({k: v})
self._convert(where[k].__dict__, where[k].__dict__)
@property
def standby_committee(self) -> List[cryptography.ECPoint]:
if self._cached_standby_committee is None:
points = []
for p in self.network.standby_committee:
points.append(cryptography.ECPoint.deserialize_from_bytes(binascii.unhexlify(p)))
self._cached_standby_committee = points
return self._cached_standby_committee
@property
def standby_validators(self) -> List[cryptography.ECPoint]:
return self.standby_committee[:self.network.validators_count]
@property
def database(self):
try:
if self.db:
return self.db
if not self.storage.use_default:
return None
default_provider_name = self.storage.default_provider
provider = self.storage.providers[default_provider_name]
db_class = load_class_from_path(provider.class_path)
return db_class(provider.options.__dict__)
except Exception as e:
return None
def reset_settings_to_default(self):
self.__dict__.clear()
self.__dict__.update(self.from_json(self.default_settings).__dict__)
settings = Settings.from_json(Settings.default_settings)
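# Minimal usage sketch (the import path is an assumption about the package
# layout; values are illustrative). Note that register() replaces a whole
# top-level section, so the example below drops the default seedlist:
#
#   from neo3 import settings
#   settings.register({'network': {'magic': 1234}})
#   assert settings.network.magic == 1234
#   assert settings.policy.max_tx_per_block == 512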
|
the-stack_106_17470
|
"""
Support for Modbus switches.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.modbus/
"""
import logging
import voluptuous as vol
from homeassistant.components.modbus import (
CONF_HUB, DEFAULT_HUB, DOMAIN as MODBUS_DOMAIN)
from homeassistant.const import (
CONF_NAME, CONF_SLAVE, CONF_COMMAND_ON, CONF_COMMAND_OFF, STATE_ON)
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.helpers.restore_state import RestoreEntity
from homeassistant.helpers import config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['modbus']
CONF_COIL = "coil"
CONF_COILS = "coils"
CONF_REGISTER = "register"
CONF_REGISTERS = "registers"
CONF_VERIFY_STATE = "verify_state"
CONF_VERIFY_REGISTER = "verify_register"
CONF_REGISTER_TYPE = "register_type"
CONF_STATE_ON = "state_on"
CONF_STATE_OFF = "state_off"
REGISTER_TYPE_HOLDING = 'holding'
REGISTER_TYPE_INPUT = 'input'
REGISTERS_SCHEMA = vol.Schema({
vol.Optional(CONF_HUB, default=DEFAULT_HUB): cv.string,
vol.Required(CONF_NAME): cv.string,
vol.Optional(CONF_SLAVE): cv.positive_int,
vol.Required(CONF_REGISTER): cv.positive_int,
vol.Required(CONF_COMMAND_ON): cv.positive_int,
vol.Required(CONF_COMMAND_OFF): cv.positive_int,
vol.Optional(CONF_VERIFY_STATE, default=True): cv.boolean,
vol.Optional(CONF_VERIFY_REGISTER):
cv.positive_int,
vol.Optional(CONF_REGISTER_TYPE, default=REGISTER_TYPE_HOLDING):
vol.In([REGISTER_TYPE_HOLDING, REGISTER_TYPE_INPUT]),
vol.Optional(CONF_STATE_ON): cv.positive_int,
vol.Optional(CONF_STATE_OFF): cv.positive_int,
})
COILS_SCHEMA = vol.Schema({
vol.Optional(CONF_HUB, default=DEFAULT_HUB): cv.string,
vol.Required(CONF_COIL): cv.positive_int,
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_SLAVE): cv.positive_int,
})
PLATFORM_SCHEMA = vol.All(
cv.has_at_least_one_key(CONF_COILS, CONF_REGISTERS),
PLATFORM_SCHEMA.extend({
vol.Optional(CONF_COILS): [COILS_SCHEMA],
vol.Optional(CONF_REGISTERS): [REGISTERS_SCHEMA]
}))
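# Illustrative configuration.yaml snippet accepted by the schema above
# (names, slave ids and register numbers are placeholders):
#
#   switch:
#     - platform: modbus
#       coils:
#         - { name: Pump, slave: 1, coil: 13 }
#       registers:
#         - { name: Heater, register: 1024, command_on: 1, command_off: 0 }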
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Read configuration and create Modbus devices."""
switches = []
if CONF_COILS in config:
for coil in config.get(CONF_COILS):
hub_name = coil.get(CONF_HUB)
hub = hass.data[MODBUS_DOMAIN][hub_name]
switches.append(ModbusCoilSwitch(
hub,
coil.get(CONF_NAME),
coil.get(CONF_SLAVE),
coil.get(CONF_COIL)))
if CONF_REGISTERS in config:
for register in config.get(CONF_REGISTERS):
hub_name = register.get(CONF_HUB)
hub = hass.data[MODBUS_DOMAIN][hub_name]
switches.append(ModbusRegisterSwitch(
hub,
register.get(CONF_NAME),
register.get(CONF_SLAVE),
register.get(CONF_REGISTER),
register.get(CONF_COMMAND_ON),
register.get(CONF_COMMAND_OFF),
register.get(CONF_VERIFY_STATE),
register.get(CONF_VERIFY_REGISTER),
register.get(CONF_REGISTER_TYPE),
register.get(CONF_STATE_ON),
register.get(CONF_STATE_OFF)))
add_entities(switches)
class ModbusCoilSwitch(ToggleEntity, RestoreEntity):
"""Representation of a Modbus coil switch."""
def __init__(self, hub, name, slave, coil):
"""Initialize the coil switch."""
self._hub = hub
self._name = name
self._slave = int(slave) if slave else None
self._coil = int(coil)
self._is_on = None
async def async_added_to_hass(self):
"""Handle entity which will be added."""
state = await self.async_get_last_state()
if not state:
return
self._is_on = state.state == STATE_ON
@property
def is_on(self):
"""Return true if switch is on."""
return self._is_on
@property
def name(self):
"""Return the name of the switch."""
return self._name
def turn_on(self, **kwargs):
"""Set switch on."""
self._hub.write_coil(self._slave, self._coil, True)
def turn_off(self, **kwargs):
"""Set switch off."""
self._hub.write_coil(self._slave, self._coil, False)
def update(self):
"""Update the state of the switch."""
result = self._hub.read_coils(self._slave, self._coil, 1)
try:
self._is_on = bool(result.bits[0])
except AttributeError:
_LOGGER.error(
'No response from hub %s, slave %s, coil %s',
self._hub.name,
self._slave,
self._coil)
class ModbusRegisterSwitch(ModbusCoilSwitch):
"""Representation of a Modbus register switch."""
# pylint: disable=super-init-not-called
def __init__(self, hub, name, slave, register, command_on,
command_off, verify_state, verify_register,
register_type, state_on, state_off):
"""Initialize the register switch."""
self._hub = hub
self._name = name
self._slave = slave
self._register = register
self._command_on = command_on
self._command_off = command_off
self._verify_state = verify_state
self._verify_register = (
verify_register if verify_register else self._register)
self._register_type = register_type
if state_on is not None:
self._state_on = state_on
else:
self._state_on = self._command_on
if state_off is not None:
self._state_off = state_off
else:
self._state_off = self._command_off
self._is_on = None
def turn_on(self, **kwargs):
"""Set switch on."""
self._hub.write_register(
self._slave,
self._register,
self._command_on)
if not self._verify_state:
self._is_on = True
def turn_off(self, **kwargs):
"""Set switch off."""
self._hub.write_register(
self._slave,
self._register,
self._command_off)
if not self._verify_state:
self._is_on = False
def update(self):
"""Update the state of the switch."""
if not self._verify_state:
return
value = 0
if self._register_type == REGISTER_TYPE_INPUT:
result = self._hub.read_input_registers(
self._slave,
self._register,
1)
else:
result = self._hub.read_holding_registers(
self._slave,
self._register,
1)
try:
value = int(result.registers[0])
except AttributeError:
_LOGGER.error(
'No response from hub %s, slave %s, register %s',
self._hub.name,
self._slave,
self._verify_register)
if value == self._state_on:
self._is_on = True
elif value == self._state_off:
self._is_on = False
else:
_LOGGER.error(
'Unexpected response from hub %s, slave %s '
'register %s, got 0x%2x',
self._hub.name,
self._slave,
self._verify_register,
value)
|
the-stack_106_17472
|
import os
from gensim.models.doc2vec import Doc2Vec
from utils.mapreduce import corpus_iterator
import gensim.models
import psutil
import logging
logger = logging.getLogger(__name__)
CPU_CORES = psutil.cpu_count()
assert gensim.models.doc2vec.FAST_VERSION > -1
class d2v_embedding(corpus_iterator):
def __init__(self, *args, **kwargs):
super(d2v_embedding, self).__init__(*args, **kwargs)
self.epoch_n = int(kwargs["epoch_n"])
self.clf = Doc2Vec(
workers=CPU_CORES,
window=int(kwargs["window"]),
negative=int(kwargs["negative"]),
sample=float(kwargs["sample"]),
size=int(kwargs["size"]),
min_count=int(kwargs["min_count"]),
)
def compute(self, config):
logger.info("Learning the vocabulary")
ITR = self.labelized_sentence_iterator()
self.clf.build_vocab(ITR)
logger.info("Training the features")
for n in range(self.epoch_n):
logger.info(" - Epoch {}".format(n))
ITR = self.labelized_sentence_iterator()
self.clf.train(ITR)
logger.info("Reducing the features")
self.clf.init_sims(replace=True)
logger.info("Saving the features")
out_dir = config["output_data_directory"]
f_features = os.path.join(out_dir, config["d2v_embedding"]["f_db"])
self.clf.save(f_features)
|
the-stack_106_17473
|
# -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2017 Vic Chan
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from paddle.trainer.PyDataProvider2 import *
import numpy as np
import struct
import matplotlib.pyplot as plt
def read_image_files(filename, num):
bin_file = open(filename, 'rb')
buf = bin_file.read()
index = 0
magic, numImage, numRows, numCols = struct.unpack_from('>IIII', buf, index)
index += struct.calcsize('>IIII')
image_sets = []
for i in range(num):
images = struct.unpack_from('>784B', buf, index)
index += struct.calcsize('>784B')
images = np.array(images)
images = images/255.0
images = images.tolist()
image_sets.append(images)
bin_file.close()
return image_sets
def read_label_files(filename):
bin_file = open(filename, 'rb')
buf = bin_file.read()
index = 0
magic, nums = struct.unpack_from('>II', buf, index)
index += struct.calcsize('>II')
labels = struct.unpack_from('>%sB' % nums, buf, index)
bin_file.close()
labels = np.array(labels)
return labels
def fetch_traingset():
path = '/Users/vic/Dev/DeepLearning/Paddle/DeepLearningWithPaddle/DNN-MNIST/'
image_file = path + 'data/train-images-idx3-ubyte'
label_file = path + 'data/train-labels-idx1-ubyte'
images = read_image_files(image_file,60000)
labels = read_label_files(label_file)
return {'images': images,
'labels': labels}
def fetch_testingset():
path = '/Users/vic/Dev/DeepLearning/Paddle/DeepLearningWithPaddle/DNN-MNIST/'
image_file = path + 'data/t10k-images-idx3-ubyte'
label_file = path + 'data/t10k-labels-idx1-ubyte'
images = read_image_files(image_file,10000)
labels = read_label_files(label_file)
return {'images': images,
'labels': labels}
def test():
data = fetch_testingset()
image = data['images'][10]
print("Label: %d" % data['labels'][10])
images = np.reshape(image, [28, 28])
plt.imshow(images, cmap='gray')
plt.show()
|
the-stack_106_17476
|
'''
Problem: "Reversing a sequence"
Statement
A sequence of integers ending with the number 0 is given.
Print this sequence in reverse order.
When solving this problem you may not use arrays
or other dynamic data structures.
Recursion will help you.
'''
def reverse(s):
# s1 = list(s)
for i in s[::-1]:
print(i)
return
s = []
i = int(input())
while i != 0:
s.append(i)
i = int(input())
s.append(0)
# print(s)
reverse(s)
# reference solution from the problem setters (pure recursion, no extra list)
def reverse():
x = int(input())
if x != 0:
reverse()
print(x)
reverse()
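# Worked example: for the input 1 2 3 0, both solutions print 0, 3, 2, 1
# (one number per line); the terminating zero comes out first because it is
# the last value read before the output starts.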
|
the-stack_106_17477
|
import argparse
import ast
import astor
parser = argparse.ArgumentParser()
parser.add_argument('path')
parser.add_argument('-i', '--ignore')
class Transformer(ast.NodeTransformer):
def visit_YieldFrom(self, node):
return ast.Await(node.value)
def visit_With(self, node):
change_node = False
new_items = []
for item in node.items:
if isinstance(item.context_expr, ast.YieldFrom):
new_item = ast.withitem(
item.context_expr.value,
item.optional_vars,
)
new_items.append(new_item)
change_node = True
else:
new_items.append(item)
if not change_node:
return node
return ast.AsyncWith(items=new_items, body=node.body)
def visit_FunctionDef(self, node):
change_node = False
decorator_list = []
for decorator in node.decorator_list:
if (
isinstance(decorator, ast.Attribute) and
decorator.value.id == 'asyncio' and
decorator.attr == 'coroutine'
):
change_node = True
else:
decorator_list.append(decorator.id)
if not change_node:
return node
return ast.AsyncFunctionDef(
name=node.name,
args=node.args,
body=node.body,
decorator_list=decorator_list,
returns=node.returns,
)
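# Sketch of the rewrite the Transformer above performs (illustrative code, not
# taken from the original source):
#
#   @asyncio.coroutine                  # before
#   def fetch():
#       data = yield from get_data()
#       with (yield from lock):
#           return data
#
#   async def fetch():                  # after
#       data = await get_data()
#       async with lock:
#           return data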
def transform(path, transformer):
with open(path) as f:
source = f.read()
tree = ast.parse(source)
old_tree_dump = astor.dump_tree(tree)
transformer.visit(tree)
transformer.visit(tree)
transformer.visit(tree)
if old_tree_dump == astor.dump_tree(tree):
return
source = astor.to_source(tree)
with open(path, 'w') as f:
f.write(source)
def main():
args = parser.parse_args()
transformer = Transformer()
files = astor.code_to_ast.find_py_files(
args.path, ignore=args.ignore)
for _, path in files:
print(path)
transform(path, transformer)
print('Done!')
if __name__ == '__main__':
main()
|
the-stack_106_17478
|
import jpy
_JCallbackAdapter = jpy.get_type('io.deephaven.server.plugin.python.CallbackAdapter')
def initialize_all_and_register_into(callback: _JCallbackAdapter):
try:
from . import register
except ModuleNotFoundError as e:
# deephaven.plugin is an optional dependency, so if it can't be found, there are no Deephaven python plugins
# to register
if e.name == 'deephaven.plugin':
return
raise e
register.initialize_all_and_register_into(callback)
|
the-stack_106_17479
|
# -*- coding: utf-8 -*-
# Copyright 2017, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
"""
Quantum Fourier Transform examples.
Note: if you have only cloned the Qiskit repository but not
used `pip install`, the examples only work from the root directory.
"""
import math
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
from qiskit import execute, BasicAer, IBMQ
from qiskit.providers.ibmq import least_busy
###############################################################
# make the qft
###############################################################
def input_state(circ, q, n):
"""n-qubit input state for QFT that produces output 1."""
for j in range(n):
circ.h(q[j])
circ.u1(math.pi/float(2**(j)), q[j]).inverse()
def qft(circ, q, n):
"""n-qubit QFT on q in circ."""
for j in range(n):
for k in range(j):
circ.cu1(math.pi/float(2**(j-k)), q[j], q[k])
circ.h(q[j])
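# For n=3 the qft() above emits, in order:
#   H(q0); CU1(pi/2, q1, q0), H(q1); CU1(pi/4, q2, q0), CU1(pi/2, q2, q1), H(q2)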
q = QuantumRegister(5, "q")
c = ClassicalRegister(5, "c")
qft3 = QuantumCircuit(q, c, name="qft3")
qft4 = QuantumCircuit(q, c, name="qft4")
qft5 = QuantumCircuit(q, c, name="qft5")
input_state(qft3, q, 3)
qft3.barrier()
qft(qft3, q, 3)
qft3.barrier()
for j in range(3):
qft3.measure(q[j], c[j])
input_state(qft4, q, 4)
qft4.barrier()
qft(qft4, q, 4)
qft4.barrier()
for j in range(4):
qft4.measure(q[j], c[j])
input_state(qft5, q, 5)
qft5.barrier()
qft(qft5, q, 5)
qft5.barrier()
for j in range(5):
qft5.measure(q[j], c[j])
print(qft3)
print(qft4)
print(qft5)
###############################################################
# Set up the API and execute the program.
###############################################################
try:
IBMQ.load_accounts()
except:
print("""WARNING: There's no connection with the API for remote backends.
Have you initialized a file with your personal token?
For now, there's only access to local simulator backends...""")
print('Qasm simulator')
sim_backend = BasicAer.get_backend('qasm_simulator')
job = execute([qft3, qft4, qft5], sim_backend, shots=1024)
result = job.result()
print(result.get_counts(qft3))
print(result.get_counts(qft4))
print(result.get_counts(qft5))
# Second version: real device
least_busy_device = least_busy(IBMQ.backends(simulator=False,
filters=lambda x: x.configuration().n_qubits > 4))
print("Running on current least busy device: ", least_busy_device)
job = execute([qft3, qft4, qft5], least_busy_device, shots=1024)
result = job.result()
print(result.get_counts(qft3))
print(result.get_counts(qft4))
print(result.get_counts(qft5))
|
the-stack_106_17483
|
GAME_DURATION = 40.0
TIME_BONUS_MIN = 2
TIME_BONUS_MAX = 5
TIME_BONUS_RANGE = 3.0
SEND_UPDATE = 0.2
TOW_WIN = 0
TOW_TIE = 1
TOW_LOSS = 2
TOON_VS_TOON = 0
TOON_VS_COG = 1
WAIT_FOR_CLIENTS_TIMEOUT = 20
TUG_TIMEOUT = 45
WAIT_FOR_GO_TIMEOUT = 15
WIN_JELLYBEANS = 15
LOSS_JELLYBEANS = 4
TIE_WIN_JELLYBEANS = 12
TIE_LOSS_JELLYBEANS = 8
TIE_JELLYBEANS = 5
|
the-stack_106_17487
|
import os
from setuptools import find_packages, setup
setup_dir = os.path.abspath(os.path.dirname(__file__))
def read_file(filename):
filepath = os.path.join(setup_dir, filename)
with open(filepath) as file:
return file.read()
setup(
name="spark-plot",
use_scm_version=True,
packages=find_packages(),
# package_dir={"": "src"},
zip_safe=False,
include_package_data=True,
# package_data={"spark_plot": ["includes/*"]},
# data_files=data_files,
# cmdclass={"install": InstallCmd},
# entry_points = {},
options={"bdist_wheel": {"universal": "1"}},
python_requires=">=3.7",
setup_requires=["setuptools_scm"],
install_requires=read_file("requirements.txt").splitlines(),
extras_require={
"test": ["pytest", "pytest-cov", "toml"],
"dev": read_file("requirements-dev.txt").splitlines(),
},
description="",
long_description=read_file("README.md"),
long_description_content_type="text/markdown",
license="Apache License, Version 2.0",
maintainer="Daniel Rodriguez",
maintainer_email="[email protected]",
url="https://github.com/danielfrg/spark-plot",
keywords=[],
classifiers=[
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
)
|
the-stack_106_17488
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../src/'))
#import sphinx_bootstrap_theme
# -- Project information -----------------------------------------------------
project = 'tools21cm'
copyright = '2020, Sambit Giri'
author = 'Sambit Giri'
# The full version, including alpha/beta/rc tags
version = release = '2.1'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '2.3'
#pygments_style = 'sphinx'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.coverage",
"sphinx.ext.doctest",
"sphinx.ext.extlinks",
"sphinx.ext.ifconfig",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
"sphinx.ext.mathjax",
"sphinx.ext.autosectionlabel",
"numpydoc",
"nbsphinx",
#"IPython.sphinxext.ipython_console_highlighting",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
#html_theme = 'alabaster'
#html_theme = "sphinx_rtd_theme"
#html_theme = 'bootstrap'
#html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
import sphinx_readable_theme
html_theme_path = [sphinx_readable_theme.get_html_theme_path()]
html_theme = 'readable'
pygments_style = "trac"
html_use_smartypants = True
html_last_updated_fmt = "%b %d, %Y"
html_split_index = False
html_sidebars = {"**": ["globaltoc.html", "sourcelink.html", "searchbox.html"]}
html_short_title = "%s" % (project)
napoleon_use_ivar = True
napoleon_use_rtype = False
napoleon_use_param = False
mathjax_path = (
"http://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML"
)
exclude_patterns = [
"_build",
"Thumbs.db",
".DS_Store",
"templates",
#"**.ipynb_checkpoints",
]
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
|
the-stack_106_17489
|
import os
import shutil
from PIL import Image
INP_SUB_PATH = '/storage/timm/'
INP_SUB_FILE = 'submission_best_tf_efficientnet_b8.txt'
OUT_SUB_PATH = '/storage/submissions/'
OUT_SUB_FILE = 'team007_b8tf_datacrop_BEST_postproc_final.txt'
TEST_SET_DIR = '/dataset/test_set_A_full/'
THRESHOLD = {
'0': 0,
'1': 0.62,
'2': 0,
'3': 0,
'4': 0.52,
'5': 0,
'6': 0.41,
'7': 0,
}
WIDTH0 = [
]
HEIGHT0 = [
500,
640,
672,
704,
736,
768,
800,
832,
864,
896,
928,
960,
]
# The inp_sub file must follow the format "file_name top1 score1 top2 score2"
# Returns the paths to 2 files: (submission in the organisers' format, submission with scores for visualisation)
def post_proc(INP_SUB_PATH, INP_SUB_FILE, OUT_SUB_PATH, OUT_SUB_FILE, TEST_SET_DIR):
fn, _ = os.path.splitext(OUT_SUB_FILE)
test_dir = os.path.dirname(TEST_SET_DIR+'/')
CROP_DIR = test_dir + '_cropped'
NONE_DIR = test_dir + '_none'
SQUARE_DIR = test_dir + '_square'
with open(os.path.join(INP_SUB_PATH, INP_SUB_FILE), 'r') as f:
lines = f.read().splitlines()
lines = [line.strip().split() for line in lines]
lines.sort(key=lambda x: x[0])
def _post_proc(lines):
result = []
for line in lines:
if len(line) == 2:
result.append(line[:2])
continue
fi = line[0]
top1 = line[1]
score1 = '1.0'
top2 = '7'
score2 = '1.0'
score1 = line[2]
top2 = line[3]
score2 = line[4]
img_path = os.path.join(CROP_DIR, fi)
if not os.path.exists(img_path):
continue
img_path = os.path.join(TEST_SET_DIR, fi)
print(img_path)
img = Image.open(img_path)
width, height = img.size
if top1 == '0' and round(width/height, 4) == 1.7778:
result.append([fi, top2, score2, top1, score1])
elif width in WIDTH0 or height in HEIGHT0:
result.append([fi, '0', '1.0', top1, score1])
elif float(score1) <= THRESHOLD[top1]:
result.append([fi, top2, score2, top1, score1])
else:
result.append([fi, top1, score1, top2, score2])
return result
def _populate_none(result):
dir = os.listdir(NONE_DIR)
for fi in dir:
result.append([fi, '0', '1.0000', '0', '1.0000'])
def _populate_square(result):
dir = os.listdir(SQUARE_DIR)
for fi in dir:
result.append([fi, '0', '1.0000', '0', '1.0000'])
result = _post_proc(lines)
print(len(result))
_populate_none(result)
_populate_square(result)
# print(result)
print(len(result))
out_sub_path = os.path.join(OUT_SUB_PATH, OUT_SUB_FILE)
with open(out_sub_path, 'w') as f:
for line in result:
f.write('\t'.join(line[:2]) + '\n')
out_sub_path_viz = os.path.join(OUT_SUB_PATH, "{}_viz.txt".format(fn))
with open(out_sub_path_viz, 'w') as f:
for line in result:
f.write('\t'.join(line) + '\n')
return out_sub_path, out_sub_path_viz
if __name__ == '__main__':
post_proc(INP_SUB_PATH, INP_SUB_FILE, OUT_SUB_PATH, OUT_SUB_FILE, TEST_SET_DIR)
|
the-stack_106_17491
|
# noinspection PyProtectedMember
from torch.optim.lr_scheduler import _LRScheduler, MultiStepLR, CosineAnnealingLR
# noinspection PyAttributeOutsideInit
class GradualWarmupScheduler(_LRScheduler):
""" Gradually warm-up(increasing) learning rate in optimizer.
Proposed in 'Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour'.
Args:
optimizer (Optimizer): Wrapped optimizer.
multiplier: init learning rate = base lr / multiplier
warmup_epoch: target learning rate is reached at warmup_epoch, gradually
after_scheduler: after target_epoch, use this scheduler(eg. ReduceLROnPlateau)
"""
def __init__(self, optimizer, multiplier, warmup_epoch, after_scheduler, last_epoch=-1):
self.multiplier = multiplier
if self.multiplier <= 1.:
raise ValueError('multiplier should be greater than 1.')
self.warmup_epoch = warmup_epoch
self.after_scheduler = after_scheduler
self.finished = False
super().__init__(optimizer, last_epoch=last_epoch)
def get_lr(self):
if self.last_epoch > self.warmup_epoch:
return self.after_scheduler.get_lr()
else:
return [base_lr / self.multiplier * ((self.multiplier - 1.) * self.last_epoch / self.warmup_epoch + 1.)
for base_lr in self.base_lrs]
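# Worked example of the formula above: with multiplier=100 and warmup_epoch=5,
# epoch 0 gives base_lr / 100 and epoch 5 gives
# base_lr / 100 * ((100 - 1) * 5 / 5 + 1) = base_lr, i.e. a linear ramp from
# base_lr / multiplier up to base_lr over the warmup.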
def step(self, epoch=None):
if epoch is None:
epoch = self.last_epoch + 1
self.last_epoch = epoch
if epoch > self.warmup_epoch:
self.after_scheduler.step(epoch - self.warmup_epoch)
else:
super(GradualWarmupScheduler, self).step(epoch)
def state_dict(self):
"""Returns the state of the scheduler as a :class:`dict`.
It contains an entry for every variable in self.__dict__ which
is not the optimizer.
"""
state = {key: value for key, value in self.__dict__.items() if key != 'optimizer' and key != 'after_scheduler'}
state['after_scheduler'] = self.after_scheduler.state_dict()
return state
def load_state_dict(self, state_dict):
"""Loads the schedulers state.
Arguments:
state_dict (dict): scheduler state. Should be an object returned
from a call to :meth:`state_dict`.
"""
after_scheduler_state = state_dict.pop('after_scheduler')
self.__dict__.update(state_dict)
self.after_scheduler.load_state_dict(after_scheduler_state)
def get_scheduler(optimizer, n_iter_per_epoch, args):
if "cosine" in args.lr_scheduler:
scheduler = CosineAnnealingLR(
optimizer=optimizer,
eta_min=0.000001,
T_max=(args.epochs - args.warmup_epoch) * n_iter_per_epoch)
elif "step" in args.lr_scheduler:
scheduler = MultiStepLR(
optimizer=optimizer,
gamma=args.lr_decay_rate,
milestones=[(m - args.warmup_epoch)*n_iter_per_epoch for m in args.lr_decay_epochs])
else:
raise NotImplementedError(f"scheduler {args.lr_scheduler} not supported")
if args.warmup_epoch > 0:
scheduler = GradualWarmupScheduler(
optimizer,
multiplier=args.warmup_multiplier,
after_scheduler=scheduler,
warmup_epoch=args.warmup_epoch * n_iter_per_epoch)
return scheduler
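# Minimal usage sketch (the `args` fields below are assumptions about what the
# surrounding training script passes in, not part of this module):
#
#   import torch
#   from types import SimpleNamespace
#   args = SimpleNamespace(lr_scheduler="cosine", epochs=90, warmup_epoch=5,
#                          warmup_multiplier=100, lr_decay_rate=0.1,
#                          lr_decay_epochs=[60, 80])
#   optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
#   scheduler = get_scheduler(optimizer, n_iter_per_epoch=iters_per_epoch, args=args)
#   for _ in range(args.epochs * iters_per_epoch):
#       ...          # forward / backward / optimizer.step()
#       scheduler.step()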
|
the-stack_106_17493
|
import logging
from typing import Callable
from PIL import Image, ImageDraw, ImageFont
from PIL.Image import Image as ImageType
from custom_components.xiaomi_cloud_map_extractor.common.map_data import ImageData
from custom_components.xiaomi_cloud_map_extractor.const import *
_LOGGER = logging.getLogger(__name__)
class ImageHandler:
COLORS = {
COLOR_MAP_INSIDE: (32, 115, 185),
COLOR_MAP_OUTSIDE: (19, 87, 148),
COLOR_MAP_WALL: (100, 196, 254),
COLOR_MAP_WALL_V2: (93, 109, 126),
COLOR_GREY_WALL: (93, 109, 126),
COLOR_CLEANED_AREA: (127, 127, 127, 127),
COLOR_PATH: (147, 194, 238),
COLOR_GOTO_PATH: (0, 255, 0),
COLOR_PREDICTED_PATH: (255, 255, 0),
COLOR_ZONES: (0xAD, 0xD8, 0xFF, 0x8F),
COLOR_ZONES_OUTLINE: (0xAD, 0xD8, 0xFF),
COLOR_VIRTUAL_WALLS: (255, 0, 0),
COLOR_NEW_DISCOVERED_AREA: (64, 64, 64),
COLOR_NO_GO_ZONES: (255, 33, 55, 127),
COLOR_NO_GO_ZONES_OUTLINE: (255, 0, 0),
COLOR_NO_MOPPING_ZONES: (163, 130, 211, 127),
COLOR_NO_MOPPING_ZONES_OUTLINE: (163, 130, 211),
COLOR_CHARGER: (0x66, 0xfe, 0xda, 0x7f),
COLOR_ROBO: (75, 235, 149),
COLOR_ROOM_NAMES: (0, 0, 0),
COLOR_OBSTACLE: (0, 0, 0, 128),
COLOR_IGNORED_OBSTACLE: (0, 0, 0, 128),
COLOR_OBSTACLE_WITH_PHOTO: (0, 0, 0, 128),
COLOR_IGNORED_OBSTACLE_WITH_PHOTO: (0, 0, 0, 128),
COLOR_UNKNOWN: (0, 0, 0),
COLOR_SCAN: (0xDF, 0xDF, 0xDF),
COLOR_ROOM_1: (240, 178, 122),
COLOR_ROOM_2: (133, 193, 233),
COLOR_ROOM_3: (217, 136, 128),
COLOR_ROOM_4: (52, 152, 219),
COLOR_ROOM_5: (205, 97, 85),
COLOR_ROOM_6: (243, 156, 18),
COLOR_ROOM_7: (88, 214, 141),
COLOR_ROOM_8: (245, 176, 65),
COLOR_ROOM_9: (252, 212, 81),
COLOR_ROOM_10: (72, 201, 176),
COLOR_ROOM_11: (84, 153, 199),
COLOR_ROOM_12: (133, 193, 233),
COLOR_ROOM_13: (245, 176, 65),
COLOR_ROOM_14: (82, 190, 128),
COLOR_ROOM_15: (72, 201, 176),
COLOR_ROOM_16: (165, 105, 189)
}
ROOM_COLORS = [COLOR_ROOM_1, COLOR_ROOM_2, COLOR_ROOM_3, COLOR_ROOM_4, COLOR_ROOM_5, COLOR_ROOM_6, COLOR_ROOM_7,
COLOR_ROOM_8, COLOR_ROOM_9, COLOR_ROOM_10, COLOR_ROOM_11, COLOR_ROOM_12, COLOR_ROOM_13,
COLOR_ROOM_14, COLOR_ROOM_15, COLOR_ROOM_16]
@staticmethod
def load_bg_image(image_width, image_height, alpha=50, bg_file_path="/config/www/map_tmp.png") -> ImageType:
image_orig = Image.open(f"{bg_file_path}").convert('RGBA')
image_rsz = image_orig.resize(
(int(image_width), int(image_height)),
resample=Image.NEAREST)
data = image_rsz.getdata()  # getdata() returns a flat sequence of per-pixel tuples
newData = []
for a in data:
a = a[:3]  # keep only the RGB channels
a = a + (alpha,)  # append the requested transparency (0-255)
newData.append(a)
image_rsz.putdata(newData)  # write the adjusted pixel data back
return image_rsz
@staticmethod
def create_empty_map_image(colors, text="NO MAP") -> ImageType:
color = ImageHandler.__get_color__(COLOR_MAP_OUTSIDE, colors)
image = Image.new('RGBA', (300, 200), color=color)
#image = Image.open(f"/local/map_tmp.png")
if sum(color[0:3]) > 382:
text_color = (0, 0, 0)
else:
text_color = (255, 255, 255)
draw = ImageDraw.Draw(image, "RGBA")
w, h = draw.textsize(text)
draw.text(((image.size[0] - w) / 2, (image.size[1] - h) / 2), text, fill=text_color)
return image
@staticmethod
def draw_path(image: ImageData, path, colors, scale):
ImageHandler.__draw_path__(image, path, ImageHandler.__get_color__(COLOR_PATH, colors), scale)
@staticmethod
def draw_goto_path(image: ImageData, path, colors, scale):
ImageHandler.__draw_path__(image, path, ImageHandler.__get_color__(COLOR_GOTO_PATH, colors), scale)
@staticmethod
def draw_predicted_path(image: ImageData, path, colors, scale):
ImageHandler.__draw_path__(image, path, ImageHandler.__get_color__(COLOR_PREDICTED_PATH, colors), scale)
@staticmethod
def draw_no_go_areas(image: ImageData, areas, colors):
ImageHandler.__draw_areas__(image, areas,
ImageHandler.__get_color__(COLOR_NO_GO_ZONES, colors),
ImageHandler.__get_color__(COLOR_NO_GO_ZONES_OUTLINE, colors))
@staticmethod
def draw_no_mopping_areas(image: ImageData, areas, colors):
ImageHandler.__draw_areas__(image, areas,
ImageHandler.__get_color__(COLOR_NO_MOPPING_ZONES, colors),
ImageHandler.__get_color__(COLOR_NO_MOPPING_ZONES_OUTLINE, colors))
@staticmethod
def draw_walls(image: ImageData, walls, colors):
draw = ImageDraw.Draw(image.data, 'RGBA')
for wall in walls:
draw.line(wall.to_img(image.dimensions).as_list(),
ImageHandler.__get_color__(COLOR_VIRTUAL_WALLS, colors), width=2)
@staticmethod
def draw_zones(image: ImageData, zones, colors):
areas = list(map(lambda z: z.as_area(), zones))
ImageHandler.__draw_areas__(image, areas,
ImageHandler.__get_color__(COLOR_ZONES, colors),
ImageHandler.__get_color__(COLOR_ZONES_OUTLINE, colors))
@staticmethod
def draw_charger(image: ImageData, charger, sizes, colors):
color = ImageHandler.__get_color__(COLOR_CHARGER, colors)
radius = sizes[CONF_SIZE_CHARGER_RADIUS]
ImageHandler.__draw_circle__(image, charger, radius, color, color)
@staticmethod
def draw_obstacles(image: ImageData, obstacles, sizes, colors):
color = ImageHandler.__get_color__(COLOR_OBSTACLE, colors)
radius = sizes[CONF_SIZE_OBSTACLE_RADIUS]
ImageHandler.draw_all_obstacles(image, obstacles, radius, color)
@staticmethod
def draw_ignored_obstacles(image: ImageData, obstacles, sizes, colors):
color = ImageHandler.__get_color__(COLOR_IGNORED_OBSTACLE, colors)
radius = sizes[CONF_SIZE_IGNORED_OBSTACLE_RADIUS]
ImageHandler.draw_all_obstacles(image, obstacles, radius, color)
@staticmethod
def draw_obstacles_with_photo(image: ImageData, obstacles, sizes, colors):
color = ImageHandler.__get_color__(COLOR_OBSTACLE_WITH_PHOTO, colors)
radius = sizes[CONF_SIZE_OBSTACLE_WITH_PHOTO_RADIUS]
ImageHandler.draw_all_obstacles(image, obstacles, radius, color)
@staticmethod
def draw_ignored_obstacles_with_photo(image: ImageData, obstacles, sizes, colors):
color = ImageHandler.__get_color__(COLOR_IGNORED_OBSTACLE_WITH_PHOTO, colors)
radius = sizes[CONF_SIZE_IGNORED_OBSTACLE_WITH_PHOTO_RADIUS]
ImageHandler.draw_all_obstacles(image, obstacles, radius, color)
@staticmethod
def draw_all_obstacles(image: ImageData, obstacles, radius, color):
for obstacle in obstacles:
ImageHandler.__draw_circle__(image, obstacle, radius, color, color)
@staticmethod
def draw_vacuum_position(image: ImageData, vacuum_position, sizes, colors):
color = ImageHandler.__get_color__(COLOR_ROBO, colors)
radius = sizes[CONF_SIZE_VACUUM_RADIUS]
ImageHandler.__draw_circle__(image, vacuum_position, radius, color, color)
@staticmethod
def draw_room_names(image: ImageData, rooms, colors):
color = ImageHandler.__get_color__(COLOR_ROOM_NAMES, colors)
for room in rooms.values():
p = room.point()
if p is not None:
point = p.to_img(image.dimensions)
ImageHandler.__draw_text__(image, room.name, point.x, point.y, color)
@staticmethod
def rotate(image: ImageData):
if image.dimensions.rotation == 90:
image.data = image.data.transpose(Image.ROTATE_90)
if image.dimensions.rotation == 180:
image.data = image.data.transpose(Image.ROTATE_180)
if image.dimensions.rotation == 270:
image.data = image.data.transpose(Image.ROTATE_270)
@staticmethod
def draw_texts(image: ImageData, texts):
for text_config in texts:
x = text_config[CONF_X] * image.data.size[0] / 100
y = text_config[CONF_Y] * image.data.size[1] / 100
ImageHandler.__draw_text__(image, text_config[CONF_TEXT], x, y, text_config[CONF_COLOR],
text_config[CONF_FONT], text_config[CONF_FONT_SIZE])
@staticmethod
def draw_layer(image: ImageData, layer_name):
ImageHandler.__draw_layer__(image, image.additional_layers[layer_name])
@staticmethod
def __draw_circle__(image: ImageData, center, r, outline, fill):
def draw_func(draw: ImageDraw):
point = center.to_img(image.dimensions)
coords = [point.x - r, point.y - r, point.x + r, point.y + r]
draw.ellipse(coords, outline=outline, fill=fill)
ImageHandler.__draw_on_new_layer__(image, draw_func)
@staticmethod
def __draw_areas__(image: ImageData, areas, fill, outline):
if len(areas) == 0:
return
for area in areas:
def draw_func(draw: ImageDraw):
draw.polygon(area.to_img(image.dimensions).as_list(), fill, outline)
ImageHandler.__draw_on_new_layer__(image, draw_func)
@staticmethod
def __draw_path__(image: ImageData, path, color, scale):
if len(path.path) < 2:
return
def draw_func(draw: ImageDraw):
s = path.path[0].to_img(image.dimensions)
for point in path.path[1:]:
e = point.to_img(image.dimensions)
draw.line([s.x * scale, s.y * scale, e.x * scale, e.y * scale], width=int(scale), fill=color)
s = e
ImageHandler.__draw_on_new_layer__(image, draw_func, scale)
@staticmethod
def __draw_text__(image: ImageData, text, x, y, color, font_file=None, font_size=None):
def draw_func(draw: ImageDraw):
font = ImageFont.load_default()
try:
if font_file is not None and font_size > 0:
font = ImageFont.truetype(font_file, font_size)
except OSError:
_LOGGER.warning("Unable to find font file: %s", font_file)
except ImportError:
_LOGGER.warning("Unable to open font: %s", font_file)
finally:
w, h = draw.textsize(text, font)
draw.text((x - w / 2, y - h / 2), text, font=font, fill=color)
ImageHandler.__draw_on_new_layer__(image, draw_func)
@staticmethod
def __get_color__(name, colors, default_name=None):
if name in colors:
return colors[name]
if default_name is None:
return ImageHandler.COLORS[name]
return ImageHandler.COLORS[default_name]
@staticmethod
def __draw_on_new_layer__(image: ImageData, draw_function: Callable, scale=1):
if scale == 1:
size = image.data.size
else:
size = [int(image.data.size[0] * scale), int(image.data.size[1] * scale)]
layer = Image.new("RGBA", size, (255, 255, 255, 0))
draw = ImageDraw.Draw(layer, "RGBA")
draw_function(draw)
if scale != 1:
layer = layer.resize(image.data.size, resample=Image.BOX)
ImageHandler.__draw_layer__(image, layer)
@staticmethod
def __draw_layer__(image: ImageData, layer: ImageType):
image.data = Image.alpha_composite(image.data, layer)
|
the-stack_106_17495
|
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Tag
from recipe.serializers import TagSerializer
TAGS_URL = reverse("recipe:tag-list")
class PublicTagsApiTests(TestCase):
"""Test the publicly available tags API"""
def setUp(self):
self.client = APIClient()
def test_login_required(self):
"""Test that login is required for retrieving tags"""
res = self.client.get(TAGS_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateTagsApiTests(TestCase):
"""Test the authorized user tags API"""
def setUp(self):
self.user = get_user_model().objects.create_user(
"[email protected]", "testpass1234"
)
self.client = APIClient()
self.client.force_authenticate(user=self.user)
def test_retrieve_tags(self):
"""Test retrieving tags"""
Tag.objects.create(user=self.user, name="Vegan")
Tag.objects.create(user=self.user, name="Dessert")
tags = Tag.objects.all().order_by("-name")
serializer = TagSerializer(tags, many=True)
res = self.client.get(TAGS_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_tags_limited_to_user(self):
"""Test that tags returned are for the authenticated user"""
user2 = get_user_model().objects.create_user("[email protected]", "pass4321")
Tag.objects.create(user=user2, name="Fruity")
tag = Tag.objects.create(user=self.user, name="Comfort Food")
res = self.client.get(TAGS_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data[0]["name"], tag.name)
def test_create_tag_successful(self):
"""Test creating a new tag"""
payload = {"name": "Test Tag", "user": self.user.id}
res = self.client.post(TAGS_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
tag_exists = Tag.objects.filter(
user=self.user, name=payload["name"]
).exists()
self.assertTrue(tag_exists)
def test_create_tag_invalid(self):
"""Test creating a new tag with invalid payload"""
payload = {"name": "", "user": self.user.id}
res = self.client.post(TAGS_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
|
the-stack_106_17498
|
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.image import base
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
from tempest import test
class ImagesNegativeTest(base.BaseV2ImageTest):
"""here we have -ve tests for show_image and delete_image api
Tests
** get non-existent image
** get image with image_id=NULL
** get the deleted image
** delete non-existent image
** delete image with image_id=NULL
** delete the deleted image
"""
@test.attr(type=['negative'])
@decorators.idempotent_id('668743d5-08ad-4480-b2b8-15da34f81d9f')
def test_get_non_existent_image(self):
# get the non-existent image
non_existent_id = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound, self.client.show_image,
non_existent_id)
@test.attr(type=['negative'])
@decorators.idempotent_id('ef45000d-0a72-4781-866d-4cb7bf2562ad')
def test_get_image_null_id(self):
# get image with image_id = NULL
image_id = ""
self.assertRaises(lib_exc.NotFound, self.client.show_image, image_id)
@test.attr(type=['negative'])
@decorators.idempotent_id('e57fc127-7ba0-4693-92d7-1d8a05ebcba9')
def test_get_delete_deleted_image(self):
# get and delete the deleted image
# create and delete image
image = self.client.create_image(name='test',
container_format='bare',
disk_format='raw')
self.client.delete_image(image['id'])
self.client.wait_for_resource_deletion(image['id'])
# get the deleted image
self.assertRaises(lib_exc.NotFound,
self.client.show_image, image['id'])
# delete the deleted image
self.assertRaises(lib_exc.NotFound, self.client.delete_image,
image['id'])
@test.attr(type=['negative'])
@decorators.idempotent_id('6fe40f1c-57bd-4918-89cc-8500f850f3de')
def test_delete_non_existing_image(self):
# delete non-existent image
non_existent_image_id = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound, self.client.delete_image,
non_existent_image_id)
@test.attr(type=['negative'])
@decorators.idempotent_id('32248db1-ab88-4821-9604-c7c369f1f88c')
def test_delete_image_null_id(self):
# delete image with image_id=NULL
image_id = ""
self.assertRaises(lib_exc.NotFound, self.client.delete_image,
image_id)
@test.attr(type=['negative'])
@decorators.idempotent_id('292bd310-369b-41c7-a7a3-10276ef76753')
def test_register_with_invalid_container_format(self):
# Negative tests for invalid data supplied to POST /images
self.assertRaises(lib_exc.BadRequest, self.client.create_image,
name='test', container_format='wrong',
disk_format='vhd')
@test.attr(type=['negative'])
@decorators.idempotent_id('70c6040c-5a97-4111-9e13-e73665264ce1')
def test_register_with_invalid_disk_format(self):
self.assertRaises(lib_exc.BadRequest, self.client.create_image,
name='test', container_format='bare',
disk_format='wrong')
|
the-stack_106_17499
|
"""
Contacts - SOLUTION
"""
# You went to a conference and got people to sign up for text updates from your startup. Go through this dict to make the phone numbers readable to a computer.
# Hint: It can't include any non-numeric
# characters.
contacts = {
'Jamie': '1.192.168.0143',
'Kartik': '1.837.209.1121',
'Grant': '1.826.386.1758',
'Brandon': '1.935.297.9447',
'Monique': '1.702.716.5353',
'Sohom': '1.576.619.6100',
}
for k, v in contacts.items():
v = v.split('.')
v = ''.join(v)
contacts[k] = v
for k, v in contacts.items():
print(f'Name: {k}, Phone: {v}')
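# An equivalent, more concise normalization (a sketch working on the same
# `contacts` dict as above):
#
#   contacts = {name: phone.replace('.', '') for name, phone in contacts.items()}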
|
the-stack_106_17500
|
from helpers.registry import registry
import requests
from helpers.console_utils import console
from brownie import web3
def address_to_id(token_address):
checksummed = web3.toChecksumAddress(token_address)
if checksummed == web3.toChecksumAddress(registry.tokens.wbtc):
return "wrapped-bitcoin"
if checksummed == web3.toChecksumAddress(registry.tokens.badger):
return "badger-dao"
if checksummed == web3.toChecksumAddress(registry.tokens.digg):
return "digg"
else:
assert False
def fetch_usd_value(token_address, amount):
    # fetch_usd_price() performs the address -> CoinGecko id lookup itself.
    price = fetch_usd_price(token_address)
return price * amount
def fetch_daily_twap(token_address):
id = address_to_id(token_address)
url = "https://api.coingecko.com/api/v3/coins/" + id
params = "?tickers=true&community_data=false&developer_data=false&sparkline=false"
r = requests.get(url, params)
data = r.json()
market_data = data["market_data"]
console.print(market_data)
return market_data
def fetch_usd_price(token_address):
id = address_to_id(token_address)
url = "https://api.coingecko.com/api/v3/coins/" + id
params = "?tickers=false&community_data=false&developer_data=false&sparkline=false"
r = requests.get(url, params)
data = r.json()
usd_price = data["market_data"]["current_price"]["usd"]
console.print(usd_price)
return usd_price
def fetch_usd_price_eth():
url = "https://api.coingecko.com/api/v3/coins/" + "ethereum"
params = "?tickers=false&community_data=false&developer_data=false&sparkline=false"
r = requests.get(url, params)
data = r.json()
usd_price = data["market_data"]["current_price"]["usd"]
console.print(usd_price)
return usd_price
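# Example usage (a sketch; needs network access plus a configured brownie/registry
# setup, and the token below is only illustrative):
#
#   if __name__ == "__main__":
#       print(fetch_usd_price_eth())
#       print(fetch_usd_value(registry.tokens.badger, 10))  # USD value of 10 BADGER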
|
the-stack_106_17501
|
import os
from .markup_threaded_poll_text import MarkupThreadedPollText
class LoadAverageBox( MarkupThreadedPollText ):
defaults = [
("update_interval", 5, "Update interval in seconds, if none, the "
"widget updates whenever the event loop is idle."),
]
def __init__( self, *args, **kwargs ):
MarkupThreadedPollText.__init__( self, *args, **kwargs )
self.add_defaults( LoadAverageBox.defaults )
def poll( self ):
return "%.2f %.2f %.2f" % os.getloadavg()
# vim: tabstop=4 softtabstop=4 shiftwidth=4 expandtab smarttab
|
the-stack_106_17502
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Test utils for tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import math
import random
import re
import tempfile
import threading
import numpy as np
import six
_portpicker_import_error = None
try:
import portpicker # pylint: disable=g-import-not-at-top
except ImportError as _error:
_portpicker_import_error = _error
portpicker = None
# pylint: disable=g-import-not-at-top
from google.protobuf import descriptor_pool
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client import device_lib
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
from tensorflow.python.util.protobuf import compare
def gpu_device_name():
"""Returns the name of a GPU device if available or the empty string."""
for x in device_lib.list_local_devices():
if x.device_type == "GPU" or x.device_type == "SYCL":
return compat.as_str(x.name)
return ""
def assert_ops_in_graph(expected_ops, graph):
"""Assert all expected operations are found.
Args:
expected_ops: `dict<string, string>` of op name to op type.
graph: Graph to check.
Returns:
`dict<string, node>` of node name to node.
Raises:
ValueError: If the expected ops are not present in the graph.
"""
actual_ops = {}
gd = graph.as_graph_def()
for node in gd.node:
if node.name in expected_ops:
if expected_ops[node.name] != node.op:
raise ValueError("Expected op for node %s is different. %s vs %s" %
(node.name, expected_ops[node.name], node.op))
actual_ops[node.name] = node
if set(expected_ops.keys()) != set(actual_ops.keys()):
raise ValueError("Not all expected ops are present. Expected %s, found %s" %
(expected_ops.keys(), actual_ops.keys()))
return actual_ops
def assert_equal_graph_def(actual, expected, checkpoint_v2=False):
"""Asserts that two `GraphDef`s are (mostly) the same.
Compares two `GraphDef` protos for equality, ignoring versions and ordering of
nodes, attrs, and control inputs. Node names are used to match up nodes
between the graphs, so the naming of nodes must be consistent.
Args:
actual: The `GraphDef` we have.
expected: The `GraphDef` we expected.
checkpoint_v2: boolean determining whether to ignore randomized attribute
values that appear in V2 checkpoints.
Raises:
AssertionError: If the `GraphDef`s do not match.
TypeError: If either argument is not a `GraphDef`.
"""
if not isinstance(actual, graph_pb2.GraphDef):
raise TypeError("Expected tf.GraphDef for actual, got %s" %
type(actual).__name__)
if not isinstance(expected, graph_pb2.GraphDef):
raise TypeError("Expected tf.GraphDef for expected, got %s" %
type(expected).__name__)
if checkpoint_v2:
_strip_checkpoint_v2_randomized(actual)
_strip_checkpoint_v2_randomized(expected)
diff = pywrap_tensorflow.EqualGraphDefWrapper(actual.SerializeToString(),
expected.SerializeToString())
if diff:
raise AssertionError(compat.as_str(diff))
def assert_meta_graph_protos_equal(tester, a, b):
"""Compares MetaGraphDefs `a` and `b` in unit test class `tester`."""
# Carefully check the collection_defs
tester.assertEqual(set(a.collection_def), set(b.collection_def))
collection_keys = a.collection_def.keys()
for k in collection_keys:
a_value = a.collection_def[k]
b_value = b.collection_def[k]
proto_type = ops.get_collection_proto_type(k)
if proto_type:
a_proto = proto_type()
b_proto = proto_type()
# Number of entries in the collections is the same
tester.assertEqual(len(a_value.bytes_list.value),
len(b_value.bytes_list.value))
for (a_value_item, b_value_item) in zip(
a_value.bytes_list.value,
b_value.bytes_list.value):
a_proto.ParseFromString(a_value_item)
b_proto.ParseFromString(b_value_item)
tester.assertProtoEquals(a_proto, b_proto)
else:
tester.assertEquals(a_value, b_value)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("collection_def")
b.ClearField("collection_def")
tester.assertProtoEquals(a, b)
# Matches attributes named via _SHARDED_SUFFIX in
# tensorflow/python/training/saver.py
_SHARDED_SAVE_OP_PATTERN = "_temp_[0-9a-z]{32}/part"
def _strip_checkpoint_v2_randomized(graph_def):
for node in graph_def.node:
delete_keys = []
for attr_key in node.attr:
attr_tensor_value = node.attr[attr_key].tensor
if attr_tensor_value and len(attr_tensor_value.string_val) == 1:
attr_tensor_string_value = attr_tensor_value.string_val[0]
if (attr_tensor_string_value and
re.match(_SHARDED_SAVE_OP_PATTERN, attr_tensor_string_value)):
delete_keys.append(attr_key)
for attr_key in delete_keys:
del node.attr[attr_key]
def IsGoogleCudaEnabled():
return pywrap_tensorflow.IsGoogleCudaEnabled()
def CudaSupportsHalfMatMulAndConv():
return pywrap_tensorflow.CudaSupportsHalfMatMulAndConv()
def NHWCToNCHW(input_tensor):
"""Converts the input from the NHWC format to NCHW.
Args:
input_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {
4: [0, 3, 1, 2],
5: [0, 4, 1, 2, 3]
}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
def NCHWToNHWC(input_tensor):
"""Converts the input from the NCHW format to NHWC.
Args:
input_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {
4: [0, 2, 3, 1],
5: [0, 2, 3, 4, 1]
}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
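# Example (a sketch): NHWCToNCHW([32, 224, 224, 3]) returns [32, 3, 224, 224],
# and NCHWToNHWC([32, 3, 224, 224]) returns [32, 224, 224, 3].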
# TODO(skyewm): remove this eventually
# pylint: disable=protected-access
def _use_c_api_wrapper(fn, use_c_api, *args, **kwargs):
prev_value = ops._USE_C_API
ops._USE_C_API = use_c_api
try:
with ops.Graph().as_default():
fn(*args, **kwargs)
finally:
ops._USE_C_API = prev_value
# pylint: disable=protected-access
# TODO(skyewm): remove this eventually
def disable_c_api(fn):
"""Decorator for disabling the C API on a test.
Note this disables the C API after running the test class's setup/teardown
methods.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
return lambda *args, **kwargs: _use_c_api_wrapper(fn, False, *args, **kwargs)
# TODO(skyewm): remove this eventually
def enable_c_api(fn):
"""Decorator for enabling the C API on a test.
Note this enables the C API after running the test class's setup/teardown
methods.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
return lambda *args, **kwargs: _use_c_api_wrapper(fn, True, *args, **kwargs)
def run_in_graph_and_eager_modes(__unused__=None, graph=None, config=None,
use_gpu=False, force_gpu=False,
reset_test=True):
"""Runs the test in both graph and eager modes.
Args:
    __unused__: Prevents silently skipping tests.
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
reset_test: If True, tearDown and SetUp the test case again.
Returns:
Returns a decorator that will run the decorated test function
using both a graph and using eager execution.
"""
assert not __unused__, "Add () after run_in_graph_and_eager_modes."
def decorator(f):
"""Test method decorator."""
def decorated(self):
"""Decorated the test method."""
with context.graph_mode():
with self.test_session(graph, config, use_gpu, force_gpu):
f(self)
if reset_test:
# This decorator runs the wrapped test twice.
# Reset the test environment between runs.
self.tearDown()
self.setUp()
def run_eager_mode():
if force_gpu:
gpu_name = gpu_device_name()
if not gpu_name:
gpu_name = "/device:GPU:0"
with context.device(gpu_name):
f(self)
elif use_gpu:
          # TODO(xpan): Support soft placement and gpu by default when available.
f(self)
else:
with context.device("/device:CPU:0"):
f(self)
eager_graph = graph or ops.Graph()
with context.eager_mode():
with eager_graph.as_default():
run_eager_mode()
return decorated
return decorator
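# Example usage (a sketch; assumes the test module imports this file as
# `test_util` and has `constant_op` available):
#
#   class AdditionTest(test_util.TensorFlowTestCase):
#
#     @test_util.run_in_graph_and_eager_modes()
#     def testAdd(self):
#       x = constant_op.constant(1.0) + constant_op.constant(2.0)
#       self.assertAllClose(3.0, self.evaluate(x))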
def is_gpu_available(cuda_only=False, min_cuda_compute_capability=None):
"""Returns whether TensorFlow can access a GPU.
Args:
cuda_only: limit the search to CUDA gpus.
min_cuda_compute_capability: a (major,minor) pair that indicates the minimum
CUDA compute capability required, or None if no requirement.
Returns:
True iff a gpu device of the requested kind is available.
"""
def compute_capability_from_device_desc(device_desc):
# TODO(jingyue): The device description generator has to be in sync with
# this file. Another option is to put compute capability in
# DeviceAttributes, but I avoided that to keep DeviceAttributes
# target-independent. Reconsider this option when we have more things like
# this to keep in sync.
# LINT.IfChange
match = re.search(r"compute capability: (\d+)\.(\d+)", device_desc)
# LINT.ThenChange(//tensorflow/core/\
# common_runtime/gpu/gpu_device.cc)
if not match:
return 0, 0
return int(match.group(1)), int(match.group(2))
for local_device in device_lib.list_local_devices():
if local_device.device_type == "GPU":
if (min_cuda_compute_capability is None or
compute_capability_from_device_desc(local_device.physical_device_desc)
>= min_cuda_compute_capability):
return True
if local_device.device_type == "SYCL" and not cuda_only:
return True
return False
@contextlib.contextmanager
def device(use_gpu):
"""Uses gpu when requested and available."""
if use_gpu and is_gpu_available():
dev = "/device:GPU:0"
else:
dev = "/device:CPU:0"
with ops.device(dev):
yield
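# Example (a sketch): `with device(use_gpu=True): ...` places ops on the GPU when
# one is available and silently falls back to "/device:CPU:0" otherwise.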
class TensorFlowTestCase(googletest.TestCase):
"""Base class for tests that need to test TensorFlow.
"""
def __init__(self, methodName="runTest"): # pylint: disable=invalid-name
super(TensorFlowTestCase, self).__init__(methodName)
self._threads = []
self._tempdir = None
self._cached_session = None
def setUp(self):
logging.info("SET UP: %s" % str(self))
self._ClearCachedSession()
random.seed(random_seed.DEFAULT_GRAPH_SEED)
np.random.seed(random_seed.DEFAULT_GRAPH_SEED)
# Note: The following line is necessary because some test methods may error
# out from within nested graph contexts (e.g., via assertRaises and
# assertRaisesRegexp), which may leave ops._default_graph_stack non-empty
# under certain versions of Python. That would cause
# ops.reset_default_graph() to throw an exception if the stack were not
# cleared first.
ops._default_graph_stack.reset() # pylint: disable=protected-access
ops.reset_default_graph()
ops.get_default_graph().seed = random_seed.DEFAULT_GRAPH_SEED
def tearDown(self):
logging.info("TEAR DOWN: %s" % str(self))
for thread in self._threads:
self.assertFalse(thread.is_alive(), "A checkedThread did not terminate")
self._ClearCachedSession()
def _ClearCachedSession(self):
if self._cached_session is not None:
self._cached_session.close()
self._cached_session = None
def get_temp_dir(self):
"""Returns a unique temporary directory for the test to use.
    If you call this method multiple times in a test, it will return the
    same folder. However, across different runs the directories will be
    different. This will ensure that across different runs tests will not be
    able to pollute each other's environment.
If you need multiple unique directories within a single test, you should
use tempfile.mkdtemp as follows:
tempfile.mkdtemp(dir=self.get_temp_dir()):
Returns:
string, the path to the unique temporary directory created for this test.
"""
if not self._tempdir:
self._tempdir = tempfile.mkdtemp(dir=googletest.GetTempDir())
return self._tempdir
def _AssertProtoEquals(self, a, b):
"""Asserts that a and b are the same proto.
Uses ProtoEq() first, as it returns correct results
    for floating point attributes, and then uses assertProtoEqual()
in case of failure as it provides good error messages.
Args:
a: a proto.
b: another proto.
"""
if not compare.ProtoEq(a, b):
compare.assertProtoEqual(self, a, b, normalize_numbers=True)
def assertProtoEquals(self, expected_message_maybe_ascii, message):
"""Asserts that message is same as parsed expected_message_ascii.
Creates another prototype of message, reads the ascii message into it and
    then compares them using self._AssertProtoEquals().
Args:
expected_message_maybe_ascii: proto message in original or ascii form.
message: the message to validate.
"""
if isinstance(expected_message_maybe_ascii, type(message)):
expected_message = expected_message_maybe_ascii
self._AssertProtoEquals(expected_message, message)
elif isinstance(expected_message_maybe_ascii, str):
expected_message = type(message)()
text_format.Merge(expected_message_maybe_ascii, expected_message,
descriptor_pool=descriptor_pool.Default())
self._AssertProtoEquals(expected_message, message)
else:
assert False, ("Can't compare protos of type %s and %s" %
(type(expected_message_maybe_ascii), type(message)))
def assertProtoEqualsVersion(
self,
expected,
actual,
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER):
expected = "versions { producer: %d min_consumer: %d };\n%s" % (
producer, min_consumer, expected)
self.assertProtoEquals(expected, actual)
def assertStartsWith(self, actual, expected_start, msg=None):
"""Assert that actual.startswith(expected_start) is True.
Args:
actual: str
expected_start: str
msg: Optional message to report on failure.
"""
if not actual.startswith(expected_start):
fail_msg = "%r does not start with %r" % (actual, expected_start)
fail_msg += " : %r" % (msg) if msg else ""
self.fail(fail_msg)
def _eval_helper(self, tensors):
if isinstance(tensors, ops.EagerTensor):
return tensors.numpy()
if isinstance(tensors, resource_variable_ops.ResourceVariable):
return tensors.read_value().numpy()
if isinstance(tensors, tuple):
return tuple([self._eval_helper(t) for t in tensors])
elif isinstance(tensors, list):
return [self._eval_helper(t) for t in tensors]
elif isinstance(tensors, dict):
assert not tensors, "Only support empty dict now."
return dict()
else:
raise ValueError("Unsupported type.")
def evaluate(self, tensors):
"""Evaluates tensors and returns numpy values.
Args:
tensors: A Tensor or a nested list/tuple of Tensors.
Returns:
tensors numpy values.
"""
if context.in_eager_mode():
return self._eval_helper(tensors)
else:
sess = ops.get_default_session()
return sess.run(tensors)
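  # Example (a sketch): inside `with self.test_session():` (graph mode) or under
  # eager execution, `self.evaluate(array_ops.ones([2]))` returns the numpy value
  # array([1., 1.], dtype=float32).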
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def test_session(self,
graph=None,
config=None,
use_gpu=False,
force_gpu=False):
"""Returns a TensorFlow Session for use in executing tests.
This method should be used for all functional tests.
    This method behaves differently from session.Session: for performance reasons
`test_session` will by default (if `graph` is None) reuse the same session
across tests. This means you may want to either call the function
`reset_default_graph()` before tests, or if creating an explicit new graph,
pass it here (simply setting it with `as_default()` won't do it), which will
trigger the creation of a new session.
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if `use_gpu`
is True, TensorFlow tries to run as many ops on the GPU as possible. If both
    `force_gpu` and `use_gpu` are False, all ops are pinned to the CPU.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.test_session(use_gpu=True):
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
            self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
```
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
Returns:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if self.id().endswith(".test_session"):
self.skipTest("Not a test.")
def prepare_config(config):
"""Returns a config for sessions.
Args:
config: An optional config_pb2.ConfigProto to use to configure the
session.
Returns:
A config_pb2.ConfigProto object.
"""
if config is None:
config = config_pb2.ConfigProto()
config.allow_soft_placement = not force_gpu
config.gpu_options.per_process_gpu_memory_fraction = 0.3
elif force_gpu and config.allow_soft_placement:
config = config_pb2.ConfigProto().CopyFrom(config)
config.allow_soft_placement = False
# Don't perform optimizations for tests so we don't inadvertently run
# gpu ops on cpu
config.graph_options.optimizer_options.opt_level = -1
config.graph_options.rewrite_options.constant_folding = (
rewriter_config_pb2.RewriterConfig.OFF)
config.graph_options.rewrite_options.arithmetic_optimization = (
rewriter_config_pb2.RewriterConfig.OFF)
return config
if graph is None:
if self._cached_session is None:
self._cached_session = session.Session(
graph=None, config=prepare_config(config))
sess = self._cached_session
with sess.graph.as_default(), sess.as_default():
if force_gpu:
# Use the name of an actual device if one is detected, or '/device:GPU:0'
# otherwise
gpu_name = gpu_device_name()
if not gpu_name:
gpu_name = "/device:GPU:0"
with sess.graph.device(gpu_name):
yield sess
elif use_gpu:
yield sess
else:
with sess.graph.device("/cpu:0"):
yield sess
else:
with session.Session(graph=graph, config=prepare_config(config)) as sess:
if force_gpu:
# Use the name of an actual device if one is detected, or '/device:GPU:0'
# otherwise
gpu_name = gpu_device_name()
if not gpu_name:
gpu_name = "/device:GPU:0"
with sess.graph.device(gpu_name):
yield sess
elif use_gpu:
yield sess
else:
with sess.graph.device("/cpu:0"):
yield sess
# pylint: enable=g-doc-return-or-yield
class _CheckedThread(object):
"""A wrapper class for Thread that asserts successful completion.
This class should be created using the TensorFlowTestCase.checkedThread()
method.
"""
def __init__(self, testcase, target, args=None, kwargs=None):
"""Constructs a new instance of _CheckedThread.
Args:
testcase: The TensorFlowTestCase for which this thread is being created.
target: A callable object representing the code to be executed in the
thread.
args: A tuple of positional arguments that will be passed to target.
kwargs: A dictionary of keyword arguments that will be passed to target.
"""
self._testcase = testcase
self._target = target
self._args = () if args is None else args
self._kwargs = {} if kwargs is None else kwargs
self._thread = threading.Thread(target=self._protected_run)
self._exception = None
def _protected_run(self):
"""Target for the wrapper thread. Sets self._exception on failure."""
try:
self._target(*self._args, **self._kwargs)
except Exception as e: # pylint: disable=broad-except
self._exception = e
def start(self):
"""Starts the thread's activity.
This must be called at most once per _CheckedThread object. It arranges
for the object's target to be invoked in a separate thread of control.
"""
self._thread.start()
def join(self):
"""Blocks until the thread terminates.
Raises:
        self._testcase.failureException: If the thread terminates due to
          an exception.
"""
self._thread.join()
if self._exception is not None:
self._testcase.fail("Error in checkedThread: %s" % str(self._exception))
def is_alive(self):
"""Returns whether the thread is alive.
This method returns True just before the run() method starts
until just after the run() method terminates.
Returns:
True if the thread is alive, otherwise False.
"""
return self._thread.is_alive()
def checkedThread(self, target, args=None, kwargs=None):
"""Returns a Thread wrapper that asserts 'target' completes successfully.
This method should be used to create all threads in test cases, as
otherwise there is a risk that a thread will silently fail, and/or
assertions made in the thread will not be respected.
Args:
target: A callable object to be executed in the thread.
args: The argument tuple for the target invocation. Defaults to ().
kwargs: A dictionary of keyword arguments for the target invocation.
Defaults to {}.
Returns:
A wrapper for threading.Thread that supports start() and join() methods.
"""
ret = TensorFlowTestCase._CheckedThread(self, target, args, kwargs)
self._threads.append(ret)
return ret
# pylint: enable=invalid-name
def assertNear(self, f1, f2, err, msg=None):
"""Asserts that two floats are near each other.
Checks that |f1 - f2| < err and asserts a test failure
if not.
Args:
f1: A float value.
f2: A float value.
err: A float value.
msg: An optional string message to append to the failure message.
"""
self.assertTrue(
math.fabs(f1 - f2) <= err,
"%f != %f +/- %f%s" % (f1, f2, err, " (%s)" % msg
if msg is not None else ""))
def assertArrayNear(self, farray1, farray2, err):
"""Asserts that two float arrays are near each other.
Checks that for all elements of farray1 and farray2
|f1 - f2| < err. Asserts a test failure if not.
Args:
farray1: a list of float values.
farray2: a list of float values.
err: a float value.
"""
self.assertEqual(len(farray1), len(farray2))
for f1, f2 in zip(farray1, farray2):
self.assertNear(float(f1), float(f2), err)
def _NDArrayNear(self, ndarray1, ndarray2, err):
return np.linalg.norm(ndarray1 - ndarray2) < err
def assertNDArrayNear(self, ndarray1, ndarray2, err):
"""Asserts that two numpy arrays have near values.
Args:
ndarray1: a numpy ndarray.
ndarray2: a numpy ndarray.
err: a float. The maximum absolute difference allowed.
"""
self.assertTrue(self._NDArrayNear(ndarray1, ndarray2, err))
def _GetNdArray(self, a):
if not isinstance(a, np.ndarray):
a = np.array(a)
return a
def _assertArrayLikeAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
a = self._GetNdArray(a)
b = self._GetNdArray(b)
self.assertEqual(a.shape, b.shape, "Shape mismatch: expected %s, got %s." %
(a.shape, b.shape))
if not np.allclose(a, b, rtol=rtol, atol=atol):
# Prints more details than np.testing.assert_allclose.
#
# NOTE: numpy.allclose (and numpy.testing.assert_allclose)
# checks whether two arrays are element-wise equal within a
# tolerance. The relative difference (rtol * abs(b)) and the
# absolute difference atol are added together to compare against
# the absolute difference between a and b. Here, we want to
# print out which elements violate such conditions.
cond = np.logical_or(
np.abs(a - b) > atol + rtol * np.abs(b), np.isnan(a) != np.isnan(b))
if a.ndim:
x = a[np.where(cond)]
y = b[np.where(cond)]
print("not close where = ", np.where(cond))
else:
# np.where is broken for scalars
x, y = a, b
print("not close lhs = ", x)
print("not close rhs = ", y)
print("not close dif = ", np.abs(x - y))
print("not close tol = ", atol + rtol * np.abs(y))
print("dtype = %s, shape = %s" % (a.dtype, a.shape))
np.testing.assert_allclose(a, b, rtol=rtol, atol=atol, err_msg=msg)
def assertAllClose(self, a, b, rtol=1e-6, atol=1e-6):
"""Asserts that two numpy arrays, or dicts of same, have near values.
This does not support nested dicts.
Args:
      a: The expected numpy ndarray (or anything that can be converted to
        one), or dict of same. Must be a dict iff `b` is a dict.
      b: The actual numpy ndarray (or anything that can be converted to
        one), or dict of same. Must be a dict iff `a` is a dict.
rtol: relative tolerance.
atol: absolute tolerance.
Raises:
ValueError: if only one of `a` and `b` is a dict.
"""
is_a_dict = isinstance(a, dict)
if is_a_dict != isinstance(b, dict):
raise ValueError("Can't compare dict to non-dict, %s vs %s." % (a, b))
if is_a_dict:
self.assertItemsEqual(
a.keys(), b.keys(),
msg="mismatched keys, expected %s, got %s" % (a.keys(), b.keys()))
for k in a:
self._assertArrayLikeAllClose(
a[k], b[k], rtol=rtol, atol=atol,
msg="%s: expected %s, got %s." % (k, a, b))
else:
self._assertArrayLikeAllClose(a, b, rtol=rtol, atol=atol)
def assertAllCloseAccordingToType(self,
a,
b,
rtol=1e-6,
atol=1e-6,
float_rtol=1e-6,
float_atol=1e-6,
half_rtol=1e-3,
half_atol=1e-3):
"""Like assertAllClose, but also suitable for comparing fp16 arrays.
In particular, the tolerance is reduced to 1e-3 if at least
one of the arguments is of type float16.
Args:
      a: the expected numpy ndarray or anything that can be converted to one.
      b: the actual numpy ndarray or anything that can be converted to one.
rtol: relative tolerance.
atol: absolute tolerance.
float_rtol: relative tolerance for float32.
float_atol: absolute tolerance for float32.
half_rtol: relative tolerance for float16.
half_atol: absolute tolerance for float16.
"""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
if (a.dtype == np.float32 or b.dtype == np.float32 or
a.dtype == np.complex64 or b.dtype == np.complex64):
rtol = max(rtol, float_rtol)
atol = max(atol, float_atol)
if a.dtype == np.float16 or b.dtype == np.float16:
rtol = max(rtol, half_rtol)
atol = max(atol, half_atol)
self.assertAllClose(a, b, rtol=rtol, atol=atol)
def assertAllEqual(self, a, b):
"""Asserts that two numpy arrays have the same values.
Args:
      a: the expected numpy ndarray or anything that can be converted to one.
      b: the actual numpy ndarray or anything that can be converted to one.
"""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
self.assertEqual(a.shape, b.shape, "Shape mismatch: expected %s, got %s." %
(a.shape, b.shape))
same = (a == b)
if a.dtype == np.float32 or a.dtype == np.float64:
same = np.logical_or(same, np.logical_and(np.isnan(a), np.isnan(b)))
if not np.all(same):
# Prints more details than np.testing.assert_array_equal.
diff = np.logical_not(same)
if a.ndim:
x = a[np.where(diff)]
y = b[np.where(diff)]
print("not equal where = ", np.where(diff))
else:
# np.where is broken for scalars
x, y = a, b
print("not equal lhs = ", x)
print("not equal rhs = ", y)
np.testing.assert_array_equal(a, b)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def assertRaisesWithPredicateMatch(self, exception_type,
expected_err_re_or_predicate):
"""Returns a context manager to enclose code expected to raise an exception.
If the exception is an OpError, the op stack is also included in the message
predicate search.
Args:
exception_type: The expected type of exception that should be raised.
expected_err_re_or_predicate: If this is callable, it should be a function
of one argument that inspects the passed-in exception and
returns True (success) or False (please fail the test). Otherwise, the
error message is expected to match this regular expression partially.
Returns:
A context manager to surround code that is expected to raise an
exception.
"""
if callable(expected_err_re_or_predicate):
predicate = expected_err_re_or_predicate
else:
def predicate(e):
err_str = e.message if isinstance(e, errors.OpError) else str(e)
op = e.op if isinstance(e, errors.OpError) else None
while op is not None:
err_str += "\nCaused by: " + op.name
op = op._original_op # pylint: disable=protected-access
logging.info("Searching within error strings: '%s' within '%s'",
expected_err_re_or_predicate, err_str)
return re.search(expected_err_re_or_predicate, err_str)
try:
yield
self.fail(exception_type.__name__ + " not raised")
except Exception as e: # pylint: disable=broad-except
if not isinstance(e, exception_type) or not predicate(e):
raise AssertionError("Exception of type %s: %s" % (str(type(e)),
str(e)))
# pylint: enable=g-doc-return-or-yield
def assertRaisesOpError(self, expected_err_re_or_predicate):
return self.assertRaisesWithPredicateMatch(errors.OpError,
expected_err_re_or_predicate)
def assertShapeEqual(self, np_array, tf_tensor):
"""Asserts that a Numpy ndarray and a TensorFlow tensor have the same shape.
Args:
np_array: A Numpy ndarray or Numpy scalar.
tf_tensor: A Tensor.
Raises:
TypeError: If the arguments have the wrong type.
"""
if not isinstance(np_array, (np.ndarray, np.generic)):
raise TypeError("np_array must be a Numpy ndarray or Numpy scalar")
if not isinstance(tf_tensor, ops.Tensor):
raise TypeError("tf_tensor must be a Tensor")
self.assertAllEqual(np_array.shape, tf_tensor.get_shape().as_list())
def assertDeviceEqual(self, device1, device2):
"""Asserts that the two given devices are the same.
Args:
device1: A string device name or TensorFlow `DeviceSpec` object.
device2: A string device name or TensorFlow `DeviceSpec` object.
"""
device1 = pydev.canonical_name(device1)
device2 = pydev.canonical_name(device2)
self.assertEqual(device1, device2,
"Devices %s and %s are not equal" % (device1, device2))
# Fix Python 3 compatibility issues
if six.PY3:
# pylint: disable=invalid-name
# Silence a deprecation warning
assertRaisesRegexp = googletest.TestCase.assertRaisesRegex
# assertItemsEqual is assertCountEqual as of 3.2.
assertItemsEqual = googletest.TestCase.assertCountEqual
# pylint: enable=invalid-name
def create_local_cluster(num_workers, num_ps, protocol="grpc",
worker_config=None, ps_config=None):
"""Create and start local servers and return the associated `Server` objects.
Example:
```python
workers, _ = tf.test.create_local_cluster(num_workers=2, num_ps=2)
worker_sessions = [tf.Session(w.target) for w in workers]
with tf.device("/job:ps/task:0"):
...
with tf.device("/job:ps/task:1"):
...
with tf.device("/job:worker/task:0"):
...
with tf.device("/job:worker/task:1"):
...
worker_sessions[0].run(...)
```
Args:
num_workers: Number of worker servers to start.
num_ps: Number of PS servers to start.
    protocol: Communication protocol. Allowed values are listed in
      the documentation of `tf.train.Server`.
worker_config: (optional) ConfigProto to initialize workers. Can be used
to instantiate multiple devices etc.
ps_config: (optional) ConfigProto to initialize PS servers.
Returns:
A tuple `(worker_servers, ps_servers)`. `worker_servers` is a list
of `num_workers` objects of type `tf.train.Server` (all running locally);
and `ps_servers` is a list of `num_ps` objects of similar type.
Raises:
ImportError: if portpicker module was not found at load time
"""
if _portpicker_import_error:
raise _portpicker_import_error # pylint: disable=raising-bad-type
worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
cluster_dict = {
"worker": ["localhost:%s" % port for port in worker_ports],
"ps": ["localhost:%s" % port for port in ps_ports]
}
cs = server_lib.ClusterSpec(cluster_dict)
workers = [
server_lib.Server(
cs, job_name="worker", protocol=protocol, task_index=ix,
config=worker_config, start=True)
for ix in range(num_workers)
]
ps_servers = [
server_lib.Server(
cs, job_name="ps", protocol=protocol, task_index=ix,
config=ps_config, start=True)
for ix in range(num_ps)
]
return workers, ps_servers
|
the-stack_106_17504
|
"""
This file offers the methods to automatically retrieve the graph Cryomorphaceae bacterium BACL21 MAG-121220-bin10.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def CryomorphaceaeBacteriumBacl21Mag121220Bin10(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Cryomorphaceae bacterium BACL21 MAG-121220-bin10 graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
    load_nodes: bool = True
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
    verbose: int = 2
        Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.5
- physical.links.v11.5
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of Cryomorphaceae bacterium BACL21 MAG-121220-bin10 graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="CryomorphaceaeBacteriumBacl21Mag121220Bin10",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
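# Example usage (a sketch; the first call downloads and preprocesses the STRING
# data, so it needs network access and some time):
#
#   graph = CryomorphaceaeBacteriumBacl21Mag121220Bin10(directed=False)
#   print(graph)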
|
the-stack_106_17507
|
# STANDARD
ADDEDTOGAME = "该物品被添加到游戏中。"
ALLCLASSESBOX = "[[All classes/zh-hans|全兵种]]"
ITEMLOOK = "" # not matches Chinese translation very well
NOUNMARKER_INDEFINITE_COSMETIC = "一件"
NOUNMARKER_INDEFINITE_SET = "一个"
NOUNMARKER_INDEFINITE_WEAPON = "一把"
SENTENCE_1_ALL = "'''{{{{item name|{item_name}}}}}({item_name})'''是{noun_marker}{promotional}{workshop_link}{class_list}{item_type}。"
SENTENCE_1_COMMUNITY_COSMETIC = "由[[Steam Workshop/zh-hans|社区]]玩家制作的"
SENTENCE_1_COMMUNITY_WEAPON = "由[[Steam Workshop/zh-hans|社区]]玩家制作的"
SENTENCE_1_PROMO_COSMETIC = "在[[Promotional items/zh-hans|促销活动]]中加入的"
SENTENCE_1_PROMO_WEAPON = "在[[Promotional items/zh-hans|促销活动]]中加入的"
SENTENCE_1_COSMETIC = "[[Cosmetic Item/zh-hans|饰品]]"
SENTENCE_1_SET = "[[Item set/zh-hans|物品套装]]"
SENTENCE_1_CLASSES_ALL = "[[Classes/zh-hans|全兵种]]通用"
SENTENCE_1_CLASSES_ONE = "[[{class_name}/zh-hans|{loc_class_name}]]专属"
SENTENCE_1_CLASSES_MORE = "与[[{class_name}/zh-hans|{loc_class_name}]]通用" # manually remove redundant "专属" or "通用".
SENTENCE_1_CLASSES_AND = "与"
SENTENCE_1_CLASSES_COMMA = ","
SENTENCE_COMMUNITY = "{{{{item name|{item_name}}}}}是被{workshop_link}到[[Steam Workshop/zh-hans|Steam创意工坊]]的。"
SENTENCE_COMMUNITY_LINK = "[{link} 贡献]"
SENTENCE_COMMUNITY_NAME = ""
SENTENCE_PROMOTIONAL = "{date}{steam}购买了《[[{game_name}/zh-hans|{loc_class_name}]]》的玩家,会收到[[Genuine/zh-hans|纯正]]品质的{{{{item name|{item_name}}}}}作为奖励。"
SENTENCE_PROMOTIONAL_STEAM = "于[[Steam/zh-hans|Steam]]上"
SENTENCE_PROMOTIONAL_DATE = "在{date}之前"
SENTENCE_SET = "" # not used anymore
SENTENCE_SET_INCLUDES = "该套装包含以下物品:"
SENTENCE_THUMBNAIL = "{{{{item name|{item_name}}}}}的创意工坊缩略图"
SENTENCE_1_SUB_PRIMARY = "[[Weapon/zh-hans#{class_name}primary|主武器]]"
SENTENCE_1_SUB_SECONDARY = "[[Weapon/zh-hans#{class_name}secondary|副武器]]"
SENTENCE_1_SUB_MELEE = "[[Weapon/zh-hans#{class_name}melee|近战武器]]"
ITEM_FLAGS = {
"not usable in crafting": "不可参与合成",
"not tradable": "不可交易",
"not tradable or usable in crafting": "不可交易或参与合成",
}
ATTRIBUTES = {
"achievement item: not tradable": "成就物品:不可交易",
"holiday restriction: tf birthday": "节日使用限制:军团要塞生日",
"holiday restriction: winter": "", # not found in localization files
"holiday restriction: halloween": "节日限制:万圣节",
"holiday restriction: halloween / full moon": "节日限制:万圣节/满月之夜",
"holiday restriction: halloween / full moon / valentine's day": "节日限制:万圣节/满月之夜/情人节",
}
CLASSES = {
"Scout": "侦察兵",
"Soldier": "士兵",
"Pyro": "火焰兵",
"Demoman": "爆破手",
"Heavy": "机枪手",
"Engineer": "工程师",
"Medic": "医生",
"Sniper": "狙击手",
"Spy": "间谍",
}
HEADINGS = {
'as a crafting ingredient': "作为合成材料",
'blueprint': "蓝图",
'bugs': "漏洞",
'crafting': "合成",
'damage and function times': "伤害和作用时间",
'external links': "外部链接",
'gallery': "画廊",
'item set': "物品套装",
'notes': "注释",
'painted variants': "染色预览",
'references': "参考内容",
'related achievements': "相关成就",
'see also': "参见",
'strange variant': "奇异属性",
'styles': "式样",
'trivia': "细枝末节",
'unused content': "未使用内容",
'update history': "更新历史",
}
ITEM_LEVELS = {
'Apparel': "服装",
'Armband': "武装带",
'Aura of Incorruptibility': "正直光环",
'Backpack': "背包",
'Badge': "徽章",
'Balloon': "气球",
'Bandages': "绷带",
'Bandana': "花色丝质大手帕",
'Bandolier': "子弹带",
'Barbeque': "烧烤用品",
'Beach Towel': "海滩浴巾",
'Bells': "铃铛",
'Bird Head': "鸟头",
'Blueprints': "蓝图",
'Bones': "骨骼",
'Bongos': "小鼓",
'Boots': "靴子",
'Botkiller': "机器人毁灭者",
'Cape': "斗篷",
'Championship Belt': "冠军腰带",
'Cigar': "雪茄",
'Coat': "外套",
'Coffin': "棺材",
'Community Medal': "社区勋章",
'Conscience': "良心",
'Cooler': "冷藏箱",
'Cosmetic Armor': "装饰用盔甲",
'Cosmetic Augmentation': "装饰性身体改造",
'Cosmetic Axe': "装饰用斧头",
'Cosmetic Knife': "装饰用刀子",
'Costume Piece': "服装",
'Decorative Bombs': "装饰用炸弹",
'Duck': "鸭子",
'Electronic Device': "电子仪器",
'Eye Stalks': "眼柄",
'Facial Hair': "胡子",
'Flair!': "漂亮的小徽章",
'Flip-Flops': "人字拖",
'Fuel Tank': "燃料罐",
'Func_Medal': "", # not found in localization files
'Futuristic Sound Device': "未来主义风格音响设备",
'Ghost': "鬼魂",
'Glasses': "眼镜",
'Glove': "手套",
'Gloves': "手套",
'Golf Clubs': "",
'Hair': "头发",
'Hat': "帽子",
'Headgear': "头饰",
'Headset': "头戴式显示器",
'Helmet': "头盔",
'Holiday Hat': "节日帽",
'Hooves': "蹄子",
'Kilt': "苏格兰褶裥短裙",
'Lantern': "灯笼",
'Lunchbox': "饭盒",
'Mascot': "吉祥物",
'Mask': "面具",
'Medal': "勋章",
'Medallion': "奖章",
'Mystical Lamp': "神灯",
'Necklace': "项链",
'Photograph': "照片",
'Pin': "胸针",
'Pipe': "烟斗",
'Pocket Buddy': "口袋伙计",
'Pocket Square': "口袋方块",
'Poncho': "斗篷",
'Puffy Shirt': "宽松衬衫",
'Pyrovision Goggles': "护目镜",
'Refreshment': "点心",
'Ring': "钻戒",
'Robot': "机器人",
'Safety Apparatus': "安全装置",
'Satchel': "小包",
'Scarf': "围巾",
'Science Project': "科学项目",
'Shield': "衬衫",
'Shirt': "衬衫",
'Shoes': "鞋子",
'Skateboard': "滑板",
'Sled': "雪橇",
'Snow Globe': "雪景球",
'Spikes': "跑鞋",
'Spirit Animal': "小动物",
'Spooky Companion': "幽灵同伴",
'Spurs': "靴刺",
'Squirrel': "松鼠",
'Stethoscope': "听诊器",
'Stocking': "袜子",
'Supplies': "补给品",
'Tattoos': "刺青",
'Tentacles': "触手",
'Tournament Medal': "锦标赛奖牌",
'Towel': "毛巾",
'Treasure': "宝箱",
'Tuxedo': "燕尾服",
'Undead Pet': "亡灵宠物",
'Uniform': "制服",
"Veteran's Beret": "",
'Wings': "翅膀",
}
|
the-stack_106_17508
|
#!/usr/bin/env python
#-------------------------------------------------------------------------------
# scripts/readelf.py
#
# A clone of 'readelf' in Python, based on the pyelftools library
#
# Eli Bendersky ([email protected])
# This code is in the public domain
#-------------------------------------------------------------------------------
import os, sys
from optparse import OptionParser
import string
# If elftools is not installed, maybe we're running from the root or scripts
# dir of the source distribution
try:
import elftools
except ImportError:
sys.path.extend(['.', '..'])
from elftools import __version__
from elftools.common.exceptions import ELFError
from elftools.common.py3compat import (
ifilter, byte2int, bytes2str, itervalues, str2bytes)
from elftools.elf.elffile import ELFFile
from elftools.elf.segments import InterpSegment
from elftools.elf.sections import SymbolTableSection
from elftools.elf.relocation import RelocationSection
from elftools.elf.descriptions import (
describe_ei_class, describe_ei_data, describe_ei_version,
describe_ei_osabi, describe_e_type, describe_e_machine,
describe_e_version_numeric, describe_p_type, describe_p_flags,
describe_sh_type, describe_sh_flags,
describe_symbol_type, describe_symbol_bind, describe_symbol_visibility,
describe_symbol_shndx, describe_reloc_type,
)
from elftools.dwarf.dwarfinfo import DWARFInfo
from elftools.dwarf.descriptions import (
describe_reg_name, describe_attr_value, set_global_machine_arch,
describe_CFI_instructions, describe_CFI_register_rule,
describe_CFI_CFA_rule,
)
from elftools.dwarf.constants import (
DW_LNS_copy, DW_LNS_set_file, DW_LNE_define_file)
from elftools.dwarf.callframe import CIE, FDE
class ReadElf(object):
""" display_* methods are used to emit output into the output stream
"""
def __init__(self, file, output):
""" file:
stream object with the ELF file to read
output:
output stream to write to
"""
self.elffile = ELFFile(file)
self.output = output
# Lazily initialized if a debug dump is requested
self._dwarfinfo = None
def display_file_header(self):
""" Display the ELF file header
"""
self._emitline('ELF Header:')
self._emit(' Magic: ')
self._emitline(' '.join('%2.2x' % byte2int(b)
for b in self.elffile.e_ident_raw))
header = self.elffile.header
e_ident = header['e_ident']
self._emitline(' Class: %s' %
describe_ei_class(e_ident['EI_CLASS']))
self._emitline(' Data: %s' %
describe_ei_data(e_ident['EI_DATA']))
self._emitline(' Version: %s' %
describe_ei_version(e_ident['EI_VERSION']))
self._emitline(' OS/ABI: %s' %
describe_ei_osabi(e_ident['EI_OSABI']))
self._emitline(' ABI Version: %d' %
e_ident['EI_ABIVERSION'])
self._emitline(' Type: %s' %
describe_e_type(header['e_type']))
self._emitline(' Machine: %s' %
describe_e_machine(header['e_machine']))
self._emitline(' Version: %s' %
describe_e_version_numeric(header['e_version']))
self._emitline(' Entry point address: %s' %
self._format_hex(header['e_entry']))
self._emit(' Start of program headers: %s' %
header['e_phoff'])
self._emitline(' (bytes into file)')
self._emit(' Start of section headers: %s' %
header['e_shoff'])
self._emitline(' (bytes into file)')
self._emitline(' Flags: %s' %
self._format_hex(header['e_flags']))
self._emitline(' Size of this header: %s (bytes)' %
header['e_ehsize'])
self._emitline(' Size of program headers: %s (bytes)' %
header['e_phentsize'])
self._emitline(' Number of program headers: %s' %
header['e_phnum'])
self._emitline(' Size of section headers: %s (bytes)' %
header['e_shentsize'])
self._emitline(' Number of section headers: %s' %
header['e_shnum'])
self._emitline(' Section header string table index: %s' %
header['e_shstrndx'])
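    # Example usage (a sketch; any readable ELF binary works as input):
    #
    #   with open('/bin/ls', 'rb') as f:
    #       ReadElf(f, sys.stdout).display_file_header()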
def display_program_headers(self, show_heading=True):
""" Display the ELF program headers.
If show_heading is True, displays the heading for this information
(Elf file type is...)
"""
self._emitline()
if self.elffile.num_segments() == 0:
self._emitline('There are no program headers in this file.')
return
elfheader = self.elffile.header
if show_heading:
self._emitline('Elf file type is %s' %
describe_e_type(elfheader['e_type']))
self._emitline('Entry point is %s' %
self._format_hex(elfheader['e_entry']))
        # readelf weirdness - why isn't e_phoff printed as hex? (for section
# headers, it is...)
self._emitline('There are %s program headers, starting at offset %s' % (
elfheader['e_phnum'], elfheader['e_phoff']))
self._emitline()
self._emitline('Program Headers:')
# Now comes the table of program headers with their attributes. Note
# that due to different formatting constraints of 32-bit and 64-bit
# addresses, there are some conditions on elfclass here.
#
# First comes the table heading
#
if self.elffile.elfclass == 32:
self._emitline(' Type Offset VirtAddr PhysAddr FileSiz MemSiz Flg Align')
else:
self._emitline(' Type Offset VirtAddr PhysAddr')
self._emitline(' FileSiz MemSiz Flags Align')
# Now the entries
#
for segment in self.elffile.iter_segments():
self._emit(' %-14s ' % describe_p_type(segment['p_type']))
if self.elffile.elfclass == 32:
self._emitline('%s %s %s %s %s %-3s %s' % (
self._format_hex(segment['p_offset'], fieldsize=6),
self._format_hex(segment['p_vaddr'], fullhex=True),
self._format_hex(segment['p_paddr'], fullhex=True),
self._format_hex(segment['p_filesz'], fieldsize=5),
self._format_hex(segment['p_memsz'], fieldsize=5),
describe_p_flags(segment['p_flags']),
self._format_hex(segment['p_align'])))
else: # 64
self._emitline('%s %s %s' % (
self._format_hex(segment['p_offset'], fullhex=True),
self._format_hex(segment['p_vaddr'], fullhex=True),
self._format_hex(segment['p_paddr'], fullhex=True)))
self._emitline(' %s %s %-3s %s' % (
self._format_hex(segment['p_filesz'], fullhex=True),
self._format_hex(segment['p_memsz'], fullhex=True),
describe_p_flags(segment['p_flags']),
# lead0x set to False for p_align, to mimic readelf.
# No idea why the difference from 32-bit mode :-|
self._format_hex(segment['p_align'], lead0x=False)))
if isinstance(segment, InterpSegment):
self._emitline(' [Requesting program interpreter: %s]' %
bytes2str(segment.get_interp_name()))
# Sections to segments mapping
#
if self.elffile.num_sections() == 0:
# No sections? We're done
return
self._emitline('\n Section to Segment mapping:')
self._emitline(' Segment Sections...')
for nseg, segment in enumerate(self.elffile.iter_segments()):
self._emit(' %2.2d ' % nseg)
for section in self.elffile.iter_sections():
if ( not section.is_null() and
segment.section_in_segment(section)):
self._emit('%s ' % bytes2str(section.name))
self._emitline('')
def display_section_headers(self, show_heading=True):
""" Display the ELF section headers
"""
elfheader = self.elffile.header
if show_heading:
self._emitline('There are %s section headers, starting at offset %s' % (
elfheader['e_shnum'], self._format_hex(elfheader['e_shoff'])))
self._emitline('\nSection Header%s:' % (
's' if elfheader['e_shnum'] > 1 else ''))
# Different formatting constraints of 32-bit and 64-bit addresses
#
if self.elffile.elfclass == 32:
self._emitline(' [Nr] Name Type Addr Off Size ES Flg Lk Inf Al')
else:
self._emitline(' [Nr] Name Type Address Offset')
self._emitline(' Size EntSize Flags Link Info Align')
# Now the entries
#
for nsec, section in enumerate(self.elffile.iter_sections()):
self._emit(' [%2u] %-17.17s %-15.15s ' % (
nsec, bytes2str(section.name), describe_sh_type(section['sh_type'])))
if self.elffile.elfclass == 32:
self._emitline('%s %s %s %s %3s %2s %3s %2s' % (
self._format_hex(section['sh_addr'], fieldsize=8, lead0x=False),
self._format_hex(section['sh_offset'], fieldsize=6, lead0x=False),
self._format_hex(section['sh_size'], fieldsize=6, lead0x=False),
self._format_hex(section['sh_entsize'], fieldsize=2, lead0x=False),
describe_sh_flags(section['sh_flags']),
section['sh_link'], section['sh_info'],
section['sh_addralign']))
else: # 64
self._emitline(' %s %s' % (
self._format_hex(section['sh_addr'], fullhex=True, lead0x=False),
self._format_hex(section['sh_offset'],
fieldsize=16 if section['sh_offset'] > 0xffffffff else 8,
lead0x=False)))
self._emitline(' %s %s %3s %2s %3s %s' % (
self._format_hex(section['sh_size'], fullhex=True, lead0x=False),
self._format_hex(section['sh_entsize'], fullhex=True, lead0x=False),
describe_sh_flags(section['sh_flags']),
section['sh_link'], section['sh_info'],
section['sh_addralign']))
self._emitline('Key to Flags:')
self._emit(' W (write), A (alloc), X (execute), M (merge), S (strings)')
if self.elffile['e_machine'] in ('EM_X86_64', 'EM_L10M'):
self._emitline(', l (large)')
else:
self._emitline()
self._emitline(' I (info), L (link order), G (group), T (TLS), E (exclude), x (unknown)')
self._emitline(' O (extra OS processing required) o (OS specific), p (processor specific)')
def display_symbol_tables(self):
""" Display the symbol tables contained in the file
"""
for section in self.elffile.iter_sections():
if not isinstance(section, SymbolTableSection):
continue
if section['sh_entsize'] == 0:
self._emitline("\nSymbol table '%s' has a sh_entsize of zero!" % (
bytes2str(section.name)))
continue
self._emitline("\nSymbol table '%s' contains %s entries:" % (
bytes2str(section.name), section.num_symbols()))
if self.elffile.elfclass == 32:
self._emitline(' Num: Value Size Type Bind Vis Ndx Name')
else: # 64
self._emitline(' Num: Value Size Type Bind Vis Ndx Name')
for nsym, symbol in enumerate(section.iter_symbols()):
# symbol names are truncated to 25 chars, similarly to readelf
self._emitline('%6d: %s %5d %-7s %-6s %-7s %4s %.25s' % (
nsym,
self._format_hex(symbol['st_value'], fullhex=True, lead0x=False),
symbol['st_size'],
describe_symbol_type(symbol['st_info']['type']),
describe_symbol_bind(symbol['st_info']['bind']),
describe_symbol_visibility(symbol['st_other']['visibility']),
describe_symbol_shndx(symbol['st_shndx']),
bytes2str(symbol.name)))
def display_relocations(self):
""" Display the relocations contained in the file
"""
has_relocation_sections = False
for section in self.elffile.iter_sections():
if not isinstance(section, RelocationSection):
continue
has_relocation_sections = True
self._emitline("\nRelocation section '%s' at offset %s contains %s entries:" % (
bytes2str(section.name),
self._format_hex(section['sh_offset']),
section.num_relocations()))
if section.is_RELA():
self._emitline(" Offset Info Type Sym. Value Sym. Name + Addend")
else:
self._emitline(" Offset Info Type Sym.Value Sym. Name")
# The symbol table section pointed to in sh_link
symtable = self.elffile.get_section(section['sh_link'])
for rel in section.iter_relocations():
hexwidth = 8 if self.elffile.elfclass == 32 else 12
self._emit('%s %s %-17.17s' % (
self._format_hex(rel['r_offset'],
fieldsize=hexwidth, lead0x=False),
self._format_hex(rel['r_info'],
fieldsize=hexwidth, lead0x=False),
describe_reloc_type(
rel['r_info_type'], self.elffile)))
if rel['r_info_sym'] == 0:
self._emitline()
continue
symbol = symtable.get_symbol(rel['r_info_sym'])
# Some symbols have zero 'st_name', so instead what's used is
# the name of the section they point at
if symbol['st_name'] == 0:
symsec = self.elffile.get_section(symbol['st_shndx'])
symbol_name = symsec.name
else:
symbol_name = symbol.name
self._emit(' %s %s%22.22s' % (
self._format_hex(
symbol['st_value'],
fullhex=True, lead0x=False),
' ' if self.elffile.elfclass == 32 else '',
bytes2str(symbol_name)))
if section.is_RELA():
self._emit(' %s %x' % (
'+' if rel['r_addend'] >= 0 else '-',
abs(rel['r_addend'])))
self._emitline()
if not has_relocation_sections:
self._emitline('\nThere are no relocations in this file.')
def display_hex_dump(self, section_spec):
""" Display a hex dump of a section. section_spec is either a section
number or a name.
"""
section = self._section_from_spec(section_spec)
if section is None:
self._emitline("Section '%s' does not exist in the file!" % (
section_spec))
return
self._emitline("\nHex dump of section '%s':" % bytes2str(section.name))
self._note_relocs_for_section(section)
addr = section['sh_addr']
data = section.data()
dataptr = 0
while dataptr < len(data):
bytesleft = len(data) - dataptr
# chunks of 16 bytes per line
linebytes = 16 if bytesleft > 16 else bytesleft
self._emit(' %s ' % self._format_hex(addr, fieldsize=8))
for i in range(16):
if i < linebytes:
self._emit('%2.2x' % byte2int(data[dataptr + i]))
else:
self._emit(' ')
if i % 4 == 3:
self._emit(' ')
for i in range(linebytes):
c = data[dataptr + i : dataptr + i + 1]
if byte2int(c[0]) >= 32 and byte2int(c[0]) < 0x7f:
self._emit(bytes2str(c))
else:
self._emit(bytes2str(b'.'))
self._emitline()
addr += linebytes
dataptr += linebytes
self._emitline()
def display_string_dump(self, section_spec):
""" Display a strings dump of a section. section_spec is either a
section number or a name.
"""
section = self._section_from_spec(section_spec)
if section is None:
self._emitline("Section '%s' does not exist in the file!" % (
section_spec))
return
self._emitline("\nString dump of section '%s':" % bytes2str(section.name))
found = False
data = section.data()
dataptr = 0
while dataptr < len(data):
while ( dataptr < len(data) and
not (32 <= byte2int(data[dataptr]) <= 127)):
dataptr += 1
if dataptr >= len(data):
break
endptr = dataptr
while endptr < len(data) and byte2int(data[endptr]) != 0:
endptr += 1
found = True
self._emitline(' [%6x] %s' % (
dataptr, bytes2str(data[dataptr:endptr])))
dataptr = endptr
if not found:
self._emitline(' No strings found in this section.')
else:
self._emitline()
def display_debug_dump(self, dump_what):
""" Dump a DWARF section
"""
self._init_dwarfinfo()
if self._dwarfinfo is None:
return
set_global_machine_arch(self.elffile.get_machine_arch())
if dump_what == 'info':
self._dump_debug_info()
elif dump_what == 'decodedline':
self._dump_debug_line_programs()
elif dump_what == 'frames':
self._dump_debug_frames()
elif dump_what == 'frames-interp':
self._dump_debug_frames_interp()
else:
self._emitline('debug dump not yet supported for "%s"' % dump_what)
def _format_hex(self, addr, fieldsize=None, fullhex=False, lead0x=True):
""" Format an address into a hexadecimal string.
fieldsize:
                Size of the hexadecimal field (with leading zeros to fit the
                address into). For example, with fieldsize=8 the format will
                be %08x.
If None, the minimal required field size will be used.
fullhex:
If True, override fieldsize to set it to the maximal size
needed for the elfclass
lead0x:
If True, leading 0x is added
"""
s = '0x' if lead0x else ''
if fullhex:
fieldsize = 8 if self.elffile.elfclass == 32 else 16
if fieldsize is None:
field = '%x'
else:
field = '%' + '0%sx' % fieldsize
return s + field % addr
def _section_from_spec(self, spec):
""" Retrieve a section given a "spec" (either number or name).
Return None if no such section exists in the file.
"""
try:
num = int(spec)
if num < self.elffile.num_sections():
return self.elffile.get_section(num)
else:
return None
except ValueError:
# Not a number. Must be a name then
return self.elffile.get_section_by_name(str2bytes(spec))
def _note_relocs_for_section(self, section):
""" If there are relocation sections pointing to the givne section,
emit a note about it.
"""
for relsec in self.elffile.iter_sections():
if isinstance(relsec, RelocationSection):
info_idx = relsec['sh_info']
if self.elffile.get_section(info_idx) == section:
self._emitline(' Note: This section has relocations against it, but these have NOT been applied to this dump.')
return
def _init_dwarfinfo(self):
""" Initialize the DWARF info contained in the file and assign it to
self._dwarfinfo.
Leave self._dwarfinfo at None if no DWARF info was found in the file
"""
if self._dwarfinfo is not None:
return
if self.elffile.has_dwarf_info():
self._dwarfinfo = self.elffile.get_dwarf_info()
else:
self._dwarfinfo = None
def _dump_debug_info(self):
""" Dump the debugging info section.
"""
self._emitline('Contents of the .debug_info section:\n')
# Offset of the .debug_info section in the stream
section_offset = self._dwarfinfo.debug_info_sec.global_offset
for cu in self._dwarfinfo.iter_CUs():
self._emitline(' Compilation Unit @ offset %s:' %
self._format_hex(cu.cu_offset))
self._emitline(' Length: %s (%s)' % (
self._format_hex(cu['unit_length']),
'%s-bit' % cu.dwarf_format()))
self._emitline(' Version: %s' % cu['version']),
self._emitline(' Abbrev Offset: %s' % cu['debug_abbrev_offset']),
self._emitline(' Pointer Size: %s' % cu['address_size'])
# The nesting depth of each DIE within the tree of DIEs must be
# displayed. To implement this, a counter is incremented each time
# the current DIE has children, and decremented when a null die is
# encountered. Due to the way the DIE tree is serialized, this will
# correctly reflect the nesting depth
#
die_depth = 0
for die in cu.iter_DIEs():
if die.is_null():
die_depth -= 1
continue
self._emitline(' <%s><%x>: Abbrev Number: %s (%s)' % (
die_depth,
die.offset,
die.abbrev_code,
die.tag))
for attr in itervalues(die.attributes):
name = attr.name
# Unknown attribute values are passed-through as integers
if isinstance(name, int):
name = 'Unknown AT value: %x' % name
self._emitline(' <%2x> %-18s: %s' % (
attr.offset,
name,
describe_attr_value(
attr, die, section_offset)))
if die.has_children:
die_depth += 1
self._emitline()
def _dump_debug_line_programs(self):
""" Dump the (decoded) line programs from .debug_line
The programs are dumped in the order of the CUs they belong to.
"""
self._emitline('Decoded dump of debug contents of section .debug_line:\n')
for cu in self._dwarfinfo.iter_CUs():
lineprogram = self._dwarfinfo.line_program_for_CU(cu)
cu_filename = ''
if len(lineprogram['include_directory']) > 0:
cu_filename = '%s/%s' % (
bytes2str(lineprogram['include_directory'][0]),
bytes2str(lineprogram['file_entry'][0].name))
else:
cu_filename = bytes2str(lineprogram['file_entry'][0].name)
self._emitline('CU: %s:' % cu_filename)
self._emitline('File name Line number Starting address')
# Print each state's file, line and address information. For some
# instructions other output is needed to be compatible with
# readelf.
for entry in lineprogram.get_entries():
state = entry.state
if state is None:
# Special handling for commands that don't set a new state
if entry.command == DW_LNS_set_file:
file_entry = lineprogram['file_entry'][entry.args[0] - 1]
if file_entry.dir_index == 0:
# current directory
self._emitline('\n./%s:[++]' % (
bytes2str(file_entry.name)))
else:
self._emitline('\n%s/%s:' % (
bytes2str(lineprogram['include_directory'][file_entry.dir_index - 1]),
bytes2str(file_entry.name)))
elif entry.command == DW_LNE_define_file:
self._emitline('%s:' % (
bytes2str(lineprogram['include_directory'][entry.args[0].dir_index])))
elif not state.end_sequence:
# readelf doesn't print the state after end_sequence
# instructions. I think it's a bug but to be compatible
                    # I don't print them either.
self._emitline('%-35s %11d %18s' % (
bytes2str(lineprogram['file_entry'][state.file - 1].name),
state.line,
'0' if state.address == 0 else
self._format_hex(state.address)))
if entry.command == DW_LNS_copy:
# Another readelf oddity...
self._emitline()
def _dump_debug_frames(self):
""" Dump the raw frame information from .debug_frame
"""
if not self._dwarfinfo.has_CFI():
return
self._emitline('Contents of the .debug_frame section:')
for entry in self._dwarfinfo.CFI_entries():
if isinstance(entry, CIE):
self._emitline('\n%08x %08x %08x CIE' % (
entry.offset, entry['length'], entry['CIE_id']))
self._emitline(' Version: %d' % entry['version'])
self._emitline(' Augmentation: "%s"' % bytes2str(entry['augmentation']))
self._emitline(' Code alignment factor: %u' % entry['code_alignment_factor'])
self._emitline(' Data alignment factor: %d' % entry['data_alignment_factor'])
self._emitline(' Return address column: %d' % entry['return_address_register'])
self._emitline()
else: # FDE
self._emitline('\n%08x %08x %08x FDE cie=%08x pc=%08x..%08x' % (
entry.offset,
entry['length'],
entry['CIE_pointer'],
entry.cie.offset,
entry['initial_location'],
entry['initial_location'] + entry['address_range']))
self._emit(describe_CFI_instructions(entry))
self._emitline()
def _dump_debug_frames_interp(self):
""" Dump the interpreted (decoded) frame information from .debug_frame
"""
if not self._dwarfinfo.has_CFI():
return
self._emitline('Contents of the .debug_frame section:')
for entry in self._dwarfinfo.CFI_entries():
if isinstance(entry, CIE):
self._emitline('\n%08x %08x %08x CIE "%s" cf=%d df=%d ra=%d' % (
entry.offset,
entry['length'],
entry['CIE_id'],
bytes2str(entry['augmentation']),
entry['code_alignment_factor'],
entry['data_alignment_factor'],
entry['return_address_register']))
ra_regnum = entry['return_address_register']
else: # FDE
self._emitline('\n%08x %08x %08x FDE cie=%08x pc=%08x..%08x' % (
entry.offset,
entry['length'],
entry['CIE_pointer'],
entry.cie.offset,
entry['initial_location'],
entry['initial_location'] + entry['address_range']))
ra_regnum = entry.cie['return_address_register']
# Print the heading row for the decoded table
self._emit(' LOC')
self._emit(' ' if entry.structs.address_size == 4 else ' ')
self._emit(' CFA ')
            # Decode the table and look at the registers it describes.
# We build reg_order here to match readelf's order. In particular,
# registers are sorted by their number, and the register matching
# ra_regnum is always listed last with a special heading.
decoded_table = entry.get_decoded()
reg_order = sorted(ifilter(
lambda r: r != ra_regnum,
decoded_table.reg_order))
# Headings for the registers
for regnum in reg_order:
self._emit('%-6s' % describe_reg_name(regnum))
self._emitline('ra ')
# Now include ra_regnum in reg_order to print its values similarly
# to the other registers.
reg_order.append(ra_regnum)
for line in decoded_table.table:
self._emit(self._format_hex(
line['pc'], fullhex=True, lead0x=False))
self._emit(' %-9s' % describe_CFI_CFA_rule(line['cfa']))
for regnum in reg_order:
if regnum in line:
s = describe_CFI_register_rule(line[regnum])
else:
s = 'u'
self._emit('%-6s' % s)
self._emitline()
self._emitline()
def _emit(self, s=''):
""" Emit an object to output
"""
self.output.write(str(s))
def _emitline(self, s=''):
""" Emit an object to output, followed by a newline
"""
self.output.write(str(s) + '\n')
SCRIPT_DESCRIPTION = 'Display information about the contents of ELF format files'
VERSION_STRING = '%%prog: based on pyelftools %s' % __version__
def main(stream=None):
# parse the command-line arguments and invoke ReadElf
optparser = OptionParser(
usage='usage: %prog [options] <elf-file>',
description=SCRIPT_DESCRIPTION,
add_help_option=False, # -h is a real option of readelf
prog='readelf.py',
version=VERSION_STRING)
optparser.add_option('-H', '--help',
action='store_true', dest='help',
help='Display this information')
optparser.add_option('-h', '--file-header',
action='store_true', dest='show_file_header',
help='Display the ELF file header')
optparser.add_option('-l', '--program-headers', '--segments',
action='store_true', dest='show_program_header',
help='Display the program headers')
optparser.add_option('-S', '--section-headers', '--sections',
action='store_true', dest='show_section_header',
help="Display the sections' headers")
optparser.add_option('-e', '--headers',
action='store_true', dest='show_all_headers',
help='Equivalent to: -h -l -S')
optparser.add_option('-s', '--symbols', '--syms',
action='store_true', dest='show_symbols',
help='Display the symbol table')
optparser.add_option('-r', '--relocs',
action='store_true', dest='show_relocs',
help='Display the relocations (if present)')
optparser.add_option('-x', '--hex-dump',
action='store', dest='show_hex_dump', metavar='<number|name>',
help='Dump the contents of section <number|name> as bytes')
optparser.add_option('-p', '--string-dump',
action='store', dest='show_string_dump', metavar='<number|name>',
help='Dump the contents of section <number|name> as strings')
optparser.add_option('--debug-dump',
action='store', dest='debug_dump_what', metavar='<what>',
help=(
'Display the contents of DWARF debug sections. <what> can ' +
            'be one of {info,decodedline,frames,frames-interp}'))
options, args = optparser.parse_args()
if options.help or len(args) == 0:
optparser.print_help()
sys.exit(0)
if options.show_all_headers:
do_file_header = do_section_header = do_program_header = True
else:
do_file_header = options.show_file_header
do_section_header = options.show_section_header
do_program_header = options.show_program_header
with open(args[0], 'rb') as file:
try:
readelf = ReadElf(file, stream or sys.stdout)
if do_file_header:
readelf.display_file_header()
if do_section_header:
readelf.display_section_headers(
show_heading=not do_file_header)
if do_program_header:
readelf.display_program_headers(
show_heading=not do_file_header)
if options.show_symbols:
readelf.display_symbol_tables()
if options.show_relocs:
readelf.display_relocations()
if options.show_hex_dump:
readelf.display_hex_dump(options.show_hex_dump)
if options.show_string_dump:
readelf.display_string_dump(options.show_string_dump)
if options.debug_dump_what:
readelf.display_debug_dump(options.debug_dump_what)
except ELFError as ex:
sys.stderr.write('ELF error: %s\n' % ex)
sys.exit(1)
def profile_main():
# Run 'main' redirecting its output to readelfout.txt
# Saves profiling information in readelf.profile
PROFFILE = 'readelf.profile'
import cProfile
cProfile.run('main(open("readelfout.txt", "w"))', PROFFILE)
# Dig in some profiling stats
import pstats
p = pstats.Stats(PROFFILE)
p.sort_stats('cumulative').print_stats(25)
#-------------------------------------------------------------------------------
if __name__ == '__main__':
main()
#profile_main()
|
the-stack_106_17510
|
import logging
logging.basicConfig(
level=logging.WARNING
)
LOGGERS = {}
def set_log_level(debug_level: int):
logging.basicConfig(level=debug_level)
for _, logger in LOGGERS.items():
logger.setLevel(debug_level)
def get_logger(logger_name: str) -> logging.Logger:
if logger_name not in LOGGERS.keys():
LOGGERS[logger_name] = logging.getLogger(logger_name)
return LOGGERS[logger_name]
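# Illustrative usage sketch (the logger name below is arbitrary): loggers are
# cached in LOGGERS, so a later set_log_level() call also updates loggers that
# were handed out earlier.
#
#     log = get_logger("worker")
#     log.warning("shown at the default WARNING level")
#     set_log_level(logging.DEBUG)
#     log.debug("shown once the level is lowered")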
|
the-stack_106_17511
|
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 1 14:11:42 2020
@author: jisuk
"""
# %% import basic modules
from __future__ import absolute_import, division, print_function
import matplotlib.pyplot as plt
from tensorflow.keras.datasets import mnist
import tensorflow as tf
import numpy as np
# %% MNIST dataset parameters
num_classes = 10 # 0~9 digits
num_features = 784 # 28 * 28
# training parameters
learning_rate = 0.01
training_steps = 1000
batch_size = 256
display_step = 50
# %% prepare MNIST data
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# convert to float32
x_train, x_test = np.array(x_train, np.float32), np.array(x_test, np.float32)
# Flatten image to 1-D vector of 784 features (28*28)
x_train, x_test = x_train.reshape(
[-1, num_features]), x_test.reshape([-1, num_features])
# normalize image values from [0, 255] to [0, 1]
x_train, x_test = x_train/255., x_test / 255.
# %%Use tf.data API to shuffle and batch data
train_data = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_data = train_data.repeat().shuffle(5000).batch(batch_size).prefetch(1)
# %%weight of shape [784,10] the 28 * 28 image features and total number of classes
W = tf.Variable(tf.ones([num_features, num_classes]), name='weight')
# Bias of shape [10], the total number of classes
b = tf.Variable(tf.zeros([num_classes]), name='bias')
# logistic regression (Wx + b)
def logistic_regression(x):
# apply softmax to normalize the logits to a probability distribution
return tf.nn.softmax(tf.matmul(x, W)+b)
# cross-Entropy loss function
def cross_entropy(y_pred, y_true):
# encode label to a one-hot vector
y_true = tf.one_hot(y_true, depth=num_classes)
    # clip prediction values to avoid log(0) error
y_pred = tf.clip_by_value(y_pred, 1e-9, 1.)
# compute cross-entropy
return tf.reduce_mean(-tf.reduce_sum(y_true*tf.math.log(y_pred), 1))
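# Worked example for the loss above: for one sample with true label 2 and a
# predicted distribution whose third entry is 0.5, the one-hot vector zeroes
# out every term except -log(0.5) ~= 0.693; the batch loss is the mean of
# these per-sample values.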
# accuracy metric
def accuracy(y_pred, y_true):
# predicted class is the index of highest score in prediction vector(i.e. argmax)
correct_prediction = tf.equal(
tf.argmax(y_pred, 1), tf.cast(y_true, tf.int64))
return tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
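# Worked example for the metric above: argmax predictions [3, 1] against true
# labels [3, 2] give the comparison [True, False], so the accuracy is 0.5.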
# stochastic gradient descent optimizer
optimizer = tf.optimizers.SGD(learning_rate)
# %% Optimization process
def run_optimization(x, y):
    # wrap computation inside a GradientTape for automatic differentiation
with tf.GradientTape() as g:
pred = logistic_regression(x)
loss = cross_entropy(pred, y)
# compute gradients
gradients = g.gradient(loss, [W, b])
# update W and b following gradients
optimizer.apply_gradients(zip(gradients, [W, b]))
# %% Run training for the given number of steps
for step, (batch_x, batch_y) in enumerate(train_data.take(training_steps), 1):
    # run the optimization to update W and b values
run_optimization(batch_x, batch_y)
if step % display_step == 0:
pred = logistic_regression(batch_x)
loss = cross_entropy(pred, batch_y)
acc = accuracy(pred, batch_y)
print("step: %i, loss: %f, accuracy: %f" % (step, loss, acc))
# %% test model on validation set
x_test_tensor = tf.constant(x_test, dtype=tf.float32)
pred = logistic_regression(x_test_tensor)
print("Test Accuracy: %f" % accuracy(pred, y_test))
# %% visualize predictions
import matplotlib.pyplot as plt
# %%predict 5 images from validation set
prediction_count = 20
n_images = np.random.randint(0,x_test.shape[0],(prediction_count))
test_images = x_test[n_images]
test_y_answer = y_test[n_images]
test_images_tensor = tf.constant(test_images, dtype=tf.float32)
predictions = logistic_regression(test_images_tensor)
# display image and model prediction
for i in range(prediction_count):
showimg = np.reshape(test_images[i], [28, 28])
showimg = np.uint8(showimg * 255)
plt.imshow(np.reshape(showimg, [28, 28]), cmap='gray')
plt.show()
print("Model prediction: %i, Answer: %i" % (np.argmax(predictions.numpy()[i]),test_y_answer[i]))
|
the-stack_106_17512
|
import functools
import unittest2
from compass.config_management.utils import config_merger
from compass.config_management.utils import config_merger_callbacks
from compass.config_management.utils import config_reference
class TestConfigMerger(unittest2.TestCase):
def test_merge(self):
upper_config = {
'networking': {
'interfaces': {
'management': {
'ip_start': '192.168.1.1',
'ip_end': '192.168.1.100',
'netmask': '255.255.255.0',
'dns_pattern': '%(hostname)s.%(clustername)s.%(search_path)s',
},
'floating': {
'ip_start': '172.16.0.1',
'ip_end': '172.16.0.100',
'netmask': '0.0.0.0',
'dns_pattern': 'public-%(hostname)s.%(clustername)s.%(search_path)s',
},
},
'global': {
'search_path': 'ods.com',
'default_no_proxy': ['127.0.0.1', 'localhost'],
},
},
'clustername': 'cluster1',
'dashboard_roles': ['os-single-controller'],
'role_assign_policy': {
'policy_by_host_numbers': {},
'default': {
'roles': ['os-single-controller', 'os-network',
'os-compute-worker'],
'default_min': 1,
},
},
}
lower_configs = {
1: {
'hostname': 'host1',
},
2: {
'hostname': 'host2',
'networking': {
'interfaces': {
'management': {
'ip': '192.168.1.50',
},
},
},
'roles': ['os-single-controller', 'os-network'],
}
}
expected_lower_configs = {
1: {
'networking': {
'interfaces': {
'floating': {
'ip': '172.16.0.1',
'netmask': '0.0.0.0',
'dns_alias': 'public-host1.cluster1.ods.com'
},
'management': {
'ip': '192.168.1.1',
'netmask': '255.255.255.0',
'dns_alias': 'host1.cluster1.ods.com'
}
},
'global': {
'search_path': 'ods.com',
'default_no_proxy': ['127.0.0.1', 'localhost'],
'ignore_proxy': '127.0.0.1,localhost,host1,192.168.1.1,host2,192.168.1.50'
}
},
'hostname': 'host1',
'has_dashboard_roles': False,
'roles': ['os-compute-worker']
},
2: {
'networking': {
'interfaces': {
'floating': {
'ip': '172.16.0.2',
'netmask': '0.0.0.0',
'dns_alias': 'public-host2.cluster1.ods.com'
},
'management': {
'ip': '192.168.1.50',
'netmask': '255.255.255.0',
'dns_alias': 'host2.cluster1.ods.com'
}
},
'global': {
'search_path': 'ods.com',
'default_no_proxy': ['127.0.0.1', 'localhost'],
'ignore_proxy': '127.0.0.1,localhost,host1,192.168.1.1,host2,192.168.1.50'
}
},
'hostname': 'host2',
'has_dashboard_roles': True,
'roles': ['os-single-controller', 'os-network']
}
}
mappings=[
config_merger.ConfigMapping(
path_list=['/networking/interfaces/*'],
from_upper_keys={'ip_start': 'ip_start', 'ip_end': 'ip_end'},
to_key='ip',
value=config_merger_callbacks.assign_ips
),
config_merger.ConfigMapping(
path_list=['/role_assign_policy'],
from_upper_keys={
'policy_by_host_numbers': 'policy_by_host_numbers',
'default': 'default'},
to_key='/roles',
value=config_merger_callbacks.assign_roles_by_host_numbers
),
config_merger.ConfigMapping(
path_list=['/dashboard_roles'],
from_lower_keys={'lower_values': '/roles'},
to_key='/has_dashboard_roles',
value=config_merger_callbacks.has_intersection
),
config_merger.ConfigMapping(
path_list=[
'/networking/global',
'/networking/interfaces/*/netmask',
'/networking/interfaces/*/nic',
'/networking/interfaces/*/promisc',
'/security/*',
'/partition',
]
),
config_merger.ConfigMapping(
path_list=['/networking/interfaces/*'],
from_upper_keys={'pattern': 'dns_pattern',
'clustername': '/clustername',
'search_path': '/networking/global/search_path'},
from_lower_keys={'hostname': '/hostname'},
to_key='dns_alias',
value=functools.partial(config_merger_callbacks.assign_from_pattern,
upper_keys=['search_path', 'clustername'],
lower_keys=['hostname'])
),
config_merger.ConfigMapping(
path_list=['/networking/global'],
from_upper_keys={'default': 'default_no_proxy',
'clusterid': '/clusterid'},
from_lower_keys={'hostnames': '/hostname',
'ips': '/networking/interfaces/management/ip'},
to_key='ignore_proxy',
value=config_merger_callbacks.assign_noproxy
)
]
merger = config_merger.ConfigMerger(mappings)
merger.merge(upper_config, lower_configs)
self.assertEqual(lower_configs, expected_lower_configs)
if __name__ == '__main__':
unittest2.main()
|
the-stack_106_17513
|
import subprocess
class Extractor:
def __init__(self, config, jar_path, max_path_length, max_path_width):
self.config = config
self.max_path_length = max_path_length
self.max_path_width = max_path_width
self.jar_path = jar_path
def extract_paths(self, path):
command = ['java', '-cp', self.jar_path, 'JavaExtractor.App', '--max_path_length',
str(self.max_path_length), '--max_path_width', str(self.max_path_width), '--file', path, '--no_hash']
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = process.communicate()
output = out.decode().splitlines()
if len(output) == 0:
err = err.decode()
raise ValueError(err)
hash_to_string_dict = {}
result = []
for i, line in enumerate(output):
parts = line.rstrip().split(' ')
method_name = parts[0]
current_result_line_parts = [method_name]
contexts = parts[1:]
for context in contexts[:self.config.MAX_CONTEXTS]:
context_parts = context.split(',')
context_word1 = context_parts[0]
context_path = context_parts[1]
context_word2 = context_parts[2]
hashed_path = str(self.java_string_hashcode(context_path))
hash_to_string_dict[hashed_path] = context_path
current_result_line_parts += ['%s,%s,%s' % (context_word1, hashed_path, context_word2)]
space_padding = ' ' * (self.config.MAX_CONTEXTS - len(contexts))
result_line = ' '.join(current_result_line_parts) + space_padding
result.append(result_line)
return result, hash_to_string_dict
@staticmethod
def java_string_hashcode(s):
"""
        Imitates Java's String#hashCode, because the model is trained on hashed paths but we wish to
        present the path attention on un-hashed paths.
"""
h = 0
for c in s:
h = (31 * h + ord(c)) & 0xFFFFFFFF
return ((h + 0x80000000) & 0xFFFFFFFF) - 0x80000000
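# Illustrative usage sketch (the jar path, sizes and config object below are
# hypothetical; the only attribute this module reads from config is
# MAX_CONTEXTS):
#
#     extractor = Extractor(config, jar_path='JavaExtractor.jar',
#                           max_path_length=8, max_path_width=2)
#     lines, hash_to_path = extractor.extract_paths('Example.java')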
|
the-stack_106_17514
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import unittest
from yaml import safe_load
from maro.data_lib.cim.port_parser import PortsParser
default_conf = """
ports:
demand_port_001:
capacity: 100000
empty_return:
buffer_ticks: 1
noise: 4
full_return:
buffer_ticks: 2
noise: 5
initial_container_proportion: 0.5
order_distribution:
source:
noise: 0
proportion: 0.33
targets:
supply_port_001:
noise: 0
proportion: 1
supply_port_001:
capacity: 1000000
empty_return:
buffer_ticks: 3
noise: 6
full_return:
buffer_ticks: 4
noise: 7
initial_container_proportion: 0.5
order_distribution:
source:
noise: 0
proportion: 0.67
"""
class TestPortParser(unittest.TestCase):
def test_port_parser(self):
total_cntr = 100
conf = safe_load(default_conf)
ppr = PortsParser()
port_mapping, ports = ppr.parse(conf["ports"], total_cntr)
        # the number of ports should match the config
self.assertEqual(2, len(ports))
        # all empty containers should be used
self.assertEqual(total_cntr, sum([p.empty for p in ports]))
        # capacity should match the config
self.assertListEqual([100000, 1000000], [p.capacity for p in ports])
# check empty and full return buffer tick
# and noise
self.assertListEqual([1, 3], [p.empty_return_buffer.base for p in ports])
self.assertListEqual([4, 6], [p.empty_return_buffer.noise for p in ports])
self.assertListEqual([2, 4], [p.full_return_buffer.base for p in ports])
self.assertListEqual([5, 7], [p.full_return_buffer.noise for p in ports])
        # the port mapping should have one entry per port
self.assertEqual(len(port_mapping), len(ports))
if __name__ == "__main__":
unittest.main()
|
the-stack_106_17515
|
#!/usr/bin/env python3
from result import Result
from exceptions import CGECoreOutTypeError, CGECoreOutTranslateError
class Translate():
def __init__(self, type, transl_table):
self.transl_table = transl_table
self.type = type
if(type not in Result.beone_defs):
raise CGECoreOutTypeError(
"Unknown type given to Translate object. type given: {}. "
"type must be one of:\n{}"
.format(type, list(Result.beone_defs.keys())))
self._check_translation_keys()
def translate(self, source_dict):
dest_dict = {}
for key, val in source_dict.items():
dest_key = self.transl_table.get(key, None)
if(dest_key is not None and val is not None):
dest_dict[dest_key] = val
return dest_dict
def _check_translation_keys(self):
for key, val in self.transl_table.items():
if(val not in Result.beone_defs[self.type]):
raise CGECoreOutTranslateError(
"Value in the translation table given was not found in the"
" definition of the given type. Type given: {}. Value not "
"found: {}. Possible values for the given type: {}"
                    .format(self.type, val,
list(self.transl_table.keys())))
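# Illustrative usage sketch (the type name and keys below are hypothetical;
# the chosen type must exist in Result.beone_defs and every value in the
# translation table must be a field of that definition):
#
#     transl = Translate("gene", {"gene_name": "name", "len": "length"})
#     transl.translate({"gene_name": "blaTEM-1", "len": 861, "extra": 1})
#     # -> {"name": "blaTEM-1", "length": 861}   ('extra' has no mapping)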
|
the-stack_106_17516
|
#!/usr/bin/env python3
import cv2
import numpy as np
from activity_service import add_to_sample, run_activity_inference
import requests
import json
import base64
import configparser
config = configparser.ConfigParser()
config.read("config.ini")
def encode_img(image):
_, buffer = cv2.imencode('.jpg', image)
enc_buff = base64.b64encode(buffer)
return str(enc_buff, 'utf-8')
def visualize_text(img, text):
font = cv2.FONT_HERSHEY_SIMPLEX
font_scale = 1
thickness = 1
line_height = np.ceil(0.075 * img.shape[0])
line_width = np.ceil(0.35 * img.shape[1])
line_spacing = np.ceil(0.025 * img.shape[0])
for idx, line in enumerate(text):
((s_x, s_y), _) = cv2.getTextSize(line, font,font_scale, thickness)
if s_x > line_width:
font_scale_x = int((line_width / s_x) / 0.1) * 0.1
s_x *= font_scale_x
s_y *= font_scale_x
font_scale *= font_scale_x
if s_y > line_height:
font_scale_y = int((line_height / s_y) / 0.1) * 0.1
s_x *= font_scale_y
s_y *= font_scale_y
font_scale *= font_scale_y
cv2.putText(img, line, (10,int((idx+1) * (line_spacing + s_y))), font, font_scale, (0, 0, 255), thickness, cv2.LINE_AA)
return img
def visualize_activity(api_url, img):
status_code, last_prediction = add_to_sample(api_url, img)
text = ['', '']
if status_code == 200:
prediction = run_activity_inference(api_url)
text = [f'Label: {prediction[2]}', f'Confidence: {float(prediction[1]) * 100 :0.3f}%']
elif status_code == 202:
if last_prediction != '':
text = [f'Label: {last_prediction[2]}', f'Confidence: {float(last_prediction[1]) * 100 :0.3f}%']
img = visualize_text(img, text)
return img
def visualize_faces(img):
base_url = "http://{}/model/api/v1.0/".format(config['HOSTNAMES']['face_service'])
headers = {'Content-Type': 'application/json'}
image_req = json.dumps({'img': str(encode_img(img))})
response = requests.request(
"POST", base_url+'recognize', headers=headers, data=image_req)
try:
names = json.loads(response.content)['names']
faces = json.loads(response.content)['faces']
landmarks = json.loads(response.content)['landmarks']
# parse predictions
names = names.replace(r'[', '').replace(r']', '').replace(r'"', '').replace(r' ', '').split(',')
# print('Names :', names)
faces = faces.replace(r'[', '').replace(r' ', '').split('],')
faces = [face.replace(r']', '').split(',') for face in faces]
faces = [[float(pos) for pos in face_pos] for face_pos in faces]
faces = np.array(faces)
# print('Faces :', faces)
landmarks = landmarks.replace(r'[', '').replace(r' ', '').split(']],')
landmarks = [landmark.split('],') for landmark in landmarks]
landmarks = [[s.replace(']','').split(',') for s in landmark] for landmark in landmarks]
landmarks = [[[float(pos) for pos in landmark_pos] for landmark_pos in landmark] for landmark in landmarks]
landmarks = np.array(landmarks)
# print('Landmarks :', landmarks)
for i in range(len(names)):
box = faces[i].astype(np.int)
color = (0, 0, 255)
cv2.rectangle(img, (box[0], box[1]), (box[2], box[3]), color, 2)
if landmarks is not None:
landmark5 = landmarks[i].astype(np.int)
# print(landmark5.shape)
for l in range(landmark5.shape[0]):
color = (0, 0, 255)
if l == 0 or l == 3:
color = (0, 255, 0)
cv2.circle(img, (landmark5[l][0], landmark5[l][1]), 1, color, 2)
font = cv2.FONT_HERSHEY_SIMPLEX
bottomLeftCornerOfText = (box[2]-80, box[3]+15)
fontScale = 0.4
fontColor = (0, 255, 255)
lineType = 2
cv2.putText(img, names[i],
bottomLeftCornerOfText,
font,
fontScale,
fontColor,
lineType)
# cv2.imshow('output', img)
except:
pass
return img
def visualize_pose(img):
url = "http://{}/".format(config['HOSTNAMES']['pose_service'])
headers = {'Content-Type': 'application/json'}
image_req = json.dumps({'img': str(encode_img(img))})
response = requests.request("GET", url=url+'analyse_image', headers=headers, data=image_req)
img = json.loads(response.content)['data']
img = np.array(img, dtype=np.uint8)
return img
|
the-stack_106_17517
|
"""Summary
"""
from PyQt5.QtCore import QRectF, QPointF
from PyQt5.QtWidgets import QGraphicsObject
from cadnano import util
from cadnano.views.pathview import pathstyles as styles
from cadnano.gui.palette import getPenObj, getNoBrush
_BW = styles.PATH_BASE_WIDTH
_TOOL_RECT = QRectF(0, 0, _BW, _BW) # protected not private
_RECT = QRectF(-styles.PATH_BASE_HL_STROKE_WIDTH,
-styles.PATH_BASE_HL_STROKE_WIDTH,
_BW + 2 * styles.PATH_BASE_HL_STROKE_WIDTH,
_BW + 2 * styles.PATH_BASE_HL_STROKE_WIDTH)
_PEN = getPenObj(styles.RED_STROKE, styles.PATH_BASE_HL_STROKE_WIDTH)
_BRUSH = getNoBrush()
class AbstractPathTool(QGraphicsObject):
"""Abstract base class to be subclassed by all other pathview tools.
Attributes:
manager (TYPE): Description
"""
def __init__(self, manager):
"""Summary
Args:
manager (TYPE): Description
"""
super(AbstractPathTool, self).__init__(None)
self.manager = manager
self._window = manager.window
self._active = False
self._last_location = None
self._rect = _RECT
self._pen = _PEN
self.hide()
######################## Drawing #######################################
def paint(self, painter, option, widget=None):
"""Summary
Args:
painter (TYPE): Description
option (TYPE): Description
widget (None, optional): Description
Returns:
TYPE: Description
"""
painter.setPen(self._pen)
painter.setBrush(_BRUSH)
painter.drawRect(_TOOL_RECT)
def boundingRect(self):
"""Summary
Returns:
TYPE: Description
"""
return self._rect
######################### Positioning and Parenting ####################
def updateLocation(self, virtual_helix_item, scene_pos, *args):
"""Takes care of caching the location so that a tool switch
outside the context of an event will know where to
position the new tool and snaps self's pos to the upper
left hand corner of the base the user is mousing over.
Args:
virtual_helix_item (cadnano.views.pathview.virtualhelixitem.VirtualHelixItem): Description
scene_pos (TYPE): Description
*args (TYPE): Description
"""
if virtual_helix_item:
if self.parentObject() != virtual_helix_item:
self.setParentItem(virtual_helix_item)
self._last_location = (virtual_helix_item, scene_pos)
pos_item = virtual_helix_item.mapFromScene(scene_pos)
pos = self.helixPos(pos_item)
if pos is not None:
if pos != self.pos():
self.setPos(pos)
self.update(self.boundingRect())
else:
self._last_location = None
if self.isVisible():
self.hide()
# end def
def lastLocation(self):
"""A tool's last_location consists of a VirtualHelixItem and a ScenePos
(QPoint) representing the last known location of the mouse.
It can be used to provide visual continuity when switching tools.
When the new tool is selected, this method will be invoked by
calling `updateLocation(*old_tool.lastLocation())`.
Returns:
location (tuple): (virtual_helix_item, QPoint) representing the last
known location of the mouse for purposes of positioning
the graphic of a new tool on switching tools (the tool
will have called on it)
"""
return self._last_location
def setActive(self, will_be_active, old_tool=None):
"""
Called by PathToolManager.setActiveTool when the tool becomes
active. Used, for example, to show/hide tool-specific ui elements.
Args:
will_be_active (TYPE): Description
old_tool (None, optional): Description
"""
if self._active and not will_be_active:
self.deactivate()
self._active = will_be_active
def deactivate(self):
"""Summary
Returns:
TYPE: Description
"""
self.hide()
def isActive(self):
"""Returns isActive
"""
return self._active
def widgetClicked(self):
"""Called every time a widget representing self gets clicked,
not just when changing tools.
"""
####################### Coordinate Utilities ###########################
def baseAtPoint(self, virtual_helix_item, pt):
"""Returns the (is_fwd, base_idx, strand_idx) corresponding
to pt in virtual_helix_item.
Args:
virtual_helix_item (cadnano.views.pathview.virtualhelixitem.VirtualHelixItem): Description
pt (TYPE): Description
"""
x, strand_idx = self.helixIndex(pt)
is_fwd = False if util.clamp(strand_idx, 0, 1) else True
return (is_fwd, x, strand_idx)
def helixIndex(self, point):
"""Returns the (row, col) of the base which point lies within.
Returns:
point (tuple) in virtual_helix_item coordinates
Args:
point (TYPE): Description
"""
x = int(int(point.x()) / _BW)
y = int(int(point.y()) / _BW)
return (x, y)
# end def
def helixPos(self, point):
"""
Snaps a point to the upper left corner of the base
it is within.
point is in virtual_helix_item coordinates
Args:
point (TYPE): Description
"""
col = int(int(point.x()) / _BW)
row = int(int(point.y()) / _BW)
# Doesn't know numBases, can't check if point is too far right
if col < 0 or row < 0 or row > 1:
return None
return QPointF(col * _BW, row * _BW)
# end def
def hoverLeaveEvent(self, event):
"""
flag is for the case where an item in the path also needs to
implement the hover method
Args:
event (TYPE): Description
"""
self.hide()
# end def
# end class
|
the-stack_106_17519
|
"""
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type   : Server-side tools
Case Name   : Use --skip-root-items to skip check items that require root privileges (CheckBond)
Description :
    1. Skip the NIC bonding mode check and check the CPU: gs_check -i CheckBond,CheckCPU --skip-root-items
    2. Check the CPU first, then skip the NIC bonding mode check: gs_check -i CheckCPU,CheckBond --skip-root-items
    3. Skip the NIC bonding mode check directly (single-item check): gs_check -i CheckBond --skip-root-items
Expect      :
    1. Check completes
    2. Check completes
    3. Check fails
History     :
"""
import unittest
from yat.test import Node
from yat.test import macro
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
logger = Logger()
class Tools(unittest.TestCase):
def setUp(self):
logger.info('--------------Opengauss_Function_Tools_gs_check_Case0327start-------------------')
self.dbuserNode = Node('dbuser')
self.rootNode = Node('default')
self.Constant = Constant()
def test_server_tools1(self):
        logger.info('------------------Skip NIC bonding mode check, check CPU------------------')
check_cmd1 = f'''
source {macro.DB_ENV_PATH}
gs_check -i CheckBond,CheckCPU --skip-root-items
'''
logger.info(check_cmd1)
msg1 = self.dbuserNode.sh(check_cmd1).result()
logger.info(msg1)
flag = (self.Constant.GS_CHECK_SUCCESS_MSG2[0] in msg1 or self.Constant.GS_CHECK_SUCCESS_MSG2[1] in msg1) and \
self.Constant.GS_CHECK_SUCCESS_MSG2[2] in msg1
self.assertTrue(flag)
        logger.info('------------------Check CPU first, then skip NIC bonding mode check------------------')
check_cmd2 = f'''
source {macro.DB_ENV_PATH}
gs_check -i CheckCPU,CheckBond --skip-root-items
'''
logger.info(check_cmd2)
msg2 = self.dbuserNode.sh(check_cmd2).result()
logger.info(msg2)
flag = (self.Constant.GS_CHECK_SUCCESS_MSG2[0] in msg2 or self.Constant.GS_CHECK_SUCCESS_MSG2[1] in msg2) and \
self.Constant.GS_CHECK_SUCCESS_MSG2[2] in msg2
self.assertTrue(flag)
        logger.info('------------------Skip NIC bonding mode check directly (single-item check)------------------')
check_cmd3 = f'''
source {macro.DB_ENV_PATH}
gs_check -i CheckBond --skip-root-items
'''
logger.info(check_cmd3)
msg3 = self.dbuserNode.sh(check_cmd3).result()
logger.info(msg3)
self.assertIn('ERROR: No check item can be performed, please confirm the input parameters',msg3)
def tearDown(self):
        logger.info('--------------No environment cleanup needed-------------------')
logger.info('------------------Opengauss_Function_Tools_gs_check_Case0327finish------------------')
|
the-stack_106_17520
|
"""Tasks for management of this project."""
from __future__ import print_function
import datetime
import doctest
import markdown2
import os
import shutil
import sys
import unittest
from collections import (
namedtuple)
from glob import (
glob)
from jinja2 import (
Environment,
FileSystemLoader)
from pathlib import (
Path)
HERE = os.path.dirname(os.path.abspath(__file__))
MD_DIR = os.path.join(HERE, 'markdown')
STATIC_DIR = os.path.join(HERE, 'static')
JINJA_DIR = os.path.join(HERE, 'jinja')
BUILD_DIR = os.path.join(HERE, 'build')
BUILD_STATIC_DIR = os.path.join(BUILD_DIR, 'static')
FAVICON_PATH = os.path.join(STATIC_DIR, 'favicon.ico')
MD_EXTRAS = ['fenced-code-blocks', 'header-ids', 'metadata', 'tables']
DT_IN_FORMAT = '%Y-%m-%d'
Page = namedtuple(
'Page',
['description', 'link', 'published_at', 'last_modified_at', 'raw_html',
'title'])
class TestFailureError(Exception):
"""Exception type for test failures."""
def _mkdir_if_not_present(dirname):
"""Utility to make a directory if it does not already exist."""
Path(dirname).mkdir(parents=True, exist_ok=True)
def _rmdir_if_present(dirname):
"""Delete a directory if it is present."""
try:
shutil.rmtree(dirname)
except FileNotFoundError:
pass
def _iter_paths(directory, glob_pattern):
"""Iterate over files within a directory that match a pattern."""
for path in Path(directory).glob(glob_pattern):
yield path
def _to_dt(dt_str):
"""Convert a string to a datetime.datetime object."""
return datetime.datetime.strptime(dt_str, DT_IN_FORMAT)
def test():
"""Run doctests over all articles."""
articles_pattern = os.path.join(MD_DIR, '*.md')
suite = doctest.DocFileSuite(
*glob(articles_pattern),
optionflags=doctest.IGNORE_EXCEPTION_DETAIL)
runner = unittest.TextTestRunner()
result = runner.run(suite)
if not result.wasSuccessful():
raise TestFailureError
def build():
"""Compile the site into the build directory."""
clean()
_mkdir_if_not_present(BUILD_DIR)
env = Environment(loader=FileSystemLoader(JINJA_DIR))
# compile markdown pages
pages = []
md_template = env.get_template('md_page.template')
for md_path in _iter_paths(MD_DIR, '*.md'):
with md_path.open(encoding='utf-8') as f:
md_source = f.read()
md_as_html = markdown2.markdown(md_source, extras=MD_EXTRAS)
page = Page(
md_as_html.metadata['description'],
'/' + md_path.stem,
_to_dt(md_as_html.metadata['published_at']),
_to_dt(md_as_html.metadata['last_modified_at']),
str(md_as_html),
md_as_html.metadata['title'])
pages.append(page)
pages = sorted(pages, key=lambda p: p.last_modified_at, reverse=True)
rendered_page = md_template.render(page=page)
dest_fname = md_path.stem + '.html'
dest_path = os.path.join(BUILD_DIR, dest_fname)
with open(dest_path, 'w') as f:
f.write(rendered_page)
print('[*] Compiled markdown pages into', BUILD_DIR)
# compile html pages
for html_path in _iter_paths(JINJA_DIR, '*.html'):
template = env.get_template(html_path.name)
rendered_page = template.render(pages=pages)
dest_path = os.path.join(BUILD_DIR, html_path.name)
with open(dest_path, 'w') as f:
f.write(rendered_page)
print('[*] Compiled HTML pages into', BUILD_DIR)
# copy static files into the build static directory
shutil.copy(FAVICON_PATH, BUILD_DIR)
_rmdir_if_present(BUILD_STATIC_DIR)
shutil.copytree(STATIC_DIR, BUILD_STATIC_DIR)
print('[*] Copied static assets into', BUILD_STATIC_DIR)
def clean():
"""Remove the build directory."""
_rmdir_if_present(BUILD_DIR)
print('[*] Cleaning done')
def serve():
"""Run a livereload server on port 5000."""
from livereload import Server
watch_patterns = [
os.path.join(MD_DIR, '*.md'),
os.path.join(JINJA_DIR, '*'),
os.path.join(STATIC_DIR, '*'),
os.path.join(STATIC_DIR, '**', '*')
]
server = Server()
build()
for pattern in watch_patterns:
server.watch(pattern, build)
print('[*] Running livereload server on port 5000')
server.serve(root=BUILD_DIR, port=5000, host='127.0.0.1')
TASKS = {
'build': build,
'clean': clean,
'serve': serve,
'test': test
}
TASK_KEYS = list(sorted(TASKS.keys()))
def main():
try:
sys.argv.pop(0)
if len(sys.argv) != 1:
raise ValueError('Must specify task to perform')
task = sys.argv.pop()
if task not in TASK_KEYS:
raise ValueError('Specified task must be one of:' +
', '.join(TASK_KEYS))
task_func = TASKS[task]
task_func()
except ValueError as e:
print(e, file=sys.stderr)
return 1
except TestFailureError:
print('Tests failed!')
return 1
except Exception as e:
print('Unknown exception occured!', file=sys.stderr)
raise e
return 0
if __name__ == '__main__':
sys.exit(main())
|
the-stack_106_17521
|
# Copyright 2016, FBPIC contributors
# Authors: Remi Lehe, Manuel Kirchen, Kevin Peters, Soeren Jalas
# License: 3-Clause-BSD-LBNL
"""
Fourier-Bessel Particle-In-Cell (FB-PIC) main file
It defines a set of generic functions for printing simulation information.
"""
import sys, time
from fbpic import __version__
from fbpic.utils.cuda import cuda, cuda_installed
from fbpic.utils.mpi import MPI, mpi_installed, gpudirect_enabled
# Check if terminal is correctly set to UTF-8 and set progress character
if sys.stdout.encoding == 'UTF-8':
progress_char = u'\u2588'
else:
progress_char = '-'
class ProgressBar(object):
"""
ProgressBar class that keeps track of the time spent by the algorithm.
It handles the calculation and printing of the progress bar and a
summary of the total runtime.
"""
def __init__(self, N, n_avg=20, Nbars=35, char=progress_char):
"""
Initializes a timer / progression bar.
Timing is done with respect to the absolute time at initialization.
Parameters
----------
N: int
The total number of iterations performed by the step loop
n_avg: int, optional
The amount of recent timesteps used to calculate the average
time taken by a step
Nbar: int, optional
The number of bars printed for the progression bar
char: str, optional
The character used to show the progression.
"""
self.N = N
self.n_avg = n_avg
self.Nbars = Nbars
self.bar_char = char
# Initialize variables to measure the time taken by the simulation
self.i_step = 0
self.start_time = time.time()
self.prev_time = self.start_time
self.total_duration = 0.
self.time_per_step = 0.
self.avg_time_per_step = 0.
self.eta = None
def time( self, i_step ):
"""
Calculates the time taken by the last iterations, the average time
taken by the most recent iterations and the estimated remaining
simulation time.
Parameters
----------
i_step : int
The current iteration of the loop
"""
# Register current step
self.i_step = i_step
# Calculate time taken by last step
curr_time = time.time()
self.total_duration = curr_time - self.start_time
self.time_per_step = curr_time - self.prev_time
# Estimate average time per step
self.avg_time_per_step += \
(self.time_per_step - self.avg_time_per_step)/self.n_avg
if self.i_step <= 2:
# Ignores first step in time estimation (compilation time)
self.avg_time_per_step = self.time_per_step
# Estimated time in seconds until it will finish
if self.i_step < self.n_avg:
self.eta = None
else:
self.eta = self.avg_time_per_step*(self.N-self.i_step)
# Advance the previous time to the current time
self.prev_time = curr_time
def print_progress( self ):
"""
Prints a progression bar with the estimated
remaining simulation time and the time taken by the last step.
"""
i = self.i_step
# Print progress bar
if i == 0:
# Let the user know that the first step is much longer
sys.stdout.write('\r' + \
'Just-In-Time compilation (up to one minute) ...')
sys.stdout.flush()
else:
# Print the progression bar
nbars = int( (i+1)*1./self.N*self.Nbars )
sys.stdout.write('\r|' + nbars*self.bar_char )
sys.stdout.write((self.Nbars-nbars)*' ')
sys.stdout.write('| %d/%d' %(i+1,self.N))
if self.eta is None:
# Time estimation is only printed after n_avg timesteps
sys.stdout.write(', calc. ETA...')
else:
# Conversion to H:M:S
m, s = divmod(self.eta, 60)
h, m = divmod(m, 60)
sys.stdout.write(', %d:%02d:%02d left' % (h, m, s))
# Time taken by the last step
sys.stdout.write(', %d ms/step' %(self.time_per_step*1.e3))
sys.stdout.flush()
# Clear line
sys.stdout.write('\033[K')
def print_summary( self ):
"""
Print a summary about the total runtime of the simulation.
"""
avg_tps = (self.total_duration / self.N)*1.e3
m, s = divmod(self.total_duration, 60)
h, m = divmod(m, 60)
print('\nTotal time taken (with compilation): %d:%02d:%02d' %(h, m, s))
print('Average time per iteration ' \
'(with compilation): %d ms\n' %(avg_tps))
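# Illustrative usage sketch of ProgressBar inside a step loop (the step count
# and loop body below are placeholders):
#
#     pbar = ProgressBar(N=1000)
#     for i_step in range(1000):
#         ...advance the simulation by one step...
#         pbar.time(i_step)
#         pbar.print_progress()
#     pbar.print_summary()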
# -----------------------------------------------------
# Print utilities
# -----------------------------------------------------
def print_simulation_setup( sim, verbose_level=1 ):
"""
Print information about the simulation.
- Version of FBPIC
- CPU or GPU computation
- Number of parallel MPI domains
- Number of threads in case of CPU multi-threading
- (Additional detailed information)
Parameters
----------
sim: an fbpic Simulation object
Contains all the information of the simulation setup
verbose_level: int, optional
Level of detail of the simulation information
0 - Print no information
1 (Default) - Print basic information
2 - Print detailed information
"""
if verbose_level > 0:
# Print version of FBPIC
message = '\nFBPIC (%s)\n'%__version__
# Basic information
if verbose_level == 1:
# Print information about computational setup
if sim.use_cuda:
message += "\nRunning on GPU "
else:
message += "\nRunning on CPU "
if sim.comm.size > 1:
message += "with %d MPI processes " %sim.comm.size
if sim.use_threading and not sim.use_cuda:
message += "(%d threads per process) " %sim.cpu_threads
# Detailed information
elif verbose_level == 2:
# Information on MPI
if mpi_installed:
message += '\nMPI available: Yes'
message += '\nMPI processes used: %d' %sim.comm.size
message += '\nMPI Library Information: \n%s' \
%MPI.Get_library_version()
else:
message += '\nMPI available: No'
# Information on Cuda
if cuda_installed:
message += '\nCUDA available: Yes'
else:
message += '\nCUDA available: No'
# Information about the architecture and the node used
if sim.use_cuda:
message += '\nCompute architecture: GPU (CUDA)'
if mpi_installed:
if gpudirect_enabled:
message += '\nCUDA GPUDirect (MPI) enabled: Yes'
else:
message += '\nCUDA GPUDirect (MPI) enabled: No'
node_message = get_gpu_message()
else:
message += '\nCompute architecture: CPU'
if sim.use_threading:
message += '\nCPU multi-threading enabled: Yes'
message += '\nThreads: %s' %sim.cpu_threads
else:
message += '\nCPU multi-threading enabled: No'
if sim.fld.trans[0].fft.use_mkl:
message += '\nFFT library: MKL'
else:
message += '\nFFT library: pyFFTW'
node_message = get_cpu_message()
# Gather the information about where each node runs
if sim.comm.size > 1:
node_messages = sim.comm.mpi_comm.gather( node_message )
if sim.comm.rank == 0:
node_message = ''.join( node_messages )
message += node_message
message += '\n'
# Information on the numerical algorithm
if sim.fld.n_order == -1:
message += '\nPSATD stencil order: infinite'
else:
message += '\nPSATD stencil order: %d' %sim.fld.n_order
message += '\nParticle shape: %s' %sim.particle_shape
message += '\nLongitudinal boundaries: %s' %sim.comm.boundaries['z']
message += '\nTransverse boundaries: %s' %sim.comm.boundaries['r']
message += '\nGuard region size: %d ' %sim.comm.n_guard+'cells'
message += '\nDamping region size: %d ' %sim.comm.nz_damp+'cells'
message += '\nInjection region size: %d ' %sim.comm.n_inject+'cells'
message += '\nParticle exchange period: every %d ' \
%sim.comm.exchange_period + 'step'
if sim.boost is not None:
message += '\nBoosted frame: Yes'
message += '\nBoosted frame gamma: %d' %sim.boost.gamma0
if sim.use_galilean:
message += '\nGalilean frame: Yes'
else:
message += '\nGalilean frame: No'
else:
message += '\nBoosted frame: False'
message += '\n'
# Only processor 0 prints the message:
if sim.comm.rank == 0:
print( message )
def print_available_gpus():
"""
Lists all available CUDA GPUs.
"""
cuda.detect()
def get_gpu_message():
"""
Returns a string with information about the currently selected GPU.
"""
gpu = cuda.gpus.current
# Convert bytestring to actual string
try:
gpu_name = gpu.name.decode()
except AttributeError:
gpu_name = gpu.name
# Print the GPU that is being used
if MPI.COMM_WORLD.size > 1:
rank = MPI.COMM_WORLD.rank
node = MPI.Get_processor_name()
message = "\nMPI rank %d selected a %s GPU with id %s on node %s" %(
rank, gpu_name, gpu.id, node)
else:
message = "\nFBPIC selected a %s GPU with id %s" %( gpu_name, gpu.id )
if mpi_installed:
node = MPI.Get_processor_name()
message += " on node %s" %node
return(message)
def get_cpu_message():
"""
Returns a string with information about the node of each MPI rank
"""
# Print the node that is being used
if MPI.COMM_WORLD.size > 1:
rank = MPI.COMM_WORLD.rank
node = MPI.Get_processor_name()
message = "\nMPI rank %d runs on node %s" %(rank, node)
else:
message = ""
return(message)
def print_gpu_meminfo_all():
"""
Prints memory information about all available CUDA GPUs.
"""
gpus = cuda.gpus.lst
for gpu in gpus:
print_gpu_meminfo(gpu)
def print_gpu_meminfo(gpu):
"""
Prints memory information about the GPU.
Parameters :
------------
gpu : object
A numba cuda gpu context object.
"""
with gpu:
meminfo = cuda.current_context().get_memory_info()
print("GPU: %s, free: %s Mbytes, total: %s Mbytes \
" % (gpu, meminfo[0]*1e-6, meminfo[1]*1e-6))
def catch_gpu_memory_error( f ):
"""
Decorator that calls the function `f` and catches any GPU memory
error, during the execution of f.
If a memory error occurs, this decorator prints a corresponding message
and aborts the simulation (using MPI abort if needed)
"""
# Redefine the original function by calling it within a try/except
def g(*args, **kwargs):
try:
return f(*args, **kwargs)
except cuda.cudadrv.driver.CudaAPIError as e:
handle_cuda_memory_error( e, f.__name__ )
# Decorator: return the new function
return(g)
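# Illustrative usage sketch (the decorated function is a placeholder): any
# routine that may allocate GPU memory can be wrapped so that an
# out-of-memory error prints a clear message and aborts across MPI ranks.
#
#     @catch_gpu_memory_error
#     def push_particles(species):
#         ...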
def handle_cuda_memory_error( exception, function_name ):
"""
Print a message indicating which GPU went out of memory,
and abort the simulation (using MPI Abort if needed)
"""
# Print a useful message
message = '\nERROR: GPU reached OUT_OF_MEMORY'
if MPI.COMM_WORLD.size > 1:
message += ' on MPI rank %d' %MPI.COMM_WORLD.rank
message += '\n(Error occured in fbpic function `%s`)\n' %function_name
sys.stdout.write(message)
sys.stdout.flush()
# Abort the simulation
if MPI.COMM_WORLD.size > 1:
MPI.COMM_WORLD.Abort()
else:
raise( exception )
|
the-stack_106_17522
|
import sqlite3
def readFromFile(filename):
str = ''
f = open(filename, 'r', encoding='UTF-8')
while True:
line = f.readline()
if len(line) == 0:
break
str += line
f.close()
return str
def init_db():
    # commit() lives on the connection, not the cursor; executescript() also
    # handles an init.sql that contains more than one statement
    conn = sqlite3.connect('../sql.db')
    conn.cursor().executescript(readFromFile('init.sql'))
    conn.commit()
init_db()
|
the-stack_106_17523
|
from typing import List
class Solution:
def findMaxConsecutiveOnes(self, nums: List[int]) -> int:
count, ans = 0, 0
for n in nums:
if n == 1:
count += 1
ans = max(ans, count)
else:
count = 0
return ans
# TESTS
for nums, expected in [
([0, 0, 0, 0, 0, 0], 0),
([1, 0, 1, 1, 1, 0], 3),
([1, 1, 0, 1, 1, 1], 3),
([1, 0, 1, 1, 0, 1], 2),
([1, 1, 1, 1, 1, 1], 6),
]:
sol = Solution()
actual = sol.findMaxConsecutiveOnes(nums)
print("Max consecutive ones in", nums, "->", actual)
assert actual == expected
|
the-stack_106_17525
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
# Configure the database that Flask connects to
# Connect using MySQL
username = "root"
pwd = "123456"
ip = "134.175.28.202"
# Must match the port set when starting the docker service
port = "8888"
database = "test_ck18"
app.config['SQLALCHEMY_DATABASE_URI'] = \
f'mysql+pymysql://{username}:{pwd}@{ip}:{port}/{database}?charset=utf8'
# app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.db'
# /tmp/test.db is the path where the SQLite database file is created
# app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/test.db'
#
# Bind the database to the Flask app
db = SQLAlchemy(app)
# Each class maps to one table
class User(db.Model):
    # Each class attribute represents a column of the table:
    # id , username , email
    # The db.Column instance describes the configuration of that column
    # Integer type; primary_key marks the primary key
    # Set the table name
__tablename__ = "client"
id = db.Column(db.Integer, primary_key=True)
    # String type; unique=True means values in this column must not repeat,
    # e.g. phone numbers or emails; nullable controls whether an empty value is allowed
    # If nullable is False, the field must not be empty
    # db.String(80) sets the maximum length to 80
username = db.Column(db.String(80), unique=True, nullable=False)
email = db.Column(db.String(120), unique=True, nullable=False)
gender = db.Column(db.String(120))
    # Magic method, called when the object is printed or logged
def __repr__(self):
return '<User %r>' % self.username
if __name__ == '__main__':
    # Drop tables
    # db.drop_all()
    # If the tables hold data, export it first
    # Create tables
    # db.create_all()
    # Create / delete / query / update
    ## Insert data
user2 = User(id=2, username="李五", email="[email protected]", gender="男")
    # Add the data object to the session
    # (similar to git commit; you can stage multiple times)
    # db.session.add(user2)
    # db.session.add(user3)
    # Bulk add
    # db.session.add_all([user4, user5])
    # Every operation that modifies the database must be committed at the end
    # (similar to git push; all changes are then written to the database)
    # db.session.commit()
    ## Query: the class before .query determines which table is queried
    # res = User.query.all()  # query all rows
    ### Conditional query, e.g. fetch the data where id = 1
# res = User.query.filter_by(gender="男").all()
# 拿到查询结果的第一条
# res = User.query.filter_by(gender="男").first()
# print(res.username, res.email, res.gender)
# 通过表达式查询
# res = User.query.all()
# print(res)
#删除操作
# User.query.filter_by(id=2).delete()
# db.session.commit()
## 修改操作
### 第一种方法
# res = User.query.filter_by(id=3).first()
# 直接修改属性
# res.gender = "女"
# db.session.commit()
### 第二种方法
res =User.query.filter_by(id=4).update({"gender": "女", "email": "[email protected]"})
# db.session.commit()
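    # A minimal expression-query sketch (illustrative filters; assumes the
    # 'client' table exists and the MySQL credentials above are valid):
    # res = User.query.filter(User.id > 1).order_by(User.id.desc()).all()
    # res = User.query.filter(User.username.like("%五%")).all()
    # print(res)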
|
the-stack_106_17532
|
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Modifications Copyright 2020 Patrick Hüther ([email protected])
# - this is a modified version of build_voc2012_data.py
"""Converts ara_rosetteSet data to TFRecord file format with Example protos.
Rosette dataset is expected to have the following directory structure:
- datasets
- build_data.py
- build_rosette_data.py (current working directory).
+ ara_rosetteSet | ara_senescentSet | ara_anthoSet
+ rosettes
+ PNGImages
+ SegmentationClass
+ ImageSets
+ Segmentation
+ tfrecord
Image folder:
./ara_rosetteSet/rosettes/PNGImages
Semantic segmentation annotations:
./ara_rosetteSet/rosettes/SegmentationClass
list folder:
./ara_rosetteSet/rosettes/ImageSets/Segmentation
This script converts the data into sharded TFRecord files and saves them in the tfrecord folder.
The Example proto contains the following fields:
image/encoded: encoded image content.
image/filename: image filename.
image/format: image file format.
image/height: image height.
image/width: image width.
image/channels: image channels.
image/segmentation/class/encoded: encoded semantic segmentation content.
image/segmentation/class/format: semantic segmentation file format.
"""
import math
import os.path
import sys
import build_data
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('image_folder',
'./ara_rosetteSet/rosettes/PNGImages',
'Folder containing images.')
tf.app.flags.DEFINE_string(
'semantic_segmentation_folder',
    './ara_rosetteSet/rosettes/SegmentationClassRaw',
'Folder containing semantic segmentation annotations.')
tf.app.flags.DEFINE_string(
'list_folder',
'./ara_rosetteSet/rosettes/ImageSets/Segmentation',
'Folder containing lists for training and validation')
tf.app.flags.DEFINE_string(
'output_dir',
'./tfrecord',
'Path to save converted SSTable of TensorFlow examples.')
tf.app.flags.DEFINE_integer(
'shard_number',
5,
'Number of shards. Ideally results in TFRecord files of roughly 100MB each')
_NUM_SHARDS = FLAGS.shard_number
def _convert_dataset(dataset_split):
"""Converts the specified dataset split to TFRecord format.
Args:
dataset_split: The dataset split (e.g., train, test).
Raises:
RuntimeError: If loaded image and label have different shape.
"""
dataset = os.path.basename(dataset_split)[:-4]
sys.stdout.write('Processing ' + dataset)
filenames = [x.strip('\n') for x in open(dataset_split, 'r')]
num_images = len(filenames)
num_per_shard = int(math.ceil(num_images / float(_NUM_SHARDS)))
image_reader = build_data.ImageReader('png', channels=3)
label_reader = build_data.ImageReader('png', channels=1)
for shard_id in range(_NUM_SHARDS):
output_filename = os.path.join(
FLAGS.output_dir,
'%s-%05d-of-%05d.tfrecord' % (dataset, shard_id, _NUM_SHARDS))
with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer:
start_idx = shard_id * num_per_shard
end_idx = min((shard_id + 1) * num_per_shard, num_images)
for i in range(start_idx, end_idx):
sys.stdout.write('\r>> Converting image %d/%d shard %d' % (
i + 1, len(filenames), shard_id))
sys.stdout.flush()
# Read the image.
image_filename = os.path.join(
FLAGS.image_folder, filenames[i] + '.' + FLAGS.image_format)
image_data = tf.gfile.FastGFile(image_filename, 'rb').read()
height, width = image_reader.read_image_dims(image_data)
# Read the semantic segmentation annotation.
seg_filename = os.path.join(
FLAGS.semantic_segmentation_folder,
filenames[i] + '.' + FLAGS.label_format)
seg_data = tf.gfile.FastGFile(seg_filename, 'rb').read()
seg_height, seg_width = label_reader.read_image_dims(seg_data)
if height != seg_height or width != seg_width:
raise RuntimeError('Shape mismatched between image and label.')
# Convert to tf example.
example = build_data.image_seg_to_tfexample(
image_data, filenames[i], height, width, seg_data)
tfrecord_writer.write(example.SerializeToString())
sys.stdout.write('\n')
sys.stdout.flush()
def main(unused_argv):
dataset_splits = tf.gfile.Glob(os.path.join(FLAGS.list_folder, '*.txt'))
for dataset_split in dataset_splits:
_convert_dataset(dataset_split)
if __name__ == '__main__':
tf.app.run()
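# Example invocation (illustrative; the 'image_format' and 'label_format' flags
# referenced above are assumed to be defined in build_data.py, since they are
# not defined in this file):
#   python build_rosette_data.py \
#     --image_folder=./ara_rosetteSet/rosettes/PNGImages \
#     --semantic_segmentation_folder=./ara_rosetteSet/rosettes/SegmentationClassRaw \
#     --list_folder=./ara_rosetteSet/rosettes/ImageSets/Segmentation \
#     --output_dir=./tfrecord \
#     --shard_number=5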
|