filename | text
---|---
the-stack_106_24076 | import numpy as np
aa = np.array([[1,2,3],[2,4,6]])
bb = aa>2
idx = np.array([1,3,5])
x = np.zeros((6,))
x[idx] = np.array([1.1,1.4,1.5])
a = np.arange(10).reshape(2, 5)
indexer = np.array([[1,3,2],[2,4,3]])
sup = np.repeat(np.arange(2).reshape(2,1),3,axis=1)
a[sup,indexer] # this is what I need
np.ix_([0, 1], [2, 4])
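# Hedged sketch added for illustration (not in the original snippet): two
# equivalent ways to express the per-row column selection done by a[sup, indexer].
# A broadcast column of row indices replaces the explicit `sup` array, and
# np.take_along_axis (NumPy >= 1.15) states the intent directly.
row_idx = np.arange(a.shape[0])[:, None]  # shape (2, 1), broadcasts against (2, 3)
assert np.array_equal(a[row_idx, indexer], a[sup, indexer])
assert np.array_equal(np.take_along_axis(a, indexer, axis=1), a[sup, indexer])
# By contrast, np.ix_ builds an open mesh, so a[np.ix_([0, 1], [2, 4])] selects
# the rectangular block of rows 0-1 and columns 2 and 4.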
print('eof') |
the-stack_106_24080 | """Functional tests for interactive HTTP API."""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
from mock import patch
standard_library.install_aliases()
import functools
import pytest
import requests
import six
import testplan
from testplan import report
from testplan.testing import multitest
from testplan.common.utils import timing
from testplan.common import entity
from testplan.exporters.testing import XMLExporter
from tests.unit.testplan.runnable.interactive import test_api
@multitest.testsuite
class ExampleSuite(object):
"""Example test suite."""
def __init__(self, tmpfile):
self._tmpfile = tmpfile
@multitest.testcase
def test_passes(self, env, result):
"""Testcase that passes."""
result.true(True)
@multitest.testcase
def test_fails(self, env, result):
"""Testcase that fails."""
result.true(False)
@multitest.testcase
def test_logs(self, env, result):
"""Testcase that makes a log."""
result.log("Here I share my deepest thoughts")
@multitest.testcase
def test_attach(self, env, result):
"""Testcase that attaches a file."""
result.attach(self._tmpfile)
@multitest.testcase(parameters=[1, 2, 3])
def test_parametrized(self, env, result, val):
"""Parametrized testcase."""
result.log(val)
result.gt(val, 0)
result.lt(val, 10)
@pytest.fixture
def plan(tmpdir):
"""Yield an interactive testplan."""
with patch(
"testplan.runnable.interactive.reloader.ModuleReloader"
) as MockReloader:
MockReloader.return_value = None
plan = testplan.TestplanMock(
name=six.ensure_str("InteractiveAPITest"),
interactive_port=0,
interactive_block=False,
exporters=[XMLExporter(xml_dir=str(tmpdir / "xml_exporter"))],
)
logfile = tmpdir / "attached_log.txt"
logfile.write_text(
"This text will be written into the attached file.",
encoding="utf8",
)
plan.add(
multitest.MultiTest(
name=six.ensure_str("ExampleMTest"),
suites=[ExampleSuite(str(logfile))],
)
)
plan.run()
timing.wait(
lambda: plan.interactive.http_handler_info is not None,
300,
raise_on_timeout=True,
)
yield plan
plan.abort()
# Expected JSON to be returned from each API resource at start of day, before
# any tests have been run.
EXPECTED_INITIAL_GET = [
(
"/report",
{
"attachments": {},
"category": "testplan",
"entry_uids": ["ExampleMTest"],
"meta": {},
"name": "InteractiveAPITest",
"parent_uids": [],
"status": "unknown",
"runtime_status": "ready",
"counter": {"failed": 0, "passed": 0, "total": 7, "unknown": 7},
"status_override": None,
"tags_index": {},
"timer": {},
"uid": "InteractiveAPITest",
},
),
(
"/report/tests",
[
{
"category": "multitest",
"description": None,
"entry_uids": ["ExampleSuite"],
"env_status": "STOPPED",
"fix_spec_path": None,
"name": "ExampleMTest",
"parent_uids": ["InteractiveAPITest"],
"part": None,
"status": "unknown",
"runtime_status": "ready",
"counter": {
"failed": 0,
"passed": 0,
"total": 7,
"unknown": 7,
},
"status_override": None,
"tags": {},
"timer": {},
"uid": "ExampleMTest",
}
],
),
(
"/report/tests/ExampleMTest",
{
"category": "multitest",
"description": None,
"entry_uids": ["ExampleSuite"],
"env_status": "STOPPED",
"fix_spec_path": None,
"name": "ExampleMTest",
"parent_uids": ["InteractiveAPITest"],
"part": None,
"status": "unknown",
"runtime_status": "ready",
"counter": {"failed": 0, "passed": 0, "total": 7, "unknown": 7},
"status_override": None,
"tags": {},
"timer": {},
"uid": "ExampleMTest",
},
),
(
"/report/tests/ExampleMTest/suites",
[
{
"category": "testsuite",
"description": "Example test suite.",
"entry_uids": [
"test_passes",
"test_fails",
"test_logs",
"test_attach",
"test_parametrized",
],
"env_status": None,
"fix_spec_path": None,
"name": "ExampleSuite",
"parent_uids": ["InteractiveAPITest", "ExampleMTest"],
"part": None,
"status": "unknown",
"runtime_status": "ready",
"counter": {
"failed": 0,
"passed": 0,
"total": 7,
"unknown": 7,
},
"status_override": None,
"tags": {},
"timer": {},
"uid": "ExampleSuite",
}
],
),
(
"/report/tests/ExampleMTest/suites/ExampleSuite",
{
"category": "testsuite",
"description": "Example test suite.",
"entry_uids": [
"test_passes",
"test_fails",
"test_logs",
"test_attach",
"test_parametrized",
],
"env_status": None,
"fix_spec_path": None,
"name": "ExampleSuite",
"parent_uids": ["InteractiveAPITest", "ExampleMTest"],
"part": None,
"status": "unknown",
"runtime_status": "ready",
"counter": {"failed": 0, "passed": 0, "total": 7, "unknown": 7},
"status_override": None,
"tags": {},
"timer": {},
"uid": "ExampleSuite",
},
),
(
"/report/tests/ExampleMTest/suites/ExampleSuite/testcases",
[
{
"category": "testcase",
"description": "Testcase that passes.",
"entries": [],
"logs": [],
"name": "test_passes",
"parent_uids": [
"InteractiveAPITest",
"ExampleMTest",
"ExampleSuite",
],
"status": "unknown",
"runtime_status": "ready",
"counter": {
"passed": 0,
"failed": 0,
"total": 1,
"unknown": 1,
},
"status_override": None,
"suite_related": False,
"tags": {},
"timer": {},
"type": "TestCaseReport",
"uid": "test_passes",
"status_reason": None,
},
{
"category": "testcase",
"description": "Testcase that fails.",
"entries": [],
"logs": [],
"name": "test_fails",
"parent_uids": [
"InteractiveAPITest",
"ExampleMTest",
"ExampleSuite",
],
"status": "unknown",
"runtime_status": "ready",
"counter": {
"passed": 0,
"failed": 0,
"total": 1,
"unknown": 1,
},
"status_override": None,
"suite_related": False,
"tags": {},
"timer": {},
"type": "TestCaseReport",
"uid": "test_fails",
"status_reason": None,
},
{
"category": "testcase",
"description": "Testcase that makes a log.",
"entries": [],
"logs": [],
"name": "test_logs",
"parent_uids": [
"InteractiveAPITest",
"ExampleMTest",
"ExampleSuite",
],
"status": "unknown",
"runtime_status": "ready",
"status_override": None,
"counter": {
"passed": 0,
"failed": 0,
"total": 1,
"unknown": 1,
},
"suite_related": False,
"tags": {},
"timer": {},
"type": "TestCaseReport",
"uid": "test_logs",
"status_reason": None,
},
{
"category": "testcase",
"description": "Testcase that attaches a file.",
"entries": [],
"logs": [],
"name": "test_attach",
"parent_uids": [
"InteractiveAPITest",
"ExampleMTest",
"ExampleSuite",
],
"status": "unknown",
"runtime_status": "ready",
"status_override": None,
"counter": {
"passed": 0,
"failed": 0,
"total": 1,
"unknown": 1,
},
"suite_related": False,
"tags": {},
"timer": {},
"type": "TestCaseReport",
"uid": "test_attach",
"status_reason": None,
},
{
"category": "parametrization",
"counter": {
"passed": 0,
"failed": 0,
"total": 3,
"unknown": 3,
},
"description": "Parametrized testcase.",
"entry_uids": [
"test_parametrized__val_1",
"test_parametrized__val_2",
"test_parametrized__val_3",
],
"env_status": None,
"fix_spec_path": None,
"name": "test_parametrized",
"parent_uids": [
"InteractiveAPITest",
"ExampleMTest",
"ExampleSuite",
],
"part": None,
"runtime_status": "ready",
"status": "unknown",
"status_override": None,
"tags": {},
"timer": {},
"uid": "test_parametrized",
},
],
),
(
"/report/tests/ExampleMTest/suites/ExampleSuite/testcases/test_passes",
{
"category": "testcase",
"description": "Testcase that passes.",
"entries": [],
"logs": [],
"name": "test_passes",
"parent_uids": [
"InteractiveAPITest",
"ExampleMTest",
"ExampleSuite",
],
"status": "unknown",
"runtime_status": "ready",
"status_override": None,
"suite_related": False,
"tags": {},
"timer": {},
"type": "TestCaseReport",
"uid": "test_passes",
"status_reason": None,
},
),
(
"/report/tests/ExampleMTest/suites/ExampleSuite/testcases/"
"test_parametrized/parametrizations",
[
{
"category": "testcase",
"description": "Parametrized testcase.",
"entries": [],
"logs": [],
"name": "test_parametrized <val=1>",
"parent_uids": [
"InteractiveAPITest",
"ExampleMTest",
"ExampleSuite",
"test_parametrized",
],
"status": "unknown",
"runtime_status": "ready",
"status_override": None,
"suite_related": False,
"tags": {},
"timer": {},
"type": "TestCaseReport",
"uid": "test_parametrized__val_1",
"status_reason": None,
},
{
"category": "testcase",
"description": "Parametrized testcase.",
"entries": [],
"logs": [],
"name": "test_parametrized <val=2>",
"parent_uids": [
"InteractiveAPITest",
"ExampleMTest",
"ExampleSuite",
"test_parametrized",
],
"status": "unknown",
"runtime_status": "ready",
"status_override": None,
"suite_related": False,
"tags": {},
"timer": {},
"type": "TestCaseReport",
"uid": "test_parametrized__val_2",
"status_reason": None,
},
{
"category": "testcase",
"description": "Parametrized testcase.",
"entries": [],
"logs": [],
"name": "test_parametrized <val=3>",
"parent_uids": [
"InteractiveAPITest",
"ExampleMTest",
"ExampleSuite",
"test_parametrized",
],
"status": "unknown",
"runtime_status": "ready",
"status_override": None,
"suite_related": False,
"tags": {},
"timer": {},
"type": "TestCaseReport",
"uid": "test_parametrized__val_3",
"status_reason": None,
},
],
),
(
"/report/tests/ExampleMTest/suites/ExampleSuite/testcases/"
"test_parametrized/parametrizations/test_parametrized__val_1",
{
"category": "testcase",
"description": "Parametrized testcase.",
"entries": [],
"logs": [],
"name": "test_parametrized <val=1>",
"parent_uids": [
"InteractiveAPITest",
"ExampleMTest",
"ExampleSuite",
"test_parametrized",
],
"status": "unknown",
"runtime_status": "ready",
"status_override": None,
"suite_related": False,
"tags": {},
"timer": {},
"type": "TestCaseReport",
"uid": "test_parametrized__val_1",
"status_reason": None,
},
),
]
# Expected results of testcases.
EXPECTED_TESTCASE_RESULTS = [
("test_passes", "passed"),
("test_fails", "failed"),
("test_logs", "passed"),
("test_parametrized", "passed"),
]
# Expected results of parametrized testcases.
EXPECTED_PARAM_TESTCASE_RESULTS = [
("test_parametrized__val_1", "passed"),
("test_parametrized__val_2", "passed"),
("test_parametrized__val_3", "passed"),
]
def test_initial_get(plan):
"""
Test GETting the report state through each of the API resources at the
start of day, i.e. before any tests have been run.
"""
host, port = plan.interactive.http_handler_info
assert host == "0.0.0.0"
for resource_path, expected_json in EXPECTED_INITIAL_GET:
rsp = requests.get(
"http://localhost:{port}/api/v1/interactive{resource}".format(
port=port, resource=resource_path
)
)
assert rsp.status_code == 200
test_api.compare_json(rsp.json(), expected_json)
def test_run_all_tests(plan):
"""
Test running all tests.
"""
host, port = plan.interactive.http_handler_info
assert host == "0.0.0.0"
report_url = "http://localhost:{}/api/v1/interactive/report".format(port)
rsp = requests.get(report_url)
assert rsp.status_code == 200
report_json = rsp.json()
last_hash = report_json["hash"]
# Trigger all tests to run by updating the report status to RUNNING
# and PUTting back the data.
report_json["runtime_status"] = report.RuntimeStatus.RUNNING
rsp = requests.put(report_url, json=report_json)
assert rsp.status_code == 200
updated_json = rsp.json()
test_api.compare_json(updated_json, report_json)
assert updated_json["hash"] != last_hash
timing.wait(
functools.partial(
_check_test_status, report_url, "failed", updated_json["hash"]
),
interval=0.2,
timeout=300,
raise_on_timeout=True,
)
# After running all tests, check that we can retrieve the attached file.
_test_attachments(port)
def test_run_mtest(plan):
"""Test running a single MultiTest."""
host, port = plan.interactive.http_handler_info
assert host == "0.0.0.0"
mtest_url = "http://localhost:{}/api/v1/interactive/report/tests/ExampleMTest".format(
port
)
rsp = requests.get(mtest_url)
assert rsp.status_code == 200
mtest_json = rsp.json()
# Trigger all tests to run by updating the report status to RUNNING
# and PUTting back the data.
mtest_json["runtime_status"] = report.RuntimeStatus.RUNNING
rsp = requests.put(mtest_url, json=mtest_json)
assert rsp.status_code == 200
updated_json = rsp.json()
test_api.compare_json(updated_json, mtest_json)
assert updated_json["hash"] != mtest_json["hash"]
timing.wait(
functools.partial(
_check_test_status, mtest_url, "failed", updated_json["hash"]
),
interval=0.2,
timeout=300,
raise_on_timeout=True,
)
def test_environment_control(plan):
"""Test starting and stopping the environment."""
host, port = plan.interactive.http_handler_info
assert host == "0.0.0.0"
mtest_url = "http://localhost:{}/api/v1/interactive/report/tests/ExampleMTest".format(
port
)
rsp = requests.get(mtest_url)
assert rsp.status_code == 200
mtest_json = rsp.json()
# Trigger the environment to start by setting the env_status to STARTING
# and PUTting back the data.
mtest_json["env_status"] = entity.ResourceStatus.STARTING
rsp = requests.put(mtest_url, json=mtest_json)
assert rsp.status_code == 200
updated_json = rsp.json()
test_api.compare_json(updated_json, mtest_json)
assert updated_json["hash"] != mtest_json["hash"]
# Wait for the environment to become STARTED.
timing.wait(
functools.partial(
_check_env_status,
mtest_url,
entity.ResourceStatus.STARTED,
updated_json["hash"],
),
interval=0.2,
timeout=300,
raise_on_timeout=True,
)
# Now trigger the environment to stop by setting the env_status to STOPPING
# and PUTting back the data.
mtest_json = updated_json
mtest_json["env_status"] = entity.ResourceStatus.STOPPING
rsp = requests.put(mtest_url, json=mtest_json)
assert rsp.status_code == 200
updated_json = rsp.json()
test_api.compare_json(updated_json, mtest_json)
assert updated_json["hash"] != mtest_json["hash"]
# Wait for the environment to become STOPPED.
timing.wait(
functools.partial(
_check_env_status,
mtest_url,
entity.ResourceStatus.STOPPED,
updated_json["hash"],
),
interval=0.2,
timeout=30,
raise_on_timeout=True,
)
def test_run_suite(plan):
"""Test running a single test suite."""
host, port = plan.interactive.http_handler_info
assert host == "0.0.0.0"
suite_url = (
"http://localhost:{}/api/v1/interactive/report/tests/ExampleMTest/"
"suites/ExampleSuite".format(port)
)
rsp = requests.get(suite_url)
assert rsp.status_code == 200
suite_json = rsp.json()
# Trigger all tests to run by updating the report status to RUNNING
# and PUTting back the data.
suite_json["runtime_status"] = report.RuntimeStatus.RUNNING
rsp = requests.put(suite_url, json=suite_json)
assert rsp.status_code == 200
updated_json = rsp.json()
test_api.compare_json(updated_json, suite_json)
assert updated_json["hash"] != suite_json["hash"]
timing.wait(
functools.partial(
_check_test_status, suite_url, "failed", updated_json["hash"]
),
interval=0.2,
timeout=300,
raise_on_timeout=True,
)
def test_run_testcase(plan):
"""Test running a single testcase."""
host, port = plan.interactive.http_handler_info
assert host == "0.0.0.0"
for testcase_name, expected_result in EXPECTED_TESTCASE_RESULTS:
testcase_url = (
"http://localhost:{port}/api/v1/interactive/report/tests/"
"ExampleMTest/suites/ExampleSuite/testcases/{testcase}".format(
port=port, testcase=testcase_name
)
)
rsp = requests.get(testcase_url)
assert rsp.status_code == 200
testcase_json = rsp.json()
# Trigger all tests to run by updating the report status to RUNNING
# and PUTting back the data.
testcase_json["runtime_status"] = report.RuntimeStatus.RUNNING
rsp = requests.put(testcase_url, json=testcase_json)
assert rsp.status_code == 200
updated_json = rsp.json()
test_api.compare_json(updated_json, testcase_json)
assert updated_json["hash"] != testcase_json["hash"]
timing.wait(
functools.partial(
_check_test_status,
testcase_url,
expected_result,
updated_json["hash"],
),
interval=0.2,
timeout=300,
raise_on_timeout=True,
)
def test_export_report(plan):
host, port = plan.interactive.http_handler_info
assert host == "0.0.0.0"
export_url = (
"http://localhost:{port}/api/v1/interactive/report/export".format(
port=port
)
)
rsp = requests.get(export_url)
assert rsp.status_code == 200
result = rsp.json()
assert len(result["history"]) == 0
assert "XML exporter" in result["available"]
rsp = requests.post(export_url, json={"exporters": ["XML exporter"]})
assert rsp.status_code == 200
result = rsp.json()
assert len(result["history"]) == 1
def test_run_param_testcase(plan):
"""Test running a single parametrized testcase."""
host, port = plan.interactive.http_handler_info
assert host == "0.0.0.0"
for param_name, expected_result in EXPECTED_PARAM_TESTCASE_RESULTS:
testcase_url = (
"http://localhost:{port}/api/v1/interactive/report/tests/"
"ExampleMTest/suites/ExampleSuite/testcases/test_parametrized/"
"parametrizations/{param}".format(port=port, param=param_name)
)
rsp = requests.get(testcase_url)
assert rsp.status_code == 200
testcase_json = rsp.json()
# Trigger all tests to run by updating the report status to RUNNING
# and PUTting back the data.
testcase_json["runtime_status"] = report.RuntimeStatus.RUNNING
rsp = requests.put(testcase_url, json=testcase_json)
assert rsp.status_code == 200
updated_json = rsp.json()
test_api.compare_json(updated_json, testcase_json)
assert updated_json["hash"] != testcase_json["hash"]
timing.wait(
functools.partial(
_check_test_status,
testcase_url,
expected_result,
updated_json["hash"],
),
interval=0.2,
timeout=300,
raise_on_timeout=True,
)
def _test_attachments(port):
"""
Test retrieving an attached file. The test_attach testcase needs to have
been run first.
"""
all_attachments_url = (
"http://localhost:{port}/api/v1/interactive/attachments".format(
port=port
)
)
rsp = requests.get(all_attachments_url)
assert rsp.status_code == 200
attachments = rsp.json()
assert len(attachments) == 1
assert attachments[0].startswith("attached_log")
attachment_uid = attachments[0]
single_attachment_url = all_attachments_url + "/" + attachment_uid
rsp = requests.get(single_attachment_url)
assert rsp.status_code == 200
assert rsp.text == "This text will be written into the attached file."
def _check_test_status(test_url, expected_status, last_hash):
"""
Check the test status by polling the report resource. If the test is
still running, return False. Otherwise assert that the status matches
the expected status and return True.
"""
rsp = requests.get(test_url)
assert rsp.status_code == 200
report_json = rsp.json()
if report_json["runtime_status"] == report.RuntimeStatus.RUNNING:
return False
else:
assert report_json["runtime_status"] == report.RuntimeStatus.FINISHED
assert report_json["status"] == expected_status
assert report_json["hash"] != last_hash
return True
def _check_env_status(test_url, expected_status, last_hash):
"""
Check the environment status by polling the report resource. Return
True if the status matches the expected status, False otherwise.
"""
rsp = requests.get(test_url)
assert rsp.status_code == 200
report_json = rsp.json()
if report_json["env_status"] == expected_status:
assert report_json["hash"] != last_hash
return True
else:
return False
|
the-stack_106_24082 | """
Ory Kratos API
    Documentation for all public and administrative Ory Kratos APIs. Public and administrative APIs are exposed on different ports. Public APIs can face the public internet without any protection while administrative APIs should never be exposed without prior authorization. To protect the administrative API port you should use something like Nginx, Ory Oathkeeper, or any other technology capable of authorizing incoming requests. # noqa: E501
The version of the OpenAPI document: v0.7.1-alpha.1
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from ory_kratos_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class IdResponse(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'id': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'id': 'Id', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, id, *args, **kwargs): # noqa: E501
"""IdResponse - a model defined in OpenAPI
Args:
id (str): The id of the newly created object.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
                traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.id = id
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
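# Illustrative usage sketch added for this write-up (not part of the generated
# client): the model is normally built from a deserialized API response; the
# id value below is made up.
if __name__ == "__main__":
    example = IdResponse(id="dead-beef-0001")
    print(example.id)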
|
the-stack_106_24083 | import pandas as pd
import os
import torch
from pathlib import Path
import pickle
import logging
import shutil
from torch.utils.data import (
Dataset,
TensorDataset,
DataLoader,
RandomSampler,
SequentialSampler,
)
from torch.utils.data.distributed import DistributedSampler
from transformers import AutoTokenizer
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
            label: (Optional) string or [string]. The label(s) of the example. This
            should be specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
if isinstance(label, list):
self.label = label
elif label:
self.label = str(label)
else:
self.label = None
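# Illustrative sketch added for this write-up (not part of the original
# module): one single-sentence example and one sentence-pair example. The
# guids, texts and labels are made up.
def _input_example_sketch():
    single = InputExample(guid=0, text_a="the movie was great", label="positive")
    pair = InputExample(
        guid=1,
        text_a="A man is eating.",
        text_b="Someone is having a meal.",
        label="entailment",
    )
    return single, pair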
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_id):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
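# Illustrative sketch added for this write-up (not part of the original
# module): the heuristic above trims the longer list one token at a time, so a
# 6/3 split truncated to a total length of 7 ends up as 4/3 rather than being
# shortened proportionally.
def _truncate_seq_pair_sketch():
    tokens_a = ["a"] * 6
    tokens_b = ["b"] * 3
    _truncate_seq_pair(tokens_a, tokens_b, max_length=7)
    assert (len(tokens_a), len(tokens_b)) == (4, 3)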
def convert_examples_to_features(
examples,
label_list,
max_seq_length,
tokenizer,
output_mode="classification",
cls_token_at_end=False,
pad_on_left=False,
cls_token="[CLS]",
sep_token="[SEP]",
pad_token=0,
sequence_a_segment_id=0,
sequence_b_segment_id=1,
cls_token_segment_id=1,
pad_token_segment_id=0,
mask_padding_with_zero=True,
logger=None,
):
""" Loads a data file into a list of `InputBatch`s
`cls_token_at_end` define the location of the CLS token:
- False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
- True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
`cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet)
"""
label_map = {label: i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
if logger:
logger.info("Writing example %d of %d" % (ex_index, len(examples)))
tokens_a = tokenizer.tokenize(str(example.text_a))
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(str(example.text_b))
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[: (max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
        # used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = tokens_a + [sep_token]
segment_ids = [sequence_a_segment_id] * len(tokens)
if tokens_b:
tokens += tokens_b + [sep_token]
segment_ids += [sequence_b_segment_id] * (len(tokens_b) + 1)
if cls_token_at_end:
tokens = tokens + [cls_token]
segment_ids = segment_ids + [cls_token_segment_id]
else:
tokens = [cls_token] + tokens
segment_ids = [cls_token_segment_id] + segment_ids
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = max_seq_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
input_mask = (
[0 if mask_padding_with_zero else 1] * padding_length
) + input_mask
segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
else:
input_ids = input_ids + ([pad_token] * padding_length)
input_mask = input_mask + (
[0 if mask_padding_with_zero else 1] * padding_length
)
segment_ids = segment_ids + ([pad_token_segment_id] * padding_length)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
if isinstance(example.label, list):
label_id = []
for label in example.label:
label_id.append(float(label))
else:
if example.label is not None:
label_id = label_map[example.label]
else:
label_id = ""
features.append(
InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id,
)
)
return features
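# Usage sketch added for illustration (not part of the original module): build
# fixed-length features for two toy examples with a Hugging Face tokenizer.
# The model name "bert-base-uncased" and the label names are assumptions, and
# calling this downloads the tokenizer files.
def _convert_examples_sketch():
    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    examples = [
        InputExample(guid=0, text_a="the movie was great", label="positive"),
        InputExample(guid=1, text_a="the movie was awful", label="negative"),
    ]
    features = convert_examples_to_features(
        examples,
        label_list=["negative", "positive"],
        max_seq_length=32,
        tokenizer=tokenizer,
        cls_token=tokenizer.cls_token,
        sep_token=tokenizer.sep_token,
    )
    # Every feature is padded (or truncated) to exactly max_seq_length ids.
    assert all(len(f.input_ids) == 32 for f in features)
    return features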
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, filename, size=-1):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, filename, size=-1):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_test_examples(self, filename, size=-1):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
class TextProcessor(DataProcessor):
def __init__(self, data_dir, label_dir):
self.data_dir = data_dir
self.label_dir = label_dir
self.labels = None
def get_train_examples(
self, filename="train.csv", text_col="text", label_col="label", size=-1
):
if size == -1:
data_df = pd.read_csv(os.path.join(self.data_dir, filename))
return self._create_examples(
data_df, "train", text_col=text_col, label_col=label_col
)
else:
data_df = pd.read_csv(os.path.join(self.data_dir, filename))
# data_df['comment_text'] = data_df['comment_text'].apply(cleanHtml)
return self._create_examples(
data_df.sample(size), "train", text_col=text_col, label_col=label_col
)
def get_dev_examples(
self, filename="val.csv", text_col="text", label_col="label", size=-1
):
if size == -1:
data_df = pd.read_csv(os.path.join(self.data_dir, filename))
return self._create_examples(
data_df, "dev", text_col=text_col, label_col=label_col
)
else:
data_df = pd.read_csv(os.path.join(self.data_dir, filename))
return self._create_examples(
data_df.sample(size), "dev", text_col=text_col, label_col=label_col
)
def get_test_examples(
self, filename="val.csv", text_col="text", label_col="label", size=-1
):
data_df = pd.read_csv(os.path.join(self.data_dir, filename))
# data_df['comment_text'] = data_df['comment_text'].apply(cleanHtml)
if size == -1:
return self._create_examples(
data_df, "test", text_col=text_col, label_col=None
)
else:
return self._create_examples(
data_df.sample(size), "test", text_col=text_col, label_col=None
)
def get_labels(self, filename="labels.csv"):
"""See base class."""
if self.labels is None:
self.labels = list(
pd.read_csv(os.path.join(self.label_dir, filename), header=None)[0]
.astype("str")
.values
)
return self.labels
def _create_examples(self, df, set_type, text_col, label_col):
"""Creates examples for the training and dev sets."""
if label_col is None:
return list(
df.apply(
lambda row: InputExample(
guid=row.index, text_a=str(row[text_col]), label=None
),
axis=1,
)
)
else:
return list(
df.apply(
lambda row: InputExample(
guid=row.index,
text_a=str(row[text_col]),
label=str(row[label_col]),
),
axis=1,
)
)
class MultiLabelTextProcessor(TextProcessor):
def _create_examples(self, df, set_type, text_col, label_col):
def _get_labels(row, label_col):
if isinstance(label_col, list):
return list(row[label_col])
else:
# create one hot vector of labels
label_list = self.get_labels()
labels = [0] * len(label_list)
# cast with string in case labels are integers
labels[label_list.index(str(row[label_col]))] = 1
return labels
"""Creates examples for the training and dev sets."""
if label_col is None:
return list(
df.apply(
lambda row: InputExample(
guid=row.index, text_a=str(row[text_col]), label=[]
),
axis=1,
)
)
else:
return list(
df.apply(
lambda row: InputExample(
guid=row.index,
text_a=str(row[text_col]),
label=_get_labels(row, label_col),
),
axis=1,
)
)
class LRFinderDataset(Dataset):
def __init__(self, data_dir, filename, text_col, label_col):
super().__init__()
self.text_col = text_col
self.label_col = label_col
self.data = pd.read_csv(os.path.join(data_dir, filename))
def __getitem__(self, idx):
return self.data.loc[idx, self.text_col], self.data.loc[idx, self.label_col]
def __len__(self):
return self.data.shape[0]
class BertDataBunch(object):
def __init__(
self,
data_dir,
label_dir,
tokenizer,
train_file="train.csv",
val_file="val.csv",
test_data=None,
label_file="labels.csv",
text_col="text",
label_col="label",
batch_size_per_gpu=16,
max_seq_length=512,
multi_gpu=True,
multi_label=False,
backend="nccl",
model_type="bert",
logger=None,
clear_cache=False,
no_cache=False,
custom_sampler=None,
pos_weight=None,
weight=None
):
# just in case someone passes string instead of Path
if isinstance(data_dir, str):
data_dir = Path(data_dir)
if isinstance(label_dir, str):
label_dir = Path(label_dir)
if isinstance(tokenizer, str):
# instantiate the new tokeniser object using the tokeniser name
tokenizer = AutoTokenizer.from_pretrained(tokenizer, use_fast=True)
self.tokenizer = tokenizer
self.data_dir = data_dir
self.train_file = train_file
self.val_file = val_file
self.test_data = test_data
self.cache_dir = data_dir / "cache"
self.max_seq_length = max_seq_length
self.batch_size_per_gpu = batch_size_per_gpu
self.train_dl = None
self.val_dl = None
self.test_dl = None
self.multi_label = multi_label
self.n_gpu = 1
self.no_cache = no_cache
self.model_type = model_type
self.output_mode = "classification"
self.custom_sampler = custom_sampler
self.pos_weight = pos_weight
self.weight = weight
if logger is None:
logger = logging.getLogger()
self.logger = logger
if multi_gpu:
self.n_gpu = torch.cuda.device_count()
if clear_cache:
shutil.rmtree(self.cache_dir, ignore_errors=True)
if multi_label:
processor = MultiLabelTextProcessor(data_dir, label_dir)
else:
processor = TextProcessor(data_dir, label_dir)
self.labels = processor.get_labels(label_file)
if train_file:
# Train DataLoader
train_examples = None
cached_features_file = os.path.join(
self.cache_dir,
"cached_{}_{}_{}_{}_{}".format(
self.model_type.replace("/", "-"),
"train",
"multi_label" if self.multi_label else "multi_class",
str(self.max_seq_length),
os.path.basename(train_file),
),
)
if os.path.exists(cached_features_file) is False or self.no_cache is True:
train_examples = processor.get_train_examples(
train_file, text_col=text_col, label_col=label_col
)
train_dataset = self.get_dataset_from_examples(
train_examples, "train", no_cache=self.no_cache
)
self.train_batch_size = self.batch_size_per_gpu * max(1, self.n_gpu)
if self.custom_sampler is not None:
train_sampler = self.custom_sampler
else:
train_sampler = RandomSampler(train_dataset)
self.train_dl = DataLoader(
train_dataset, sampler=train_sampler, batch_size=self.train_batch_size
)
if val_file:
# Validation DataLoader
val_examples = None
cached_features_file = os.path.join(
self.cache_dir,
"cached_{}_{}_{}_{}_{}".format(
self.model_type.replace("/", "-"),
"dev",
"multi_label" if self.multi_label else "multi_class",
str(self.max_seq_length),
os.path.basename(val_file),
),
)
if os.path.exists(cached_features_file) is False:
val_examples = processor.get_dev_examples(
val_file, text_col=text_col, label_col=label_col
)
val_dataset = self.get_dataset_from_examples(
val_examples, "dev", no_cache=self.no_cache
)
# no grads necessary, hence double val batch size
self.val_batch_size = self.batch_size_per_gpu * 2 * max(1, self.n_gpu)
val_sampler = SequentialSampler(val_dataset)
self.val_dl = DataLoader(
val_dataset, sampler=val_sampler, batch_size=self.val_batch_size
)
if test_data:
# Test set loader for predictions
test_examples = []
input_data = []
for index, text in enumerate(test_data):
test_examples.append(InputExample(index, text))
input_data.append({"id": index, "text": text})
test_dataset = self.get_dataset_from_examples(
test_examples, "test", is_test=True, no_cache=self.no_cache
)
self.test_batch_size = self.batch_size_per_gpu * max(1, self.n_gpu)
test_sampler = SequentialSampler(test_dataset)
self.test_dl = DataLoader(
test_dataset, sampler=test_sampler, batch_size=self.test_batch_size
)
def get_dl_from_texts(self, texts):
test_examples = []
input_data = []
for index, text in enumerate(texts):
test_examples.append(InputExample(index, text, label=None))
input_data.append({"id": index, "text": text})
test_dataset = self.get_dataset_from_examples(
test_examples, "test", is_test=True, no_cache=True
)
test_sampler = SequentialSampler(test_dataset)
return DataLoader(
test_dataset, sampler=test_sampler, batch_size=self.batch_size_per_gpu
)
def save(self, filename="databunch.pkl"):
tmp_path = self.data_dir / "tmp"
tmp_path.mkdir(exist_ok=True)
with open(str(tmp_path / filename), "wb") as f:
pickle.dump(self, f)
def get_dataset_from_examples(
self, examples, set_type="train", is_test=False, no_cache=False
):
if set_type == "train":
file_name = self.train_file
elif set_type == "dev":
file_name = self.val_file
elif set_type == "test":
file_name = (
"test" # test is not supposed to be a file - just a list of texts
)
cached_features_file = os.path.join(
self.cache_dir,
"cached_{}_{}_{}_{}_{}".format(
self.model_type.replace("/", "-"),
set_type,
"multi_label" if self.multi_label else "multi_class",
str(self.max_seq_length),
os.path.basename(file_name),
),
)
if os.path.exists(cached_features_file) and no_cache is False:
self.logger.info(
"Loading features from cached file %s", cached_features_file
)
features = torch.load(cached_features_file)
else:
# Create tokenized and numericalized features
features = convert_examples_to_features(
examples,
label_list=self.labels,
max_seq_length=self.max_seq_length,
tokenizer=self.tokenizer,
output_mode=self.output_mode,
# xlnet has a cls token at the end
cls_token_at_end=bool(self.model_type in ["xlnet"]),
cls_token=self.tokenizer.cls_token,
sep_token=self.tokenizer.sep_token,
cls_token_segment_id=2 if self.model_type in ["xlnet"] else 0,
# pad on the left for xlnet
pad_on_left=bool(self.model_type in ["xlnet"]),
pad_token_segment_id=4 if self.model_type in ["xlnet"] else 0,
logger=self.logger,
)
# Create folder if it doesn't exist
if no_cache is False:
self.cache_dir.mkdir(exist_ok=True)
self.logger.info(
"Saving features into cached file %s", cached_features_file
)
torch.save(features, cached_features_file)
# Convert to Tensors and build dataset
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor(
[f.input_mask for f in features], dtype=torch.long
)
all_segment_ids = torch.tensor(
[f.segment_ids for f in features], dtype=torch.long
)
if is_test is False: # labels not available for test set
if self.multi_label:
all_label_ids = torch.tensor(
[f.label_id for f in features], dtype=torch.float
)
else:
all_label_ids = torch.tensor(
[f.label_id for f in features], dtype=torch.long
)
dataset = TensorDataset(
all_input_ids, all_input_mask, all_segment_ids, all_label_ids
)
else:
all_label_ids = []
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids)
return dataset
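# Usage sketch added for illustration (not part of the original module): the
# ./data directory layout (train.csv, val.csv, labels.csv) and the model name
# are assumptions; the keyword arguments mirror the constructor signature above.
def _databunch_sketch():
    databunch = BertDataBunch(
        data_dir="./data",
        label_dir="./data",
        tokenizer="bert-base-uncased",
        train_file="train.csv",
        val_file="val.csv",
        label_file="labels.csv",
        text_col="text",
        label_col="label",
        batch_size_per_gpu=16,
        max_seq_length=128,
        multi_gpu=False,
        multi_label=False,
        model_type="bert",
    )
    return databunch.train_dl, databunch.val_dl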
|
the-stack_106_24085 | #!python
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A utility script to make sure that the minidump produced by SyzyASan gets
symbolized correctly.
"""
import logging
import minidump_symbolizer
import optparse
import os
import sys
_USAGE = """\
%prog [options]
Run the Asan symbolizer script on a minidump and ensure that the symbolization
is correct.
"""
def _ParseArguments():
"""Parse the command line arguments.
Returns:
The options on the command line.
"""
parser = optparse.OptionParser(usage=_USAGE)
parser.add_option('--minidump', help='The minidump to process.')
parser.add_option('--bug-type', help='The expected crash type.')
parser.add_option('--access-mode', help='The expected access mode.')
parser.add_option('--access-size', help='The expected access size.')
parser.add_option('--corrupt-heap', action='store_true', default=False,
help='Indicates if we expect the heap to be corrupt')
  opts, _ = parser.parse_args()
  # Default to None so the check below fails cleanly if no cdb.exe is found.
  opts.cdb_path = None
for path in minidump_symbolizer._DEFAULT_CDB_PATHS:
if os.path.isfile(path):
opts.cdb_path = path
break
if not opts.cdb_path:
parser.error('Unable to find cdb.exe.')
return opts
def main():
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
opts = _ParseArguments()
report = minidump_symbolizer.ProcessMinidump(opts.minidump,
opts.cdb_path,
None)
if report.bad_access_info['error_type'] != opts.bug_type:
logger.error('Unexpected error type (expected %s, got %s).',
opts.bug_type, report.bad_access_info['error_type'])
return 1
if report.bad_access_info['access_mode'] != opts.access_mode:
logger.error('Unexpected access mode (expected %s, got %s).',
opts.access_mode, report.bad_access_info['access_mode'])
return 1
if report.bad_access_info['access_size'] != opts.access_size:
logger.error('Unexpected access size (expected %s, got %s).',
opts.access_size, report.bad_access_info['access_size'])
return 1
heap_is_corrupt = report.bad_access_info['heap_is_corrupt'] != '0'
if opts.corrupt_heap != heap_is_corrupt:
logger.error('Unexpected heap corruption state.')
return 1
return 0
if __name__ == '__main__':
sys.exit(main())
|
the-stack_106_24087 | # -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
import qiita_db as qdb
from future.utils import viewitems
PJ = qdb.processing_job.ProcessingJob
# selecting all artifact ids
with qdb.sql_connection.TRN:
sql = """SELECT artifact_id FROM qiita.artifact"""
qdb.sql_connection.TRN.add(sql, [])
all_artifacts = qdb.sql_connection.TRN.execute_fetchindex()
nodes = {}
for aid in all_artifacts:
aid = aid[0]
with qdb.sql_connection.TRN:
sql = """SELECT parent_id, artifact_id
FROM qiita.artifact_descendants(%s)"""
qdb.sql_connection.TRN.add(sql, [aid])
edges = [tuple(e)
for e in qdb.sql_connection.TRN.execute_fetchindex()]
for parent, child in edges:
# By creating all the artifacts here we are saving DB calls
if parent not in nodes:
nodes[parent] = qdb.artifact.Artifact(parent)
if child not in nodes:
nodes[child] = qdb.artifact.Artifact(child)
job_id = None
with qdb.sql_connection.TRN:
sql = """SELECT processing_job_id
FROM qiita.artifact_processing_job
JOIN qiita.processing_job USING (processing_job_id)
JOIN qiita.processing_job_status USING
(processing_job_status_id)
WHERE artifact_id = %s"""
qdb.sql_connection.TRN.add(sql, [nodes[parent].id])
job_ids = qdb.sql_connection.TRN.execute_fetchflatten()
for j_id in job_ids:
job = qdb.processing_job.ProcessingJob(j_id)
if job.status == 'success' and job.outputs:
for _, a in viewitems(job.outputs):
if a.id == child:
job_id = job.id
break
if job_id is None:
# inserting the missing values
c = nodes[child]
cmd_out = c.artifact_type
if cmd_out == 'Demultiplexed':
cmd_out = 'demultiplexed'
elif cmd_out == 'BIOM':
cmd_out = 'OTU table'
else:
# the actual DB has other possible values in
# artifact_type
continue
cmd_out_id = qdb.util.convert_to_id(
cmd_out, "command_output", "name")
# the owner of the study will create the job
job = PJ.create(c.study.owner, c.processing_parameters, True)
with qdb.sql_connection.TRN:
sql = """INSERT INTO
qiita.artifact_output_processing_job
(artifact_id, processing_job_id,
command_output_id)
VALUES (%s, %s, %s)"""
qdb.sql_connection.TRN.add(
sql, [child, job.id, cmd_out_id])
job._update_children({parent: child})
job._set_status('success')
|
the-stack_106_24089 | import argparse
import re
import sys
from itertools import groupby
from jinja2 import Environment, FileSystemLoader, Template
from pathlib import Path
script_path = Path(__file__).parent.resolve()
sys.path.append(str(script_path.parent))
from utils import createFolder, deleteFolder, genSTM32List
# Base path
core_path = script_path.parent.parent
SrcWrapper_path = ""
HALDrivers_path = ""
CMSIS_Device_ST_path = ""
CMSIS_DSP_lib_path = ""
system_path = ""
# CMSIS outside of the core. Can be updated by arg
CMSIS_path = core_path.parent / "ArduinoModule-CMSIS" / "CMSIS_5"
CMSIS_DSPSrc_path = ""
# Out sources files
HALoutSrc_path = ""
LLoutSrc_path = ""
CMSIS_DSP_outSrc_path = ""
# Out include files
LLoutInc_path = ""
# Out startup files
CMSIS_Startupfile = ""
# Out system stm32 files
system_stm32_outfile = ""
# List of STM32 series
stm32_series = []
# Templating
templates_dir = script_path / "templates"
all_ll_h_file = "stm32yyxx_ll.h"
ll_h_file = "stm32yyxx_ll_ppp.h"
c_file = "stm32yyxx_zz_ppp.c"
stm32_def_build_file = "stm32_def_build.h"
system_stm32_file = "system_stm32yyxx.c"
# Create the jinja2 environment.
j2_env = Environment(
loader=FileSystemLoader(str(templates_dir)), trim_blocks=True, lstrip_blocks=True
)
all_ll_header_file_template = j2_env.get_template(all_ll_h_file)
ll_h_file_template = j2_env.get_template(ll_h_file)
c_file_template = j2_env.get_template(c_file)
dsp_file_template = Template('#include "../Source/{{ dsp }}/{{ dsp }}.c"')
stm32_def_build_template = j2_env.get_template(stm32_def_build_file)
system_stm32_template = j2_env.get_template(system_stm32_file)
# re
peripheral_c_regex = re.compile(r"stm32\w+_[h]?[al][l]_(.*).c$")
peripheral_h_regex = re.compile(r"stm32\w+_(\w+).h$")
def checkConfig(arg_core, arg_cmsis):
global core_path
global CMSIS_path
global CMSIS_DSPSrc_path
global SrcWrapper_path
global HALDrivers_path
global CMSIS_Device_ST_path
global CMSIS_DSP_lib_path
global CMSIS_DSP_outSrc_path
global CMSIS_Startupfile
global system_path
global system_stm32_outfile
global HALoutSrc_path
global LLoutSrc_path
global LLoutInc_path
if arg_core is not None:
core_path = Path(arg_core).resolve()
CMSIS_path = core_path.parent / "ArduinoModule-CMSIS" / "CMSIS_5"
if not core_path.is_dir():
print(f"Could not find {core_path}")
exit(1)
system_path = core_path / "system"
SrcWrapper_path = core_path / "libraries" / "SrcWrapper"
HALDrivers_path = system_path / "Drivers"
CMSIS_Device_ST_path = system_path / "Drivers" / "CMSIS" / "Device" / "ST"
CMSIS_DSP_lib_path = core_path / "libraries" / "CMSIS_DSP"
CMSIS_DSP_outSrc_path = CMSIS_DSP_lib_path / "src"
CMSIS_Startupfile = core_path / "cores" / "arduino" / "stm32" / stm32_def_build_file
system_stm32_outfile = SrcWrapper_path / "src" / "stm32" / system_stm32_file
HALoutSrc_path = SrcWrapper_path / "src" / "HAL"
LLoutSrc_path = SrcWrapper_path / "src" / "LL"
LLoutInc_path = core_path / "cores" / "arduino" / "stm32" / "LL"
if arg_cmsis is not None:
CMSIS_path = Path(arg_cmsis).resolve()
CMSIS_DSPSrc_path = CMSIS_path / "CMSIS" / "DSP" / "Source"
def printCMSISStartup(log):
filelist = sorted(CMSIS_Device_ST_path.glob("**/startup_*.s"))
filelist = [pth.name for pth in filelist]
if len(filelist):
if log:
print(f"Number of startup files: {len(filelist)}")
        # Some MCUs have two startup files.
        # Ex: the WL series has one for cm0plus and one for cm4.
        # In that case the series value line is the same, so an extra define
        # is needed to select the correct one.
group_startup_list = [
list(g) for _, g in groupby(filelist, lambda x: re.split("_|\\.", x)[1])
]
cmsis_list = []
for fn_list in group_startup_list:
if len(fn_list) == 1:
valueline = re.split("_|\\.", fn_list[0])
vline = valueline[1].upper().replace("X", "x")
cmsis_list.append({"vline": vline, "fn": fn_list[0], "cm": ""})
else:
for fn in fn_list:
valueline = re.split("_|\\.", fn)
vline = valueline[1].upper().replace("X", "x")
cm = valueline[2].upper()
cmsis_list.append({"vline": vline, "fn": fn, "cm": cm})
out_file = open(CMSIS_Startupfile, "w", newline="\n")
out_file.write(stm32_def_build_template.render(cmsis_list=cmsis_list))
out_file.close()
else:
if log:
print("No startup files found!")
def printSystemSTM32(log):
filelist = sorted(system_path.glob("STM32*/system_stm32*.c"))
if len(filelist):
if log:
print(f"Number of system stm32 files: {len(filelist)}")
system_list = []
for fp in filelist:
system_list.append({"serie": fp.parent.name, "fn": fp.name})
out_file = open(system_stm32_outfile, "w", newline="\n")
out_file.write(system_stm32_template.render(system_list=system_list))
out_file.close()
else:
if log:
print("No system stm32 files found!")
def wrap(arg_core, arg_cmsis, log):
global stm32_series
# check config have to be done first
checkConfig(arg_core, arg_cmsis)
stm32_series = genSTM32List(HALDrivers_path, "")
# Remove old file
deleteFolder(HALoutSrc_path)
createFolder(HALoutSrc_path)
deleteFolder(LLoutSrc_path)
createFolder(LLoutSrc_path)
deleteFolder(LLoutInc_path)
createFolder(LLoutInc_path)
if CMSIS_Startupfile.is_file():
CMSIS_Startupfile.unlink()
all_ll_h_list = []
# key: peripheral, value: serie list
ll_h_dict = {}
ll_c_dict = {}
hal_c_dict = {}
# Search all files for each series
for serie in stm32_series:
src = HALDrivers_path / f"STM32{serie}xx_HAL_Driver" / "Src"
inc = HALDrivers_path / f"STM32{serie}xx_HAL_Driver" / "Inc"
if src.exists():
if log:
print(f"Generating for {serie}...")
lower = serie.lower()
# Search stm32yyxx_[hal|ll]*.c file
filelist = src.glob(f"stm32{lower}xx_*.c")
for fp in filelist:
# File name
fn = fp.name
found = peripheral_c_regex.match(fn)
if "_template" in fn:
continue
peripheral = found.group(1) if found else "hal"
if "_ll_" in fn:
if peripheral in ll_c_dict:
ll_c_dict[peripheral].append(lower)
else:
ll_c_dict[peripheral] = [lower]
else:
if peripheral in hal_c_dict:
hal_c_dict[peripheral].append(lower)
else:
hal_c_dict[peripheral] = [lower]
# Search stm32yyxx_ll_*.h file
filelist = inc.glob(f"stm32{lower}xx_ll_*.h")
for fp in filelist:
# File name
fn = fp.name
found = peripheral_h_regex.match(fn)
if not found:
continue
peripheral = found.group(1)
# Amend all LL header list
all_ll_h_list.append(fn.replace(lower, "yy"))
if peripheral in ll_h_dict:
ll_h_dict[peripheral].append(lower)
else:
ll_h_dict[peripheral] = [lower]
# Generate stm32yyxx_hal_*.c file
for key, value in hal_c_dict.items():
if key == "hal":
filepath = HALoutSrc_path / c_file.replace("zz", "hal").replace(
"_ppp", ""
)
else:
filepath = HALoutSrc_path / c_file.replace("zz", "hal").replace(
"ppp", key
)
out_file = open(filepath, "w", newline="\n")
out_file.write(
c_file_template.render(periph=key, type="hal", serieslist=value)
)
out_file.close()
# Generate stm32yyxx_ll_*.c file
for key, value in ll_c_dict.items():
filepath = LLoutSrc_path / c_file.replace("zz", "ll").replace(
"ppp", key
)
out_file = open(filepath, "w", newline="\n")
out_file.write(
c_file_template.render(periph=key, type="ll", serieslist=value)
)
out_file.close()
# Generate stm32yyxx_ll_*.h file
for key, value in ll_h_dict.items():
filepath = LLoutInc_path / ll_h_file.replace("ppp", key)
out_file = open(filepath, "w", newline="\n")
out_file.write(ll_h_file_template.render(periph=key, serieslist=value))
out_file.close()
if log:
print("done")
# Filter all LL header file
all_ll_h_list = sorted(set(all_ll_h_list))
# Generate the all LL header file
all_ll_file = open(LLoutInc_path / all_ll_h_file, "w", newline="\n")
all_ll_file.write(all_ll_header_file_template.render(ll_header_list=all_ll_h_list))
all_ll_file.close()
# CMSIS startup files
printCMSISStartup(log)
# system stm32 files
printSystemSTM32(log)
# CMSIS DSP C source file
if not CMSIS_path.is_dir():
print(f"Could not find {CMSIS_path}")
print("CMSIS DSP generation skipped.")
else:
# Delete all subfolders
deleteFolder(CMSIS_DSP_outSrc_path / "*")
dirlist = []
for path_object in CMSIS_DSPSrc_path.glob("**/*"):
if path_object.is_file():
if path_object.name.endswith(".c"):
dirlist.append(path_object.parent.name)
dirlist = sorted(set(dirlist))
for dn in dirlist:
fdn = CMSIS_DSP_outSrc_path / dn
if not fdn.is_dir():
createFolder(fdn)
out_file = open(fdn / (f"{dn}.c"), "w", newline="\n")
            # Write the include line for this DSP source directory.
            out_file.write(dsp_file_template.render(dsp=dn))
out_file.close()
return 0
if __name__ == "__main__":
# Parser
wrapparser = argparse.ArgumentParser(
description="Generate all wrappers files (HAL, LL, CMSIS, ...)"
)
wrapparser.add_argument(
"-c",
"--core",
metavar="core_path",
help=f"Root path of the STM32 core. Default: {core_path}",
)
wrapparser.add_argument(
"-s",
"--cmsis",
metavar="cmsis_path",
help=f"Root path of the CMSIS. Default: {CMSIS_path}",
)
wrapargs = wrapparser.parse_args()
wrap(wrapargs.core, wrapargs.cmsis, True)
|
the-stack_106_24091 | import re
import json
import random
import cbor2
import base64
from requests.models import Response
from localstack import config
from localstack.constants import APPLICATION_JSON, APPLICATION_CBOR
from localstack.utils.aws import aws_stack
from localstack.utils.common import to_str, json_safe, clone, epoch_timestamp, now_utc
from localstack.utils.analytics import event_publisher
from localstack.services.awslambda import lambda_api
from localstack.services.generic_proxy import ProxyListener
# action headers
ACTION_PREFIX = 'Kinesis_20131202'
ACTION_PUT_RECORD = '%s.PutRecord' % ACTION_PREFIX
ACTION_PUT_RECORDS = '%s.PutRecords' % ACTION_PREFIX
ACTION_LIST_STREAMS = '%s.ListStreams' % ACTION_PREFIX
ACTION_CREATE_STREAM = '%s.CreateStream' % ACTION_PREFIX
ACTION_DELETE_STREAM = '%s.DeleteStream' % ACTION_PREFIX
ACTION_UPDATE_SHARD_COUNT = '%s.UpdateShardCount' % ACTION_PREFIX
ACTION_GET_RECORDS = '%s.GetRecords' % ACTION_PREFIX
# list of stream consumer details
STREAM_CONSUMERS = []
class ProxyListenerKinesis(ProxyListener):
def forward_request(self, method, path, data, headers):
global STREAM_CONSUMERS
data = self.decode_content(data or '{}')
action = headers.get('X-Amz-Target', '').split('.')[-1]
if action == 'RegisterStreamConsumer':
consumer = clone(data)
consumer['ConsumerStatus'] = 'ACTIVE'
consumer['ConsumerARN'] = '%s/consumer/%s' % (data['StreamARN'], data['ConsumerName'])
consumer['ConsumerCreationTimestamp'] = float(now_utc())
consumer = json_safe(consumer)
STREAM_CONSUMERS.append(consumer)
return {'Consumer': consumer}
elif action == 'DeregisterStreamConsumer':
def consumer_matches(c):
stream_arn = data.get('StreamARN')
cons_name = data.get('ConsumerName')
cons_arn = data.get('ConsumerARN')
return (c.get('ConsumerARN') == cons_arn or
(c.get('StreamARN') == stream_arn and c.get('ConsumerName') == cons_name))
STREAM_CONSUMERS = [c for c in STREAM_CONSUMERS if not consumer_matches(c)]
return {}
elif action == 'ListStreamConsumers':
result = {
'Consumers': [c for c in STREAM_CONSUMERS if c.get('StreamARN') == data.get('StreamARN')]
}
return result
elif action == 'DescribeStreamConsumer':
consumer_arn = data.get('ConsumerARN') or data['ConsumerName']
consumer_name = data.get('ConsumerName') or data['ConsumerARN']
creation_timestamp = data.get('ConsumerCreationTimestamp')
result = {
'ConsumerDescription': {
'ConsumerARN': consumer_arn,
'ConsumerCreationTimestamp': creation_timestamp,
'ConsumerName': consumer_name,
'ConsumerStatus': 'ACTIVE',
'StreamARN': data.get('StreamARN')
}
}
return result
if random.random() < config.KINESIS_ERROR_PROBABILITY:
action = headers.get('X-Amz-Target')
if action in [ACTION_PUT_RECORD, ACTION_PUT_RECORDS]:
return kinesis_error_response(data, action)
return True
def return_response(self, method, path, data, headers, response):
action = headers.get('X-Amz-Target')
data = self.decode_content(data or '{}')
response._content = self.replace_in_encoded(response.content or '')
records = []
if action in (ACTION_CREATE_STREAM, ACTION_DELETE_STREAM):
event_type = (event_publisher.EVENT_KINESIS_CREATE_STREAM if action == ACTION_CREATE_STREAM
else event_publisher.EVENT_KINESIS_DELETE_STREAM)
payload = {'n': event_publisher.get_hash(data.get('StreamName'))}
if action == ACTION_CREATE_STREAM:
payload['s'] = data.get('ShardCount')
event_publisher.fire_event(event_type, payload=payload)
elif action == ACTION_PUT_RECORD:
response_body = self.decode_content(response.content)
# Note: avoid adding 'encryptionType':'NONE' in the event_record, as this breaks .NET Lambdas
event_record = {
'approximateArrivalTimestamp': epoch_timestamp(),
'data': data['Data'],
'partitionKey': data['PartitionKey'],
'sequenceNumber': response_body.get('SequenceNumber')
}
event_records = [event_record]
stream_name = data['StreamName']
lambda_api.process_kinesis_records(event_records, stream_name)
elif action == ACTION_PUT_RECORDS:
event_records = []
response_body = self.decode_content(response.content)
if 'Records' in response_body:
response_records = response_body['Records']
records = data['Records']
for index in range(0, len(records)):
record = records[index]
# Note: avoid adding 'encryptionType':'NONE' in the event_record, as this breaks .NET Lambdas
event_record = {
'approximateArrivalTimestamp': epoch_timestamp(),
'data': record['Data'],
'partitionKey': record['PartitionKey'],
'sequenceNumber': response_records[index].get('SequenceNumber')
}
event_records.append(event_record)
stream_name = data['StreamName']
lambda_api.process_kinesis_records(event_records, stream_name)
elif action == ACTION_UPDATE_SHARD_COUNT:
# Currently kinesalite, which backs the Kinesis implementation for localstack, does
# not support UpdateShardCount:
# https://github.com/mhart/kinesalite/issues/61
#
# [Terraform](https://www.terraform.io) makes the call to UpdateShardCount when it
# applies Kinesis resources. A Terraform run fails when this is not present.
#
# The code that follows just returns a successful response, bypassing the 400
# response that kinesalite returns.
#
response = Response()
response.status_code = 200
content = {
'CurrentShardCount': 1,
'StreamName': data['StreamName'],
'TargetShardCount': data['TargetShardCount']
}
response.encoding = 'UTF-8'
response._content = json.dumps(content)
return response
elif action == ACTION_GET_RECORDS:
sdk_v2 = self.sdk_is_v2(headers.get('User-Agent', '').split(' ')[0])
results, encoding_type = self.decode_content(response.content, True)
records = results.get('Records', [])
if not records:
return response
for record in records:
if sdk_v2:
record['ApproximateArrivalTimestamp'] = int(record['ApproximateArrivalTimestamp'] * 1000)
if not isinstance(record['Data'], str):
record['Data'] = base64.encodebytes(bytearray(record['Data']['data']))
if encoding_type == APPLICATION_CBOR:
response._content = cbor2.dumps(results)
else:
response._content = json.dumps(results)
return response
def sdk_is_v2(self, user_agent):
if re.search(r'\/2.\d+.\d+', user_agent):
return True
return False
def replace_in_encoded(self, data):
if not data:
return ''
decoded, type_encoding = self.decode_content(data, True)
if type_encoding == APPLICATION_JSON:
return re.sub(r'arn:aws:kinesis:[^:]+:', 'arn:aws:kinesis:%s:' % aws_stack.get_region(),
to_str(data))
if type_encoding == APPLICATION_CBOR:
replaced = re.sub(r'arn:aws:kinesis:[^:]+:', 'arn:aws:kinesis:%s:' % aws_stack.get_region(),
json.dumps(decoded))
return cbor2.dumps(json.loads(replaced))
def decode_content(self, data, describe=False):
content_type = ''
try:
decoded = json.loads(to_str(data))
content_type = APPLICATION_JSON
except UnicodeDecodeError:
decoded = cbor2.loads(data)
content_type = APPLICATION_CBOR
if describe:
return decoded, content_type
return decoded
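# Hedged sketch (not part of the original listener): decode_content accepts either a
# JSON or a CBOR payload, which is how the proxy copes with the two encodings used by
# different Kinesis SDKs. Only json and APPLICATION_JSON imported above are needed here.
#
#   payload = json.dumps({'StreamName': 'my-stream'}).encode('utf-8')
#   decoded, content_type = ProxyListenerKinesis().decode_content(payload, describe=True)
#   assert content_type == APPLICATION_JSON and decoded['StreamName'] == 'my-stream'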
# instantiate listener
UPDATE_KINESIS = ProxyListenerKinesis()
def kinesis_error_response(data, action):
error_response = Response()
if action == ACTION_PUT_RECORD:
error_response.status_code = 400
content = {
'ErrorCode': 'ProvisionedThroughputExceededException',
'ErrorMessage': 'Rate exceeded for shard X in stream Y under account Z.'
}
else:
error_response.status_code = 200
content = {'FailedRecordCount': 1, 'Records': []}
for record in data.get('Records', []):
content['Records'].append({
'ErrorCode': 'ProvisionedThroughputExceededException',
'ErrorMessage': 'Rate exceeded for shard X in stream Y under account Z.'
})
error_response._content = json.dumps(content)
return error_response
|
the-stack_106_24092 | # TODO from typing import List
import datetime
import json
import os
import re
import urllib.request
from bs4 import BeautifulSoup
from .models import Content
def get_text_wiki(lil):
"""
Get content from a wikihow page and format it.
:param lil: html tag content.
:return: formatted content.
"""
global ordinal
content = [lil.getText().split("\n")]
final_text_wiki = []
for i in content:
# pattern4 = re.compile(r"WH.(\w+\.)(\w+).*|googletag.(\w+\.)(\w+).*|//.*|^if.*")
pattern4 = re.compile(r"WH.(\w+\.)(\w+).*|googletag.(\w+\.)(\w+).*|//.*|^if.*|var img = document.getElementById.*|defer.*|(\[\d*\])")
p = len(i)
text4 = []
for j in range(p):
text = re.sub(pattern4, "", i[j])
text4.append(text)
_text = list(filter(None, text4))
del _text[0]
ordinal = " ".join(_text)
final_text_wiki.append(_text)
return ordinal
def get_image_wiki(lil):
"""
Get image links from content and serialize that content.
:param lil: html tag content.
:return: Image link for each step.
"""
global ordinal
regex1 = re.compile(r"https://www.wikihow.com/images/.*")
content_lis1 = lil.find_all("video", attrs={"data-poster": regex1})
content_lis2 = lil.find_all("img", attrs={"data-src": regex1})
content = []
if content_lis2:
for li in content_lis2:
content.append(li.get("data-src"))
ordinal = " ".join(content)
else:
for li in content_lis1:
content.append(li.get("data-poster"))
ordinal = " ".join(content)
return ordinal
def get_step_num_wiki(lil):
"""
Get step number from wikihow website.
:param lil: html tag content.
:return: step number
"""
global ordinal
regex1 = re.compile(r"step_num")
content_lis1 = lil.find_all("div", attrs={"class": regex1})
content = []
for li in content_lis1:
content.append(li.get("aria-label"))
ordinal = " ".join(content)
return ordinal
def wiki_how_content(str_, user_text):
"""
Main function, called from the view file.
:param str_: url link, found by the wikihow search function.
:param user_text: user input.
:return: tuple of (user_text, status).
"""
global new_dic, link_preview_dict, method
start_time = datetime.datetime.now()
url = str_
page = urllib.request.urlopen(url) # connect to website
soup = BeautifulSoup(page, "html.parser")
regex = re.compile("^hasimage")
# print("".join([i.getText() for i in soup.find_all("div", attrs={"class": "altblock"})]))
l = [i.getText() for i in soup.find_all("div", attrs={"class": "altblock"})]
content_lis = soup.find_all("li", attrs={"class": regex})
content = []
content_dic = []
method1 = 0
para = 0
status = 1
for li in content_lis:
step = get_step_num_wiki(li)
# create dictionary step, description and image.
link_preview_dict = {
"step": step,
"description": get_text_wiki(li),
"image": get_image_wiki(li),
}
if para != 0:
if step == "Step 1":
method = l[method1].rstrip()
if method == "Method 1":
status = 0
method1 += 1
new_dic = {"Part": content}
content_dic.append(new_dic)
content = []
content.append(link_preview_dict)
para += 1
method = l[method1].rstrip()
method1 += 1
new_dic = {"Part": content}
content_dic.append(new_dic)
# Calculate time difference.
end_time = datetime.datetime.now()
difference_time = end_time - start_time
s_time = difference_time.total_seconds()
url_ = url.replace("https://www.wikihow.com/", "")
folder_path = "/home/rakibul/PycharmProjects/Test/wikihow-py/wikihow/media/"
if not os.path.exists(folder_path):
os.makedirs(folder_path)
if content_dic:
with open(folder_path + user_text + '.json', 'w') as outfile:
json.dump(content_dic, outfile, indent=4)
data_content = Content.objects.create(url_text=url_, user_text=user_text,
scrape_time=s_time, url=url, json_file=user_text + ".json")
data_content.save()
return user_text, status
else:
return user_text, status
# return content_dic
# wiki_how_content("https://www.wikihow.com/Become-a-Psychotherapist")
|
the-stack_106_24093 | import time
import matplotlib
from charlieenv import CharlieEnv
from evaluatebob import evaluate
from vectorizeenv import VectorizedClass
matplotlib.use('TkAgg')
from stable_baselines3 import PPO
from bobenv import GetBobEnvClass
def just_bob():
for i in [100000, 500000, 1000000, 5000000]:
start = time.time()
bob = PPO("CnnPolicy", VectorizedClass(GetBobEnvClass(25), 6), verbose=0).learn(i)
end = time.time()
print(f"For {i} we took {end-start} and got {evaluate(bob, 25, episodes=100)}")
exit()
done = False
env = GetBobEnvClass(25)()
obs = env.reset()
while not done:
action = bob.predict(obs)
obs, rew, done, _ = env.step(action[0])
env.render()
def charlie():
for i in [100000//6, 500000//6, 1000000//6, 5000000//6]:
start = time.time()
bob = PPO("CnnPolicy", VectorizedClass(GetBobEnvClass(10), 6), verbose=0, n_steps=200)
charli = PPO("MlpPolicy", CharlieEnv(bob, t=200, maxsize=10), verbose=0, n_steps=1000).learn(i)
end = time.time()
print(f"For {i} we took {end-start} and got {evaluate(bob, 10, episodes=100)}")
exit()
done = False
env = GetBobEnvClass(25)()
obs = env.reset()
while not done:
action = bob.predict(obs)
obs, rew, done, _ = env.step(action[0])
env.render()
def main():
"""
start = time.time()
bob = PPO("CnnPolicy", VectorizedClass(GetBobEnvClass(25), 6), verbose=1)#.learn(100000)
#evaluate(bob, 25, episodes=100)
done = False
env = GetBobEnvClass(25)()
obs = env.reset()
while not done:
action = bob.predict(obs)
obs, rew, done, _ = env.step(action[0])
env.render()
#env = BobEnv(5)
#env.render()
# sleep(1000)
#just_bob()"""
charlie()
if __name__ == "__main__":
#main()
charlie() |
the-stack_106_24094 | import numpy as np
import pandas as pd
from .classifiers import classifier_dict
def local_test(x_train, pit_train, x_test, alphas=np.linspace(0.0, 1.0, 11), clf_name='MLP', n_trials=1000):
clf = classifier_dict[clf_name]
### calculate T_i value at point of interest x_test
all_rhat_alphas = {}
for alpha in alphas:
ind_train = [1*(x<=alpha) for x in pit_train]
rhat = clf
rhat.fit(X=x_train, y=ind_train)
all_rhat_alphas[alpha] = rhat.predict_proba(x_test)[:, 1][0]
all_rhat_alphas = pd.Series(all_rhat_alphas)
Ti_value = ((all_rhat_alphas - alphas)**2).sum() / len(alphas)
### refit the classifier using Unif[0,1] random values in place of true PIT values
all_unif_Ti_values = {}
for k in range(n_trials):
Ti_values_k = {}
all_rhat_alphas_k = {}
unif_values = np.random.uniform(size=pit_train.shape[0])
for alpha in alphas:
ind_values_k = [1*(x<=alpha) for x in unif_values]
rhat_k = clf
rhat_k.fit(X=x_train, y=ind_values_k)
all_rhat_alphas_k[alpha] = rhat_k.predict_proba(x_test)[:, 1][0]
all_rhat_alphas_k = pd.Series(all_rhat_alphas_k)
Ti_values_k = ((all_rhat_alphas_k - alphas)**2).sum() / len(alphas)
all_unif_Ti_values[k] = Ti_values_k
### compute local p-value
local_pvalue = sum(1* (Ti_value < pd.Series(all_unif_Ti_values))) / len(all_unif_Ti_values)
return local_pvalue
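# Hedged usage sketch with synthetic data (shapes and the classifier name are
# assumptions; real PIT values would come from a fitted conditional density model):
#
#   import numpy as np
#   x_train = np.random.normal(size=(500, 2))
#   pit_train = np.random.uniform(size=500)
#   x_test = np.zeros((1, 2))
#   p_value = local_test(x_train, pit_train, x_test, clf_name='MLP', n_trials=100)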
|
the-stack_106_24100 | import re
import json
from functools import lru_cache
import typing
from mitmproxy.contentviews import base
PARSE_ERROR = object()
@lru_cache(1)
def parse_json(s: bytes) -> typing.Any:
try:
return json.loads(s.decode('utf-8'))
except ValueError:
return PARSE_ERROR
def format_json(data: typing.Any) -> typing.Iterator[base.TViewLine]:
encoder = json.JSONEncoder(indent=4, sort_keys=True, ensure_ascii=False)
current_line: base.TViewLine = []
for chunk in encoder.iterencode(data):
if "\n" in chunk:
rest_of_last_line, chunk = chunk.split("\n", maxsplit=1)
# rest_of_last_line is a delimiter such as , or [
current_line.append(('text', rest_of_last_line))
yield current_line
current_line = []
if re.match(r'\s*"', chunk):
current_line.append(('json_string', chunk))
elif re.match(r'\s*\d', chunk):
current_line.append(('json_number', chunk))
elif re.match(r'\s*(true|null|false)', chunk):
current_line.append(('json_boolean', chunk))
else:
current_line.append(('text', chunk))
yield current_line
class ViewJSON(base.View):
name = "JSON"
def __call__(self, data, **metadata):
data = parse_json(data)
if data is not PARSE_ERROR:
return "JSON", format_json(data)
def render_priority(self, data: bytes, *, content_type: typing.Optional[str] = None, **metadata) -> float:
if content_type in (
"application/json",
"application/json-rpc",
):
return 1
if content_type and content_type.startswith("application/") and content_type.endswith("+json"):
return 1
return 0
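# Hedged sketch (not part of mitmproxy): format_json consumes already-parsed data and
# yields one list of (style, text) tuples per rendered line, which is what the
# contentview API above expects.
#
#   data = parse_json(b'{"name": "mitmproxy", "port": 8080}')
#   for line in format_json(data):
#       print("".join(text for _style, text in line))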
|
the-stack_106_24102 | # Note: this broke when moving to Python 3.9 due to an issue with numba.
# Refer to C++ implementation.
import time
import numpy as np
from numba import njit, uint32
@njit(uint32(uint32, uint32[:], uint32, uint32))
def play_till_round(max_round, memory, last_number, n_starting_numbers):
for round_nr in range(n_starting_numbers + 1, max_round + 1):
next_number = (
round_nr - 1 - from_memory if (from_memory := memory[last_number]) else 0
)
memory[last_number] = round_nr - 1
last_number = next_number
return next_number
def initial_memory(numbers, max_round):
memory = np.zeros(max_round, dtype=np.uint32)
for i, n in enumerate(numbers[:-1]):
memory[n] = i + 1
return memory
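# Hedged sanity check using the well-known 0,3,6 starting sequence: after the starting
# numbers, each spoken value is the gap since the previous number was last spoken, or 0
# if it is new, so the 10th number spoken is 0.
#
#   mem = initial_memory([0, 3, 6], 10)
#   play_till_round(10, mem, 6, 3)  # -> 0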
with open("input.txt") as f:
init_numbers = list(map(int, f.read().strip().split(",")))
t0 = time.time()
for part, n in ((1, 2020), (2, 30_000_000)):
print(
f"Part {part}:",
play_till_round(
n, initial_memory(init_numbers, n), init_numbers[-1], len(init_numbers)
),
)
print(f"Total time: {round(time.time() - t0, 2)} seconds")
|
the-stack_106_24104 | #!/usr/bin/env python3
import os
from aws_cdk import (
aws_ec2 as ec2,
aws_ecs as ecs,
aws_lambda as aws_lambda,
aws_dynamodb as dynamodb,
aws_batch as batch,
aws_s3 as s3,
aws_iam as iam,
aws_ecr_assets,
core
)
from batch_job_cdk.constructs.instance_profile import InstanceProfile
from batch_job_cdk.constructs.batch_lambda_function import BatchLambdaFunction
'''
Sample CDK code for creating the required infrastructure for running an AWS Batch job.
Creates an S3 bucket as a source for reading data, and a DynamoDB table as a target.
AWS Batch serves as the compute environment in which a docker image with the DL model runs.
'''
job_definition_name = "aws-blog-batch-job-image-transform-job-definition"
job_queue_name = "aws-blog-batch-job-image-transform-job-queue"
db_table_name = "aws-blog-batch-job-image-transform-dynamodb-table"
stack_name = "aws-blog-batch-job-image-transform-stack"
lambda_function_name = "aws-blog-batch-job-image-transform-lambda"
compute_env_name = "aws-blog-batch-compute-environment"
batch_lambda_function_name = "aws-blog-batch-job-function"
# Relative path to the source code for the aws batch job, from the project root
docker_base_dir = "src_batch_job"
# Relative path to the source for the AWS lambda, from the project root
lambda_script_dir = "src_lambda"
class BatchJobStack(core.Stack):
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
super().__init__(scope, id, **kwargs)
s3_bucket = s3.Bucket(self,
'batch-job-bucket',
public_read_access=False,
block_public_access=s3.BlockPublicAccess.BLOCK_ALL
)
db_table = dynamodb.Table(self,
'batch-job-dynamodb-table',
table_name=db_table_name,
partition_key=dynamodb.Attribute(
name='ImageId',
type=dynamodb.AttributeType.STRING),
billing_mode=dynamodb.BillingMode.PAY_PER_REQUEST,
point_in_time_recovery=True
)
vpc = ec2.Vpc(self,
"batch-job-vpc",
max_azs=2
)
sg = ec2.SecurityGroup(self,
"batch-job-security-group",
vpc=vpc,
security_group_name="aws-blog-batch-job-security-group",
)
docker_image_asset = aws_ecr_assets.DockerImageAsset(self,
"ecr-docker-image-asset",
directory=docker_base_dir
)
docker_container_image = ecs.ContainerImage.from_docker_image_asset(docker_image_asset)
batch_job_definition = batch.JobDefinition(self,
"job-definition",
job_definition_name=job_definition_name,
container=batch.JobDefinitionContainer(
image=docker_container_image,
gpu_count=0,
vcpus=8,
memory_limit_mib=8192),
retry_attempts=5,
timeout=core.Duration.minutes(30)
)
batch_instance_role = iam.Role(self,
'batch-job-instance-role',
assumed_by=iam.CompositePrincipal(
iam.ServicePrincipal('ec2.amazonaws.com'),
iam.ServicePrincipal('ecs.amazonaws.com'),
iam.ServicePrincipal('ecs-tasks.amazonaws.com')
),
managed_policies=[
iam.ManagedPolicy.from_aws_managed_policy_name("service-role/AmazonEC2ContainerServiceforEC2Role"),
])
db_table.grant_read_write_data(batch_instance_role)
s3_bucket.grant_read(batch_instance_role)
batch_instance_profile = InstanceProfile(self, 'batch-job-instance-profile')
batch_instance_profile.attach_role(batch_instance_role)
compute_environment = batch.ComputeEnvironment(self,
"batch-compute-environment",
compute_environment_name=compute_env_name,
compute_resources=batch.ComputeResources(
vpc=vpc,
minv_cpus=0,
desiredv_cpus=0,
maxv_cpus=32,
instance_role=batch_instance_profile.profile_arn,
security_groups=[sg],
type=batch.ComputeResourceType.ON_DEMAND,
))
job_queue = batch.JobQueue(self,
"job-queue",
job_queue_name=job_queue_name,
priority=1,
compute_environments=[
batch.JobQueueComputeEnvironment(
compute_environment=compute_environment,
order=1)
])
batch_lambda_function = BatchLambdaFunction(self,
'batch-lambda-function',
function_name=batch_lambda_function_name,
code_path=lambda_script_dir,
environment={
"BATCH_JOB_QUEUE": job_queue_name,
"BATCH_JOB_DEFINITION": job_definition_name,
"REGION": os.environ["CDK_DEFAULT_REGION"],
"S3_BUCKET_NAME": s3_bucket.bucket_name,
"DYNAMODB_TABLE_NAME": db_table_name
})
s3_bucket.grant_read_write(batch_lambda_function.function)
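# Hedged sketch of a typical CDK app entrypoint for this stack (the app.py file name
# and the account/region environment variables are assumptions, not shown in this module):
#
#   app = core.App()
#   BatchJobStack(app, stack_name,
#                 env=core.Environment(account=os.environ["CDK_DEFAULT_ACCOUNT"],
#                                      region=os.environ["CDK_DEFAULT_REGION"]))
#   app.synth()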
|
the-stack_106_24107 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
PTransforms for supporting Kafka in Python pipelines. These transforms do not
run a Kafka client in Python. Instead, they expand to ExternalTransforms
which the Expansion Service resolves to the Java SDK's KafkaIO. In other
words: they are cross-language transforms.
Note: To use these transforms, you need to start a Java Expansion Service.
Please refer to the portability documentation on how to do that. Flink Users
can use the built-in Expansion Service of the Flink Runner's Job Server. The
expansion service address has to be provided when instantiating the
transforms.
If you start Flink's Job Server, the expansion service will be started on
port 8097. This is also the configured default for this transform. For a
different address, please set the expansion_service parameter.
For more information see:
- https://beam.apache.org/documentation/runners/flink/
- https://beam.apache.org/roadmap/portability/
"""
from __future__ import absolute_import
import typing
from past.builtins import unicode
from apache_beam.transforms.external import ExternalTransform
from apache_beam.transforms.external import NamedTupleBasedPayloadBuilder
ReadFromKafkaSchema = typing.NamedTuple(
'ReadFromKafkaSchema',
[
('consumer_config', typing.List[typing.Tuple[unicode, unicode]]),
('topics', typing.List[unicode]),
('key_deserializer', unicode),
('value_deserializer', unicode),
]
)
class ReadFromKafka(ExternalTransform):
"""
An external PTransform which reads from Kafka and returns a KV pair for
each item in the specified Kafka topics. If no Kafka Deserializer for
key/value is provided, then the data will be returned as a raw byte array.
Note: Runners need to support translating Read operations in order to use
this source. At the moment only the Flink Runner supports this.
Experimental; no backwards compatibility guarantees. It requires special
preparation of the Java SDK. See BEAM-7870.
"""
# Returns the key/value data as raw byte arrays
byte_array_deserializer = 'org.apache.kafka.common.serialization.' \
'ByteArrayDeserializer'
URN = 'beam:external:java:kafka:read:v1'
def __init__(self, consumer_config,
topics,
key_deserializer=byte_array_deserializer,
value_deserializer=byte_array_deserializer,
expansion_service=None):
"""
Initializes a read operation from Kafka.
:param consumer_config: A dictionary containing the consumer configuration.
:param topics: A list of topic strings.
:param key_deserializer: A fully-qualified Java class name of a Kafka
Deserializer for the topic's key, e.g.
'org.apache.kafka.common.
serialization.LongDeserializer'.
Default: 'org.apache.kafka.common.
serialization.ByteArrayDeserializer'.
:param value_deserializer: A fully-qualified Java class name of a Kafka
Deserializer for the topic's value, e.g.
'org.apache.kafka.common.
serialization.LongDeserializer'.
Default: 'org.apache.kafka.common.
serialization.ByteArrayDeserializer'.
:param expansion_service: The address (host:port) of the ExpansionService.
"""
super(ReadFromKafka, self).__init__(
self.URN,
NamedTupleBasedPayloadBuilder(
ReadFromKafkaSchema(
consumer_config=list(consumer_config.items()),
topics=topics,
key_deserializer=key_deserializer,
value_deserializer=value_deserializer,
)
),
expansion_service
)
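# Hedged usage sketch (broker address, topic and expansion service address are
# assumptions; `beam` refers to an `import apache_beam as beam` not present in this module):
#
#   with beam.Pipeline(options=pipeline_options) as p:
#       records = p | ReadFromKafka(
#           consumer_config={'bootstrap.servers': 'localhost:9092'},
#           topics=['my_topic'],
#           expansion_service='localhost:8097')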
WriteToKafkaSchema = typing.NamedTuple(
'WriteToKafkaSchema',
[
('producer_config', typing.List[typing.Tuple[unicode, unicode]]),
('topic', unicode),
('key_serializer', unicode),
('value_serializer', unicode),
]
)
class WriteToKafka(ExternalTransform):
"""
An external PTransform which writes KV data to a specified Kafka topic.
If no Kafka Serializer for key/value is provided, then key/value are
assumed to be byte arrays.
Experimental; no backwards compatibility guarantees. It requires special
preparation of the Java SDK. See BEAM-7870.
"""
# Default serializer which passes raw bytes to Kafka
byte_array_serializer = 'org.apache.kafka.common.serialization.' \
'ByteArraySerializer'
URN = 'beam:external:java:kafka:write:v1'
def __init__(self, producer_config,
topic,
key_serializer=byte_array_serializer,
value_serializer=byte_array_serializer,
expansion_service=None):
"""
Initializes a write operation to Kafka.
:param producer_config: A dictionary containing the producer configuration.
:param topic: A Kafka topic name.
:param key_serializer: A fully-qualified Java class name of a Kafka
Serializer for the topic's key, e.g.
'org.apache.kafka.common.
serialization.LongSerializer'.
Default: 'org.apache.kafka.common.
serialization.ByteArraySerializer'.
:param value_serializer: A fully-qualified Java class name of a Kafka
Serializer for the topic's value, e.g.
'org.apache.kafka.common.
serialization.LongSerializer'.
Default: 'org.apache.kafka.common.
serialization.ByteArraySerializer'.
:param expansion_service: The address (host:port) of the ExpansionService.
"""
super(WriteToKafka, self).__init__(
self.URN,
NamedTupleBasedPayloadBuilder(
WriteToKafkaSchema(
producer_config=list(producer_config.items()),
topic=topic,
key_serializer=key_serializer,
value_serializer=value_serializer,
)
),
expansion_service
)
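# Hedged usage sketch mirroring the reader above (same assumptions apply; the input
# PCollection `kv_bytes` is assumed to hold key/value byte pairs when the default
# serializers are used):
#
#   _ = kv_bytes | WriteToKafka(
#       producer_config={'bootstrap.servers': 'localhost:9092'},
#       topic='my_topic',
#       expansion_service='localhost:8097')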
|
the-stack_106_24112 | import SPARQLWrapper
import argparse
from collections import defaultdict,OrderedDict
import json
import re
def runQuery(query):
endpoint = 'https://query.wikidata.org/sparql'
sparql = SPARQLWrapper.SPARQLWrapper(endpoint, agent='Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36')
sparql.setQuery(query)
sparql.setReturnFormat(SPARQLWrapper.JSON)
results = sparql.query().convert()
return results['results']['bindings']
def main():
parser = argparse.ArgumentParser(description='Tool to pull geographic data from WikiData using SPARQL')
parser.add_argument('--outJSON',type=str,required=True,help='File to output entities')
args = parser.parse_args()
totalCount = 0
administrativeTerritorialEntity = 'Q56061'
print("Gathering types of geographic location from Wikidata...")
geoClasses = OrderedDict()
geoClasses['Q47168'] = 'county of the United States'
geoConcepts = OrderedDict()
geoConcepts.update(geoClasses)
for classID,className in geoClasses.items():
query = """
SELECT ?entity ?entityLabel WHERE {
SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". }
?entity wdt:P279* wd:%s.
}
""" % classID
for row in runQuery(query):
if 'xml:lang' in row['entityLabel'] and row['entityLabel']['xml:lang'] == 'en':
locationID = row['entity']['value'].split('/')[-1]
locationType = row['entityLabel']['value']
geoConcepts[locationID] = locationType
entities = defaultdict(dict)
geoConcepts['Q515'] = 'city'
#geoConcepts['Q7930989'] = 'city/town'
geoConcepts['Q1549591'] = 'big city'
geoConcepts['Q6256'] = 'country'
geoConcepts['Q112099'] = 'island nation'
geoConcepts['Q3624078'] = 'sovereign state'
geoConcepts['Q35657'] = 'state of the United States'
geoConcepts['Q1352230'] = 'territory of the United States'
geoConcepts['Q16110'] = 'region of Italy'
geoConcepts['Q36784'] = 'region of France'
geoConcepts['Q1221156'] = 'state of Germany'
geoConcepts['Q1615742'] = 'province of China'
geoConcepts['Q11828004'] = 'province of Canada'
geoConcepts['Q9357527'] = 'territory of Canada'
geoConcepts['Q50337'] = 'prefecture of Japan'
geoConcepts['Q5852411'] = 'state of Australia'
coordRegex = re.compile(r'Point\((?P<longitude>[-+]?\d*\.?\d*) (?P<latitude>[-+]?\d*\.?\d*)\)')
print("Gathering locations from Wikidata...")
for i,(conceptID,conceptType) in enumerate(geoConcepts.items()):
#if i >= 10:
# break
query = """
SELECT ?entity ?entityLabel ?entityDescription ?alias ?coords WHERE {
SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". }
?entity wdt:P31 wd:%s.
?entity wdt:P625 ?coords.
OPTIONAL {?entity skos:altLabel ?alias FILTER (LANG (?alias) = "en") .}
}
""" % conceptID
rowCount = 0
for row in runQuery(query):
longID = row['entity']['value']
if 'xml:lang' in row['entityLabel'] and row['entityLabel']['xml:lang'] == 'en':
# Get the Wikidata ID, not the whole URL
shortID = longID.split('/')[-1]
entity = entities[shortID]
entity['id'] = shortID
entity['type'] = conceptType
entity['name'] = row['entityLabel']['value']
match = coordRegex.match(row['coords']['value'])
if match:
entity['longitude'] = float(match.groupdict()['longitude'])
entity['latitude'] = float(match.groupdict()['latitude'])
if 'entityDescription' in row and 'xml:lang' in row['entityDescription'] and row['entityDescription']['xml:lang'] == 'en':
entity['description'] = row['entityDescription']['value']
if not 'aliases' in entity:
entity['aliases'] = []
if 'alias' in row and row['alias']['xml:lang'] == 'en':
entity['aliases'].append(row['alias']['value'])
rowCount += 1
totalCount += 1
print("%s (%d/%d): %d rows" % (conceptType, i+1, len(geoConcepts), rowCount))
for entityID,entity in entities.items():
entity['aliases'].append(entity['name'])
entity['aliases'] = [ t for t in entity['aliases'] if len(t) > 3 ]
entity['aliases'] += [ t.replace('\N{REGISTERED SIGN}','').strip() for t in entity['aliases'] ]
entity['aliases'] = sorted(set(entity['aliases']))
entities = { entityID:entity for entityID,entity in entities.items() if len(entity['aliases']) > 0 }
# Require coordinates
entities = { entityID:entity for entityID,entity in entities.items() if 'longitude' in entity and 'latitude' in entity }
print (" Got %d locations (from %d rows)" % (len(entities),totalCount))
print("Saving JSON file...")
with open(args.outJSON,'w') as f:
#entities_as_list = [ entities[entityID] for entityID in sorted(entities.keys()) ]
json.dump(entities,f,indent=2,sort_keys=True)
if __name__ == '__main__':
main()
|
the-stack_106_24113 | # -*- coding: utf-8 -*-
"""
werkzeug.routing
~~~~~~~~~~~~~~~~
When it comes to combining multiple controller or view functions (however
you want to call them) you need a dispatcher. A simple way would be
applying regular expression tests on the ``PATH_INFO`` and calling
registered callback functions that return the value then.
This module implements a much more powerful system than simple regular
expression matching because it can also convert values in the URLs and
build URLs.
Here a simple example that creates an URL map for an application with
two subdomains (www and kb) and some URL rules:
>>> m = Map([
... # Static URLs
... Rule('/', endpoint='static/index'),
... Rule('/about', endpoint='static/about'),
... Rule('/help', endpoint='static/help'),
... # Knowledge Base
... Subdomain('kb', [
... Rule('/', endpoint='kb/index'),
... Rule('/browse/', endpoint='kb/browse'),
... Rule('/browse/<int:id>/', endpoint='kb/browse'),
... Rule('/browse/<int:id>/<int:page>', endpoint='kb/browse')
... ])
... ], default_subdomain='www')
If the application doesn't use subdomains it's perfectly fine to not set
the default subdomain and not use the `Subdomain` rule factory. The endpoint
in the rules can be anything, for example import paths or unique
identifiers. The WSGI application can use those endpoints to get the
handler for that URL. It doesn't have to be a string at all but it's
recommended.
Now it's possible to create a URL adapter for one of the subdomains and
build URLs:
>>> c = m.bind('example.com')
>>> c.build("kb/browse", dict(id=42))
'http://kb.example.com/browse/42/'
>>> c.build("kb/browse", dict())
'http://kb.example.com/browse/'
>>> c.build("kb/browse", dict(id=42, page=3))
'http://kb.example.com/browse/42/3'
>>> c.build("static/about")
'/about'
>>> c.build("static/index", force_external=True)
'http://www.example.com/'
>>> c = m.bind('example.com', subdomain='kb')
>>> c.build("static/about")
'http://www.example.com/about'
The first argument to bind is the server name *without* the subdomain.
Per default it will assume that the script is mounted on the root, but
often that's not the case so you can provide the real mount point as
second argument:
>>> c = m.bind('example.com', '/applications/example')
The third argument can be the subdomain, if not given the default
subdomain is used. For more details about binding have a look at the
documentation of the `MapAdapter`.
And here is how you can match URLs:
>>> c = m.bind('example.com')
>>> c.match("/")
('static/index', {})
>>> c.match("/about")
('static/about', {})
>>> c = m.bind('example.com', '/', 'kb')
>>> c.match("/")
('kb/index', {})
>>> c.match("/browse/42/23")
('kb/browse', {'id': 42, 'page': 23})
If matching fails you get a `NotFound` exception, if the rule thinks
it's a good idea to redirect (for example because the URL was defined
to have a slash at the end but the request was missing that slash) it
will raise a `RequestRedirect` exception. Both are subclasses of the
`HTTPException` so you can use those errors as responses in the
application.
If matching succeeded but the URL rule was incompatible to the given
method (for example there were only rules for `GET` and `HEAD` and
routing system tried to match a `POST` request) a `MethodNotAllowed`
exception is raised.
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import ast
import difflib
import posixpath
import re
import uuid
from pprint import pformat
from threading import Lock
from ._compat import implements_to_string
from ._compat import iteritems
from ._compat import itervalues
from ._compat import native_string_result
from ._compat import string_types
from ._compat import text_type
from ._compat import to_bytes
from ._compat import to_unicode
from ._compat import wsgi_decoding_dance
from ._internal import _encode_idna
from ._internal import _get_environ
from .datastructures import ImmutableDict
from .datastructures import MultiDict
from .exceptions import BadHost
from .exceptions import HTTPException
from .exceptions import MethodNotAllowed
from .exceptions import NotFound
from .urls import _fast_url_quote
from .urls import url_encode
from .urls import url_join
from .urls import url_quote
from .utils import cached_property
from .utils import format_string
from .utils import redirect
from .wsgi import get_host
_rule_re = re.compile(
r"""
(?P<static>[^<]*) # static rule data
<
(?:
(?P<converter>[a-zA-Z_][a-zA-Z0-9_]*) # converter name
(?:\((?P<args>.*?)\))? # converter arguments
\: # variable delimiter
)?
(?P<variable>[a-zA-Z_][a-zA-Z0-9_]*) # variable name
>
""",
re.VERBOSE,
)
_simple_rule_re = re.compile(r"<([^>]+)>")
_converter_args_re = re.compile(
r"""
((?P<name>\w+)\s*=\s*)?
(?P<value>
True|False|
\d+.\d+|
\d+.|
\d+|
[\w\d_.]+|
[urUR]?(?P<stringval>"[^"]*?"|'[^']*')
)\s*,
""",
re.VERBOSE | re.UNICODE,
)
_PYTHON_CONSTANTS = {"None": None, "True": True, "False": False}
def _pythonize(value):
if value in _PYTHON_CONSTANTS:
return _PYTHON_CONSTANTS[value]
for convert in int, float:
try:
return convert(value)
except ValueError:
pass
if value[:1] == value[-1:] and value[0] in "\"'":
value = value[1:-1]
return text_type(value)
def parse_converter_args(argstr):
argstr += ","
args = []
kwargs = {}
for item in _converter_args_re.finditer(argstr):
value = item.group("stringval")
if value is None:
value = item.group("value")
value = _pythonize(value)
if not item.group("name"):
args.append(value)
else:
name = item.group("name")
kwargs[name] = value
return tuple(args), kwargs
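# Hedged sketch of what the parser produces (the values follow from the regex and
# _pythonize above):
#
#   parse_converter_args("2, minlength=1, name='foo'")
#   # -> ((2,), {'minlength': 1, 'name': 'foo'})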
def parse_rule(rule):
"""Parse a rule and return it as generator. Each iteration yields tuples
in the form ``(converter, arguments, variable)``. If the converter is
`None` it's a static url part, otherwise it's a dynamic one.
:internal:
"""
pos = 0
end = len(rule)
do_match = _rule_re.match
used_names = set()
while pos < end:
m = do_match(rule, pos)
if m is None:
break
data = m.groupdict()
if data["static"]:
yield None, None, data["static"]
variable = data["variable"]
converter = data["converter"] or "default"
if variable in used_names:
raise ValueError("variable name %r used twice." % variable)
used_names.add(variable)
yield converter, data["args"] or None, variable
pos = m.end()
if pos < end:
remaining = rule[pos:]
if ">" in remaining or "<" in remaining:
raise ValueError("malformed url rule: %r" % rule)
yield None, None, remaining
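# Hedged sketch of the generator output for a simple rule (derived from the parsing
# loop above): static parts come through with converter None, dynamic parts with their
# converter name.
#
#   list(parse_rule('/pages/<int:page>'))
#   # -> [(None, None, '/pages/'), ('int', None, 'page')]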
class RoutingException(Exception):
"""Special exceptions that require the application to redirect, notifying
about missing urls, etc.
:internal:
"""
class RequestRedirect(HTTPException, RoutingException):
"""Raise if the map requests a redirect. This is for example the case if
`strict_slashes` are activated and an url that requires a trailing slash.
The attribute `new_url` contains the absolute destination url.
"""
code = 308
def __init__(self, new_url):
RoutingException.__init__(self, new_url)
self.new_url = new_url
def get_response(self, environ):
return redirect(self.new_url, self.code)
class RequestSlash(RoutingException):
"""Internal exception."""
class RequestAliasRedirect(RoutingException): # noqa: B903
"""This rule is an alias and wants to redirect to the canonical URL."""
def __init__(self, matched_values):
self.matched_values = matched_values
@implements_to_string
class BuildError(RoutingException, LookupError):
"""Raised if the build system cannot find a URL for an endpoint with the
values provided.
"""
def __init__(self, endpoint, values, method, adapter=None):
LookupError.__init__(self, endpoint, values, method)
self.endpoint = endpoint
self.values = values
self.method = method
self.adapter = adapter
@cached_property
def suggested(self):
return self.closest_rule(self.adapter)
def closest_rule(self, adapter):
def _score_rule(rule):
return sum(
[
0.98
* difflib.SequenceMatcher(
None, rule.endpoint, self.endpoint
).ratio(),
0.01 * bool(set(self.values or ()).issubset(rule.arguments)),
0.01 * bool(rule.methods and self.method in rule.methods),
]
)
if adapter and adapter.map._rules:
return max(adapter.map._rules, key=_score_rule)
def __str__(self):
message = []
message.append("Could not build url for endpoint %r" % self.endpoint)
if self.method:
message.append(" (%r)" % self.method)
if self.values:
message.append(" with values %r" % sorted(self.values.keys()))
message.append(".")
if self.suggested:
if self.endpoint == self.suggested.endpoint:
if self.method and self.method not in self.suggested.methods:
message.append(
" Did you mean to use methods %r?"
% sorted(self.suggested.methods)
)
missing_values = self.suggested.arguments.union(
set(self.suggested.defaults or ())
) - set(self.values.keys())
if missing_values:
message.append(
" Did you forget to specify values %r?" % sorted(missing_values)
)
else:
message.append(" Did you mean %r instead?" % self.suggested.endpoint)
return u"".join(message)
class ValidationError(ValueError):
"""Validation error. If a rule converter raises this exception the rule
does not match the current URL and the next URL is tried.
"""
class RuleFactory(object):
"""As soon as you have more complex URL setups it's a good idea to use rule
factories to avoid repetitive tasks. Some of them are builtin, others can
be added by subclassing `RuleFactory` and overriding `get_rules`.
"""
def get_rules(self, map):
"""Subclasses of `RuleFactory` have to override this method and return
an iterable of rules."""
raise NotImplementedError()
class Subdomain(RuleFactory):
"""All URLs provided by this factory have the subdomain set to a
specific domain. For example if you want to use the subdomain for
the current language this can be a good setup::
url_map = Map([
Rule('/', endpoint='#select_language'),
Subdomain('<string(length=2):lang_code>', [
Rule('/', endpoint='index'),
Rule('/about', endpoint='about'),
Rule('/help', endpoint='help')
])
])
All the rules except for the ``'#select_language'`` endpoint will now
listen on a two letter long subdomain that holds the language code
for the current request.
"""
def __init__(self, subdomain, rules):
self.subdomain = subdomain
self.rules = rules
def get_rules(self, map):
for rulefactory in self.rules:
for rule in rulefactory.get_rules(map):
rule = rule.empty()
rule.subdomain = self.subdomain
yield rule
class Submount(RuleFactory):
"""Like `Subdomain` but prefixes the URL rule with a given string::
url_map = Map([
Rule('/', endpoint='index'),
Submount('/blog', [
Rule('/', endpoint='blog/index'),
Rule('/entry/<entry_slug>', endpoint='blog/show')
])
])
Now the rule ``'blog/show'`` matches ``/blog/entry/<entry_slug>``.
"""
def __init__(self, path, rules):
self.path = path.rstrip("/")
self.rules = rules
def get_rules(self, map):
for rulefactory in self.rules:
for rule in rulefactory.get_rules(map):
rule = rule.empty()
rule.rule = self.path + rule.rule
yield rule
class EndpointPrefix(RuleFactory):
"""Prefixes all endpoints (which must be strings for this factory) with
another string. This can be useful for sub applications::
url_map = Map([
Rule('/', endpoint='index'),
EndpointPrefix('blog/', [Submount('/blog', [
Rule('/', endpoint='index'),
Rule('/entry/<entry_slug>', endpoint='show')
])])
])
"""
def __init__(self, prefix, rules):
self.prefix = prefix
self.rules = rules
def get_rules(self, map):
for rulefactory in self.rules:
for rule in rulefactory.get_rules(map):
rule = rule.empty()
rule.endpoint = self.prefix + rule.endpoint
yield rule
class RuleTemplate(object):
"""Returns copies of the rules wrapped and expands string templates in
the endpoint, rule, defaults or subdomain sections.
Here a small example for such a rule template::
from werkzeug.routing import Map, Rule, RuleTemplate
resource = RuleTemplate([
Rule('/$name/', endpoint='$name.list'),
Rule('/$name/<int:id>', endpoint='$name.show')
])
url_map = Map([resource(name='user'), resource(name='page')])
When a rule template is called the keyword arguments are used to
replace the placeholders in all the string parameters.
"""
def __init__(self, rules):
self.rules = list(rules)
def __call__(self, *args, **kwargs):
return RuleTemplateFactory(self.rules, dict(*args, **kwargs))
class RuleTemplateFactory(RuleFactory):
"""A factory that fills in template variables into rules. Used by
`RuleTemplate` internally.
:internal:
"""
def __init__(self, rules, context):
self.rules = rules
self.context = context
def get_rules(self, map):
for rulefactory in self.rules:
for rule in rulefactory.get_rules(map):
new_defaults = subdomain = None
if rule.defaults:
new_defaults = {}
for key, value in iteritems(rule.defaults):
if isinstance(value, string_types):
value = format_string(value, self.context)
new_defaults[key] = value
if rule.subdomain is not None:
subdomain = format_string(rule.subdomain, self.context)
new_endpoint = rule.endpoint
if isinstance(new_endpoint, string_types):
new_endpoint = format_string(new_endpoint, self.context)
yield Rule(
format_string(rule.rule, self.context),
new_defaults,
subdomain,
rule.methods,
rule.build_only,
new_endpoint,
rule.strict_slashes,
)
def _prefix_names(src):
"""ast parse and prefix names with `.` to avoid collision with user vars"""
tree = ast.parse(src).body[0]
if isinstance(tree, ast.Expr):
tree = tree.value
for node in ast.walk(tree):
if isinstance(node, ast.Name):
node.id = "." + node.id
return tree
_CALL_CONVERTER_CODE_FMT = "self._converters[{elem!r}].to_url()"
_IF_KWARGS_URL_ENCODE_CODE = """\
if kwargs:
q = '?'
params = self._encode_query_vars(kwargs)
else:
q = params = ''
"""
_IF_KWARGS_URL_ENCODE_AST = _prefix_names(_IF_KWARGS_URL_ENCODE_CODE)
_URL_ENCODE_AST_NAMES = (_prefix_names("q"), _prefix_names("params"))
@implements_to_string
class Rule(RuleFactory):
"""A Rule represents one URL pattern. There are some options for `Rule`
that change the way it behaves and are passed to the `Rule` constructor.
Note that besides the rule-string all arguments *must* be keyword arguments
in order to not break the application on Werkzeug upgrades.
`string`
Rule strings basically are just normal URL paths with placeholders in
the format ``<converter(arguments):name>`` where the converter and the
arguments are optional. If no converter is defined the `default`
converter is used which means `string` in the normal configuration.
URL rules that end with a slash are branch URLs, others are leaves.
If you have `strict_slashes` enabled (which is the default), all
branch URLs that are matched without a trailing slash will trigger a
redirect to the same URL with the missing slash appended.
The converters are defined on the `Map`.
`endpoint`
The endpoint for this rule. This can be anything. A reference to a
function, a string, a number etc. The preferred way is using a string
because the endpoint is used for URL generation.
`defaults`
An optional dict with defaults for other rules with the same endpoint.
This is a bit tricky but useful if you want to have unique URLs::
url_map = Map([
Rule('/all/', defaults={'page': 1}, endpoint='all_entries'),
Rule('/all/page/<int:page>', endpoint='all_entries')
])
If a user now visits ``http://example.com/all/page/1`` he will be
redirected to ``http://example.com/all/``. If `redirect_defaults` is
disabled on the `Map` instance this will only affect the URL
generation.
`subdomain`
The subdomain rule string for this rule. If not specified the rule
only matches for the `default_subdomain` of the map. If the map is
not bound to a subdomain this feature is disabled.
Can be useful if you want to have user profiles on different subdomains
and all subdomains are forwarded to your application::
url_map = Map([
Rule('/', subdomain='<username>', endpoint='user/homepage'),
Rule('/stats', subdomain='<username>', endpoint='user/stats')
])
`methods`
A sequence of http methods this rule applies to. If not specified, all
methods are allowed. For example this can be useful if you want different
endpoints for `POST` and `GET`. If methods are defined and the path
matches but the method matched against is not in this list or in the
list of another rule for that path the error raised is of the type
`MethodNotAllowed` rather than `NotFound`. If `GET` is present in the
list of methods and `HEAD` is not, `HEAD` is added automatically.
.. versionchanged:: 0.6.1
`HEAD` is now automatically added to the methods if `GET` is
present. The reason for this is that existing code often did not
work properly in servers not rewriting `HEAD` to `GET`
automatically and it was not documented how `HEAD` should be
treated. This was considered a bug in Werkzeug because of that.
`strict_slashes`
Override the `Map` setting for `strict_slashes` only for this rule. If
not specified the `Map` setting is used.
`build_only`
Set this to True and the rule will never match but will create a URL
that can be build. This is useful if you have resources on a subdomain
or folder that are not handled by the WSGI application (like static data)
`redirect_to`
If given this must be either a string or callable. In case of a
callable it's called with the url adapter that triggered the match and
the values of the URL as keyword arguments and has to return the target
for the redirect, otherwise it has to be a string with placeholders in
rule syntax::
def foo_with_slug(adapter, id):
# ask the database for the slug for the old id. this of
# course has nothing to do with werkzeug.
return 'foo/' + Foo.get_slug_for_id(id)
url_map = Map([
Rule('/foo/<slug>', endpoint='foo'),
Rule('/some/old/url/<slug>', redirect_to='foo/<slug>'),
Rule('/other/old/url/<int:id>', redirect_to=foo_with_slug)
])
When the rule is matched the routing system will raise a
`RequestRedirect` exception with the target for the redirect.
Keep in mind that the URL will be joined against the URL root of the
script so don't use a leading slash on the target URL unless you
really mean root of that domain.
`alias`
If enabled this rule serves as an alias for another rule with the same
endpoint and arguments.
`host`
If provided and the URL map has host matching enabled this can be
used to provide a match rule for the whole host. This also means
that the subdomain feature is disabled.
.. versionadded:: 0.7
The `alias` and `host` parameters were added.
"""
def __init__(
self,
string,
defaults=None,
subdomain=None,
methods=None,
build_only=False,
endpoint=None,
strict_slashes=None,
redirect_to=None,
alias=False,
host=None,
):
if not string.startswith("/"):
raise ValueError("urls must start with a leading slash")
self.rule = string
self.is_leaf = not string.endswith("/")
self.map = None
self.strict_slashes = strict_slashes
self.subdomain = subdomain
self.host = host
self.defaults = defaults
self.build_only = build_only
self.alias = alias
if methods is None:
self.methods = None
else:
if isinstance(methods, str):
raise TypeError("param `methods` should be `Iterable[str]`, not `str`")
self.methods = set([x.upper() for x in methods])
if "HEAD" not in self.methods and "GET" in self.methods:
self.methods.add("HEAD")
self.endpoint = endpoint
self.redirect_to = redirect_to
if defaults:
self.arguments = set(map(str, defaults))
else:
self.arguments = set()
self._trace = self._converters = self._regex = self._argument_weights = None
def empty(self):
"""
Return an unbound copy of this rule.
This can be useful if want to reuse an already bound URL for another
map. See ``get_empty_kwargs`` to override what keyword arguments are
provided to the new copy.
"""
return type(self)(self.rule, **self.get_empty_kwargs())
def get_empty_kwargs(self):
"""
Provides kwargs for instantiating empty copy with empty()
Use this method to provide custom keyword arguments to the subclass of
``Rule`` when calling ``some_rule.empty()``. Helpful when the subclass
has custom keyword arguments that are needed at instantiation.
Must return a ``dict`` that will be provided as kwargs to the new
instance of ``Rule``, following the initial ``self.rule`` value which
is always provided as the first, required positional argument.
"""
defaults = None
if self.defaults:
defaults = dict(self.defaults)
return dict(
defaults=defaults,
subdomain=self.subdomain,
methods=self.methods,
build_only=self.build_only,
endpoint=self.endpoint,
strict_slashes=self.strict_slashes,
redirect_to=self.redirect_to,
alias=self.alias,
host=self.host,
)
def get_rules(self, map):
yield self
def refresh(self):
"""Rebinds and refreshes the URL. Call this if you modified the
rule in place.
:internal:
"""
self.bind(self.map, rebind=True)
def bind(self, map, rebind=False):
"""Bind the url to a map and create a regular expression based on
the information from the rule itself and the defaults from the map.
:internal:
"""
if self.map is not None and not rebind:
raise RuntimeError("url rule %r already bound to map %r" % (self, self.map))
self.map = map
if self.strict_slashes is None:
self.strict_slashes = map.strict_slashes
if self.subdomain is None:
self.subdomain = map.default_subdomain
self.compile()
def get_converter(self, variable_name, converter_name, args, kwargs):
"""Looks up the converter for the given parameter.
.. versionadded:: 0.9
"""
if converter_name not in self.map.converters:
raise LookupError("the converter %r does not exist" % converter_name)
return self.map.converters[converter_name](self.map, *args, **kwargs)
def _encode_query_vars(self, query_vars):
return url_encode(
query_vars,
charset=self.map.charset,
sort=self.map.sort_parameters,
key=self.map.sort_key,
)
def compile(self):
"""Compiles the regular expression and stores it."""
assert self.map is not None, "rule not bound"
if self.map.host_matching:
domain_rule = self.host or ""
else:
domain_rule = self.subdomain or ""
self._trace = []
self._converters = {}
self._static_weights = []
self._argument_weights = []
regex_parts = []
def _build_regex(rule):
index = 0
for converter, arguments, variable in parse_rule(rule):
if converter is None:
regex_parts.append(re.escape(variable))
self._trace.append((False, variable))
for part in variable.split("/"):
if part:
self._static_weights.append((index, -len(part)))
else:
if arguments:
c_args, c_kwargs = parse_converter_args(arguments)
else:
c_args = ()
c_kwargs = {}
convobj = self.get_converter(variable, converter, c_args, c_kwargs)
regex_parts.append("(?P<%s>%s)" % (variable, convobj.regex))
self._converters[variable] = convobj
self._trace.append((True, variable))
self._argument_weights.append(convobj.weight)
self.arguments.add(str(variable))
index = index + 1
_build_regex(domain_rule)
regex_parts.append("\\|")
self._trace.append((False, "|"))
_build_regex(self.rule if self.is_leaf else self.rule.rstrip("/"))
if not self.is_leaf:
self._trace.append((False, "/"))
self._build = self._compile_builder(False).__get__(self, None)
self._build_unknown = self._compile_builder(True).__get__(self, None)
if self.build_only:
return
regex = r"^%s%s$" % (
u"".join(regex_parts),
(not self.is_leaf or not self.strict_slashes)
and "(?<!/)(?P<__suffix__>/?)"
or "",
)
self._regex = re.compile(regex, re.UNICODE)
def match(self, path, method=None):
"""Check if the rule matches a given path. Path is a string in the
form ``"subdomain|/path"`` and is assembled by the map. If
the map is doing host matching the subdomain part will be the host
instead.
If the rule matches a dict with the converted values is returned,
otherwise the return value is `None`.
:internal:
"""
if not self.build_only:
m = self._regex.search(path)
if m is not None:
groups = m.groupdict()
# we have a folder like part of the url without a trailing
# slash and strict slashes enabled. raise an exception that
# tells the map to redirect to the same url but with a
# trailing slash
if (
self.strict_slashes
and not self.is_leaf
and not groups.pop("__suffix__")
and (
method is None or self.methods is None or method in self.methods
)
):
raise RequestSlash()
# if we are not in strict slashes mode we have to remove
# a __suffix__
elif not self.strict_slashes:
del groups["__suffix__"]
result = {}
for name, value in iteritems(groups):
try:
value = self._converters[name].to_python(value)
except ValidationError:
return
result[str(name)] = value
if self.defaults:
result.update(self.defaults)
if self.alias and self.map.redirect_defaults:
raise RequestAliasRedirect(result)
return result
@staticmethod
def _get_func_code(code, name):
globs, locs = {}, {}
exec(code, globs, locs)
return locs[name]
def _compile_builder(self, append_unknown=True):
defaults = self.defaults or {}
dom_ops = []
url_ops = []
opl = dom_ops
for is_dynamic, data in self._trace:
if data == "|" and opl is dom_ops:
opl = url_ops
continue
# this seems like a silly case to ever come up but:
# if a default is given for a value that appears in the rule,
# resolve it to a constant ahead of time
if is_dynamic and data in defaults:
data = self._converters[data].to_url(defaults[data])
opl.append((False, data))
elif not is_dynamic:
opl.append(
(False, url_quote(to_bytes(data, self.map.charset), safe="/:|+"))
)
else:
opl.append((True, data))
def _convert(elem):
ret = _prefix_names(_CALL_CONVERTER_CODE_FMT.format(elem=elem))
ret.args = [ast.Name(str(elem), ast.Load())] # str for py2
return ret
def _parts(ops):
parts = [
_convert(elem) if is_dynamic else ast.Str(s=elem)
for is_dynamic, elem in ops
]
parts = parts or [ast.Str("")]
# constant fold
ret = [parts[0]]
for p in parts[1:]:
if isinstance(p, ast.Str) and isinstance(ret[-1], ast.Str):
ret[-1] = ast.Str(ret[-1].s + p.s)
else:
ret.append(p)
return ret
dom_parts = _parts(dom_ops)
url_parts = _parts(url_ops)
if not append_unknown:
body = []
else:
body = [_IF_KWARGS_URL_ENCODE_AST]
url_parts.extend(_URL_ENCODE_AST_NAMES)
def _join(parts):
if len(parts) == 1: # shortcut
return parts[0]
elif hasattr(ast, "JoinedStr"): # py36+
return ast.JoinedStr(parts)
else:
call = _prefix_names('"".join()')
call.args = [ast.Tuple(parts, ast.Load())]
return call
body.append(
ast.Return(ast.Tuple([_join(dom_parts), _join(url_parts)], ast.Load()))
)
# str is necessary for python2
pargs = [
str(elem)
for is_dynamic, elem in dom_ops + url_ops
if is_dynamic and elem not in defaults
]
kargs = [str(k) for k in defaults]
func_ast = _prefix_names("def _(): pass")
func_ast.name = "<builder:{!r}>".format(self.rule)
if hasattr(ast, "arg"): # py3
func_ast.args.args.append(ast.arg(".self", None))
for arg in pargs + kargs:
func_ast.args.args.append(ast.arg(arg, None))
func_ast.args.kwarg = ast.arg(".kwargs", None)
else:
func_ast.args.args.append(ast.Name(".self", ast.Param()))
for arg in pargs + kargs:
func_ast.args.args.append(ast.Name(arg, ast.Param()))
func_ast.args.kwarg = ".kwargs"
for _ in kargs:
func_ast.args.defaults.append(ast.Str(""))
func_ast.body = body
# use `ast.parse` instead of `ast.Module` for better portability
# python3.8 changes the signature of `ast.Module`
module = ast.parse("")
module.body = [func_ast]
# mark everything as on line 1, offset 0
# less error-prone than `ast.fix_missing_locations`
# bad line numbers cause an assert to fail in debug builds
for node in ast.walk(module):
if "lineno" in node._attributes:
node.lineno = 1
if "col_offset" in node._attributes:
node.col_offset = 0
code = compile(module, "<werkzeug routing>", "exec")
return self._get_func_code(code, func_ast.name)
def build(self, values, append_unknown=True):
"""Assembles the relative url for that rule and the subdomain.
If building doesn't work for some reasons `None` is returned.
:internal:
"""
try:
if append_unknown:
return self._build_unknown(**values)
else:
return self._build(**values)
except ValidationError:
return None
def provides_defaults_for(self, rule):
"""Check if this rule has defaults for a given rule.
:internal:
"""
return (
not self.build_only
and self.defaults
and self.endpoint == rule.endpoint
and self != rule
and self.arguments == rule.arguments
)
def suitable_for(self, values, method=None):
"""Check if the dict of values has enough data for url generation.
:internal:
"""
# if a method was given explicitly and that method is not supported
# by this rule, this rule is not suitable.
if (
method is not None
and self.methods is not None
and method not in self.methods
):
return False
defaults = self.defaults or ()
# all arguments required must be either in the defaults dict or
# the value dictionary otherwise it's not suitable
for key in self.arguments:
if key not in defaults and key not in values:
return False
# in case defaults are given we ensure that either the value was
# skipped or the value is the same as the default value.
if defaults:
for key, value in iteritems(defaults):
if key in values and value != values[key]:
return False
return True
def match_compare_key(self):
"""The match compare key for sorting.
Current implementation:
1. rules without any arguments come first for performance
reasons only as we expect them to match faster and some
common ones usually don't have any arguments (index pages etc.)
2. rules with more static parts come first so the second argument
is the negated number of static weights.
3. we order by static weights, which is a combination of index
and length
4. The more complex rules come first so the next argument is the
negated number of argument weights.
5. lastly we order by the actual argument weights.
:internal:
"""
return (
bool(self.arguments),
-len(self._static_weights),
self._static_weights,
-len(self._argument_weights),
self._argument_weights,
)
def build_compare_key(self):
"""The build compare key for sorting.
:internal:
"""
return 1 if self.alias else 0, -len(self.arguments), -len(self.defaults or ())
def __eq__(self, other):
return self.__class__ is other.__class__ and self._trace == other._trace
__hash__ = None
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self):
return self.rule
@native_string_result
def __repr__(self):
if self.map is None:
return u"<%s (unbound)>" % self.__class__.__name__
tmp = []
for is_dynamic, data in self._trace:
if is_dynamic:
tmp.append(u"<%s>" % data)
else:
tmp.append(data)
return u"<%s %s%s -> %s>" % (
self.__class__.__name__,
repr((u"".join(tmp)).lstrip(u"|")).lstrip(u"u"),
self.methods is not None and u" (%s)" % u", ".join(self.methods) or u"",
self.endpoint,
)
class BaseConverter(object):
"""Base class for all converters."""
regex = "[^/]+"
weight = 100
def __init__(self, map):
self.map = map
def to_python(self, value):
return value
def to_url(self, value):
if isinstance(value, (bytes, bytearray)):
return _fast_url_quote(value)
return _fast_url_quote(text_type(value).encode(self.map.charset))
class UnicodeConverter(BaseConverter):
"""This converter is the default converter and accepts any string but
only one path segment. Thus the string cannot include a slash.
This is the default validator.
Example::
Rule('/pages/<page>'),
Rule('/<string(length=2):lang_code>')
:param map: the :class:`Map`.
:param minlength: the minimum length of the string. Must be greater
than or equal to 1.
:param maxlength: the maximum length of the string.
:param length: the exact length of the string.
"""
def __init__(self, map, minlength=1, maxlength=None, length=None):
BaseConverter.__init__(self, map)
if length is not None:
length = "{%d}" % int(length)
else:
if maxlength is None:
maxlength = ""
else:
maxlength = int(maxlength)
length = "{%s,%s}" % (int(minlength), maxlength)
self.regex = "[^/]" + length
class AnyConverter(BaseConverter):
"""Matches one of the items provided. Items can either be Python
identifiers or strings::
Rule('/<any(about, help, imprint, class, "foo,bar"):page_name>')
:param map: the :class:`Map`.
:param items: this function accepts the possible items as positional
arguments.
"""
def __init__(self, map, *items):
BaseConverter.__init__(self, map)
self.regex = "(?:%s)" % "|".join([re.escape(x) for x in items])
class PathConverter(BaseConverter):
"""Like the default :class:`UnicodeConverter`, but it also matches
slashes. This is useful for wikis and similar applications::
Rule('/<path:wikipage>')
Rule('/<path:wikipage>/edit')
:param map: the :class:`Map`.
"""
regex = "[^/].*?"
weight = 200
class NumberConverter(BaseConverter):
"""Baseclass for `IntegerConverter` and `FloatConverter`.
:internal:
"""
weight = 50
def __init__(self, map, fixed_digits=0, min=None, max=None, signed=False):
if signed:
self.regex = self.signed_regex
BaseConverter.__init__(self, map)
self.fixed_digits = fixed_digits
self.min = min
self.max = max
self.signed = signed
def to_python(self, value):
if self.fixed_digits and len(value) != self.fixed_digits:
raise ValidationError()
value = self.num_convert(value)
if (self.min is not None and value < self.min) or (
self.max is not None and value > self.max
):
raise ValidationError()
return value
def to_url(self, value):
value = self.num_convert(value)
if self.fixed_digits:
value = ("%%0%sd" % self.fixed_digits) % value
return str(value)
@property
def signed_regex(self):
return r"-?" + self.regex
class IntegerConverter(NumberConverter):
"""This converter only accepts integer values::
Rule("/page/<int:page>")
By default it only accepts unsigned, positive values. The ``signed``
parameter will enable signed, negative values. ::
Rule("/page/<int(signed=True):page>")
:param map: The :class:`Map`.
:param fixed_digits: The number of fixed digits in the URL. If you
set this to ``4`` for example, the rule will only match if the
URL looks like ``/0001/``. The default is variable length.
:param min: The minimal value.
:param max: The maximal value.
:param signed: Allow signed (negative) values.
.. versionadded:: 0.15
The ``signed`` parameter.
"""
regex = r"\d+"
num_convert = int
class FloatConverter(NumberConverter):
"""This converter only accepts floating point values::
Rule("/probability/<float:probability>")
By default it only accepts unsigned, positive values. The ``signed``
parameter will enable signed, negative values. ::
Rule("/offset/<float(signed=True):offset>")
:param map: The :class:`Map`.
:param min: The minimal value.
:param max: The maximal value.
:param signed: Allow signed (negative) values.
.. versionadded:: 0.15
The ``signed`` parameter.
"""
regex = r"\d+\.\d+"
num_convert = float
def __init__(self, map, min=None, max=None, signed=False):
NumberConverter.__init__(self, map, min=min, max=max, signed=signed)
class UUIDConverter(BaseConverter):
"""This converter only accepts UUID strings::
Rule('/object/<uuid:identifier>')
.. versionadded:: 0.10
:param map: the :class:`Map`.
"""
regex = (
r"[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-"
r"[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}"
)
def to_python(self, value):
return uuid.UUID(value)
def to_url(self, value):
return str(value)
#: the default converter mapping for the map.
DEFAULT_CONVERTERS = {
"default": UnicodeConverter,
"string": UnicodeConverter,
"any": AnyConverter,
"path": PathConverter,
"int": IntegerConverter,
"float": FloatConverter,
"uuid": UUIDConverter,
}
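# A minimal sketch (the ``HexConverter`` below is hypothetical, not part of
# this module) of how an additional converter could be registered per map
# via the ``converters`` argument documented on :class:`Map`:
#
#     class HexConverter(BaseConverter):
#         regex = "[0-9a-f]+"
#
#     url_map = Map(
#         [Rule("/item/<hex:item_id>", endpoint="item")],
#         converters={"hex": HexConverter},
#     )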
class Map(object):
"""The map class stores all the URL rules and some configuration
parameters. Some of the configuration values are only stored on the
`Map` instance since those affect all rules, others are just defaults
and can be overridden for each rule. Note that you have to specify all
arguments besides the `rules` as keyword arguments!
:param rules: sequence of url rules for this map.
:param default_subdomain: The default subdomain for rules without a
subdomain defined.
:param charset: charset of the url. defaults to ``"utf-8"``
:param strict_slashes: Take care of trailing slashes.
:param redirect_defaults: This will redirect to the default rule if it
wasn't visited that way. This helps creating
unique URLs.
:param converters: A dict of converters that adds additional converters
to the list of converters. If you redefine one
converter this will override the original one.
:param sort_parameters: If set to `True` the url parameters are sorted.
See `url_encode` for more details.
:param sort_key: The sort key function for `url_encode`.
:param encoding_errors: the error method to use for decoding
:param host_matching: if set to `True` it enables the host matching
feature and disables the subdomain one. If
enabled the `host` parameter to rules is used
instead of the `subdomain` one.
.. versionadded:: 0.5
`sort_parameters` and `sort_key` were added.
.. versionadded:: 0.7
`encoding_errors` and `host_matching` were added.
"""
#: A dict of default converters to be used.
default_converters = ImmutableDict(DEFAULT_CONVERTERS)
def __init__(
self,
rules=None,
default_subdomain="",
charset="utf-8",
strict_slashes=True,
redirect_defaults=True,
converters=None,
sort_parameters=False,
sort_key=None,
encoding_errors="replace",
host_matching=False,
):
self._rules = []
self._rules_by_endpoint = {}
self._remap = True
self._remap_lock = Lock()
self.default_subdomain = default_subdomain
self.charset = charset
self.encoding_errors = encoding_errors
self.strict_slashes = strict_slashes
self.redirect_defaults = redirect_defaults
self.host_matching = host_matching
self.converters = self.default_converters.copy()
if converters:
self.converters.update(converters)
self.sort_parameters = sort_parameters
self.sort_key = sort_key
for rulefactory in rules or ():
self.add(rulefactory)
def is_endpoint_expecting(self, endpoint, *arguments):
"""Iterate over all rules and check if the endpoint expects
the arguments provided. This is for example useful if you have
some URLs that expect a language code and others that do not and
you want to wrap the builder a bit so that the current language
code is automatically added if not provided but endpoints expect
it.
:param endpoint: the endpoint to check.
:param arguments: this function accepts one or more arguments
as positional arguments. Each one of them is
checked.
"""
self.update()
arguments = set(arguments)
for rule in self._rules_by_endpoint[endpoint]:
if arguments.issubset(rule.arguments):
return True
return False
def iter_rules(self, endpoint=None):
"""Iterate over all rules or the rules of an endpoint.
:param endpoint: if provided only the rules for that endpoint
are returned.
:return: an iterator
"""
self.update()
if endpoint is not None:
return iter(self._rules_by_endpoint[endpoint])
return iter(self._rules)
def add(self, rulefactory):
"""Add a new rule or factory to the map and bind it. Requires that the
rule is not bound to another map.
:param rulefactory: a :class:`Rule` or :class:`RuleFactory`
"""
for rule in rulefactory.get_rules(self):
rule.bind(self)
self._rules.append(rule)
self._rules_by_endpoint.setdefault(rule.endpoint, []).append(rule)
self._remap = True
def bind(
self,
server_name,
script_name=None,
subdomain=None,
url_scheme="http",
default_method="GET",
path_info=None,
query_args=None,
):
"""Return a new :class:`MapAdapter` with the details specified to the
call. Note that `script_name` will default to ``'/'`` if not further
specified or `None`. The `server_name` at least is a requirement
because the HTTP RFC requires absolute URLs for redirects and so all
redirect exceptions raised by Werkzeug will contain the full canonical
URL.
If no path_info is passed to :meth:`match` it will use the default path
info passed to bind. While this doesn't really make sense for
manual bind calls, it's useful if you bind a map to a WSGI
environment which already contains the path info.
`subdomain` will default to the `default_subdomain` for this map if
not defined. If there is no `default_subdomain` you cannot use the
subdomain feature.
.. versionadded:: 0.7
`query_args` added
.. versionadded:: 0.8
`query_args` can now also be a string.
.. versionchanged:: 0.15
``path_info`` defaults to ``'/'`` if ``None``.
"""
server_name = server_name.lower()
if self.host_matching:
if subdomain is not None:
raise RuntimeError("host matching enabled and a subdomain was provided")
elif subdomain is None:
subdomain = self.default_subdomain
if script_name is None:
script_name = "/"
if path_info is None:
path_info = "/"
try:
server_name = _encode_idna(server_name)
except UnicodeError:
raise BadHost()
return MapAdapter(
self,
server_name,
script_name,
subdomain,
url_scheme,
path_info,
default_method,
query_args,
)
def bind_to_environ(self, environ, server_name=None, subdomain=None):
"""Like :meth:`bind` but you can pass it an WSGI environment and it
will fetch the information from that dictionary. Note that because of
limitations in the protocol there is no way to get the current
subdomain and real `server_name` from the environment. If you don't
provide it, Werkzeug will use `SERVER_NAME` and `SERVER_PORT` (or
`HTTP_HOST` if provided) as the `server_name`, with the subdomain
feature disabled.
If `subdomain` is `None` but an environment and a server name are
provided, it will calculate the current subdomain automatically.
Example: `server_name` is ``'example.com'`` and the `SERVER_NAME`
in the wsgi `environ` is ``'staging.dev.example.com'`` the calculated
subdomain will be ``'staging.dev'``.
If the object passed as environ has an environ attribute, the value of
this attribute is used instead. This allows you to pass request
objects. Additionally `PATH_INFO` is added as a default to the
:class:`MapAdapter` so that you don't have to pass the path info to
the match method.
.. versionchanged:: 0.5
previously this method accepted a bogus `calculate_subdomain`
parameter that did not have any effect. It was removed because
of that.
.. versionchanged:: 0.8
This will no longer raise a ValueError when an unexpected server
name was passed.
:param environ: a WSGI environment.
:param server_name: an optional server name hint (see above).
:param subdomain: optionally the current subdomain (see above).
"""
environ = _get_environ(environ)
wsgi_server_name = get_host(environ).lower()
if server_name is None:
server_name = wsgi_server_name
else:
server_name = server_name.lower()
if subdomain is None and not self.host_matching:
cur_server_name = wsgi_server_name.split(".")
real_server_name = server_name.split(".")
offset = -len(real_server_name)
if cur_server_name[offset:] != real_server_name:
# This can happen even with valid configs if the server was
# accessed directly by IP address under some situations.
# Instead of raising an exception like in Werkzeug 0.7 or
# earlier we go by an invalid subdomain which will result
# in a 404 error on matching.
subdomain = "<invalid>"
else:
subdomain = ".".join(filter(None, cur_server_name[:offset]))
def _get_wsgi_string(name):
val = environ.get(name)
if val is not None:
return wsgi_decoding_dance(val, self.charset)
script_name = _get_wsgi_string("SCRIPT_NAME")
path_info = _get_wsgi_string("PATH_INFO")
query_args = _get_wsgi_string("QUERY_STRING")
return Map.bind(
self,
server_name,
script_name,
subdomain,
environ["wsgi.url_scheme"],
environ["REQUEST_METHOD"],
path_info,
query_args=query_args,
)
def update(self):
"""Called before matching and building to keep the compiled rules
in the correct order after things changed.
"""
if not self._remap:
return
with self._remap_lock:
if not self._remap:
return
self._rules.sort(key=lambda x: x.match_compare_key())
for rules in itervalues(self._rules_by_endpoint):
rules.sort(key=lambda x: x.build_compare_key())
self._remap = False
def __repr__(self):
rules = self.iter_rules()
return "%s(%s)" % (self.__class__.__name__, pformat(list(rules)))
class MapAdapter(object):
"""Returned by :meth:`Map.bind` or :meth:`Map.bind_to_environ` and does
the URL matching and building based on runtime information.
"""
def __init__(
self,
map,
server_name,
script_name,
subdomain,
url_scheme,
path_info,
default_method,
query_args=None,
):
self.map = map
self.server_name = to_unicode(server_name)
script_name = to_unicode(script_name)
if not script_name.endswith(u"/"):
script_name += u"/"
self.script_name = script_name
self.subdomain = to_unicode(subdomain)
self.url_scheme = to_unicode(url_scheme)
self.path_info = to_unicode(path_info)
self.default_method = to_unicode(default_method)
self.query_args = query_args
def dispatch(
self, view_func, path_info=None, method=None, catch_http_exceptions=False
):
"""Does the complete dispatching process. `view_func` is called with
the endpoint and a dict with the values for the view. It should
look up the view function, call it, and return a response object
or WSGI application. http exceptions are not caught by default
so that applications can display nicer error messages by just
catching them by hand. If you want to stick with the default
error messages you can pass it ``catch_http_exceptions=True`` and
it will catch the http exceptions.
Here is a small example of dispatch usage::
from werkzeug.wrappers import Request, Response
from werkzeug.wsgi import responder
from werkzeug.routing import Map, Rule
def on_index(request):
return Response('Hello from the index')
url_map = Map([Rule('/', endpoint='index')])
views = {'index': on_index}
@responder
def application(environ, start_response):
request = Request(environ)
urls = url_map.bind_to_environ(environ)
return urls.dispatch(lambda e, v: views[e](request, **v),
catch_http_exceptions=True)
Keep in mind that this method might return exception objects, too, so
use :class:`Response.force_type` to get a response object.
:param view_func: a function that is called with the endpoint as
first argument and the value dict as second. Has
to dispatch to the actual view function with this
information. (see above)
:param path_info: the path info to use for matching. Overrides the
path info specified on binding.
:param method: the HTTP method used for matching. Overrides the
method specified on binding.
:param catch_http_exceptions: set to `True` to catch any of the
werkzeug :class:`HTTPException`\\s.
"""
try:
try:
endpoint, args = self.match(path_info, method)
except RequestRedirect as e:
return e
return view_func(endpoint, args)
except HTTPException as e:
if catch_http_exceptions:
return e
raise
def match(self, path_info=None, method=None, return_rule=False, query_args=None):
"""The usage is simple: you just pass the match method the current
path info as well as the method (which defaults to `GET`). The
following things can then happen:
- you receive a `NotFound` exception that indicates that no URL
matches. A `NotFound` exception is also a WSGI application you
can call to get a default 'Not Found' page (it happens to be the
same object as `werkzeug.exceptions.NotFound`)
- you receive a `MethodNotAllowed` exception that indicates that there
is a match for this URL but not for the current request method.
This is useful for RESTful applications.
- you receive a `RequestRedirect` exception with a `new_url`
attribute. This exception is used to notify you about a redirect
that Werkzeug requests from your WSGI application. This is for example
the case if you request ``/foo`` although the correct URL is ``/foo/``.
You can use the `RequestRedirect` instance as a response-like object
similar to all other subclasses of `HTTPException`.
- you get a tuple in the form ``(endpoint, arguments)`` if there is
a match (unless `return_rule` is True, in which case you get a tuple
in the form ``(rule, arguments)``)
If the path info is not passed to the match method the default path
info of the map is used (defaults to the root URL if not defined
explicitly).
All of the exceptions raised are subclasses of `HTTPException` so they
can be used as WSGI responses. They will all render generic error or
redirect pages.
Here is a small example for matching:
>>> m = Map([
... Rule('/', endpoint='index'),
... Rule('/downloads/', endpoint='downloads/index'),
... Rule('/downloads/<int:id>', endpoint='downloads/show')
... ])
>>> urls = m.bind("example.com", "/")
>>> urls.match("/", "GET")
('index', {})
>>> urls.match("/downloads/42")
('downloads/show', {'id': 42})
And here is what happens on redirect and missing URLs:
>>> urls.match("/downloads")
Traceback (most recent call last):
...
RequestRedirect: http://example.com/downloads/
>>> urls.match("/missing")
Traceback (most recent call last):
...
NotFound: 404 Not Found
:param path_info: the path info to use for matching. Overrides the
path info specified on binding.
:param method: the HTTP method used for matching. Overrides the
method specified on binding.
:param return_rule: return the rule that matched instead of just the
endpoint (defaults to `False`).
:param query_args: optional query arguments that are used for
automatic redirects as string or dictionary. It's
currently not possible to use the query arguments
for URL matching.
.. versionadded:: 0.6
`return_rule` was added.
.. versionadded:: 0.7
`query_args` was added.
.. versionchanged:: 0.8
`query_args` can now also be a string.
"""
self.map.update()
if path_info is None:
path_info = self.path_info
else:
path_info = to_unicode(path_info, self.map.charset)
if query_args is None:
query_args = self.query_args
method = (method or self.default_method).upper()
path = u"%s|%s" % (
self.map.host_matching and self.server_name or self.subdomain,
path_info and "/%s" % path_info.lstrip("/"),
)
have_match_for = set()
for rule in self.map._rules:
try:
rv = rule.match(path, method)
except RequestSlash:
raise RequestRedirect(
self.make_redirect_url(
url_quote(path_info, self.map.charset, safe="/:|+") + "/",
query_args,
)
)
except RequestAliasRedirect as e:
raise RequestRedirect(
self.make_alias_redirect_url(
path, rule.endpoint, e.matched_values, method, query_args
)
)
if rv is None:
continue
if rule.methods is not None and method not in rule.methods:
have_match_for.update(rule.methods)
continue
if self.map.redirect_defaults:
redirect_url = self.get_default_redirect(rule, method, rv, query_args)
if redirect_url is not None:
raise RequestRedirect(redirect_url)
if rule.redirect_to is not None:
if isinstance(rule.redirect_to, string_types):
def _handle_match(match):
value = rv[match.group(1)]
return rule._converters[match.group(1)].to_url(value)
redirect_url = _simple_rule_re.sub(_handle_match, rule.redirect_to)
else:
redirect_url = rule.redirect_to(self, **rv)
raise RequestRedirect(
str(
url_join(
"%s://%s%s%s"
% (
self.url_scheme or "http",
self.subdomain + "." if self.subdomain else "",
self.server_name,
self.script_name,
),
redirect_url,
)
)
)
if return_rule:
return rule, rv
else:
return rule.endpoint, rv
if have_match_for:
raise MethodNotAllowed(valid_methods=list(have_match_for))
raise NotFound()
def test(self, path_info=None, method=None):
"""Test if a rule would match. Works like `match` but returns `True`
if the URL matches, or `False` if it does not.
:param path_info: the path info to use for matching. Overrides the
path info specified on binding.
:param method: the HTTP method used for matching. Overrides the
method specified on binding.
"""
try:
self.match(path_info, method)
except RequestRedirect:
pass
except HTTPException:
return False
return True
def allowed_methods(self, path_info=None):
"""Returns the valid methods that match for a given path.
.. versionadded:: 0.7
"""
try:
self.match(path_info, method="--")
except MethodNotAllowed as e:
return e.valid_methods
except HTTPException:
pass
return []
def get_host(self, domain_part):
"""Figures out the full host name for the given domain part. The
domain part is a subdomain when host matching is disabled, or
a full host name when host matching is enabled.
"""
if self.map.host_matching:
if domain_part is None:
return self.server_name
return to_unicode(domain_part, "ascii")
subdomain = domain_part
if subdomain is None:
subdomain = self.subdomain
else:
subdomain = to_unicode(subdomain, "ascii")
return (subdomain + u"." if subdomain else u"") + self.server_name
def get_default_redirect(self, rule, method, values, query_args):
"""A helper that returns the URL to redirect to if it finds one.
This is used for default redirecting only.
:internal:
"""
assert self.map.redirect_defaults
for r in self.map._rules_by_endpoint[rule.endpoint]:
# every rule that comes after this one, including ourself
# has a lower priority for the defaults. We order the ones
# with the highest priority up for building.
if r is rule:
break
if r.provides_defaults_for(rule) and r.suitable_for(values, method):
values.update(r.defaults)
domain_part, path = r.build(values)
return self.make_redirect_url(path, query_args, domain_part=domain_part)
def encode_query_args(self, query_args):
if not isinstance(query_args, string_types):
query_args = url_encode(query_args, self.map.charset)
return query_args
def make_redirect_url(self, path_info, query_args=None, domain_part=None):
"""Creates a redirect URL.
:internal:
"""
suffix = ""
if query_args:
suffix = "?" + self.encode_query_args(query_args)
return str(
"%s://%s/%s%s"
% (
self.url_scheme or "http",
self.get_host(domain_part),
posixpath.join(
self.script_name[:-1].lstrip("/"), path_info.lstrip("/")
),
suffix,
)
)
def make_alias_redirect_url(self, path, endpoint, values, method, query_args):
"""Internally called to make an alias redirect URL."""
url = self.build(
endpoint, values, method, append_unknown=False, force_external=True
)
if query_args:
url += "?" + self.encode_query_args(query_args)
assert url != path, "detected invalid alias setting. No canonical URL found"
return url
def _partial_build(self, endpoint, values, method, append_unknown):
"""Helper for :meth:`build`. Returns subdomain and path for the
rule that accepts this endpoint, values and method.
:internal:
"""
# in case the method is none, try with the default method first
if method is None:
rv = self._partial_build(
endpoint, values, self.default_method, append_unknown
)
if rv is not None:
return rv
# default method did not match or a specific method is passed,
# check all and go with first result.
for rule in self.map._rules_by_endpoint.get(endpoint, ()):
if rule.suitable_for(values, method):
rv = rule.build(values, append_unknown)
if rv is not None:
return rv
def build(
self,
endpoint,
values=None,
method=None,
force_external=False,
append_unknown=True,
):
"""Building URLs works pretty much the other way round. Instead of
`match` you call `build` and pass it the endpoint and a dict of
arguments for the placeholders.
The `build` function also accepts an argument called `force_external`
which, if set to `True`, will force external URLs. By default
external URLs (including the server name) will only be used if the
target URL is on a different subdomain.
>>> m = Map([
... Rule('/', endpoint='index'),
... Rule('/downloads/', endpoint='downloads/index'),
... Rule('/downloads/<int:id>', endpoint='downloads/show')
... ])
>>> urls = m.bind("example.com", "/")
>>> urls.build("index", {})
'/'
>>> urls.build("downloads/show", {'id': 42})
'/downloads/42'
>>> urls.build("downloads/show", {'id': 42}, force_external=True)
'http://example.com/downloads/42'
Because URLs cannot contain non ASCII data you will always get
bytestrings back. Non ASCII characters are urlencoded with the
charset defined on the map instance.
Additional values are converted to unicode and appended to the URL as
URL querystring parameters:
>>> urls.build("index", {'q': 'My Searchstring'})
'/?q=My+Searchstring'
When processing those additional values, lists are furthermore
interpreted as multiple values (as per
:py:class:`werkzeug.datastructures.MultiDict`):
>>> urls.build("index", {'q': ['a', 'b', 'c']})
'/?q=a&q=b&q=c'
Passing a ``MultiDict`` will also add multiple values:
>>> urls.build("index", MultiDict((('p', 'z'), ('q', 'a'), ('q', 'b'))))
'/?p=z&q=a&q=b'
If a rule does not exist when building, a `BuildError` exception is
raised.
The build method accepts an argument called `method` which allows you
to specify the method you want to have a URL built for if you have
different methods for the same endpoint specified.
.. versionadded:: 0.6
the `append_unknown` parameter was added.
:param endpoint: the endpoint of the URL to build.
:param values: the values for the URL to build. Unhandled values are
appended to the URL as query parameters.
:param method: the HTTP method for the rule if there are different
URLs for different methods on the same endpoint.
:param force_external: enforce full canonical external URLs. If the URL
scheme is not provided, this will generate
a protocol-relative URL.
:param append_unknown: unknown parameters are appended to the generated
URL as query string argument. Disable this
if you want the builder to ignore those.
"""
self.map.update()
if values:
if isinstance(values, MultiDict):
temp_values = {}
# iteritems(dict, values) is like `values.lists()`
# without the call or `list()` coercion overhead.
for key, value in iteritems(dict, values):
if not value:
continue
if len(value) == 1: # flatten single item lists
value = value[0]
if value is None: # drop None
continue
temp_values[key] = value
values = temp_values
else:
# drop None
values = dict(i for i in iteritems(values) if i[1] is not None)
else:
values = {}
rv = self._partial_build(endpoint, values, method, append_unknown)
if rv is None:
raise BuildError(endpoint, values, method, self)
domain_part, path = rv
host = self.get_host(domain_part)
# shortcut this.
if not force_external and (
(self.map.host_matching and host == self.server_name)
or (not self.map.host_matching and domain_part == self.subdomain)
):
return "%s/%s" % (self.script_name.rstrip("/"), path.lstrip("/"))
return str(
"%s//%s%s/%s"
% (
self.url_scheme + ":" if self.url_scheme else "",
host,
self.script_name[:-1],
path.lstrip("/"),
)
)
|
the-stack_106_24114 | def hailstone(n):
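# Prints the hailstone (Collatz) sequence starting from ``n`` and stops
# once the sequence reaches 1.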
while True:
print(n)
if n == 1:
break
n = 3*n + 1 if n & 1 else n // 2
if __name__ == "__main__":
hailstone(int(input("Enter the starting number:\n"))) # try 7 or 27
|
the-stack_106_24117 | from typing import Dict, List, Optional
from attr import dataclass
from feast.feature import Feature
from feast.protos.feast.core.FeatureViewProjection_pb2 import (
FeatureViewProjection as FeatureViewProjectionProto,
)
@dataclass
class FeatureViewProjection:
"""
A feature view projection represents a selection of one or more features from a
single feature view.
Attributes:
name: The unique name of the feature view from which this projection is created.
name_alias: An optional alias for the name.
features: The list of features represented by the feature view projection.
join_key_map: A map to modify join key columns during retrieval of this feature
view projection.
"""
name: str
name_alias: Optional[str]
features: List[Feature]
join_key_map: Dict[str, str] = {}
def name_to_use(self):
return self.name_alias or self.name
def to_proto(self) -> FeatureViewProjectionProto:
feature_reference_proto = FeatureViewProjectionProto(
feature_view_name=self.name,
feature_view_name_alias=self.name_alias or "",
join_key_map=self.join_key_map,
)
for feature in self.features:
feature_reference_proto.feature_columns.append(feature.to_proto())
return feature_reference_proto
@staticmethod
def from_proto(proto: FeatureViewProjectionProto):
feature_view_projection = FeatureViewProjection(
name=proto.feature_view_name,
name_alias=proto.feature_view_name_alias,
features=[],
join_key_map=dict(proto.join_key_map),
)
for feature_column in proto.feature_columns:
feature_view_projection.features.append(Feature.from_proto(feature_column))
return feature_view_projection
@staticmethod
def from_definition(feature_grouping):
return FeatureViewProjection(
name=feature_grouping.name,
name_alias=None,
features=feature_grouping.features,
)
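# A rough usage sketch (the feature view and feature names are made up):
# given a feature view ``driver_stats`` with features ``conv_rate`` and
# ``acc_rate``, ``FeatureViewProjection.from_definition(driver_stats)``
# yields a projection whose ``name_to_use()`` is "driver_stats" until a
# ``name_alias`` is set, and ``to_proto()`` / ``from_proto()`` round-trip
# the same selection of feature columns.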
|
the-stack_106_24119 | from typing import Any, Dict, Optional, List
import subprocess
import json
import faldbt.lib as lib
from dbt.logger import GLOBAL_LOGGER as logger
import os
import shutil
from os.path import exists
import argparse
class DbtCliOutput:
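# Lightweight container for the result of a dbt CLI invocation: the full
# command string, its return code, the raw stdout text and the parsed
# JSON log lines.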
def __init__(
self,
command: str,
return_code: int,
raw_output: str,
logs: List[Dict[str, Any]],
):
self._command = command
self._return_code = return_code
self._raw_output = raw_output
self._logs = logs
@property
def docs_url(self) -> Optional[str]:
return None
@property
def command(self) -> str:
return self._command
@property
def return_code(self) -> int:
return self._return_code
@property
def raw_output(self) -> str:
return self._raw_output
@property
def logs(self) -> List[Dict[str, Any]]:
return self._logs
def raise_for_dbt_run_errors(output: DbtCliOutput):
if output.return_code != 0:
raise RuntimeError("Error running dbt run")
def get_dbt_command_list(args: argparse.Namespace, models_list: List[str]) -> List[str]:
command_list = ["dbt", "--log-format", "json"]
if args.debug:
command_list += ["--debug"]
command_list += ["run"]
if args.project_dir:
command_list += ["--project-dir", args.project_dir]
if args.profiles_dir:
command_list += ["--profiles-dir", args.profiles_dir]
if args.threads:
command_list += ["--threads", args.threads]
if args.defer:
command_list += ["--defer"]
if args.state:
command_list += ["--state", args.state]
if args.target:
command_list += ["--target", args.target]
if args.vars is not None and args.vars != "{}":
command_list += ["--vars", args.vars]
if len(models_list) > 0:
if lib.DBT_VCURRENT.compare(lib.DBT_V1) < 0:
command_list += ["--models"] + models_list
else:
command_list += ["--select"] + models_list
# Assure all command parts are str
return list(map(str, command_list))
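# Illustrative sketch (argument values are hypothetical): for a namespace
# with ``debug=False``, ``project_dir="."`` and the remaining options unset,
# and ``models_list=["model_a"]``, the list built above is roughly
# ["dbt", "--log-format", "json", "run", "--project-dir", ".", "--select", "model_a"]
# (with ``--models`` instead of ``--select`` on dbt versions before 1.0).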
def dbt_run(
args: argparse.Namespace, models_list: List[str], target_path: str, run_index: int
):
"Run the dbt run command in a subprocess"
command_list = get_dbt_command_list(args, models_list)
# Execute the dbt CLI command in a subprocess.
full_command = " ".join(command_list)
logger.info(f"Executing command: {full_command}")
return_code = 0
logs = []
output = []
process = subprocess.Popen(
command_list, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
for raw_line in process.stdout or []:
line = raw_line.decode("utf-8")
output.append(line)
try:
json_line = json.loads(line)
except json.JSONDecodeError:
logger.error(line.rstrip())
else:
logs.append(json_line)
logger.info(json_line.get("message", json_line.get("msg", line.rstrip())))
process.wait()
return_code = process.returncode
logger.debug(f"dbt exited with return code {return_code}")
raw_output = "\n".join(output)
_create_fal_result_file(target_path, run_index)
return DbtCliOutput(
command=full_command,
return_code=return_code,
raw_output=raw_output,
logs=logs,
)
def _create_fal_result_file(target_path: str, run_index: int):
fal_run_result = os.path.join(target_path, "run_results.json")
if exists(fal_run_result):
shutil.copy(
fal_run_result, os.path.join(target_path, f"fal_results_{run_index}.json")
)
|
the-stack_106_24121 | # Copyright 2019-2020 ETH Zurich and the DaCe authors. All rights reserved.
from __future__ import print_function
import dace
import numpy as np
# Dynamically creates DaCe programs with the same name
def program_generator(size, factor):
@dace.program(dace.float64[size],
dace.float64[size],
size=size,
factor=factor)
def program(input, output):
@dace.map(_[0:size])
def tasklet(i):
a << input[i]
b >> output[i]
b = a * factor
return program
def test():
print('Reloadable DaCe program test')
array_one = np.random.rand(10).astype(np.float64)
array_two = np.random.rand(20).astype(np.float64)
output_one = np.zeros(10, dtype=np.float64)
output_two = np.zeros(20, dtype=np.float64)
prog_one = program_generator(10, 2.0)
prog_two = program_generator(20, 4.0)
prog_one(array_one, output_one)
prog_two(array_two, output_two)
diff1 = np.linalg.norm(2.0 * array_one - output_one) / 10.0
diff2 = np.linalg.norm(4.0 * array_two - output_two) / 20.0
print("Differences:", diff1, diff2)
assert diff1 <= 1e-5 and diff2 <= 1e-5
if __name__ == "__main__":
test()
|
the-stack_106_24122 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2020 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Reliability calibration plugins."""
import operator
import warnings
import iris
import numpy as np
import scipy
from improver import BasePlugin, PostProcessingPlugin
from improver.calibration.utilities import (
check_forecast_consistency,
create_unified_frt_coord,
filter_non_matching_cubes,
)
from improver.metadata.probabilistic import (
find_threshold_coordinate,
probability_is_above_or_below,
)
from improver.metadata.utilities import generate_mandatory_attributes
from improver.utilities.cube_manipulation import MergeCubes, collapsed
class ConstructReliabilityCalibrationTables(BasePlugin):
"""A plugin for creating and populating reliability calibration tables."""
def __init__(
self,
n_probability_bins=5,
single_value_lower_limit=False,
single_value_upper_limit=False,
):
"""
Initialise class for creating reliability calibration tables. These
tables include data columns entitled observation_count,
sum_of_forecast_probabilities, and forecast_count, defined below.
n_probability_bins (int):
The total number of probability bins required in the reliability
tables. If single value limits are turned on, these are included in
this total.
single_value_lower_limit (bool):
Mandates that the lowest bin should be single valued,
with a small precision tolerance, defined as 1.0E-6.
The bin is thus 0 to 1.0E-6.
single_value_upper_limit (bool):
Mandates that the highest bin should be single valued,
with a small precision tolerance, defined as 1.0E-6.
The bin is thus (1 - 1.0E-6) to 1.
"""
self.single_value_tolerance = 1.0e-6
self.probability_bins = self._define_probability_bins(
n_probability_bins, single_value_lower_limit, single_value_upper_limit
)
self.table_columns = np.array(
["observation_count", "sum_of_forecast_probabilities", "forecast_count"]
)
self.expected_table_shape = (len(self.table_columns), n_probability_bins)
def __repr__(self):
"""Represent the configured plugin instance as a string."""
bin_values = ", ".join(
["[{:1.2f} --> {:1.2f}]".format(*item) for item in self.probability_bins]
)
result = "<ConstructReliabilityCalibrationTables: " "probability_bins: {}>"
return result.format(bin_values)
def _define_probability_bins(
self, n_probability_bins, single_value_lower_limit, single_value_upper_limit
):
"""
Define equally sized probability bins for use in a reliability table.
The range 0 to 1 is divided into ranges to give n_probability_bins bins.
If single_value_lower_limit and / or single_value_upper_limit are True,
additional bins corresponding to values of 0 and / or 1 will be created,
each with a width defined by self.single_value_tolerance.
Args:
n_probability_bins (int):
The total number of probability bins desired in the
reliability tables. This number includes the extrema bins
(equals 0 and equals 1) if single value limits are turned on,
in which case the minimum number of bins is 3.
single_value_lower_limit (bool):
Mandates that the lowest bin should be single valued,
with a small precision tolerance, defined as 1.0E-6.
The bin is thus 0 to 1.0E-6.
single_value_upper_limit (bool):
Mandates that the highest bin should be single valued,
with a small precision tolerance, defined as 1.0E-6.
The bin is thus (1 - 1.0E-6) to 1.
Returns:
numpy.ndarray:
An array of 2-element arrays that contain the bounds of the
probability bins. These bounds are non-overlapping, with
adjacent bin boundaries spaced at the smallest representable
interval.
Raises:
ValueError: If trying to use both single_value_lower_limit and
single_value_upper_limit with 2 or fewer probability bins.
"""
if single_value_lower_limit and single_value_upper_limit:
if n_probability_bins <= 2:
msg = (
"Cannot use both single_value_lower_limit and "
"single_value_upper_limit with 2 or fewer "
"probability bins."
)
raise ValueError(msg)
n_probability_bins = n_probability_bins - 2
elif single_value_lower_limit or single_value_upper_limit:
n_probability_bins = n_probability_bins - 1
bin_lower = np.linspace(0, 1, n_probability_bins + 1, dtype=np.float32)
bin_upper = np.nextafter(bin_lower, 0, dtype=np.float32)
bin_upper[-1] = 1.0
bins = np.stack([bin_lower[:-1], bin_upper[1:]], 1).astype(np.float32)
if single_value_lower_limit:
bins[0, 0] = np.nextafter(self.single_value_tolerance, 1, dtype=np.float32)
lowest_bin = np.array([0, self.single_value_tolerance], dtype=np.float32)
bins = np.vstack([lowest_bin, bins]).astype(np.float32)
if single_value_upper_limit:
bins[-1, 1] = np.nextafter(
1.0 - self.single_value_tolerance, 0, dtype=np.float32
)
highest_bin = np.array(
[1.0 - self.single_value_tolerance, 1], dtype=np.float32
)
bins = np.vstack([bins, highest_bin]).astype(np.float32)
return bins
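# Worked sketch: with n_probability_bins=5 and both single value limits
# enabled, the result is five bins roughly equal to [0, 1e-6],
# [1e-6, 1/3], [1/3, 2/3], [2/3, 1 - 1e-6] and [1 - 1e-6, 1], with
# adjacent bounds separated by the smallest representable float32 step.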
def _create_probability_bins_coord(self):
"""
Construct a dimension coordinate describing the probability bins
of the reliability table.
Returns:
iris.coords.DimCoord:
A dimension coordinate describing probability bins.
"""
values = np.mean(self.probability_bins, axis=1, dtype=np.float32)
probability_bins_coord = iris.coords.DimCoord(
values, long_name="probability_bin", units=1, bounds=self.probability_bins
)
return probability_bins_coord
def _create_reliability_table_coords(self):
"""
Construct coordinates that describe the reliability table rows. These
are observation_count, sum_of_forecast_probabilities, and
forecast_count. The order used here is the order in which the table
data is populated, so these must remain consistent with the
_populate_reliability_bins function.
Returns:
(tuple): tuple containing:
**index_coord** (iris.coords.DimCoord):
A numerical index dimension coordinate.
**name_coord** (iris.coords.AuxCoord):
An auxiliary coordinate that assigns names to the index
coordinates, where these names correspond to the
reliability table rows.
"""
index_coord = iris.coords.DimCoord(
np.arange(len(self.table_columns), dtype=np.int32),
long_name="table_row_index",
units=1,
)
name_coord = iris.coords.AuxCoord(
self.table_columns, long_name="table_row_name", units=1
)
return index_coord, name_coord
@staticmethod
def _define_metadata(forecast_slice):
"""
Define metadata that is specifically required for reliability table
cubes, whilst ensuring any mandatory attributes are also populated.
Args:
forecast_slice (iris.cube.Cube):
The source cube from which to get pre-existing metadata of use.
Returns:
dict:
A dictionary of attributes that are appropriate for the
reliability table cube.
"""
attributes = generate_mandatory_attributes([forecast_slice])
attributes["title"] = "Reliability calibration data table"
return attributes
def _create_reliability_table_cube(self, forecast, threshold_coord):
"""
Construct a reliability table cube and populate it with the provided
data. The returned cube will include a cycle hour coordinate, which
describes the model cycle hour at which the forecast data was produced.
It will further include the forecast period, threshold coordinate,
and spatial coordinates from the forecast cube.
Args:
forecast (iris.cube.Cube):
A cube slice across the spatial dimensions of the forecast
data. This slice provides the time and threshold values that
relate to the reliability_table_data.
threshold_coord (iris.coords.DimCoord):
The threshold coordinate.
Returns:
iris.cube.Cube:
A reliability table cube.
"""
def _get_coords_and_dims(coord_names):
"""Obtain the requested coordinates and their dimension index from
the forecast slice cube."""
coords_and_dims = []
leading_coords = [probability_bins_coord, reliability_index_coord]
for coord_name in coord_names:
crd = forecast_slice.coord(coord_name)
crd_dim = forecast_slice.coord_dims(crd)
crd_dim = crd_dim[0] + len(leading_coords) if crd_dim else ()
coords_and_dims.append((crd, crd_dim))
return coords_and_dims
forecast_slice = next(forecast.slices_over(["time", threshold_coord]))
expected_shape = self.expected_table_shape + forecast_slice.shape
dummy_data = np.zeros((expected_shape))
diagnostic = find_threshold_coordinate(forecast).name()
attributes = self._define_metadata(forecast)
# Define reliability table specific coordinates
probability_bins_coord = self._create_probability_bins_coord()
(
reliability_index_coord,
reliability_name_coord,
) = self._create_reliability_table_coords()
frt_coord = create_unified_frt_coord(forecast.coord("forecast_reference_time"))
# List of required non-spatial coordinates from the forecast
non_spatial_coords = ["forecast_period", diagnostic]
# Construct a list of coordinates in the desired order
dim_coords = [forecast.coord(axis=dim).name() for dim in ["x", "y"]]
dim_coords_and_dims = _get_coords_and_dims(dim_coords)
aux_coords_and_dims = _get_coords_and_dims(non_spatial_coords)
dim_coords_and_dims.append((reliability_index_coord, 0))
aux_coords_and_dims.append((reliability_name_coord, 0))
dim_coords_and_dims.append((probability_bins_coord, 1))
reliability_cube = iris.cube.Cube(
dummy_data,
units=1,
attributes=attributes,
dim_coords_and_dims=dim_coords_and_dims,
aux_coords_and_dims=aux_coords_and_dims,
)
reliability_cube.add_aux_coord(frt_coord)
reliability_cube.rename("reliability_calibration_table")
return reliability_cube
def _populate_reliability_bins(self, forecast, truth):
"""
For an x-y slice at a single validity time and threshold, populate
a reliability table using the provided truth.
Args:
forecast (numpy.ndarray or numpy.ma.MaskedArray):
An array containing data over an xy slice for a single validity
time and threshold.
truth (numpy.ndarray or numpy.ma.MaskedArray):
An array containing a thresholded gridded truth at an
equivalent validity time to the forecast array.
Returns:
numpy.ma.MaskedArray:
An array containing reliability table data for a single time
and threshold. The leading dimension corresponds to the rows
of a calibration table, the second dimension to the number of
probability bins, and the trailing dimensions are the spatial
dimensions of the forecast and truth cubes (which are
equivalent).
"""
observation_counts = []
forecast_probabilities = []
forecast_counts = []
for bin_min, bin_max in self.probability_bins:
observation_mask = (
((forecast >= bin_min) & (forecast <= bin_max)) & (np.isclose(truth, 1))
).astype(int)
forecast_mask = ((forecast >= bin_min) & (forecast <= bin_max)).astype(int)
forecasts_probability_values = forecast * forecast_mask
observation_counts.append(observation_mask)
forecast_probabilities.append(forecasts_probability_values)
forecast_counts.append(forecast_mask)
reliability_table = np.ma.stack(
[
np.ma.stack(observation_counts),
np.ma.stack(forecast_probabilities),
np.ma.stack(forecast_counts),
]
)
return reliability_table.astype(np.float32)
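# Illustrative sketch (bins and data are hypothetical): with two probability
# bins [0, 0.5) and [0.5, 1], forecast=[0.2, 0.7] and truth=[0, 1], the
# rows are observation counts [[0, 0], [0, 1]], forecast probability sums
# [[0.2, 0.0], [0.0, 0.7]] and forecast counts [[1, 0], [0, 1]], stacked
# into an array of shape (3, 2, 2).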
def _populate_masked_reliability_bins(self, forecast, truth):
"""
Support populating the reliability table bins with a masked truth. If a
masked truth is provided, a masked reliability table is returned.
Args:
forecast (numpy.ndarray):
An array containing data over an xy slice for a single validity
time and threshold.
truth (numpy.ma.MaskedArray):
An array containing a thresholded gridded truth at an
equivalent validity time to the forecast array.
Returns:
numpy.ma.MaskedArray:
An array containing reliability table data for a single time
and threshold. The leading dimension corresponds to the rows
of a calibration table, the second dimension to the number of
probability bins, and the trailing dimensions are the spatial
dimensions of the forecast and truth cubes (which are
equivalent).
"""
forecast = np.ma.masked_where(np.ma.getmask(truth), forecast)
table = self._populate_reliability_bins(forecast, truth)
# Zero data underneath mask to support bitwise addition of masks.
table.data[table.mask] = 0
return table
def _add_reliability_tables(self, forecast, truth, threshold_reliability):
"""
Add reliability tables. The presence of a masked truth is handled
separately to ensure support for a mask that changes with validity time.
Args:
forecast (numpy.ndarray):
An array containing data over an xy slice for a single validity
time and threshold.
truth (numpy.ndarray or numpy.ma.MaskedArray):
An array containing a thresholded gridded truth at an
equivalent validity time to the forecast array.
threshold_reliability (numpy.ndarray or numpy.ma.MaskedArray):
The current reliability table that will be added to.
Returns:
numpy.ndarray or numpy.ma.MaskedArray:
An array containing reliability table data for a single time
and threshold. The leading dimension corresponds to the rows
of a calibration table, the second dimension to the number of
probability bins, and the trailing dimensions are the spatial
dimensions of the forecast and truth cubes (which are
equivalent).
"""
if np.ma.is_masked(truth.data):
table = self._populate_masked_reliability_bins(forecast.data, truth.data)
# Bitwise addition of masks. This ensures that only points that are
# masked in both the existing and new reliability tables are kept
# as being masked within the resulting reliability table.
mask = threshold_reliability.mask & table.mask
threshold_reliability = np.ma.array(
threshold_reliability.data + table.data, mask=mask, dtype=np.float32,
)
else:
np.add(
threshold_reliability,
self._populate_reliability_bins(forecast.data, truth.data),
out=threshold_reliability,
dtype=np.float32,
)
return threshold_reliability
def process(self, historic_forecasts, truths):
"""
Slice data over threshold and time coordinates to construct reliability
tables. These are summed over time to give a single table for each
threshold, constructed from all the provided historic forecasts and
truths. If a masked truth is provided, a masked reliability table is
returned. If the mask within the truth varies at different timesteps,
any point that is unmasked for at least one timestep will have
unmasked values within the reliability table. Therefore historic
forecast points will only be used if they have a corresponding valid
truth point for each timestep.
.. See the documentation for an example of the resulting reliability
table cube.
.. include:: extended_documentation/calibration/
reliability_calibration/reliability_calibration_examples.rst
Note that the forecast and truth data used is probabilistic, i.e. has
already been thresholded relative to the thresholds of interest, using
the equality operator required. As such this plugin is agnostic as to
whether the data is thresholded below or above a given diagnostic
threshold.
Args:
historic_forecasts (iris.cube.Cube):
A cube containing the historical forecasts used in calibration.
These are expected to all have a consistent cycle hour, that is
the hour in the forecast reference time.
truths (iris.cube.Cube):
A cube containing the thresholded gridded truths used in
calibration.
Returns:
iris.cube.CubeList:
A cubelist of reliability table cubes, one for each threshold
in the historic forecast cubes.
Raises:
ValueError: If the forecast and truth cubes have differing
threshold coordinates.
"""
historic_forecasts, truths = filter_non_matching_cubes(
historic_forecasts, truths
)
threshold_coord = find_threshold_coordinate(historic_forecasts)
truth_threshold_coord = find_threshold_coordinate(truths)
if not threshold_coord == truth_threshold_coord:
msg = "Threshold coordinates differ between forecasts and truths."
raise ValueError(msg)
time_coord = historic_forecasts.coord("time")
check_forecast_consistency(historic_forecasts)
reliability_cube = self._create_reliability_table_cube(
historic_forecasts, threshold_coord
)
populate_bins_func = self._populate_reliability_bins
if np.ma.is_masked(truths.data):
populate_bins_func = self._populate_masked_reliability_bins
reliability_tables = iris.cube.CubeList()
threshold_slices = zip(
historic_forecasts.slices_over(threshold_coord),
truths.slices_over(threshold_coord),
)
for forecast_slice, truth_slice in threshold_slices:
time_slices = zip(
forecast_slice.slices_over(time_coord),
truth_slice.slices_over(time_coord),
)
forecast, truth = next(time_slices)
threshold_reliability = populate_bins_func(forecast.data, truth.data)
for forecast, truth in time_slices:
threshold_reliability = self._add_reliability_tables(
forecast, truth, threshold_reliability
)
reliability_entry = reliability_cube.copy(data=threshold_reliability)
reliability_entry.replace_coord(forecast_slice.coord(threshold_coord))
reliability_tables.append(reliability_entry)
return MergeCubes()(reliability_tables, copy=False)
class AggregateReliabilityCalibrationTables(BasePlugin):
"""This plugin enables the aggregation of multiple reliability calibration
tables, and/or the aggregation over coordinates in the tables."""
def __repr__(self):
"""Represent the configured plugin instance as a string."""
return "<AggregateReliabilityCalibrationTables>"
@staticmethod
def _check_frt_coord(cubes):
"""
Check that the reliability calibration tables do not have overlapping
forecast reference time bounds. If these coordinates overlap in time it
indicates that some of the same forecast data has contributed to more
than one table, thus aggregating them would double count these
contributions.
Args:
cubes (iris.cube.CubeList):
The list of reliability calibration tables for which the
forecast reference time coordinates should be checked.
Raises:
ValueError: If the bounds overlap.
"""
bounds = []
for cube in cubes:
bounds.extend(cube.coord("forecast_reference_time").bounds)
bounds = np.concatenate(bounds)
if not all(x < y for x, y in zip(bounds, bounds[1:])):
raise ValueError(
"Reliability calibration tables have overlapping "
"forecast reference time bounds, indicating that "
"the same forecast data has contributed to the "
"construction of both tables. Cannot aggregate."
)
def process(self, cubes, coordinates=None):
"""
Aggregate the input reliability calibration table cubes and return the
result.
Args:
cubes (list or iris.cube.CubeList):
The cube or cubes containing the reliability calibration tables
to aggregate.
coordinates (list or None):
A list of coordinates over which to aggregate the reliability
calibration table using summation. If the argument is None and
a single cube is provided, this cube will be returned
unchanged.
"""
coordinates = [] if coordinates is None else coordinates
try:
(cube,) = cubes
except ValueError:
cubes = iris.cube.CubeList(cubes)
self._check_frt_coord(cubes)
cube = cubes.merge_cube()
coordinates.append("forecast_reference_time")
else:
if not coordinates:
return cube
result = collapsed(cube, coordinates, iris.analysis.SUM)
frt = create_unified_frt_coord(cube.coord("forecast_reference_time"))
result.replace_coord(frt)
return result
class ManipulateReliabilityTable(BasePlugin):
"""
A plugin to manipulate the reliability tables before they are used to
calibrate a forecast. x and y coordinates on the reliability table must be
collapsed.
The result is a reliability diagram with monotonic observation frequency.
Steps taken are:
1. If any bin contains less than the minimum forecast count then try
combining this bin with whichever neighbour has the lowest sample count.
This process is repeated for all bins that are below the minimum forecast
count criterion.
2. If non-monotonicity of the observation frequency is detected, try
combining a pair of bins that appear non-monotonic. Only a single pair of
bins are combined.
3. If non-monotonicity of the observation frequency remains after trying
to combine a single pair of bins, replace non-monotonic bins by assuming a
constant observation frequency.
"""
def __init__(self, minimum_forecast_count=200):
"""
Initialise class for manipulating a reliability table.
Args:
minimum_forecast_count (int):
The minimum number of forecast counts in a forecast probability
bin for it to be used in calibration.
The default value of 200 is that used in Flowerdew 2014.
Raises:
ValueError: If minimum_forecast_count is less than 1.
References:
Flowerdew J. 2014. Calibrating ensemble reliability whilst
preserving spatial structure. Tellus, Ser. A Dyn. Meteorol.
Oceanogr. 66.
"""
if minimum_forecast_count < 1:
raise ValueError(
"The minimum_forecast_count must be at least 1 as empty "
"bins in the reliability table are not handled."
)
self.minimum_forecast_count = minimum_forecast_count
@staticmethod
def _extract_reliability_table_components(reliability_table):
"""Extract reliability table components from cube
Args:
reliability_table (iris.cube.Cube):
A reliability table to be manipulated.
Returns:
Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, iris.coords.DimCoord]:
Tuple containing the updated observation count,
forecast probability sum, forecast count and probability bin
coordinate.
"""
observation_count = reliability_table.extract(
iris.Constraint(table_row_name="observation_count")
).data
forecast_probability_sum = reliability_table.extract(
iris.Constraint(table_row_name="sum_of_forecast_probabilities")
).data
forecast_count = reliability_table.extract(
iris.Constraint(table_row_name="forecast_count")
).data
probability_bin_coord = reliability_table.coord("probability_bin")
return (
observation_count,
forecast_probability_sum,
forecast_count,
probability_bin_coord,
)
@staticmethod
def _sum_pairs(array, upper):
"""
Returns a new array where a pair of values in the original array have
been replaced by their sum. Combines the value in the upper index with
the value in the upper-1 index.
Args:
array (numpy.ndarray):
Array to be modified.
upper (int):
Upper index of pair.
Returns:
numpy.ndarray:
Array where a pair of values has been replaced by their sum.
"""
result = array.copy()
result[upper - 1] = np.sum(array[upper - 1 : upper + 1])
return np.delete(result, upper)
@staticmethod
def _create_new_bin_coord(probability_bin_coord, upper):
"""
Create a new probability_bin coordinate by combining two adjacent
points on the probability_bin coordinate. This matches the combination
of the data for the two bins.
Args:
probability_bin_coord (iris.coords.DimCoord):
Original probability bin coordinate.
upper (int):
Upper index of pair.
Returns:
iris.coords.DimCoord:
Probability bin coordinate with updated points and bounds where
a pair of bins have been combined to create a single bin.
"""
old_bounds = probability_bin_coord.bounds
new_bounds = np.concatenate(
(
old_bounds[0 : upper - 1],
np.array([[old_bounds[upper - 1, 0], old_bounds[upper, 1]]]),
old_bounds[upper + 1 :],
)
)
new_points = np.mean(new_bounds, axis=1, dtype=np.float32)
new_bin_coord = iris.coords.DimCoord(
new_points, long_name="probability_bin", units=1, bounds=new_bounds
)
return new_bin_coord
def _combine_undersampled_bins(
self,
observation_count,
forecast_probability_sum,
forecast_count,
probability_bin_coord,
):
"""
Combine bins that are under-sampled i.e. that have a lower forecast
count than the minimum_forecast_count, so that information from these
poorly-sampled bins can contribute to the calibration. If multiple
bins are below the minimum forecast count, the bin closest to
meeting the minimum_forecast_count criterion is combined with whichever
neighbour has the lowest sample count. A new bin is then created by
summing the neighbouring pair of bins. This process is repeated for all
bins that are below the minimum forecast count criterion.
Args:
observation_count (numpy.ndarray):
Observation count extracted from reliability table.
forecast_probability_sum (numpy.ndarray):
Forecast probability sum extracted from reliability table.
forecast_count (numpy.ndarray):
Forecast count extracted from reliability table.
probability_bin_coord (iris.coords.DimCoord):
Original probability bin coordinate.
Returns:
Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, iris.coords.DimCoord]
Tuple containing the updated observation count,
forecast probability sum, forecast count and probability bin
coordinate.
"""
while (
any(x < self.minimum_forecast_count for x in forecast_count)
and len(forecast_count) > 1
):
forecast_count_copy = forecast_count.copy()
# Find index of the bin with the highest forecast count that is
# below the minimum_forecast_count by setting forecast counts
# greater than the minimum_forecast_count to NaN.
forecast_count_copy[forecast_count >= self.minimum_forecast_count] = np.nan
# Note for multiple occurrences of the maximum,
# the index of the first occurrence is returned.
index = np.int32(np.nanargmax(forecast_count_copy))
# Determine the upper index of the pair of bins to be combined.
if index == 0:
# Must use higher bin
upper = index + 1
elif index + 1 == len(forecast_count):
# Index already defines the upper bin
upper = index
else:
# Define upper index to include bin with lowest sample count.
if forecast_count[index + 1] > forecast_count[index - 1]:
upper = index
else:
upper = index + 1
forecast_count = self._sum_pairs(forecast_count, upper)
observation_count = self._sum_pairs(observation_count, upper)
forecast_probability_sum = self._sum_pairs(forecast_probability_sum, upper)
probability_bin_coord = self._create_new_bin_coord(
probability_bin_coord, upper
)
return (
observation_count,
forecast_probability_sum,
forecast_count,
probability_bin_coord,
)
def _combine_bin_pair(
self,
observation_count,
forecast_probability_sum,
forecast_count,
probability_bin_coord,
):
"""
Combine a pair of bins when non-monotonicity of the observation
frequency is detected. Iterate top-down from the highest forecast
probability bin to the lowest probability bin when combining the bins.
Only allow a single pair of bins to be combined.
Args:
observation_count (numpy.ndarray):
Observation count extracted from reliability table.
forecast_probability_sum (numpy.ndarray):
Forecast probability sum extracted from reliability table.
forecast_count (numpy.ndarray):
Forecast count extracted from reliability table.
probability_bin_coord (iris.coords.DimCoord):
Original probability bin coordinate.
Returns:
Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, iris.coords.DimCoord]
Tuple containing the updated observation count,
forecast probability sum, forecast count and probability bin
coordinate.
"""
observation_frequency = np.array(observation_count / forecast_count)
for upper in np.arange(len(observation_frequency) - 1, 0, -1):
(diff,) = np.diff(
[observation_frequency[upper - 1], observation_frequency[upper]]
)
if diff < 0:
forecast_count = self._sum_pairs(forecast_count, upper)
observation_count = self._sum_pairs(observation_count, upper)
forecast_probability_sum = self._sum_pairs(
forecast_probability_sum, upper
)
probability_bin_coord = self._create_new_bin_coord(
probability_bin_coord, upper
)
break
return (
observation_count,
forecast_probability_sum,
forecast_count,
probability_bin_coord,
)
@staticmethod
def _assume_constant_observation_frequency(observation_count, forecast_count):
"""
Decide which end bin (highest probability bin or lowest probability
bin) has the highest sample count. Iterate through the observation
frequency from the end bin with the highest sample count to the end bin
with the lowest sample count. Whilst iterating, compare each pair of
bins and, if a pair is non-monotonic, replace the value of the bin
closer to the lowest sample count end bin with the value of the
bin that is closer to the higher sample count end bin. Then calculate
the new observation count required to give a monotonic observation
frequency.
Args:
observation_count (numpy.ndarray):
Observation count extracted from reliability table.
forecast_count (numpy.ndarray):
Forecast count extracted from reliability table.
Returns:
numpy.ndarray:
Observation count computed from a monotonic observation frequency.
"""
observation_frequency = np.array(observation_count / forecast_count)
iterator = observation_frequency
operation = operator.lt
# Top down if forecast count is lower for lowest probability bin,
# than for highest probability bin.
if forecast_count[0] < forecast_count[-1]:
# Reverse array to iterate from top to bottom.
iterator = observation_frequency[::-1]
operation = operator.gt
for index, lower_bin in enumerate(iterator[:-1]):
(diff,) = np.diff([lower_bin, iterator[index + 1]])
if operation(diff, 0):
iterator[index + 1] = lower_bin
observation_frequency = iterator
if forecast_count[0] < forecast_count[-1]:
# Re-reverse array from bottom to top to ensure original ordering.
observation_frequency = iterator[::-1]
observation_count = observation_frequency * forecast_count
return observation_count
@staticmethod
def _update_reliability_table(
reliability_table,
observation_count,
forecast_probability_sum,
forecast_count,
probability_bin_coord,
):
"""
Update the reliability table data and the probability bin coordinate.
Args:
reliability_table (iris.cube.Cube):
A reliability table to be manipulated.
observation_count (numpy.ndarray):
Observation count extracted from reliability table.
forecast_probability_sum (numpy.ndarray):
Forecast probability sum extracted from reliability table.
forecast_count (numpy.ndarray):
Forecast count extracted from reliability table.
probability_bin_coord (iris.coords.DimCoord):
Original probability bin coordinate.
Returns:
iris.cube.Cube:
Updated reliability table.
"""
final_data = np.stack(
[observation_count, forecast_probability_sum, forecast_count]
)
nrows, ncols = final_data.shape
reliability_table = reliability_table[0:nrows, 0:ncols].copy(data=final_data)
reliability_table.replace_coord(probability_bin_coord)
return reliability_table
def process(self, reliability_table):
"""
Apply the steps needed to produce a reliability diagram with a
monotonic observation frequency.
Args:
reliability_table (iris.cube.Cube):
A reliability table to be manipulated. The only coordinates
expected on this cube are a threshold coordinate,
a table_row_index coordinate and corresponding table_row_name
coordinate and a probability_bin coordinate.
Returns:
iris.cube.CubeList:
Containing a reliability table cube for each threshold in the
                input reliability table. For tables where monotonicity has been
enforced the probability_bin coordinate will have one less
bin than the tables that were already monotonic. If
under-sampled bins have been combined, then the probability_bin
coordinate will have been reduced until all bins have more than
the minimum_forecast_count if possible; a single under-sampled
bin will be returned if combining all bins is still insufficient
to reach the minimum_forecast_count.
"""
threshold_coord = find_threshold_coordinate(reliability_table)
reliability_table_cubelist = iris.cube.CubeList()
for rel_table_slice in reliability_table.slices_over(threshold_coord):
(
observation_count,
forecast_probability_sum,
forecast_count,
probability_bin_coord,
) = self._extract_reliability_table_components(rel_table_slice)
if np.any(forecast_count < self.minimum_forecast_count):
(
observation_count,
forecast_probability_sum,
forecast_count,
probability_bin_coord,
) = self._combine_undersampled_bins(
observation_count,
forecast_probability_sum,
forecast_count,
probability_bin_coord,
)
rel_table_slice = self._update_reliability_table(
rel_table_slice,
observation_count,
forecast_probability_sum,
forecast_count,
probability_bin_coord,
)
# If the observation frequency is non-monotonic adjust the
# reliability table
observation_frequency = np.array(observation_count / forecast_count)
if not np.all(np.diff(observation_frequency) >= 0):
(
observation_count,
forecast_probability_sum,
forecast_count,
probability_bin_coord,
) = self._combine_bin_pair(
observation_count,
forecast_probability_sum,
forecast_count,
probability_bin_coord,
)
observation_count = self._assume_constant_observation_frequency(
observation_count, forecast_count
)
rel_table_slice = self._update_reliability_table(
rel_table_slice,
observation_count,
forecast_probability_sum,
forecast_count,
probability_bin_coord,
)
reliability_table_cubelist.append(rel_table_slice)
return reliability_table_cubelist
class ApplyReliabilityCalibration(PostProcessingPlugin):
"""
A plugin for the application of reliability calibration to probability
forecasts. This calibration is designed to improve the reliability of
probability forecasts without significantly degrading their resolution.
The method implemented here is described in Flowerdew J. 2014. Calibration
is always applied as long as there are at least two bins within the input
reliability table.
References:
Flowerdew J. 2014. Calibrating ensemble reliability whilst
preserving spatial structure. Tellus, Ser. A Dyn. Meteorol.
Oceanogr. 66.
"""
def __init__(self):
"""
Initialise class for applying reliability calibration.
"""
self.threshold_coord = None
@staticmethod
def _extract_matching_reliability_table(forecast, reliability_table):
"""
Extract the reliability table with a threshold coordinate
matching the forecast cube.
If no matching reliability table is found raise an exception.
Args:
forecast (iris.cube.Cube):
The forecast to be calibrated.
reliability_table (iris.cube.CubeList):
The reliability table to use for applying calibration.
Returns:
iris.cube.Cube:
                A reliability table whose threshold coordinate matches
the forecast cube.
Raises:
ValueError: If no matching reliability table is found.
"""
threshold_coord = find_threshold_coordinate(forecast)
coord_values = {threshold_coord.name(): threshold_coord.points}
constr = iris.Constraint(coord_values=coord_values)
if isinstance(reliability_table, iris.cube.Cube):
extracted = reliability_table.extract(constr)
else:
extracted = reliability_table.extract(constr, strict=True)
if not extracted:
raise ValueError(
"No reliability table found to match threshold "
f"{find_threshold_coordinate(forecast).points[0]}."
)
return extracted
def _ensure_monotonicity_across_thresholds(self, cube):
"""
Ensures that probabilities change monotonically relative to thresholds
in the expected order, e.g. exceedance probabilities always remain the
same or decrease as the threshold values increase, below threshold
probabilities always remain the same or increase as the threshold
values increase.
Args:
cube (iris.cube.Cube):
The probability cube for which monotonicity is to be checked
and enforced. This cube is modified in place.
Raises:
ValueError: Threshold coordinate lacks the
spp__relative_to_threshold attribute.
Warns:
UserWarning: If the probabilities must be sorted to reinstate
expected monotonicity following calibration.
"""
(threshold_dim,) = cube.coord_dims(self.threshold_coord)
thresholding = probability_is_above_or_below(cube)
if thresholding is None:
msg = (
"Cube threshold coordinate does not define whether "
"thresholding is above or below the defined thresholds."
)
raise ValueError(msg)
if (
thresholding == "above"
and not (np.diff(cube.data, axis=threshold_dim) <= 0).all()
):
msg = (
"Exceedance probabilities are not decreasing monotonically "
"as the threshold values increase. Forced back into order."
)
warnings.warn(msg)
cube.data = np.sort(cube.data, axis=threshold_dim)[::-1]
if (
thresholding == "below"
and not (np.diff(cube.data, axis=threshold_dim) >= 0).all()
):
msg = (
"Below threshold probabilities are not increasing "
"monotonically as the threshold values increase. Forced "
"back into order."
)
warnings.warn(msg)
cube.data = np.sort(cube.data, axis=threshold_dim)
def _calculate_reliability_probabilities(self, reliability_table):
"""
Calculates forecast probabilities and observation frequencies from the
reliability table. If fewer than two bins are provided, Nones are
returned as no calibration can be applied. Fewer than two bins can occur
        due to repeated combination of undersampled probability bins;
please see :class:`.ManipulateReliabilityTable`.
Args:
reliability_table (iris.cube.Cube):
A reliability table for a single threshold from which to
calculate the forecast probabilities and observation
frequencies.
Returns:
Optional[Tuple[numpy.ndarray, numpy.ndarray]]:
Tuple containing forecast probabilities calculated by dividing
the sum of forecast probabilities by the forecast count and
observation frequency calculated by dividing the observation
count by the forecast count.
"""
observation_count = reliability_table.extract(
iris.Constraint(table_row_name="observation_count")
).data
forecast_count = reliability_table.extract(
iris.Constraint(table_row_name="forecast_count")
).data
forecast_probability_sum = reliability_table.extract(
iris.Constraint(table_row_name="sum_of_forecast_probabilities")
).data
# If there are fewer than two bins, no calibration can be applied.
if len(np.atleast_1d(forecast_count)) < 2:
return None, None
forecast_probability = np.array(forecast_probability_sum / forecast_count)
observation_frequency = np.array(observation_count / forecast_count)
return forecast_probability, observation_frequency
@staticmethod
def _interpolate(
forecast_threshold, reliability_probabilities, observation_frequencies
):
"""
Perform interpolation of the forecast probabilities using the
reliability table data to produce the calibrated forecast. Where
necessary linear extrapolation will be applied. Any mask in place on
the forecast_threshold data is removed and reapplied after calibration.
Args:
forecast_threshold (numpy.ndarray):
The forecast probabilities to be calibrated.
reliability_probabilities (numpy.ndarray):
Probabilities taken from the reliability tables.
observation_frequencies (numpy.ndarray):
Observation frequencies that relate to the reliability
probabilities, taken from the reliability tables.
Returns:
numpy.ndarray:
The calibrated forecast probabilities. The final results are
clipped to ensure any extrapolation has not yielded
probabilities outside the range 0 to 1.
"""
shape = forecast_threshold.shape
mask = forecast_threshold.mask if np.ma.is_masked(forecast_threshold) else None
forecast_probabilities = np.ma.getdata(forecast_threshold).flatten()
interpolation_function = scipy.interpolate.interp1d(
reliability_probabilities, observation_frequencies, fill_value="extrapolate"
)
interpolated = interpolation_function(forecast_probabilities.data)
interpolated = interpolated.reshape(shape).astype(np.float32)
if mask is not None:
interpolated = np.ma.masked_array(interpolated, mask=mask)
return np.clip(interpolated, 0, 1)
def process(self, forecast, reliability_table):
"""
Apply reliability calibration to a forecast. The reliability table
and the forecast cube must share an identical threshold coordinate.
Args:
forecast (iris.cube.Cube):
The forecast to be calibrated.
reliability_table (iris.cube.Cube or iris.cube.CubeList):
The reliability table to use for applying calibration.
x and y dimensions must be collapsed.
Returns:
iris.cube.Cube:
The forecast cube following calibration.
"""
self.threshold_coord = find_threshold_coordinate(forecast)
forecast_thresholds = forecast.slices_over(self.threshold_coord)
uncalibrated_thresholds = []
calibrated_cubes = iris.cube.CubeList()
for forecast_threshold in forecast_thresholds:
reliability_threshold = self._extract_matching_reliability_table(
forecast_threshold, reliability_table
)
(
reliability_probabilities,
observation_frequencies,
) = self._calculate_reliability_probabilities(reliability_threshold)
if reliability_probabilities is None:
calibrated_cubes.append(forecast_threshold)
uncalibrated_thresholds.append(
forecast_threshold.coord(self.threshold_coord).points[0]
)
continue
interpolated = self._interpolate(
forecast_threshold.data,
reliability_probabilities,
observation_frequencies,
)
calibrated_cubes.append(forecast_threshold.copy(data=interpolated))
calibrated_forecast = calibrated_cubes.merge_cube()
self._ensure_monotonicity_across_thresholds(calibrated_forecast)
if uncalibrated_thresholds:
msg = (
"The following thresholds were not calibrated due to "
"insufficient forecast counts in reliability table bins: "
"{}".format(uncalibrated_thresholds)
)
warnings.warn(msg)
return calibrated_forecast
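# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of the plugin API above): chain the two
# plugins defined in this module. "reliability_table_cube" and
# "probability_forecast" are assumed, pre-loaded iris cubes; the
# minimum_forecast_count value is the Flowerdew (2014) default.
# ---------------------------------------------------------------------------
def _example_reliability_calibration(reliability_table_cube, probability_forecast):
    """Sketch: enforce monotonicity on the tables, then calibrate a forecast."""
    manipulated_tables = ManipulateReliabilityTable(
        minimum_forecast_count=200
    ).process(reliability_table_cube)
    return ApplyReliabilityCalibration().process(
        probability_forecast, manipulated_tables
    )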
|
the-stack_106_24123 | #To reverse a given array
import array as arr
# reverse an array from the array module using slicing
a= arr.array('d',[1,2,3,4,5])
a1 = a[::-1]
print('Array Reversal -> ',a1)
b = [1,2,3,4]
b1 = b[::-1]
print('array reverse ->',b1)
# reverse a list in place using list.reverse()
c = [52.5,78.9,63.7,935.9]
c.reverse()
print("Reversed array ->",c) |
the-stack_106_24125 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Written by Michele Comitini <[email protected]>
License: LGPL v3
Adds support for OAuth 2.0 authentication to web2py.
OAuth 2.0 spec: http://tools.ietf.org/html/rfc6749
"""
import time
import cgi
from gluon._compat import urllib2
from gluon._compat import urlencode
from gluon import current, redirect, HTTP
import json
class OAuthAccount(object):
"""
Login will be done via OAuth Framework, instead of web2py's
login form.
You need to override the get_user method to match your auth provider needs.
Example for facebook in your model (eg db.py)::
# define the auth_table before call to auth.define_tables()
auth_table = db.define_table(
auth.settings.table_user_name,
Field('first_name', length=128, default=""),
Field('last_name', length=128, default=""),
Field('username', length=128, default="", unique=True),
Field('password', 'password', length=256,
readable=False, label='Password'),
Field('registration_key', length=128, default= "",
writable=False, readable=False))
auth_table.username.requires = IS_NOT_IN_DB(db, auth_table.username)
auth.define_tables()
CLIENT_ID=\"<put your fb application id here>\"
CLIENT_SECRET=\"<put your fb application secret here>\"
AUTH_URL="http://..."
TOKEN_URL="http://..."
# remember to download and install facebook GraphAPI module in your app
from facebook import GraphAPI, GraphAPIError
from gluon.contrib.login_methods.oauth20_account import OAuthAccount
class FaceBookAccount(OAuthAccount):
'''OAuth impl for FaceBook'''
AUTH_URL="https://graph.facebook.com/oauth/authorize"
TOKEN_URL="https://graph.facebook.com/oauth/access_token"
def __init__(self):
OAuthAccount.__init__(self,
client_id=CLIENT_ID,
client_secret=CLIENT_SECRET,
auth_url=self.AUTH_URL,
token_url=self.TOKEN_URL,
scope='user_photos,friends_photos')
self.graph = None
def get_user(self):
'''
Returns the user using the Graph API.
'''
if not self.accessToken():
return None
if not self.graph:
self.graph = GraphAPI((self.accessToken()))
user = None
try:
user = self.graph.get_object("me")
                except GraphAPIError as e:
self.session.token = None
self.graph = None
if user:
return dict(first_name = user['first_name'],
last_name = user['last_name'],
username = user['id'])
auth.settings.actions_disabled=['register',
'change_password','request_reset_password','profile']
auth.settings.login_form=FaceBookAccount()
    Any optional arg in the constructor will be passed as-is to the remote
    server for requests. It can be used for the optional "scope" parameter for Facebook.
"""
def __redirect_uri(self, next=None):
"""
Build the uri used by the authenticating server to redirect
the client back to the page originating the auth request.
Appends the _next action to the generated url so the flows continues.
"""
r = current.request
http_host = r.env.http_host
if r.env.https == 'on':
url_scheme = 'https'
else:
url_scheme = r.env.wsgi_url_scheme
if next:
path_info = next
else:
path_info = r.env.path_info
uri = '%s://%s%s' % (url_scheme, http_host, path_info)
if r.get_vars and not next:
uri += '?' + urlencode(r.get_vars)
return uri
def __build_url_opener(self, uri):
"""
        Build the url opener for managing HTTP Basic Authentication
"""
# Create an OpenerDirector with support
# for Basic HTTP Authentication...
password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
password_mgr.add_password(realm=None,
uri=uri,
user=self.client_id,
passwd=self.client_secret)
handler = urllib2.HTTPBasicAuthHandler(password_mgr)
opener = urllib2.build_opener(handler)
return opener
def accessToken(self):
"""
Return the access token generated by the authenticating server.
If token is already in the session that one will be used.
If token has expired refresh_token is used to get another token.
Otherwise the token is fetched from the auth server.
"""
refresh_token = None
if current.session.token and 'expires' in current.session.token:
expires = current.session.token['expires']
# reuse token until expiration
if expires == 0 or expires > time.time():
return current.session.token['access_token']
if 'refresh_token' in current.session.token:
refresh_token = current.session.token['refresh_token']
code = current.request.vars.code
if code or refresh_token:
data = dict(
client_id=self.client_id,
client_secret=self.client_secret,
)
if code:
data.update(
redirect_uri=current.session.redirect_uri,
code=code,
grant_type='authorization_code'
)
elif refresh_token:
data.update(
refresh_token=refresh_token,
grant_type='refresh_token'
)
open_url = None
opener = self.__build_url_opener(self.token_url)
try:
open_url = opener.open(self.token_url, urlencode(data).encode(), self.socket_timeout)
except urllib2.HTTPError as e:
tmp = e.read()
raise Exception(tmp)
finally:
if current.session.code:
del current.session.code # throw it away
if open_url:
try:
data = open_url.read()
try:
resp_type = open_url.info().get_content_type()
except:
# Old python 2 version. This does not work for python3
resp_type = open_url.info().gettype()
# try json style first
if not resp_type or resp_type[:16] == 'application/json':
try:
tokendata = json.loads(data)
current.session.token = tokendata
except Exception as e:
raise Exception("Cannot parse oauth server response %s %s" % (data, e))
else: # try facebook style first with x-www-form-encoded
tokendata = cgi.parse_qs(data)
current.session.token = \
dict([(k, v[-1]) for k, v in tokendata.items()])
if not tokendata: # parsing failed?
raise Exception("Cannot parse oauth server response %s" % data)
# set expiration absolute time try to avoid broken
# implementations where "expires_in" becomes "expires"
if 'expires_in' in current.session.token:
exps = 'expires_in'
elif 'expires' in current.session.token:
exps = 'expires'
else:
exps = None
current.session.token['expires'] = exps and \
int(current.session.token[exps]) + \
time.time()
finally:
opener.close()
return current.session.token['access_token']
current.session.token = None
return None
def __init__(self, g=None,
client_id=None, client_secret=None,
auth_url=None, token_url=None, socket_timeout=60, **args):
"""
first argument is unused. Here only for legacy reasons.
"""
if [client_id, client_secret, auth_url, token_url].count(None) > 0:
raise RuntimeError("""Following args are mandatory:
client_id,
client_secret,
auth_url,
token_url.
""")
self.client_id = client_id
self.client_secret = client_secret
self.auth_url = auth_url
self.token_url = token_url
self.args = args
self.socket_timeout = socket_timeout
def login_url(self, next="/"):
self.__oauth_login(next)
return next
def logout_url(self, next="/"):
del current.session.token
return next
def get_user(self):
"""
Override this method by sublcassing the class.
"""
if not current.session.token:
return None
return dict(first_name='Pinco',
last_name='Pallino',
username='pincopallino')
raise NotImplementedError("Must override get_user()")
# Following code is never executed. It can be used as example
# for overriding in subclasses.
if not self.accessToken():
return None
if not self.graph:
self.graph = GraphAPI((self.accessToken()))
user = None
try:
user = self.graph.get_object("me")
except GraphAPIError:
current.session.token = None
self.graph = None
if user:
return dict(first_name=user['first_name'],
last_name=user['last_name'],
username=user['id'])
def __oauth_login(self, next):
"""
This method redirects the user to the authenticating form
on authentication server if the authentication code
and the authentication token are not available to the
application yet.
Once the authentication code has been received this method is
called to set the access token into the session by calling
accessToken()
"""
token = self.accessToken()
if not token:
current.session.redirect_uri = self.__redirect_uri(next)
data = dict(redirect_uri=current.session.redirect_uri,
response_type='code',
client_id=self.client_id)
if self.args:
data.update(self.args)
auth_request_url = self.auth_url + "?" + urlencode(data)
raise HTTP(302,
"You are not authenticated: you are being redirected to the <a href='" + auth_request_url + "'> authentication server</a>",
Location=auth_request_url)
return
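# ---------------------------------------------------------------------------
# Illustrative sketch only: a minimal subclass for a generic OAuth 2.0
# provider. The endpoint URLs, the 'profile' scope and the user fields
# returned by get_user() are placeholders, not part of this module; a real
# subclass would call the provider's user-info endpoint with accessToken().
# ---------------------------------------------------------------------------
class ExampleOAuthAccount(OAuthAccount):
    """Example login method for a hypothetical OAuth 2.0 provider."""
    def __init__(self, client_id, client_secret):
        OAuthAccount.__init__(
            self,
            client_id=client_id,
            client_secret=client_secret,
            auth_url='https://provider.example.com/oauth/authorize',
            token_url='https://provider.example.com/oauth/token',
            scope='profile')
    def get_user(self):
        if not self.accessToken():
            return None
        return dict(first_name='Example',
                    last_name='User',
                    username='example-user')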
|
the-stack_106_24126 | # Database interactions
from pymongo import MongoClient
from datetime import datetime
from calendar import monthrange
from pycountry_convert import country_name_to_country_alpha2, country_alpha2_to_country_name
from pprint import pprint
import json
# client = MongoClient('localhost', 27017)
client = MongoClient('mongodb+srv://codeonavirus:[email protected]/test?retryWrites=true&w=majority')
db = client.app_database
articles = db.articles
def db_drop():
articles.drop()
def db_insert(source, article, date_format=r'%Y-%m-%d %H:%M:%S'):
# Convert date of publication to datetime
date = datetime.strptime(article['date_of_publication'], date_format)
article['date_of_publication'] = date
article['source'] = source
# Convert country to country code
for i, report in enumerate(article['reports']):
codes = {}
for j, location in enumerate(report['locations']):
if location['country'] == None:
continue
code = convert_country(location['country'])
if code in codes:
codes[code] += 1
else:
codes[code] = 1
article['reports'][i]['countries'] = list(codes.keys())
article['reports'][i]['country_count'] = codes
for j, disease in enumerate(report['diseases']):
article['reports'][i]['diseases'][j] = convert_disease(disease)
articles.insert_one(article)
def db_delete_GIM():
query = {"source": "GIM"}
n = articles.delete_many(query)
print(n.deleted_count, "documents deleted.")
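# Illustrative sketch only (not part of the original module): the dict below
# shows the minimum fields db_insert() reads (date_of_publication, reports,
# locations, diseases); the URL, headline and other values are made up.
def _example_insert():
    sample_article = {
        'url': 'https://example.com/outbreak-article',
        'headline': 'Example outbreak report',
        'main_text': '...',
        'date_of_publication': '2020-03-01 12:00:00',
        'reports': [{
            'locations': [{'country': 'Australia'}],
            'diseases': ['COVID-19'],
        }],
    }
    db_insert('GIM', sample_article)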
def convert_disease(disease):
preset = {
'mers': 'mers-cov',
'enterotoxin': 'staphylococcal enterotoxin b',
'rotavirus': 'rotavirus infection',
'e.coli': 'ehec (e.coli)',
'e. coli': 'ehec (e.coli)',
'pneumococcus': 'pneumococcus pneumonia',
'marburg virus': 'marburg virus disease',
'hiv': 'hiv/aids',
'aids': 'hiv/aids',
'norovirus': 'norovirus infection',
'a/h1n1': 'influenza a/h1n1',
'a/h3n2': 'influenza a/h3n2',
'a/h5n1': 'influenza a/h5n1',
'a/h9n2': 'influenza a/h9n2',
'ebola': 'ebola haemorrhagic fever',
'crimean-congo hemorrhagic fever': 'crimean-congo haemorrhagic fever',
'vaccinia': 'vaccinia and cowpox',
'cowpox': 'vaccinia and cowpox',
'pneumococcal pneumonia': 'pneumococcus pneumonia',
'staphylococcal': 'staphylococcal enterotoxin b',
'enterovirus 71': 'enterovirus 71 infection',
'thypoid fever': 'typhoid fever'
}
if disease in preset:
return preset[disease]
return disease
def convert_country(country):
# Possible hard-coded countries here
preset = {
'The Gambia': 'GM',
'Kosovo': 'XK',
'Myanmar (Burma)': 'MM',
'Sint Maarten': 'SX',
'U.S. Virgin Islands': 'VI',
'Caribbean Netherlands': 'BQ',
'The Bahamas': 'BS',
'Serbia And Montenegro': 'CS',
'Macao S.A.R., China': 'MO',
'Hong Kong S.A.R., China': 'HK',
'Netherlands Antilles': 'ANT',
'Palestinian Territory': 'PS',
'Congo (Kinshasa)': 'CD',
'Congo (Brazzaville)': 'CG',
'Saint Helena': 'SH',
'Reunion': 'RE',
'St Lucia': 'LC',
'Vatican': 'VA',
'00120, Vatican City': 'VA',
'6798, Christmas Island': 'CX',
'FIQQ 1ZZ, Falkland Islands (Islas Malvinas)': 'FK',
'TKCA 1ZZ, Turks and Caicos Islands': 'TC'
}
if country in preset:
return preset[country]
try:
return country_name_to_country_alpha2(country)
# If any countries fall through filters
except Exception:
f = open('debug.txt', 'a+')
f.write('Failed convert_country: ' + country + '\n')
f.close()
return country
def convert_code(code):
preset = {
'TW': 'Taiwan',
'XK': 'Kosovo',
'LA': 'Laos',
'SY': 'Syria',
'MD': 'Moldova',
'BO': 'Bolivia',
'VE': 'Venezuela',
'KP': 'North Korea',
'FM': 'Federated States of Micronesia',
'FK': 'Falkland Islands',
'KR': 'South Korea',
'PS': 'Palestine',
'CD': 'Democratic Republic of the Congo',
'BQ': 'Bonaire, Sint Eustatius and Saba',
'BN': 'Brunei',
'TZ': 'Tanzania',
'VI': 'U.S. Virgin Islands',
'VA': 'Vatican City',
'IR': 'Iran',
'VG': 'British Virgin Islands'
}
if code in preset:
return preset[code]
return country_alpha2_to_country_name(code)
def prettify_disease(disease):
preset = {
'crimean-congo haemorrhagic fever': 'Crimean-Congo haemorrhagic fever',
'COVID-19': 'COVID-19',
'ehec (e.coli)': 'EHEC (E.coli)',
'hepatitis a': 'Hepatitis A',
'hepatitis b': 'Hepatitis B',
'hepatitis c': 'Hepatitis C',
'hepatitis d': 'Hepatitis D',
'hepatitis e': 'Hepatitis E',
'hiv/aids': 'HIV/AIDS',
'influenza a/h1n1': 'Influenza A/H1N1',
'influenza a/h1n2': 'Influenza A/H1N2',
'influenza a/h3n2': 'Influenza A/H3N2',
'influenza a/h5n1': 'Influenza A/H5N1',
'influenza a/h5n6': 'Influenza A/H5N6',
'influenza a/h7n2': 'Influenza A/H7N2',
'influenza a/h7n4': 'Influenza A/H7N4',
'influenza a/h7n9': 'Influenza A/H7N9',
'influenza a/h9n2': 'Influenza A/H9N2',
'mers-cov': 'MERS-CoV',
'sars': 'SARS',
'staphylococcal enterotoxin b': 'Staphylococcal enterotoxin B',
'vaccinia and cowpox': 'Vaccinia and Cowpox',
'west nile virus': 'West Nile virus'
}
if disease in preset:
return preset[disease]
else:
return disease.capitalize()
def check_new_article(url):
pipeline = [
{'match': {'url': url}}
]
result = list(articles.aggregate(pipeline))
if len(result) > 1:
print("Something went wrong!!!")
return len(result) == 1
def add_match_source(pipeline, source, i=0):
if source != '':
match = {'$match': {'source': source}}
pipeline.insert(i, match)
def add_match_country(pipeline, country, i=0):
if country != '':
match = {'$match': {'reports.countries': {'$in': [country]}}}
pipeline.insert(i, match)
def add_match_disease(pipeline, disease, i=0):
if disease != '':
match = {'$match': {'reports.diseases': {'$in': [disease]}}}
pipeline.insert(i, match)
def match_year_month(year, month=0):
start_month = 1 if month == 0 else month
start_date = datetime(year, start_month, 1)
end_month = 12 if month == 0 else month
end_day = monthrange(year, end_month)[1]
end_date = datetime(year, end_month, end_day, 23, 59, 59)
return {'$match': {'date_of_publication': {'$gte': start_date, '$lte': end_date}}}
def get_country_list(sort=False, name=False):
pipeline = [
{'$unwind': '$reports'},
{'$unwind': '$reports.countries'},
{'$group': {'_id': '$reports.countries'}}
]
result = [element['_id'] for element in articles.aggregate(pipeline)]
if sort:
if not name:
result.sort()
else:
result.sort(key=convert_code)
return result
def get_country_dict():
return { code: convert_code(code) for code in get_country_list()}
def get_disease_list(sort=False, prettify=False):
pipeline = [
{'$unwind': '$reports'},
{'$unwind': '$reports.diseases'},
{'$group': {'_id': '$reports.diseases'}}
]
result = [element['_id'] for element in articles.aggregate(pipeline)]
if sort:
result.sort()
if prettify:
result = [prettify_disease(disease) for disease in result]
return result
def get_disease_dict():
return {disease: prettify_disease(disease) for disease in get_disease_list()}
def get_article_summary(source, year, month, country, disease, sort=True):
pipeline = [
match_year_month(year, month),
{'$project': {'_id': 0, 'url': 1, 'headline': 1, 'date_of_publication': 1, 'source': 1, 'date': {'$dateToString': {'format': '%d/%m/%Y %H:%M:%S', 'date': '$date_of_publication'}}}}
]
add_match_source(pipeline, source)
add_match_country(pipeline, country)
add_match_disease(pipeline, disease)
result = list(articles.aggregate(pipeline))
if sort:
result.sort(key=lambda x: x['date_of_publication'])
return result
def get_article(url):
pipeline = [
{'$match': {'url': url}},
{'$project': {'_id': 0, 'url': 1, 'date_of_publication': 1, 'headline': 1, 'main_text': 1, 'date': {'$dateToString': {'format': '%d/%m/%Y %H:%M:%S', 'date': '$date_of_publication'}}}}
]
result = list(articles.aggregate(pipeline))
return result[0]
def get_num_reports_by_country(source, year, month, country, disease, merge=False, maxResults=0):
pipeline = [
match_year_month(year, month),
{'$unwind': '$reports'},
{'$unwind': '$reports.countries'},
{'$group': {'_id': '$reports.countries', 'value': {'$sum': 1}}},
{'$project': {'_id': 0, 'category': '$_id', 'value': 1}}
]
add_match_source(pipeline, source)
add_match_country(pipeline, country)
add_match_disease(pipeline, disease)
result = list(articles.aggregate(pipeline))
if maxResults > 0:
result.sort(key=lambda x: x['value'], reverse=True)
other = sum([pair['value'] for pair in result[maxResults:]])
result = result[:maxResults] + [{'category': 'remaining', 'value': other}]
if merge:
new_result = {}
for pair in result:
new_result[pair['category']] = pair['value']
return new_result
else:
return result
def get_num_reports_by_disease(source, year, month, country, disease, maxResults=0, prettify=False):
pipeline = [
match_year_month(year, month),
{'$unwind': '$reports'},
{'$unwind': '$reports.diseases'},
{'$group': {'_id': '$reports.diseases', 'value': {'$sum': 1}}},
{'$project': {'_id': 0, 'category': '$_id', 'value': 1}}
]
add_match_source(pipeline, source)
add_match_country(pipeline, country)
add_match_disease(pipeline, disease)
result = list(articles.aggregate(pipeline))
result.sort(key=lambda x: x['value'], reverse=True)
if maxResults > 0 and len(result) > maxResults:
other = sum([pair['value'] for pair in result[maxResults:]])
result = result[:maxResults] + [{'category': 'remaining', 'value': other}]
if prettify:
result = [{'category': prettify_disease(pair['category']), 'value': pair['value']} for pair in result]
return result
def get_num_total_reports():
pipeline = [
{'$count': 'total'},
]
result = list(articles.aggregate(pipeline))
return result[0]
def get_num_reports_by_year(source, country='', disease=''):
# country is a country code
pipeline = [
{'$unwind': '$reports'},
{'$project': {'article_year': {'$year': '$date_of_publication'}}},
{'$group': {'_id': '$article_year', 'value': {'$sum': 1}}},
{'$project': {'_id': 0, 'date': '$_id', 'value': 1}}
]
add_match_country(pipeline, country, 1)
add_match_disease(pipeline, disease, 1)
add_match_source(pipeline, source)
# if country != '':
# match = {'$match': {'reports.countries': {'$in': [country]}}}
# pipeline.insert(1, match)
# if disease != '':
# match = {'$match': {'reports.diseases': {'$in': [disease]}}}
# pipeline.insert(1, match)
result = list(articles.aggregate(pipeline))[:]
# Fill intermediate values
for i in range(1996, 2021):
if i not in [pair['date'] for pair in result]:
result.append({
'date': i,
'value': 0
})
result.sort(key=lambda x: x['date'])
return result
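# Illustrative helper (not part of the original API): print a small summary
# for a single country code using the query helpers above; the year 2020 and
# the empty source string are arbitrary example values.
def print_country_summary(code, source=''):
    """Print report counts by disease and by year for one country code."""
    print('Summary for', convert_code(code))
    for pair in get_num_reports_by_disease(source, 2020, 0, code, '', prettify=True):
        print(' ', pair['category'], pair['value'])
    for pair in get_num_reports_by_year(source, country=code):
        print(' ', pair['date'], pair['value'])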
def download(source=''):
with open('results_mongo.json', 'w') as f:
pipeline = [
{'$project': {'_id': 0, 'headline': 1, 'main_text': 1, 'reports': 1}}
]
if source != '':
pipeline.insert(0, {'$match': {'source': source}})
result = list(articles.aggregate(pipeline))
json.dump(result, f) |
the-stack_106_24127 | e = 0
n1 = float(input('Digite um numero; '))
n2 = float(input('Digite outro numero; '))
while not e == 6:
e = int(input('''Escolha O que deseja fazer;
[1] soma
[2] multiplicar
[3] maior
[4] Calcular o fatorial de um número
[5] Digitar novos números
[6] sair
'''))
if e == 1:
s = n1 + n2
print(s)
elif e == 2:
m = n1 * n2
print(m)
elif e == 3:
if n1 > n2:
print('{} é maior que {}'.format(n1, n2))
else:
print('{} é maior que {}'.format(n2, n1))
elif e == 4:
n = int(input('Digite um numero inteiro; '))
f = n
while not n == 1:
f = f * (n - 1)
n -= 1
print(f)
elif e == 5:
n1 = float(input('Digite um numero; '))
n2 = float(input('Digite outro numero; '))
elif e == 6:
print('Saindo...')
else:
print('Digite um número que está no menu!') |
the-stack_106_24128 | from kfp import components
import kfp.dsl as dsl
fairness_check_ops = components.load_component_from_url('https://raw.githubusercontent.com/Trusted-AI/AIF360/master/mlops/kubeflow/bias_detector_pytorch/component.yaml')
robustness_check_ops = components.load_component_from_url('https://raw.githubusercontent.com/Trusted-AI/adversarial-robustness-toolbox/main/utils/mlops/kubeflow/robustness_evaluation_fgsm_pytorch/component.yaml')
@dsl.pipeline(
name="Launch trusted-ai pipeline",
description="An example for trusted-ai integration."
)
def trusted_ai(
namespace="anonymous",
fgsm_attack_epsilon='0.2',
model_class_file='PyTorchModel.py',
model_class_name='ThreeLayerCNN',
feature_testset_path='processed_data/X_test.npy',
label_testset_path='processed_data/y_test.npy',
protected_label_testset_path='processed_data/p_test.npy',
favorable_label='0.0',
unfavorable_label='1.0',
privileged_groups="[{'race': 0.0}]",
unprivileged_groups="[{'race': 4.0}]",
loss_fn='torch.nn.CrossEntropyLoss()',
optimizer='torch.optim.Adam(model.parameters(), lr=0.001)',
clip_values='(0, 1)',
nb_classes='2',
input_shape='(1,3,64,64)'):
job_manifest = {
"apiVersion": "batch/v1",
"kind": "Job",
"metadata": {
"name": "trusted-ai-train-job",
"namespace": namespace
},
"spec": {
"ttlSecondsAfterFinished": 100,
"template": {
"metadata": {
"annotations": {
"sidecar.istio.io/inject": "false"
}
},
"spec": {
"restartPolicy": "Never",
"containers": [
{"name": "classification-training",
"image": "aipipeline/gender-classification:latest",
"command": [
"python", "-u", "gender_classification_training.py", "--data_bucket", "mlpipeline",
"--result_bucket", "mlpipeline"
],
"env": [{'name': 'S3_ENDPOINT', 'value': 'minio-service.kubeflow:9000'}]
}
],
}
}
}
}
train_step = dsl.ResourceOp(
name="trust-ai-train-step",
k8s_resource=job_manifest,
action='create',
success_condition='status.succeeded > 0',
failure_condition='status.failed > 0')
fairness_check = fairness_check_ops(model_id='training-example',
model_class_file=model_class_file,
model_class_name=model_class_name,
feature_testset_path=feature_testset_path,
label_testset_path=label_testset_path,
protected_label_testset_path=protected_label_testset_path,
favorable_label=favorable_label,
unfavorable_label=unfavorable_label,
privileged_groups=privileged_groups,
unprivileged_groups=unprivileged_groups,
data_bucket_name='mlpipeline',
result_bucket_name='mlpipeline').after(train_step).set_image_pull_policy("Always")
robustness_check = robustness_check_ops(model_id='training-example',
epsilon=fgsm_attack_epsilon,
model_class_file=model_class_file,
model_class_name=model_class_name,
feature_testset_path=feature_testset_path,
label_testset_path=label_testset_path,
loss_fn=loss_fn,
optimizer=optimizer,
clip_values=clip_values,
nb_classes=nb_classes,
input_shape=input_shape,
data_bucket_name='mlpipeline',
result_bucket_name='mlpipeline').after(train_step).set_image_pull_policy("Always")
if __name__ == '__main__':
from kfp_tekton.compiler import TektonCompiler
TektonCompiler().compile(trusted_ai, __file__.replace('.py', '.yaml'))
|
the-stack_106_24131 | import json
import importlib
from assetmunki.interop import Serializeable
class Asset(Serializeable):
_columns = [
'machine.serial_number',
'machine.machine_name',
'machine.machine_model',
'machine.machine_desc',
'machine.hostname',
'reportdata.long_username',
'reportdata.console_user',
'machine.os_version',
'machine.buildversion',
'machine.cpu',
'machine.physical_memory',
'warranty.purchase_date',
'warranty.end_date',
'warranty.status',
'munkireport.manifestname',
'diskreport.totalsize',
'diskreport.volumetype',
'diskreport.media_type'
]
_attrs = [
'serial_number',
'machine_name',
'machine_model',
'machine_desc',
'hostname',
'long_username',
'console_user',
'os_version',
'buildversion',
'cpu',
'physical_memory',
'purchase_date',
'end_date',
'manifestname',
'totalsize'
]
|
the-stack_106_24133 | from bs4 import BeautifulSoup
from terminaltables import SingleTable
import requests, re
def searchCopainsdavant(nom, city):
url = "http://copainsdavant.linternaute.com/s/?ty=1&prenom=%s&nom=%s&nomjf=&annee=&anneeDelta=&ville=%s"
name = nom
if " " in name:
nom = name.split(" ")[1]
prenom = name.split(" ")[0]
else:
prenom = ""
nom = name
data = requests.get(url % (prenom, nom, city)).content.decode('utf-8')
soup = BeautifulSoup(data, "html.parser")
nameList = soup.find_all("div", {"class": "grid_last"})
addresseList = soup.find_all("span", {"class": "app_list--result__search__place"})
urlList = soup.find_all("h3")
birthdayList = []
travailList = []
urlList2 = []
for url in urlList:
url = url.find("a")
urls = str(url)
href = re.search(r"/p/([a-zA-Z0-9_-]+)", urls).group()
urlList2.append(href)
for url in urlList2:
data = requests.get("http://copainsdavant.linternaute.com/%s" % (url)).content.decode('utf-8')
soup = BeautifulSoup(data, "html.parser")
birthdayList0 = soup.find_all("abbr", {"class": "bday"})
item = len(birthdayList0)
if item == 0:
birthdayList0.append("None")
for b in birthdayList0:
birthdayList.append(str(b))
travailList0 = soup.find_all("p", {"class": "title"})
item = len(travailList0)
if item == 0:
travailList0.append("None")
for t in travailList0:
travailList.append(str(t))
namesList2 = []
addressesList2 = []
birthdayList2 = []
travailList2 = []
for name in nameList:
name = name.find("a")
namesList2.append(name.string)
for addr in addresseList:
addressesList2.append(addr.string.strip())
for date in birthdayList:
date = date.replace("<abbr class=\"bday\" title=\"", "").replace("00:00:00\">", "- ").replace("</abbr>", "").replace("\">", "")
birthdayList2.append(date)
for travail in travailList:
travail = travail.replace("<p class=\"title\">", "").replace("</p>", "")
travailList2.append(travail)
regroup = zip(namesList2, addressesList2, birthdayList2, travailList2, urlList2)
title = " Copain D'avant "
TABLE_DATA = [
('Name', 'Adresse', 'Date', 'Work', 'url'),
]
count = 0
for info in regroup:
count += 1
name = info[0]
adresse = info[1]
adresse = adresse.split(" - ")[0]
dateBirthday = info[2]
try:
dateBirthday = dateBirthday.split(" - ")[1]
except:
pass
travail = info[3]
url = info[4]
infos = (name, adresse, dateBirthday, travail, url)
TABLE_DATA.append(infos)
if count > 0:
table_instance = SingleTable(TABLE_DATA, title)
print(table_instance.table) |
the-stack_106_24135 | import numpy as np
import imgaug.augmenters as iaa
from imgaug.augmentables.segmaps import SegmentationMapsOnImage
from PIL import Image
from parameters import tag_image, tag_label, tag_name, label_folder_name
import random
import os
from typing import Union
class AugManager(object):
def __init__(self, iaalist=None):
if iaalist is None:
iaalist = iaa.Sequential([
iaa.Sometimes(0.5, iaa.ChannelShuffle(0.3)),
iaa.Sometimes(0.5, iaa.MultiplyHue((0.5, 1.5))),
iaa.Sometimes(0.5, iaa.AddToHueAndSaturation((-50, 50), per_channel=True)),
iaa.Sometimes(0.5, iaa.Fliplr(0.5)),
iaa.Sometimes(0.5, iaa.Flipud(0.5)),
iaa.Sometimes(0.5, iaa.Rotate((-50, 50)))
], random_order=True)
self.transformSet = iaalist
self.outscale = random.choice([0.8, 0.85, 0.9, 0.95])
def __call__(self, input_dict : {str : Union[Image.Image, np.ndarray]}) -> dict:
image, label = input_dict[tag_image], input_dict[tag_label]
image = np.array(image)
label = np.array(label)
# size measure
y_max = image.shape[0]
x_max = image.shape[1]
# np.ndarray -> imgaug.augmentables.segmaps.SegmentationMapsOnImage
label = SegmentationMapsOnImage(label, shape=image.shape)
# augmentation
zoomset = iaa.OneOf([
iaa.Identity(), # do nothing
iaa.Affine(scale=self.outscale), # zoom out
RandomCrop(y_max, x_max).cut() # zoom in
])
image, label = zoomset(image=image, segmentation_maps=label)
image, label = self.transformSet(image=image, segmentation_maps=label)
# imgaug.augmentables.segmaps.SegmentationMapsOnImage -> np.ndarray
label = label.get_arr()
return {tag_image : image,
tag_label : label}
def augstore(self, src:dict, dst_base:str,
dataname_extension='.tiff', labelname_extension='.tif',
identifier=None):
os.makedirs(dst_base, exist_ok=True)
os.makedirs(os.path.join(dst_base, label_folder_name), exist_ok=True)
# get image
image = src[tag_image] # PIL.Image.Image
label = src[tag_label] # PIL.Image.Image
name = src[tag_name] # str
# PIL -> numpy
image = np.array(image)
label = np.array(label)
# size measure
y_max = image.shape[0]
x_max = image.shape[1]
# np.ndarray -> imgaug.augmentables.segmaps.SegmentationMapsOnImage
label = SegmentationMapsOnImage(label, shape=label.shape)
# augmentation
zoomset = iaa.OneOf([
iaa.Identity(), # do nothing
iaa.Affine(scale=self.outscale), # zoom out
RandomCrop(y_max, x_max).cut() # zoom in
])
image, label = zoomset(image=image, segmentation_maps=label)
image, label = self.transformSet(image=image, segmentation_maps=label)
# imgaug.augmentables.segmaps.SegmentationMapsOnImage -> np.ndarray
label = label.get_arr()
if not identifier == None:
name = name + '_' + str(identifier)
# numpy -> PIL.Image.Image
image = Image.fromarray(image)
label = Image.fromarray(label)
image.save(os.path.join(dst_base, name + dataname_extension))
label.save(os.path.join(dst_base, label_folder_name, name + labelname_extension))
return {tag_image : image,
tag_label : label,
tag_name : name}
class RandomCrop(object):
def __init__(self, max_height, max_width):
assert isinstance(max_height, int) and max_height >= 1, 'max_height must be positive integer type.'
assert isinstance(max_width, int) and max_width >= 1, 'max_width must be positive integer type.'
self.percent_limit = 0.15
self.top, self.right, self.bottom, self.left = self.operate_location(max_height, max_width)
def operate_location(self, max_height, max_width):
import random
max_height = max_height + 1
max_width = max_width + 1
min_height = int(self.percent_limit * max_height)
min_width = int(self.percent_limit * max_width)
fix_height = random.randint(min_height, max_height)
fix_width = random.randint(min_width, max_width)
left = random.randint(0, max_width - fix_width)
up = random.randint(0, max_height - fix_height)
right = max_width - fix_width - left
down = max_height - fix_height - up
return up, right, down, left
def cut(self):
return iaa.Crop(px=(self.top, self.right, self.bottom, self.left))
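# ---------------------------------------------------------------------------
# Illustrative sketch only: run AugManager on a random image/label pair. The
# array shapes are arbitrary; tag_image and tag_label come from the project's
# parameters module imported above.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    dummy_image = np.random.randint(0, 255, size=(128, 128, 3), dtype=np.uint8)
    dummy_label = np.zeros((128, 128), dtype=np.int32)
    augmented = AugManager()({tag_image: dummy_image, tag_label: dummy_label})
    print(augmented[tag_image].shape, augmented[tag_label].shape)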
|
the-stack_106_24136 | import torch
import torch.nn as nn
class Model(torch.nn.Module):
def __init__(self, input_shape, outputs_count, hidden_count = 256):
super(Model, self).__init__()
self.device = "cpu" #torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.layers = [
nn.Linear(input_shape[0] + outputs_count, hidden_count),
nn.ReLU(),
nn.Linear(hidden_count, hidden_count//2),
nn.ReLU(),
nn.Linear(hidden_count//2, 1)
]
torch.nn.init.xavier_uniform_(self.layers[0].weight)
torch.nn.init.xavier_uniform_(self.layers[2].weight)
torch.nn.init.uniform_(self.layers[4].weight, -0.003, 0.003)
self.model = nn.Sequential(*self.layers)
self.model.to(self.device)
print(self.model)
def forward(self, state, action):
x = torch.cat([state, action], dim = 1)
return self.model(x)
def save(self, path):
print("saving to ", path)
torch.save(self.model.state_dict(), path + "trained/model_critic.pt")
def load(self, path):
print("loading from ", path)
self.model.load_state_dict(torch.load(path + "trained/model_critic.pt", map_location = self.device))
self.model.eval()
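# ---------------------------------------------------------------------------
# Illustrative sketch only: build the critic and run one forward pass on a
# random batch. The state dimension (4), action count (2) and batch size (8)
# are arbitrary example values.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    critic = Model(input_shape=(4,), outputs_count=2)
    states = torch.randn(8, 4)
    actions = torch.randn(8, 2)
    print(critic(states, actions).shape)  # expected: torch.Size([8, 1])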
|
the-stack_106_24137 | """jc - JSON CLI output utility `cksum` command output parser
This parser works with the following checksum calculation utilities:
- `sum`
- `cksum`
Usage (cli):
$ cksum file.txt | jc --cksum
or
$ jc cksum file.txt
Usage (module):
import jc.parsers.cksum
result = jc.parsers.cksum.parse(cksum_command_output)
Compatibility:
'linux', 'darwin', 'cygwin', 'aix', 'freebsd'
Examples:
$ cksum * | jc --cksum -p
[
{
"filename": "__init__.py",
"checksum": 4294967295,
"blocks": 0
},
{
"filename": "airport.py",
"checksum": 2208551092,
"blocks": 3745
},
{
"filename": "airport_s.py",
"checksum": 1113817598,
"blocks": 4572
},
...
]
"""
import jc.utils
class info():
version = '1.0'
description = 'cksum command parser'
author = 'Kelly Brazil'
author_email = '[email protected]'
details = 'Parses cksum and sum program output'
# compatible options: linux, darwin, cygwin, win32, aix, freebsd
compatible = ['linux', 'darwin', 'cygwin', 'aix', 'freebsd']
magic_commands = ['cksum', 'sum']
__version__ = info.version
def process(proc_data):
"""
Final processing to conform to the schema.
Parameters:
proc_data: (dictionary) raw structured data to process
Returns:
List of dictionaries. Structured data with the following schema:
[
{
"filename": string,
"checksum": integer,
"blocks": integer
}
]
"""
for entry in proc_data:
int_list = ['checksum', 'blocks']
for key in int_list:
if key in entry:
try:
entry[key] = int(entry[key])
except (ValueError):
entry[key] = None
return proc_data
def parse(data, raw=False, quiet=False):
"""
Main text parsing function
Parameters:
data: (string) text data to parse
raw: (boolean) output preprocessed JSON if True
quiet: (boolean) suppress warning messages if True
Returns:
List of dictionaries. Raw or processed structured data.
"""
if not quiet:
jc.utils.compatibility(__name__, info.compatible)
raw_output = []
if jc.utils.has_data(data):
for line in filter(None, data.splitlines()):
item = {
'filename': line.split(maxsplit=2)[2],
'checksum': line.split(maxsplit=2)[0],
'blocks': line.split(maxsplit=2)[1]
}
raw_output.append(item)
if raw:
return raw_output
else:
return process(raw_output)
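# Illustrative usage sketch (not part of the original jc parser module): feed
# the output of `cksum` captured with subprocess into parse(); the file name
# 'example.txt' is a placeholder.
if __name__ == '__main__':
    import subprocess
    cksum_output = subprocess.run(['cksum', 'example.txt'],
                                  capture_output=True, text=True).stdout
    print(parse(cksum_output, quiet=True))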
|
the-stack_106_24139 | import os
from indra.preassembler import Preassembler, render_stmt_graph, \
flatten_evidence, flatten_stmts
from indra.sources import reach
from indra.statements import *
from indra.ontology.bio import bio_ontology
from indra.ontology.world import world_ontology
def test_duplicates():
src = Agent('SRC', db_refs = {'HGNC': '11283'})
ras = Agent('RAS', db_refs = {'FA': '03663'})
st1 = Phosphorylation(src, ras)
st2 = Phosphorylation(src, ras)
pa = Preassembler(bio_ontology, stmts=[st1, st2])
pa.combine_duplicates()
assert len(pa.unique_stmts) == 1
def test_duplicates_copy():
src = Agent('SRC', db_refs = {'HGNC': '11283'})
ras = Agent('RAS', db_refs = {'FA': '03663'})
st1 = Phosphorylation(src, ras, evidence=[Evidence(text='Text 1')])
st2 = Phosphorylation(src, ras, evidence=[Evidence(text='Text 2')])
stmts = [st1, st2]
pa = Preassembler(bio_ontology, stmts=stmts)
pa.combine_duplicates()
assert len(pa.unique_stmts) == 1
assert len(stmts) == 2
assert len(stmts[0].evidence) == 1
assert len(stmts[1].evidence) == 1
def test_duplicates_sorting():
mc = ModCondition('phosphorylation')
map2k1_1 = Agent('MAP2K1', mods=[mc])
mc1 = ModCondition('phosphorylation', 'serine', '218')
mc2 = ModCondition('phosphorylation', 'serine', '222')
mc3 = ModCondition('phosphorylation', 'serine', '298')
map2k1_2 = Agent('MAP2K1', mods=[mc1, mc2, mc3])
mapk3 = Agent('MAPK3')
st1 = Phosphorylation(map2k1_1, mapk3, position='218')
st2 = Phosphorylation(map2k1_2, mapk3)
st3 = Phosphorylation(map2k1_1, mapk3, position='218')
stmts = [st1, st2, st3]
pa = Preassembler(bio_ontology, stmts=stmts)
pa.combine_duplicates()
assert len(pa.unique_stmts) == 2
def test_combine_duplicates():
raf = Agent('RAF1')
mek = Agent('MEK1')
erk = Agent('ERK2')
p1 = Phosphorylation(raf, mek,
evidence=Evidence(text='foo'))
p2 = Phosphorylation(raf, mek,
evidence=Evidence(text='bar'))
p3 = Phosphorylation(raf, mek,
evidence=Evidence(text='baz'))
p4 = Phosphorylation(raf, mek,
evidence=Evidence(text='beep'))
p5 = Phosphorylation(mek, erk,
evidence=Evidence(text='foo2'))
p6 = Dephosphorylation(mek, erk,
evidence=Evidence(text='bar2'))
p7 = Dephosphorylation(mek, erk,
evidence=Evidence(text='baz2'))
p8 = Dephosphorylation(mek, erk,
evidence=Evidence(text='beep2'))
p9 = Dephosphorylation(Agent('SRC'), Agent('KRAS'),
evidence=Evidence(text='beep'))
stmts = [p1, p2, p3, p4, p5, p6, p7, p8, p9]
pa = Preassembler(bio_ontology, stmts=stmts)
pa.combine_duplicates()
# The statements come out sorted by their matches_key
assert len(pa.unique_stmts) == 4, len(pa.unique_stmts)
num_evs =[len(s.evidence) for s in pa.unique_stmts]
assert pa.unique_stmts[0].matches(p6) # MEK dephos ERK
assert num_evs[0] == 3, num_evs[0]
assert pa.unique_stmts[1].matches(p9) # SRC dephos KRAS
assert num_evs[1] == 1, num_evs[1]
assert pa.unique_stmts[2].matches(p5) # MEK phos ERK
assert num_evs[2] == 1, num_evs[2]
assert pa.unique_stmts[3].matches(p1) # RAF phos MEK
assert num_evs[3] == 4, num_evs[3]
def test_combine_evidence_exact_duplicates():
raf = Agent('RAF1')
mek = Agent('MEK1')
p1 = Phosphorylation(raf, mek,
evidence=Evidence(text='foo'))
p2 = Phosphorylation(raf, mek,
evidence=Evidence(text='bar'))
p3 = Phosphorylation(raf, mek,
evidence=Evidence(text='bar'))
stmts = [p1, p2, p3]
pa = Preassembler(bio_ontology, stmts=stmts)
pa.combine_duplicates()
# The statements come out sorted by their matches_key
assert len(pa.unique_stmts) == 1
assert len(pa.unique_stmts[0].evidence) == 2
assert set(ev.text for ev in pa.unique_stmts[0].evidence) == \
set(['foo', 'bar'])
def test_combine_evidence_exact_duplicates_different_raw_text():
raf1 = Agent('RAF1', db_refs={'TEXT': 'Raf'})
raf2 = Agent('RAF1', db_refs={'TEXT': 'RAF'})
mek = Agent('MEK1')
p1 = Phosphorylation(raf1, mek,
evidence=Evidence(text='foo'))
p2 = Phosphorylation(raf1, mek,
evidence=Evidence(text='bar'))
p3 = Phosphorylation(raf2, mek,
evidence=Evidence(text='bar'))
stmts = [p1, p2, p3]
pa = Preassembler(bio_ontology, stmts=stmts)
pa.combine_duplicates()
# The statements come out sorted by their matches_key
assert len(pa.unique_stmts) == 1
assert len(pa.unique_stmts[0].evidence) == 3
assert set(ev.text for ev in pa.unique_stmts[0].evidence) == \
set(['foo', 'bar', 'bar'])
def test_superfamily_refinement():
"""A gene-level statement should be supported by a family-level
statement."""
src = Agent('SRC', db_refs = {'HGNC': '11283'})
ras = Agent('RAS', db_refs = {'FPLX': 'RAS'})
nras = Agent('NRAS', db_refs = {'HGNC': '7989'})
st1 = Phosphorylation(src, ras, 'tyrosine', '32')
st2 = Phosphorylation(src, nras, 'tyrosine', '32')
pa = Preassembler(bio_ontology, stmts=[st1, st2])
stmts = pa.combine_related()
# The top-level list should contain only one statement, the gene-level
# one, supported by the family one.
assert len(stmts) == 1
assert (stmts[0].equals(st2))
assert (len(stmts[0].supported_by) == 1)
assert (stmts[0].supported_by[0].equals(st1))
def test_superfamily_refinement_isa_or_partof():
src = Agent('SRC', db_refs = {'HGNC': '11283'})
prkag1 = Agent('PRKAG1', db_refs = {'HGNC': '9385'})
ampk = Agent('AMPK', db_refs = {'FPLX': 'AMPK'})
st1 = Phosphorylation(src, ampk, 'tyrosine', '32')
st2 = Phosphorylation(src, prkag1, 'tyrosine', '32')
pa = Preassembler(bio_ontology, stmts=[st1, st2])
stmts = pa.combine_related()
# The top-level list should contain only one statement, the gene-level
# one, supported by the family one.
assert len(stmts) == 1
assert stmts[0].equals(st2)
assert len(stmts[0].supported_by) == 1
assert stmts[0].supported_by[0].equals(st1)
def test_modification_refinement():
"""A more specific modification statement should be supported by a more
generic modification statement."""
src = Agent('SRC', db_refs = {'HGNC': '11283'})
nras = Agent('NRAS', db_refs = {'HGNC': '7989'})
st1 = Phosphorylation(src, nras, 'tyrosine', '32')
st2 = Phosphorylation(src, nras)
pa = Preassembler(bio_ontology, stmts=[st1, st2])
stmts = pa.combine_related()
# The top-level list should contain only one statement, the more specific
# modification, supported by the less-specific modification.
assert len(stmts) == 1
assert stmts[0].equals(st1)
assert len(stmts[0].supported_by) == 1
assert stmts[0].supported_by[0].equals(st2)
def test_modification_refinement_residue_noenz():
erbb3 = Agent('Erbb3')
st1 = Phosphorylation(None, erbb3)
st2 = Phosphorylation(None, erbb3, 'Y')
pa = Preassembler(bio_ontology, stmts=[st1, st2])
pa.combine_related()
assert len(pa.related_stmts) == 1
def test_modification_refinement_noenz():
"""A more specific modification statement should be supported by a more
generic modification statement."""
src = Agent('SRC', db_refs = {'HGNC': '11283'})
nras = Agent('NRAS', db_refs = {'HGNC': '7989'})
st1 = Phosphorylation(src, nras, 'tyrosine', '32')
st2 = Phosphorylation(None, nras, 'tyrosine', '32')
pa = Preassembler(bio_ontology, stmts=[st1, st2])
stmts = pa.combine_related()
# The top-level list should contain only one statement, the more specific
# modification, supported by the less-specific modification.
assert len(stmts) == 1
assert stmts[0].equals(st1)
assert len(stmts[0].supported_by) == 1
assert stmts[0].supported_by[0].equals(st2)
assert stmts[0].supported_by[0].supports[0].equals(st1)
def test_modification_refinement_noenz2():
"""A more specific modification statement should be supported by a more
generic modification statement.
Similar to test_modification_refinement_noenz for statements where one
argument is associated with a component in the hierarchy (SIRT1 in this
case) but the other is not (BECN1).
"""
    sirt1 = Agent('SIRT1', db_refs={'HGNC': '14929', 'UP': 'Q96EB6',
                                    'TEXT': 'SIRT1'})
becn1 = Agent('BECN1', db_refs={'HGNC': '1034', 'UP': 'Q14457',
'TEXT': 'Beclin 1'})
st1 = Deacetylation(sirt1, becn1)
st2 = Deacetylation(None, becn1)
pa = Preassembler(bio_ontology, stmts=[st1, st2])
stmts = pa.combine_related()
# The top-level list should contain only one statement, the more specific
# modification, supported by the less-specific modification.
assert (len(stmts) == 1)
assert (stmts[0].equals(st1))
assert (len(stmts[0].supported_by) == 1)
assert (stmts[0].supported_by[0].equals(st2))
assert (stmts[0].supported_by[0].supports[0].equals(st1))
def test_modification_norefinement_noenz():
"""A more specific modification statement should be supported by a more
generic modification statement."""
src = Agent('SRC', db_refs = {'HGNC': '11283'})
nras = Agent('NRAS', db_refs = {'HGNC': '7989'})
st1 = Phosphorylation(src, nras)
st2 = Phosphorylation(None, nras, 'Y', '32',
evidence=[Evidence(text='foo')])
pa = Preassembler(bio_ontology, stmts=[st1, st2])
stmts = pa.combine_related()
# Modification is less specific, enzyme more specific in st1, therefore
# these statements shouldn't be combined.
assert len(stmts) == 2
    assert len(stmts[1].evidence) == 1
def test_modification_norefinement_subsfamily():
"""A more specific modification statement should be supported by a more
generic modification statement."""
src = Agent('SRC', db_refs = {'HGNC': '11283'})
nras = Agent('NRAS', db_refs = {'HGNC': '7989'})
ras = Agent('RAS', db_refs = {'FPLX': 'RAS'})
st1 = Phosphorylation(src, nras)
st2 = Phosphorylation(src, ras, 'Y', '32',
evidence=[Evidence(text='foo')])
pa = Preassembler(bio_ontology, stmts=[st1, st2])
stmts = pa.combine_related()
# Modification is less specific, enzyme more specific in st1, therefore
# these statements shouldn't be combined.
assert len(stmts) == 2
assert len(stmts[0].evidence) == 1, stmts
def test_modification_norefinement_enzfamily():
"""A more specific modification statement should be supported by a more
generic modification statement."""
mek = Agent('MEK')
raf = Agent('RAF')
braf = Agent('BRAF')
st1 = Phosphorylation(raf, mek, 'Y', '32',
evidence=[Evidence(text='foo')])
st2 = Phosphorylation(braf, mek)
pa = Preassembler(bio_ontology, stmts=[st1, st2])
stmts = pa.combine_related()
# Modification is less specific, enzyme more specific in st1, therefore
# these statements shouldn't be combined.
assert len(stmts) == 2
    assert len(stmts[1].evidence) == 1
def test_bound_condition_refinement():
"""A statement with more specific bound context should be supported by a
less specific statement."""
src = Agent('SRC', db_refs = {'HGNC': '11283'})
gtp = Agent('GTP', db_refs = {'CHEBI': '15996'})
nras = Agent('NRAS', db_refs = {'HGNC': '7989'})
nrasgtp = Agent('NRAS', db_refs = {'HGNC': '7989'},
bound_conditions=[BoundCondition(gtp, True)])
st1 = Phosphorylation(src, nras, 'tyrosine', '32')
st2 = Phosphorylation(src, nrasgtp, 'tyrosine', '32')
# The top-level list should contain only one statement, the more specific
# modification, supported by the less-specific modification.
pa = Preassembler(bio_ontology, stmts=[st1, st2])
stmts = pa.combine_related()
assert len(stmts) == 1
assert stmts[0].equals(st2)
assert len(stmts[0].supported_by) == 1
assert stmts[0].supported_by[0].equals(st1)
def test_bound_condition_norefinement():
"""A statement with more specific bound context should be supported by a
less specific statement."""
src = Agent('SRC', db_refs = {'HGNC': '11283'})
gtp = Agent('GTP', db_refs = {'CHEBI': '15996'})
nras = Agent('NRAS', db_refs = {'HGNC': '7989'})
nrasgtp = Agent('NRAS', db_refs = {'HGNC': '7989'},
bound_conditions=[BoundCondition(gtp, True)])
st1 = Phosphorylation(src, nras, 'tyrosine', '32')
st2 = Phosphorylation(src, nrasgtp)
pa = Preassembler(bio_ontology, stmts=[st1, st2])
stmts = pa.combine_related()
# The bound condition is more specific in st2 but the modification is less
# specific. Therefore these statements should not be combined.
assert len(stmts) == 2
def test_bound_condition_deep_refinement():
"""A statement with more specific bound context should be supported by a
less specific statement."""
src = Agent('SRC', db_refs = {'HGNC': '11283'})
gtp1 = Agent('GTP', db_refs = {'CHEBI': '15996'})
gtp2 = Agent('GTP', mods=[ModCondition('phosphorylation')],
db_refs = {'CHEBI': '15996'})
nrasgtp1 = Agent('NRAS', db_refs = {'HGNC': '7989'},
bound_conditions=[BoundCondition(gtp1, True)])
nrasgtp2 = Agent('NRAS', db_refs = {'HGNC': '7989'},
bound_conditions=[BoundCondition(gtp2, True)])
st1 = Phosphorylation(src, nrasgtp1, 'tyrosine', '32')
st2 = Phosphorylation(src, nrasgtp2, 'tyrosine', '32')
# The top-level list should contain only one statement, the more specific
# modification, supported by the less-specific modification.
pa = Preassembler(bio_ontology, stmts=[st1, st2])
stmts = pa.combine_related()
assert len(stmts) == 1
assert stmts[0].equals(st2)
assert len(stmts[0].supported_by) == 1
assert stmts[0].supported_by[0].equals(st1)
def test_complex_refinement():
ras = Agent('RAS')
raf = Agent('RAF')
mek = Agent('MEK')
st1 = Complex([ras, raf])
st2 = Complex([mek, ras, raf])
pa = Preassembler(bio_ontology, stmts=[st1, st2])
pa.combine_related()
assert len(pa.unique_stmts) == 2
assert len(pa.related_stmts) == 2
def test_complex_agent_refinement():
ras = Agent('RAS')
raf1 = Agent('RAF', mods=[ModCondition('ubiquitination', None, None, True)])
raf2 = Agent('RAF', mods=[ModCondition('ubiquitination', None, None, False)])
st1 = Complex([ras, raf1])
st2 = Complex([ras, raf2])
pa = Preassembler(bio_ontology, stmts=[st1, st2])
pa.combine_related()
assert len(pa.unique_stmts) == 2
assert len(pa.related_stmts) == 2
def test_mod_sites_refinement():
"""A statement with more specific modification context should be supported
by a less-specific statement."""
# TODO
assert True
def test_binding_site_refinement():
"""A statement with information about a binding site for an interaction
between two proteins should be supported by a statement without this
information."""
# TODO
assert True
def test_activating_substitution_refinement():
"""Should only be refinement if entities are a refinement and all
fields match."""
mc1 = MutCondition('12', 'G', 'D')
mc2 = MutCondition('61', 'Q', 'L')
nras1 = Agent('NRAS', mutations=[mc1], db_refs = {'HGNC': '7989'})
nras2 = Agent('NRAS', mutations=[mc2], db_refs = {'HGNC': '7989'})
ras = Agent('RAS', mutations=[mc1], db_refs={'FPLX': 'RAS'})
st1 = ActiveForm(ras, 'gtpbound', True,
evidence=Evidence(text='bar'))
st2 = ActiveForm(nras1, 'gtpbound', True,
evidence=Evidence(text='foo'))
st3 = ActiveForm(nras2, 'gtpbound', True,
evidence=Evidence(text='bar'))
st4 = ActiveForm(nras1, 'phosphatase', True,
evidence=Evidence(text='bar'))
st5 = ActiveForm(nras1, 'gtpbound', False,
evidence=Evidence(text='bar'))
assert st2.refinement_of(st1, bio_ontology)
assert not st3.refinement_of(st1, bio_ontology)
assert not st4.refinement_of(st1, bio_ontology)
assert not st5.refinement_of(st1, bio_ontology)
assert not st1.refinement_of(st2, bio_ontology)
assert not st3.refinement_of(st2, bio_ontology)
assert not st4.refinement_of(st2, bio_ontology)
assert not st5.refinement_of(st2, bio_ontology)
assert not st1.refinement_of(st3, bio_ontology)
assert not st2.refinement_of(st3, bio_ontology)
assert not st4.refinement_of(st3, bio_ontology)
assert not st5.refinement_of(st3, bio_ontology)
assert not st1.refinement_of(st4, bio_ontology)
assert not st2.refinement_of(st4, bio_ontology)
assert not st3.refinement_of(st4, bio_ontology)
assert not st5.refinement_of(st4, bio_ontology)
assert not st1.refinement_of(st5, bio_ontology)
assert not st2.refinement_of(st5, bio_ontology)
assert not st3.refinement_of(st5, bio_ontology)
assert not st4.refinement_of(st5, bio_ontology)
def test_translocation():
st1 = Translocation(Agent('AKT'), None, None)
st2 = Translocation(Agent('AKT'), None, 'plasma membrane')
st3 = Translocation(Agent('AKT'), None, 'nucleus')
pa = Preassembler(bio_ontology, stmts=[st1, st2, st3])
pa.combine_related()
assert len(pa.related_stmts) == 2, pa.related_stmts
def test_grounding_aggregation():
braf1 = Agent('BRAF', db_refs={'TEXT': 'braf', 'HGNC': '1097'})
braf2 = Agent('BRAF', db_refs={'TEXT': 'BRAF'})
braf3 = Agent('BRAF', db_refs={'TEXT': 'Braf', 'UP': 'P15056'})
braf4 = Agent('BRAF', db_refs={'TEXT': 'B-raf', 'UP': 'P15056',
'HGNC': '1097'})
st1 = Phosphorylation(None, braf1)
st2 = Phosphorylation(None, braf2)
st3 = Phosphorylation(None, braf3)
st4 = Phosphorylation(None, braf4)
pa = Preassembler(bio_ontology, stmts=[st1, st2, st3, st4])
unique_stmts = pa.combine_duplicates()
assert len(unique_stmts) == 3, unique_stmts
def test_grounding_aggregation_complex():
mek = Agent('MEK')
braf1 = Agent('BRAF', db_refs={'TEXT': 'braf', 'HGNC': '1097'})
braf2 = Agent('BRAF', db_refs={'TEXT': 'BRAF', 'dummy': 'dummy'})
braf3 = Agent('BRAF', db_refs={'TEXT': 'Braf', 'UP': 'P15056'})
st1 = Complex([mek, braf1])
st2 = Complex([braf2, mek])
st3 = Complex([mek, braf3])
pa = Preassembler(bio_ontology, stmts=[st1, st2, st3])
unique_stmts = pa.combine_duplicates()
assert len(unique_stmts) == 3, unique_stmts
def test_render_stmt_graph():
braf = Agent('BRAF', db_refs={'HGNC': '1097'})
mek1 = Agent('MAP2K1', db_refs={'HGNC': '6840'})
    mek = Agent('MEK', db_refs={'FPLX': 'MEK'})
# Statements
p0 = Phosphorylation(braf, mek)
p1 = Phosphorylation(braf, mek1)
p2 = Phosphorylation(braf, mek1, position='218')
p3 = Phosphorylation(braf, mek1, position='222')
p4 = Phosphorylation(braf, mek1, 'serine')
p5 = Phosphorylation(braf, mek1, 'serine', '218')
p6 = Phosphorylation(braf, mek1, 'serine', '222')
stmts = [p0, p1, p2, p3, p4, p5, p6]
pa = Preassembler(bio_ontology, stmts=stmts)
pa.combine_related()
graph = render_stmt_graph(pa.related_stmts, reduce=False)
# One node for each statement
assert len(graph.nodes()) == 7
# Edges:
# p0 supports p1-p6 = 6 edges
# p1 supports p2-p6 = 5 edges
# p2 supports p5 = 1 edge
# p3 supports p6 = 1 edge
# p4 supports p5-p6 = 2 edges
# (p5 and p6 support none--they are top-level)
# 6 + 5 + 1 + 1 + 2 = 15 edges
assert len(graph.edges()) == 15
def test_flatten_evidence_hierarchy():
braf = Agent('BRAF')
mek = Agent('MAP2K1')
st1 = Phosphorylation(braf, mek, evidence=[Evidence(text='foo')])
st2 = Phosphorylation(braf, mek, 'S', '218',
evidence=[Evidence(text='bar')])
pa = Preassembler(bio_ontology, stmts=[st1, st2])
pa.combine_related()
assert len(pa.related_stmts) == 1
flattened = flatten_evidence(pa.related_stmts)
assert len(flattened) == 1
top_stmt = flattened[0]
assert len(top_stmt.evidence) == 2
assert 'bar' in [e.text for e in top_stmt.evidence]
assert 'foo' in [e.text for e in top_stmt.evidence]
assert len(top_stmt.supported_by) == 1
supporting_stmt = top_stmt.supported_by[0]
assert len(supporting_stmt.evidence) == 1
assert supporting_stmt.evidence[0].text == 'foo'
supporting_stmt.evidence[0].text = 'changed_foo'
assert supporting_stmt.evidence[0].text == 'changed_foo'
assert 'changed_foo' not in [e.text for e in top_stmt.evidence]
assert 'foo' in [e.text for e in top_stmt.evidence]
assert {ev.annotations.get('support_type') for ev in top_stmt.evidence} \
== {'direct', 'supported_by'}
def test_flatten_evidence_multilevel():
braf = Agent('BRAF')
mek = Agent('MAP2K1')
st1 = Phosphorylation(braf, mek, evidence=[Evidence(text='foo')])
st2 = Phosphorylation(braf, mek, 'S',
evidence=[Evidence(text='bar')])
st3 = Phosphorylation(braf, mek, 'S', '218',
evidence=[Evidence(text='baz')])
pa = Preassembler(bio_ontology, stmts=[st1, st2, st3])
pa.combine_related()
assert len(pa.related_stmts) == 1
flattened = flatten_evidence(pa.related_stmts)
assert len(flattened) == 1
top_stmt = flattened[0]
assert len(top_stmt.evidence) == 3, len(top_stmt.evidence)
anns = [ev.annotations['support_type'] for ev in top_stmt.evidence]
assert anns.count('direct') == 1
assert anns.count('supported_by') == 2
def test_flatten_evidence_hierarchy_supports():
braf = Agent('BRAF')
mek = Agent('MAP2K1')
st1 = Phosphorylation(braf, mek, evidence=[Evidence(text='foo')])
st2 = Phosphorylation(braf, mek, 'S', '218',
evidence=[Evidence(text='bar')])
pa = Preassembler(bio_ontology, stmts=[st1, st2])
pa_stmts = pa.combine_related(return_toplevel=False)
assert len(pa_stmts) == 2
flattened = flatten_evidence(pa_stmts, collect_from='supports')
assert len(flattened) == 2
top_stmt = flattened[1]
assert len(top_stmt.evidence) == 1
assert 'bar' in [e.text for e in top_stmt.evidence]
assert len(top_stmt.supported_by) == 1
supporting_stmt = top_stmt.supported_by[0]
assert len(supporting_stmt.evidence) == 2
assert set([e.text for e in supporting_stmt.evidence]) == {'foo', 'bar'}
def test_flatten_stmts():
st1 = Phosphorylation(Agent('MAP3K5'), Agent('RAF1'), 'S', '338')
st2 = Phosphorylation(None, Agent('RAF1'), 'S', '338')
st3 = Phosphorylation(None, Agent('RAF1'))
st4 = Phosphorylation(Agent('PAK1'), Agent('RAF1'), 'S', '338')
st5 = Phosphorylation(None, Agent('RAF1'), evidence=Evidence(text='foo'))
pa = Preassembler(bio_ontology, stmts=[st1, st2, st3, st4, st5])
pa.combine_duplicates()
pa.combine_related()
assert len(pa.related_stmts) == 2
assert len(flatten_stmts(pa.unique_stmts)) == 4
assert len(flatten_stmts(pa.related_stmts)) == 4
def test_complex_refinement_order():
st1 = Complex([Agent('MED23'), Agent('ELK1')])
st2 = Complex([Agent('ELK1', mods=[ModCondition('phosphorylation')]),
Agent('MED23')])
pa = Preassembler(bio_ontology, stmts=[st1, st2])
pa.combine_duplicates()
pa.combine_related()
assert len(pa.related_stmts) == 1
def test_activation_refinement():
subj = Agent('alcohol', db_refs={'CHEBI': 'CHEBI:16236',
'HMDB': 'HMDB00108',
'PUBCHEM': '702',
'TEXT': 'alcohol'})
obj = Agent('endotoxin', db_refs={'TEXT': 'endotoxin'})
st1 = Inhibition(subj, obj)
st2 = Activation(subj, obj)
pa = Preassembler(bio_ontology, stmts=[st1, st2])
pa.combine_duplicates()
assert len(pa.unique_stmts) == 2
pa.combine_related()
assert len(pa.related_stmts) == 2
def test_homodimer_refinement():
egfr = Agent('EGFR')
erbb = Agent('ERBB2')
st1 = Complex([erbb, erbb])
st2 = Complex([erbb, egfr])
pa = Preassembler(bio_ontology, stmts=[st1, st2])
pa.combine_duplicates()
assert len(pa.unique_stmts) == 2
pa.combine_related()
assert len(pa.related_stmts) == 2
def test_return_toplevel():
src = Agent('SRC', db_refs = {'HGNC': '11283'})
nras = Agent('NRAS', db_refs = {'HGNC': '7989'})
st1 = Phosphorylation(src, nras, 'tyrosine', '32')
st2 = Phosphorylation(src, nras)
pa = Preassembler(bio_ontology, stmts=[st1, st2])
stmts = pa.combine_related(return_toplevel=True)
assert len(stmts) == 1
assert len(stmts[0].supported_by) == 1
assert len(stmts[0].supported_by[0].supports) == 1
stmts = pa.combine_related(return_toplevel=False)
assert len(stmts) == 2
ix = 1 if stmts[0].residue else 0
assert len(stmts[1-ix].supported_by) == 1
assert len(stmts[1-ix].supported_by[0].supports) == 1
assert len(stmts[ix].supports) == 1
assert len(stmts[ix].supports[0].supported_by) == 1
def test_multiprocessing():
braf = Agent('BRAF', db_refs={'HGNC': '1097'})
mek1 = Agent('MAP2K1', db_refs={'HGNC': '6840'})
    mek = Agent('MEK', db_refs={'FPLX': 'MEK'})
# Statements
p0 = Phosphorylation(braf, mek)
p1 = Phosphorylation(braf, mek1)
p2 = Phosphorylation(braf, mek1, position='218')
p3 = Phosphorylation(braf, mek1, position='222')
p4 = Phosphorylation(braf, mek1, 'serine')
p5 = Phosphorylation(braf, mek1, 'serine', '218')
p6 = Phosphorylation(braf, mek1, 'serine', '222')
p7 = Dephosphorylation(braf, mek1)
stmts = [p0, p1, p2, p3, p4, p5, p6, p7]
pa = Preassembler(bio_ontology, stmts=stmts)
# Size cutoff set to a low number so that one group will run remotely
# and one locally
toplevel = pa.combine_related(return_toplevel=True, poolsize=1,
size_cutoff=2)
assert len(toplevel) == 3, 'Got %d toplevel statements.' % len(toplevel)
def test_conversion_refinement():
ras = Agent('RAS', db_refs={'FPLX': 'RAS'})
hras = Agent('HRAS', db_refs={'HGNC': '5173'})
gtp = Agent('GTP')
gdp = Agent('GDP')
st1 = Conversion(ras, gtp, gdp)
st2 = Conversion(hras, gtp, gdp)
st3 = Conversion(hras, [gtp, gdp], gdp)
st4 = Conversion(hras, [gdp, gtp], gdp)
pa = Preassembler(bio_ontology, stmts=[st1, st2, st3, st4])
toplevel_stmts = pa.combine_related()
assert len(toplevel_stmts) == 2
def test_influence_duplicate():
gov = 'wm/concept/causal_factor/social_and_political/government'
agr = 'wm/concept/causal_factor/agriculture/crop_production'
cgov = Event(Concept('government', db_refs={'WM': [(gov, 1.0)]}))
cagr = Event(Concept('agriculture', db_refs={'WM': [(agr, 1.0)]}))
print(cgov.matches_key())
stmt1 = Influence(cgov, cagr, evidence=[Evidence(source_api='eidos1')])
stmt2 = Influence(cagr, cgov, evidence=[Evidence(source_api='eidos2')])
stmt3 = Influence(cgov, cagr, evidence=[Evidence(source_api='eidos3')])
pa = Preassembler(world_ontology, [stmt1, stmt2, stmt3])
unique_stmts = pa.combine_duplicates()
unique_stmts = sorted(unique_stmts, key=lambda x: len(x.evidence))
assert len(unique_stmts) == 2
assert len(unique_stmts[0].evidence) == 1
assert len(unique_stmts[1].evidence) == 2, unique_stmts
sources = [e.source_api for e in unique_stmts[1].evidence]
assert set(sources) == {'eidos1', 'eidos3'}
def test_influence_refinement():
tran = 'wm/concept/causal_factor/access/infrastructure_access/'\
'transportation'
ship = 'wm/concept/causal_factor/access/infrastructure_access/' \
'transportation/shipping'
agr = 'wm/concept/causal_factor/economic_and_commerce/' \
'economic_activity/livelihood'
ctran = Event(Concept('transportation', db_refs={'WM': [(tran, 1.0)]}))
cship = Event(Concept('trucking', db_refs={'WM': [(ship, 1.0)]}))
cagr = Event(Concept('agriculture', db_refs={'WM': [(agr, 1.0)]}))
stmt1 = Influence(ctran, cagr, evidence=[Evidence(source_api='eidos1')])
stmt2 = Influence(cship, cagr, evidence=[Evidence(source_api='eidos2')])
stmt3 = Influence(cagr, ctran, evidence=[Evidence(source_api='eidos3')])
pa = Preassembler(world_ontology, [stmt1, stmt2, stmt3])
rel_stmts = pa.combine_related()
assert len(rel_stmts) == 2, rel_stmts
truck_stmt = [st for st in rel_stmts if st.subj.concept.name ==
'trucking'][0]
assert len(truck_stmt.supported_by) == 1
assert truck_stmt.supported_by[0].subj.concept.name == 'transportation'
def test_find_contradicts():
st1 = Inhibition(Agent('a'), Agent('b'))
st2 = Activation(Agent('a'), Agent('b'))
st3 = IncreaseAmount(Agent('a'), Agent('b'))
st4 = DecreaseAmount(Agent('a'), Agent('b'))
st5 = ActiveForm(Agent('a',
mods=[ModCondition('phosphorylation', None, None, True)]),
'kinase', True)
st6 = ActiveForm(Agent('a',
mods=[ModCondition('phosphorylation', None, None, True)]),
'kinase', False)
pa = Preassembler(bio_ontology, [st1, st2, st3, st4, st5, st6])
contradicts = pa.find_contradicts()
assert len(contradicts) == 3
for s1, s2 in contradicts:
assert {s1.uuid, s2.uuid} in ({st1.uuid, st2.uuid},
{st3.uuid, st4.uuid},
{st5.uuid, st6.uuid})
def test_find_contradicts_refinement():
ras = Agent('RAS', db_refs={'FPLX': 'RAS'})
kras = Agent('KRAS', db_refs={'HGNC': '6407'})
hras = Agent('HRAS', db_refs={'HGNC': '5173'})
st1 = Phosphorylation(Agent('x'), ras)
st2 = Dephosphorylation(Agent('x'), kras)
st3 = Dephosphorylation(Agent('x'), hras)
pa = Preassembler(bio_ontology, [st1, st2, st3])
contradicts = pa.find_contradicts()
assert len(contradicts) == 2
for s1, s2 in contradicts:
assert {s1.uuid, s2.uuid} in ({st1.uuid, st2.uuid},
{st1.uuid, st3.uuid})
def test_preassemble_related_complex():
ras = Agent('RAS', db_refs={'FPLX': 'RAS'})
kras = Agent('KRAS', db_refs={'HGNC': '6407'})
hras = Agent('HRAS', db_refs={'HGNC': '5173'})
st1 = Complex([kras, hras])
st2 = Complex([kras, ras])
st3 = Complex([hras, kras])
st4 = Complex([ras, kras])
pa = Preassembler(bio_ontology, [st1, st2, st3, st4])
uniq = pa.combine_duplicates()
assert len(uniq) == 2
top = pa.combine_related()
assert len(top) == 1
def test_normalize_opposites():
concept1 = 'wm/concept/causal_factor/food_security/food_stability'
concept2 = 'wm/concept/causal_factor/food_insecurity/food_instability'
concept3 = ('wm/concept/causal_factor/crisis_and_disaster/'
'environmental_disasters/natural_disaster/flooding')
# First test the inherently positive being the main grounding
dbr = {'WM': [(concept1, 1.0), (concept2, 0.5), (concept3, 0.1)]}
ev = Event(Concept('x', db_refs=dbr),
delta=QualitativeDelta(polarity=1))
pa = Preassembler(world_ontology, stmts=[ev])
pa.normalize_opposites(ns='WM')
# We are normalizing to food supply since that is the inherently
# positive concept
assert pa.stmts[0].concept.db_refs['WM'][0] == \
(concept1, 1.0), pa.stmts[0].concept.db_refs['WM']
assert pa.stmts[0].concept.db_refs['WM'][1] == \
(concept1, 0.5), pa.stmts[0].concept.db_refs['WM']
assert pa.stmts[0].concept.db_refs['WM'][2] == \
(concept3, 0.1), pa.stmts[0].concept.db_refs['WM']
assert pa.stmts[0].delta.polarity == 1
# Next test the inherently negative being the main grounding
dbr = {'WM': [(concept2, 1.0), (concept1, 0.5), (concept3, 0.1)]}
ev = Event(Concept('x', db_refs=dbr),
delta=QualitativeDelta(polarity=1))
pa = Preassembler(world_ontology, stmts=[ev])
pa.normalize_opposites(ns='WM')
# We are normalizing to food supply since that is the inherently
# positive concept
assert pa.stmts[0].concept.db_refs['WM'][0] == \
(concept1, 1.0), pa.stmts[0].concept.db_refs['WM']
assert pa.stmts[0].concept.db_refs['WM'][1] == \
(concept1, 0.5), pa.stmts[0].concept.db_refs['WM']
assert pa.stmts[0].concept.db_refs['WM'][2] == \
(concept3, 0.1), pa.stmts[0].concept.db_refs['WM']
assert pa.stmts[0].delta.polarity == -1
def test_normalize_opposites_influence():
concept1 = 'wm/concept/causal_factor/food_security/food_stability'
concept2 = 'wm/concept/causal_factor/food_insecurity/food_instability'
dbr1 = {'WM': [(concept1, 1.0), (concept2, 0.5)]}
dbr2 = {'WM': [(concept2, 1.0), (concept1, 0.5)]}
stmt = Influence(Event(Concept('x', db_refs=dbr1),
delta=QualitativeDelta(polarity=1)),
Event(Concept('y', db_refs=dbr2),
delta=QualitativeDelta(polarity=-1)))
pa = Preassembler(world_ontology, stmts=[stmt])
pa.normalize_opposites(ns='WM')
assert pa.stmts[0].subj.delta.polarity == 1
assert pa.stmts[0].obj.delta.polarity == 1
def test_normalize_opposites_association():
concept1 = 'wm/concept/causal_factor/food_security/food_stability'
concept2 = 'wm/concept/causal_factor/food_insecurity/food_instability'
dbr1 = {'WM': [(concept1, 1.0), (concept2, 0.5)]}
dbr2 = {'WM': [(concept2, 1.0), (concept1, 0.5)]}
stmt = Association([Event(Concept('x', db_refs=dbr1),
delta=QualitativeDelta(polarity=1)),
Event(Concept('y', db_refs=dbr2),
delta=QualitativeDelta(polarity=-1))])
pa = Preassembler(world_ontology, stmts=[stmt])
pa.normalize_opposites(ns='WM')
assert pa.stmts[0].members[0].delta.polarity == 1
assert pa.stmts[0].members[1].delta.polarity == 1
def test_agent_text_storage():
A1 = Agent('A', db_refs={'TEXT': 'A'})
A2 = Agent('A', db_refs={'TEXT': 'alpha'})
B1 = Agent('B', db_refs={'TEXT': 'bag'})
B2 = Agent('B', db_refs={'TEXT': 'bug'})
C = Agent('C')
D = Agent('D')
inp = [
Complex([A1, B1], evidence=Evidence(text='A complex bag.')),
Complex([B2, A2], evidence=Evidence(text='bug complex alpha once.')),
Complex([B2, A2], evidence=Evidence(text='bug complex alpha again.')),
Complex([A1, C, B2], evidence=Evidence(text='A complex C bug.')),
Phosphorylation(A1, B1, evidence=Evidence(text='A phospo bags.')),
Phosphorylation(A2, B2, evidence=Evidence(text='alpha phospho bugs.')),
Conversion(D, [A1, B1], [C, D],
evidence=Evidence(text='D: A bag -> C D')),
Conversion(D, [B1, A2], [C, D],
evidence=Evidence(text='D: bag a -> C D')),
Conversion(D, [B2, A2], [D, C],
evidence=Evidence(text='D: bug a -> D C')),
Conversion(D, [B1, A1], [C, D],
evidence=Evidence(text='D: bag A -> C D')),
Conversion(D, [A1], [A1, C],
evidence=Evidence(text='D: A -> A C'))
]
pa = Preassembler(bio_ontology, inp)
unq1 = pa.combine_duplicates()
assert len(unq1) == 5, len(unq1)
assert all([len(ev.annotations['prior_uuids']) == 1
for s in unq1 for ev in s.evidence
if len(s.evidence) > 1]),\
'There can only be one prior evidence per uuid at this stage.'
ev_uuid_dict = {ev.annotations['prior_uuids'][0]: ev.annotations['agents']
for s in unq1 for ev in s.evidence}
for s in inp:
raw_text = [ag.db_refs.get('TEXT')
for ag in s.agent_list(deep_sorted=True)]
assert raw_text == ev_uuid_dict[s.uuid]['raw_text'],\
str(raw_text) + '!=' + str(ev_uuid_dict[s.uuid]['raw_text'])
# Now run pa on the above corpus plus another statement.
inp2 = unq1 + [
Complex([A1, C, B1], evidence=Evidence(text='A complex C bag.'))
]
pa2 = Preassembler(bio_ontology, inp2)
unq2 = pa2.combine_duplicates()
assert len(unq2) == 5, len(unq2)
old_ev_list = []
new_ev = None
for s in unq2:
for ev in s.evidence:
if ev.text == inp2[-1].evidence[0].text:
new_ev = ev
else:
old_ev_list.append(ev)
assert all([len(ev.annotations['prior_uuids']) == 2 for ev in old_ev_list])
assert new_ev
assert len(new_ev.annotations['prior_uuids']) == 1
def test_agent_coordinates():
path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'reach_coordinates.json')
stmts = reach.process_json_file(path).statements
pa = Preassembler(bio_ontology, stmts)
unique_stmt = pa.combine_duplicates()[0]
agent_annots = [ev.annotations['agents'] for ev in unique_stmt.evidence]
assert all(a['raw_text'] == ['MEK1', 'ERK2'] for a in agent_annots)
assert {tuple(a['coords']) for a in agent_annots} == {((21, 25), (0, 4)),
((0, 4), (15, 19))}
def test_association_duplicate():
ev1 = Event(Concept('a'))
ev2 = Event(Concept('b'))
ev3 = Event(Concept('c'))
# Order of members does not matter
st1 = Association([ev1, ev2], evidence=[Evidence(source_api='eidos1')])
st2 = Association([ev1, ev3], evidence=[Evidence(source_api='eidos2')])
st3 = Association([ev2, ev1], evidence=[Evidence(source_api='eidos3')])
st4 = Association([ev2, ev3], evidence=[Evidence(source_api='eidos4')])
st5 = Association([ev2, ev3], evidence=[Evidence(source_api='eidos5')])
eidos_ont = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'../sources/eidos/eidos_ontology.rdf')
pa = Preassembler(world_ontology, [st1, st2, st3, st4, st5])
unique_stmts = pa.combine_duplicates()
assert len(unique_stmts) == 3
assert len(unique_stmts[0].evidence) == 2
assert len(unique_stmts[1].evidence) == 1
assert len(unique_stmts[2].evidence) == 2
sources = [e.source_api for e in unique_stmts[0].evidence]
assert set(sources) == {'eidos1', 'eidos3'}
def test_association_refinement():
unrelated = 'wm/concept/causal_factor/wild_food_sources'
parent = 'wm/concept/causal_factor/health_and_life'
child = 'wm/concept/causal_factor/health_and_life/' \
'living_condition/food_safety'
parent_event = Event(Concept('parent', db_refs={'WM': [(parent, 1.0)]}))
unrelated_event = \
Event(Concept('unrelated', db_refs={'WM': [(unrelated, 1.0)]}))
child_event = Event(Concept('child',
db_refs={'WM': [(child, 1.0)]}))
st1 = Association([parent_event, unrelated_event],
evidence=[Evidence(source_api='eidos1')])
st2 = Association([unrelated_event, parent_event],
evidence=[Evidence(source_api='eidos2')])
st3 = Association([parent_event, child_event],
evidence=[Evidence(source_api='eidos3')])
st4 = Association([unrelated_event, child_event],
evidence=[Evidence(source_api='eidos4')])
pa = Preassembler(world_ontology, [st1, st2, st3, st4])
unique_stmts = pa.combine_duplicates()
assert len(unique_stmts) == 3
top_level_stmts = pa.combine_related()
assert len(top_level_stmts) == 2, top_level_stmts
names = {tuple(sorted(e.concept.name for e in stmt.members)): stmt
for stmt in top_level_stmts}
stmt = names[('child', 'unrelated')]
assert len(stmt.supported_by) == 1
assert {e.concept.name for e in stmt.supported_by[0].members} == \
{'parent', 'unrelated'}
def test_matches_key_fun():
from indra.statements import WorldContext, RefContext
def has_location(stmt):
if not stmt.context or not stmt.context.geo_location or \
not stmt.context.geo_location.db_refs.get('GEOID'):
return False
return True
def event_location_matches(stmt):
if isinstance(stmt, Event):
if not has_location(stmt):
context_key = None
else:
context_key = stmt.context.geo_location.db_refs['GEOID']
matches_key = str((stmt.concept.matches_key(), context_key))
else:
matches_key = stmt.matches_key()
return matches_key
def event_location_refinement(st1, st2, ontology, entities_refined):
if isinstance(st1, Event) and isinstance(st2, Event):
ref = st1.refinement_of(st2, ontology)
if not ref:
return False
if not has_location(st2):
return True
elif not has_location(st1) and has_location(st2):
return False
else:
return st1.context.geo_location.db_refs['GEOID'] == \
st2.context.geo_location.db_refs['GEOID']
context1 = WorldContext(geo_location=RefContext('x',
db_refs={'GEOID': '1'}))
context2 = WorldContext(geo_location=RefContext('x',
db_refs={'GEOID': '2'}))
health = 'wm/concept/causal_factor/health_and_life'
e1 = Event(Concept('health', db_refs={'WM': [(health, 1.0)]}),
context=context1,
evidence=Evidence(text='1', source_api='eidos'))
e2 = Event(Concept('health', db_refs={'WM': [(health, 1.0)]}),
context=context2,
evidence=Evidence(text='2', source_api='eidos'))
e3 = Event(Concept('health', db_refs={'WM': [(health, 1.0)]}),
context=context2,
evidence=Evidence(text='3', source_api='eidos'))
pa = Preassembler(world_ontology, [e1, e2, e3],
matches_fun=event_location_matches,
refinement_fun=event_location_refinement)
unique_stmts = pa.combine_duplicates()
assert len(unique_stmts) == 2, unique_stmts
from indra.tools.assemble_corpus import run_preassembly
stmts = run_preassembly([e1, e2, e3], matches_fun=event_location_matches,
refinement_fun=event_location_refinement)
assert len(stmts) == 2, stmts
def test_uppro_assembly():
ag1 = Agent('x', db_refs={'UP': 'P01019', 'UPPRO': 'PRO_0000032457'})
ag2 = Agent('y', db_refs={'UP': 'P01019', 'UPPRO': 'PRO_0000032458'})
assert ag1.get_grounding() == ('UPPRO', ag1.db_refs['UPPRO'])
assert ag2.get_grounding() == ('UPPRO', ag2.db_refs['UPPRO'])
stmt1 = Phosphorylation(None, ag1)
stmt2 = Phosphorylation(None, ag2)
assert stmt1.matches_key() != stmt2.matches_key()
pa = Preassembler(bio_ontology, [stmt1, stmt2])
unique_stmts = pa.combine_duplicates()
assert len(unique_stmts) == 2, unique_stmts
from indra.tools import assemble_corpus as ac
stmts = ac.map_grounding([stmt1, stmt2])
pa = Preassembler(bio_ontology, stmts)
unique_stmts = pa.combine_duplicates()
assert len(unique_stmts) == 2
def test_split_idx():
ras = Agent('RAS', db_refs={'FPLX': 'RAS'})
kras = Agent('KRAS', db_refs={'HGNC': '6407'})
hras = Agent('HRAS', db_refs={'HGNC': '5173'})
st1 = Phosphorylation(Agent('x'), ras)
st2 = Phosphorylation(Agent('x'), kras)
st3 = Phosphorylation(Agent('x'), hras)
pa = Preassembler(bio_ontology)
maps = pa._generate_id_maps([st1, st2, st3])
assert (1, 0) in maps, maps
assert (2, 0) in maps, maps
assert pa._comparison_counter == 2
pa = Preassembler(bio_ontology)
maps = pa._generate_id_maps([st1, st2, st3], split_idx=1)
assert (2, 0) in maps, maps
assert (1, 0) not in maps, maps
assert pa._comparison_counter == 1
|
the-stack_106_24140 | import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from itertools import product
import pprint
import shutil
import os
import sonnet as snt
import itertools
from tensorflow.python.util import nest
import matplotlib.pyplot as plt
from matplotlib import animation
import matplotlib.gridspec as gridspec
import matplotlib.patches as patches
from matplotlib.colors import to_rgb
from dps import cfg
from dps.utils import Param, map_structure, Config, execute_command, cd, AttrDict
from dps.utils.tf import RenderHook, tf_mean_sum, tf_shape, MLP
from dps.utils.tensor_arrays import apply_keys, append_to_tensor_arrays, make_tensor_arrays
from auto_yolo.models.core import AP, xent_loss, coords_to_pixel_space, concrete_binary_sample_kl
from auto_yolo.models.object_layer import GridObjectLayer, ConvGridObjectLayer, ObjectRenderer
from auto_yolo.models.networks import SpatialAttentionLayerV2, DummySpatialAttentionLayer
from auto_yolo.models.obj_kl import ObjKL
from silot.core import VideoNetwork, MOTMetrics, get_object_ids
from silot.propagation import ObjectPropagationLayer, SQAIRPropagationLayer
def get_object_features(objects, use_abs_posn, is_posterior):
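    """Concatenate per-object latents (scale, attributes, depth, presence and
    propagation state, plus absolute position if requested) into a single
    feature vector per object, using the posterior or prior propagation state
    depending on `is_posterior`."""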
prop_state = objects.prop_state if is_posterior else objects.prior_prop_state
if use_abs_posn:
return tf.concat(
[objects.abs_posn,
objects.normalized_box[..., 2:],
objects.attr,
objects.z,
objects.obj,
prop_state], axis=-1)
else:
return tf.concat(
[objects.normalized_box[..., 2:],
objects.attr,
objects.z,
objects.obj,
prop_state], axis=-1)
class PropagationObjKL(ObjKL):
def __call__(self, tensors, prior_d_obj_log_odds):
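        """KL between the posterior and the learned prior over the discrete
        change-in-presence (d_obj) variables, summed over objects for each
        batch element."""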
kl = concrete_binary_sample_kl(
tensors["d_obj_pre_sigmoid"],
tensors["d_obj_log_odds"], self.obj_concrete_temp,
prior_d_obj_log_odds, self.obj_concrete_temp)
batch_size = tf_shape(tensors["d_obj_pre_sigmoid"])[0]
return tf.reduce_sum(tf.reshape(kl, (batch_size, -1)), 1)
def select_top_k_objects(prop, disc):
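    """Merge propagated and discovered object sets.

    For each batch element, the `n_prop_objects` objects with the highest
    presence (`obj`) across both sets are kept. Propagated objects that
    survive keep their original slot; the remaining slots are filled, in
    order, with the selected discovered objects (flagged via `is_new`).
    """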
batch_size, *prop_other, final = tf_shape(prop.obj)
assert final == 1
n_prop_objects = np.product(prop_other)
_, *disc_other, _ = tf_shape(disc.obj)
n_disc_objects = np.product(disc_other)
n_disc_obj_dim = len(disc_other)
prop_presence = tf.reshape(prop.obj, (batch_size, n_prop_objects))
disc_presence = tf.reshape(disc.obj, (batch_size, n_disc_objects))
all_presence = tf.concat([prop_presence, disc_presence], axis=1)
_, top_k_indices = tf.nn.top_k(all_presence, k=n_prop_objects, sorted=False)
top_k_indices = tf.sort(top_k_indices, axis=1)
top_k_indices = tf.reshape(top_k_indices, (batch_size, n_prop_objects))
from_prop = tf.cast(top_k_indices < n_prop_objects, tf.int32)
n_from_prop = tf.reduce_sum(from_prop, axis=1)
scatter_indices = tf.concat(
[tf.tile(tf.range(batch_size)[:, None, None], (1, n_prop_objects, 1)),
top_k_indices[:, :, None]],
axis=2
)
# create an array of shape (batch_size, n_prop_objects+n_disc_objects) that
# has a 1 for every index that is in the top_k for that batch element
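    # e.g. (illustrative) with n_prop_objects=2 and n_disc_objects=3: if
    # top_k_indices for one batch element is [1, 3], then in_top_k for that
    # element is [0, 1, 0, 1, 0].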
in_top_k = tf.scatter_nd(
scatter_indices, tf.ones((batch_size, n_prop_objects), dtype=tf.int32),
(batch_size, n_prop_objects+n_disc_objects))
from_disc_idx = n_from_prop
new_indices = []
is_new = []
for i in range(n_prop_objects):
# indices to use for gather if i is not present in top_k
gather_indices = tf.concat([tf.range(batch_size)[:, None], from_disc_idx[:, None]], axis=1)
other = tf.gather_nd(top_k_indices, gather_indices)
i_present = in_top_k[:, i]
indices = tf.where(tf.cast(i_present, tf.bool), i * tf.ones_like(other), other)
from_disc_idx += 1 - i_present
new_indices.append(indices)
is_new.append(1 - i_present)
top_k_indices = tf.stack(new_indices, axis=1)
is_new = tf.stack(is_new, axis=1)
batch_indices = tf.tile(tf.range(batch_size)[:, None, None], (1, n_prop_objects, 1))
index_array = tf.concat([batch_indices, top_k_indices[:, :, None]], axis=2)
selected_objects = AttrDict()
shared_keys = disc.keys() & prop.keys()
for key in shared_keys:
trailing_dims = tf_shape(disc[key])[1+n_disc_obj_dim:]
disc_value = tf.reshape(disc[key], (batch_size, n_disc_objects, *trailing_dims))
values = tf.concat([prop[key], disc_value], axis=1)
selected_objects[key] = tf.gather_nd(values, index_array)
selected_objects.update(
pred_n_objects=tf.reduce_sum(selected_objects.obj, axis=(1, 2)),
pred_n_objects_hard=tf.reduce_sum(tf.round(selected_objects.obj), axis=(1, 2)),
final_weights=tf.one_hot(top_k_indices, n_prop_objects + n_disc_objects, axis=-1),
is_new=is_new,
)
return selected_objects
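# A minimal NumPy sketch of the top-k merge above, for illustration only (not
# used by the model; the function name is made up here and ties are broken by
# argsort rather than exactly as tf.nn.top_k does):
def _select_top_k_presence_example(prop_presence, disc_presence, k):
    """Return indices into the concatenated [prop, disc] presence array of
    the k most present objects, in ascending index order."""
    all_presence = np.concatenate([prop_presence, disc_presence])
    top_k = np.argsort(-all_presence)[:k]
    return np.sort(top_k)
# e.g. _select_top_k_presence_example(np.array([0.9, 0.1]),
#                                     np.array([0.8, 0.2, 0.7]), k=2)
# returns array([0, 2]): propagated object 0 plus discovered object 0.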
class Prior_AP(AP):
def get_feed_dict(self, updater):
return {updater.network._prior_start_step: self.start_frame}
class Prior_MOTMetrics(MOTMetrics):
def get_feed_dict(self, updater):
return {updater.network._prior_start_step: self.start_frame}
class SILOT(VideoNetwork):
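    """Sequential object discovery and propagation model for video.

    Per frame: existing objects are propagated, new objects are discovered
    from backbone features fused with propagated-object features, the most
    present objects are selected via a top-k merge, and the selection is
    rendered on top of the (possibly learned) background.
    """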
build_backbone = Param()
build_discovery_feature_fuser = Param()
build_mlp = Param()
build_obj_kl = Param()
build_background_model = Param()
n_backbone_features = Param()
n_objects_per_cell = Param()
train_reconstruction = Param()
reconstruction_weight = Param()
train_kl = Param()
kl_weight = Param()
prior_start_step = Param()
eval_prior_start_step = Param()
learn_prior = Param()
n_hidden = Param()
disc_dropout_prob = Param()
anchor_box = Param()
object_shape = Param()
independent_prop = Param()
use_sqair_prop = Param()
conv_discovery = Param()
use_abs_posn = Param()
disc_layer = None
disc_feature_extractor = None
prop_layer = None
prior_prop_layer = None
prop_cell = None
prior_prop_cell = None
prop_feature_extractor = None
object_renderer = None
obj_kl = None
background_model = None
def __init__(self, *args, **kwargs):
self._prior_start_step = tf.constant(self.prior_start_step, tf.int32)
super().__init__(*args, **kwargs)
@property
def eval_funcs(self):
if getattr(self, '_eval_funcs', None) is None:
if "annotations" in self._tensors:
ap_iou_values = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
eval_funcs = {"AP_at_point_{}".format(int(10 * v)): AP(v) for v in ap_iou_values}
eval_funcs["AP"] = AP(ap_iou_values)
eval_funcs["AP_train"] = AP(ap_iou_values, is_training=True)
if cfg.mot_metrics:
eval_funcs["MOT"] = MOTMetrics()
eval_funcs["MOT_train"] = MOTMetrics(is_training=True)
if self.learn_prior:
eval_funcs["prior_AP"] = Prior_AP(ap_iou_values, start_frame=self.eval_prior_start_step)
if cfg.mot_metrics:
eval_funcs["prior_MOT"] = Prior_MOTMetrics(start_frame=self.eval_prior_start_step)
self._eval_funcs = eval_funcs
else:
self._eval_funcs = {}
return self._eval_funcs
def _loop_cond(self, f, *_):
return f < self.dynamic_n_frames
def _loop_body(
self, f, abs_posn, normalized_box, attr, z, obj, prop_state, prior_prop_state,
ys_logit, xs_logit, z_logit, *tensor_arrays):
objects = AttrDict(
abs_posn=abs_posn,
normalized_box=normalized_box,
ys_logit=ys_logit,
xs_logit=xs_logit,
attr=attr,
z=z,
z_logit=z_logit,
obj=obj,
prop_state=prop_state,
prior_prop_state=prior_prop_state,
)
structured_result = self._inner_loop_body(f, objects)
tensor_arrays = append_to_tensor_arrays(f, structured_result, tensor_arrays)
selected_objects = structured_result.selected_objects
f += 1
return [
f,
selected_objects.abs_posn,
selected_objects.normalized_box,
selected_objects.attr,
selected_objects.z,
selected_objects.obj,
selected_objects.prop_state,
selected_objects.prior_prop_state,
selected_objects.ys_logit,
selected_objects.xs_logit,
selected_objects.z_logit,
*tensor_arrays]
def _inner_loop_body(self, f, objects):
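        """Process one frame: propagate the current object set (from the
        learned prior once `_prior_start_step` is reached), discover new
        objects, select the top-k most present objects, render them over the
        background, and compute the per-frame KL terms."""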
use_prior_objects = tf.logical_and(
tf.logical_and(0 <= self._prior_start_step, self._prior_start_step <= f),
tf.constant(self.learn_prior, tf.bool))
float_use_prior_objects = tf.cast(use_prior_objects, tf.float32)
# --- prop ---
object_locs = objects.normalized_box[..., :2]
object_features = get_object_features(objects, self.use_abs_posn, is_posterior=True)
object_features_for_prop = self.prop_feature_extractor(
object_locs, object_features, object_locs, object_features, self.is_training)
post_prop_objects = self.prop_layer(
self.inp[:, f], object_features_for_prop, objects, self.is_training, is_posterior=True)
if self.learn_prior:
object_features = get_object_features(objects, self.use_abs_posn, is_posterior=False)
object_features_for_prop = self.prop_feature_extractor(
object_locs, object_features, object_locs, object_features, self.is_training)
prior_prop_objects = self.prior_prop_layer(
self.inp[:, f], object_features_for_prop, objects, self.is_training, is_posterior=False)
prop_objects = tf.cond(
use_prior_objects,
lambda: prior_prop_objects,
lambda: post_prop_objects)
prop_objects.prop_state = post_prop_objects.prop_state
prop_objects.prior_prop_state = prior_prop_objects.prior_prop_state
else:
prior_prop_objects = None
prop_objects = post_prop_objects
# --- get features of the propagated objects for discovery ---
object_locs = prop_objects.normalized_box[..., :2]
object_features = get_object_features(prop_objects, self.use_abs_posn, is_posterior=True)
object_features_for_disc = self.disc_feature_extractor(
object_locs, object_features, self.grid_cell_centers, None, self.is_training)
object_features_for_disc = tf.reshape(
object_features_for_disc, (self.batch_size, self.H, self.W, self.n_hidden))
# --- fuse features of the propagated objects with bottom-up features from the current frame ---
post_disc_features_inp = tf.concat(
[object_features_for_disc, self.backbone_output[:, f]], axis=-1)
post_disc_features = self.discovery_feature_fuser(
post_disc_features_inp, self.n_backbone_features, self.is_training)
post_disc_features = tf.reshape(
post_disc_features, (self.batch_size, self.H, self.W, self.n_backbone_features))
# --- discovery ---
post_disc_objects = self.disc_layer(
self.inp[:, f], post_disc_features, self.is_training,
is_posterior=True, prop_state=self.initial_prop_state)
post_disc_objects.abs_posn = (
post_disc_objects.normalized_box[..., :2]
+ tf.cast(self._tensors['offset'][:, None], tf.float32) / self.anchor_box
)
# --- discovery dropout ---
disc_mask_dist = tfp.distributions.Bernoulli(
(1.-self.disc_dropout_prob) * tf.ones(self.batch_size))
disc_mask = tf.cast(disc_mask_dist.sample(), tf.float32)
do_mask = self.float_is_training * tf.cast(f > 0, tf.float32)
disc_mask = do_mask * disc_mask + (1 - do_mask) * tf.ones(self.batch_size)
# Don't discover objects if using prior
disc_mask = float_use_prior_objects * tf.zeros(self.batch_size) + (1 - float_use_prior_objects) * disc_mask
disc_mask = disc_mask[:, None, None]
post_disc_objects.obj = disc_mask * post_disc_objects.obj
disc_objects = post_disc_objects
# --- object selection ---
selected_objects = select_top_k_objects(prop_objects, disc_objects)
# --- background prediction ---
if self.background_model is not None:
mask = self.object_renderer(
selected_objects, self._tensors["background"][:, f], self.is_training, mask_only=True)
background, bg_losses = self.background_model(self._tensors['inp'][:, f], mask)
else:
background = self._tensors['background'][:, f]
# --- rendering ---
render_tensors = self.object_renderer(selected_objects, background, self.is_training)
# --- appearance of object sets for plotting ---
prop_objects.update(
self.object_renderer(prop_objects, None, self.is_training, appearance_only=True))
disc_objects.update(
self.object_renderer(disc_objects, None, self.is_training, appearance_only=True))
selected_objects.update(
self.object_renderer(selected_objects, None, self.is_training, appearance_only=True))
# --- kl ---
prop_indep_prior_kl = self.prop_layer.compute_kl(post_prop_objects)
disc_indep_prior_kl = self.disc_layer.compute_kl(post_disc_objects)
obj_for_kl = AttrDict()
for name in "obj_pre_sigmoid obj_log_odds obj_prob".split():
obj_for_kl[name] = tf.concat(
[post_prop_objects["d_" + name], post_disc_objects[name]], axis=1)
obj_for_kl['obj'] = tf.concat([post_prop_objects['obj'], post_disc_objects['obj']], axis=1)
indep_prior_obj_kl = self.obj_kl(obj_for_kl)
_tensors = AttrDict(
post=AttrDict(
prop=prop_objects,
disc=disc_objects,
select=selected_objects,
render=render_tensors,
),
selected_objects=selected_objects,
prop_indep_prior_kl=prop_indep_prior_kl,
disc_indep_prior_kl=disc_indep_prior_kl,
indep_prior_obj_kl=indep_prior_obj_kl,
**render_tensors,
)
if self.learn_prior:
_tensors.prior = AttrDict(prop=prior_prop_objects)
prop_learned_prior_kl = self.prop_layer.compute_kl(post_prop_objects, prior=prior_prop_objects)
prop_learned_prior_kl['d_obj_kl'] = self.learned_prior_obj_kl(
post_prop_objects, prior_prop_objects['d_obj_log_odds'])
_tensors['prop_learned_prior_kl'] = prop_learned_prior_kl
return _tensors
def build_initial_object_feed_dict(self, objects):
return {v: objects[k] for k, v in self.initial_objects.items()}
def build_representation(self):
# --- init modules ---
self.maybe_build_subnet("backbone")
self.maybe_build_subnet("discovery_feature_fuser")
if self.build_background_model is not None:
self.maybe_build_subnet('background_model')
self.B = self.n_objects_per_cell
batch_size, T, *rest = tf_shape(self.inp)
_inp = tf.reshape(self.inp, (batch_size*T, *rest))
backbone_output = self.backbone(_inp, self.n_backbone_features, self.is_training)
output_shape = tf_shape(backbone_output)
rest = output_shape[1:]
self.backbone_output = tf.reshape(backbone_output, (batch_size, T, *rest))
n_grid_cells = self.backbone.layer_info[-1]['n_grid_cells']
grid_cell_size = self.backbone.layer_info[-1]['grid_cell_size']
self.H, self.W = [int(i) for i in n_grid_cells]
self.HWB = self.H * self.W * self.B
self.pixels_per_cell = tuple(int(i) for i in grid_cell_size)
H, W = self.H, self.W
if self.disc_layer is None:
if self.conv_discovery:
self.disc_layer = ConvGridObjectLayer(
flatten=True, pixels_per_cell=self.pixels_per_cell, scope="discovery")
else:
self.disc_layer = GridObjectLayer(pixels_per_cell=self.pixels_per_cell, scope="discovery")
if self.prop_layer is None:
if self.prop_cell is None:
self.prop_cell = cfg.build_prop_cell(2*self.n_hidden, name="prop_cell")
# self.prop_cell must be a Sonnet RNNCore
self.initial_prop_state = snt.trainable_initial_state(
1, self.prop_cell.state_size, tf.float32, name="prop_cell_initial_state")
prop_class = SQAIRPropagationLayer if self.use_sqair_prop else ObjectPropagationLayer
self.prop_layer = prop_class(self.prop_cell, scope="propagation")
if self.prior_prop_layer is None and self.learn_prior:
if self.prior_prop_cell is None:
self.prior_prop_cell = cfg.build_prop_cell(2*self.n_hidden, name="prior_prop_cell")
prop_class = SQAIRPropagationLayer if self.use_sqair_prop else ObjectPropagationLayer
self.prior_prop_layer = prop_class(self.prior_prop_cell, scope="prior_propagation")
if self.object_renderer is None:
self.object_renderer = ObjectRenderer(self.anchor_box, self.object_shape, scope="renderer")
if self.disc_feature_extractor is None:
self.disc_feature_extractor = SpatialAttentionLayerV2(
n_hidden=self.n_hidden,
build_mlp=lambda scope: MLP(n_units=[self.n_hidden, self.n_hidden], scope=scope),
do_object_wise=False,
scope="discovery_feature_extractor",
)
if self.prop_feature_extractor is None:
if self.independent_prop:
self.prop_feature_extractor = DummySpatialAttentionLayer(
n_hidden=self.n_hidden,
build_mlp=lambda scope: MLP(n_units=[self.n_hidden, self.n_hidden], scope=scope),
do_object_wise=True,
scope="propagation_feature_extractor",
)
else:
self.prop_feature_extractor = SpatialAttentionLayerV2(
n_hidden=self.n_hidden,
build_mlp=lambda scope: MLP(n_units=[self.n_hidden, self.n_hidden], scope=scope),
do_object_wise=True,
scope="propagation_feature_extractor",
)
if self.obj_kl is None:
self.obj_kl = self.build_obj_kl()
self.learned_prior_obj_kl = PropagationObjKL()
# centers of the grid cells in normalized (anchor box) space.
y = (np.arange(H, dtype='f') + 0.5) * self.pixels_per_cell[0] / self.anchor_box[0]
x = (np.arange(W, dtype='f') + 0.5) * self.pixels_per_cell[1] / self.anchor_box[1]
x, y = np.meshgrid(x, y)
self.grid_cell_centers = tf.constant(np.concatenate([y.flatten()[:, None], x.flatten()[:, None]], axis=1))
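        # e.g. (illustrative): with H=W=2, pixels_per_cell=(24, 24) and
        # anchor_box=(48, 48), the normalized centers are (0.25, 0.25),
        # (0.25, 0.75), (0.75, 0.25) and (0.75, 0.75).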
self.initial_objects = objects = self.prop_layer.null_object_set(self.batch_size)
f = tf.constant(0, dtype=tf.int32)
structure = self._inner_loop_body(f, objects)
tensor_arrays = make_tensor_arrays(structure, self.dynamic_n_frames)
loop_vars = [
f, objects.abs_posn, objects.normalized_box, objects.attr, objects.z, objects.obj,
objects.prop_state, objects.prior_prop_state, objects.ys_logit, objects.xs_logit, objects.z_logit,
*tensor_arrays]
result = tf.while_loop(self._loop_cond, self._loop_body, loop_vars)
first_ta = min(i for i, ta in enumerate(result) if isinstance(ta, tf.TensorArray))
tensor_arrays = result[first_ta:]
tensors = map_structure(lambda ta: ta.stack(), tensor_arrays, is_leaf=lambda t: isinstance(t, tf.TensorArray))
tensors = map_structure(
lambda t: tf.transpose(t, (1, 0, *range(2, len(t.shape)))),
tensors, is_leaf=lambda t: isinstance(t, tf.Tensor))
tensors = apply_keys(structure, tensors)
self._tensors.update(tensors)
self._tensors.update(**self._tensors['selected_objects'])
pprint.pprint(self._tensors)
# --- specify values to record ---
self.record_tensors(
batch_size=self.batch_size,
float_is_training=self.float_is_training,
)
prop_to_record = (
"yt xt ys xs z attr obj d_yt_logit d_xt_logit ys_logit xs_logit d_z_logit d_attr d_obj".split())
post_prop = self._tensors.post.prop
self.record_tensors(**{"post_prop_{}".format(k): post_prop[k] for k in prop_to_record})
self.record_tensors(
**{"post_prop_{}".format(k): v for k, v in post_prop.items() if k.endswith('_std')})
if "d_attr_gate" in post_prop:
self.record_tensors(post_prop_d_attr_gate=post_prop["d_attr_gate"])
if "f_gate" in post_prop:
self.record_tensors(post_prop_d_attr_f_gate=post_prop["f_gate"])
self.record_tensors(post_prop_d_attr_i_gate=post_prop["i_gate"])
self.record_tensors(post_prop_d_attr_t_gate=post_prop["t_gate"])
disc_to_record = "cell_y cell_x height width yt xt ys xs z attr obj".split()
post_disc = self._tensors.post.disc
self.record_tensors(**{"post_disc_{}".format(k): post_disc[k] for k in disc_to_record})
self.record_tensors(
**{"post_disc_{}".format(k): v for k, v in post_disc.items() if k.endswith('_std')})
if self.learn_prior:
prior_prop = self._tensors.prior.prop
self.record_tensors(**{"prior_prop_{}".format(k): prior_prop[k] for k in prop_to_record})
self.record_tensors(
**{"prior_prop_{}".format(k): v for k, v in prior_prop.items() if k.endswith('_std')})
# --- losses ---
if self.train_reconstruction:
output = self._tensors['output']
inp = self._tensors['inp']
self._tensors['per_pixel_reconstruction_loss'] = xent_loss(pred=output, label=inp)
self.losses['reconstruction'] = (
self.reconstruction_weight * tf_mean_sum(self._tensors['per_pixel_reconstruction_loss'])
)
if self.train_kl:
prop_obj = self._tensors.post.prop.obj
prop_indep_prior_kl = self._tensors["prop_indep_prior_kl"]
self.losses.update(
**{"prop_indep_prior_{}".format(k): self.kl_weight * tf_mean_sum(prop_obj * kl)
for k, kl in prop_indep_prior_kl.items()
if "obj" not in k}
)
disc_obj = self._tensors.post.disc.obj
disc_indep_prior_kl = self._tensors["disc_indep_prior_kl"]
self.losses.update(
**{"disc_indep_prior_{}".format(k): self.kl_weight * tf_mean_sum(disc_obj * kl)
for k, kl in disc_indep_prior_kl.items()
if "obj" not in k}
)
self.losses.update(
indep_prior_obj_kl=self.kl_weight * tf_mean_sum(self._tensors["indep_prior_obj_kl"]),
)
if self.learn_prior:
prop_learned_prior_kl = self._tensors["prop_learned_prior_kl"]
self.losses.update(
**{"prop_learned_prior_{}".format(k): self.kl_weight * tf_mean_sum(prop_obj * kl)
for k, kl in prop_learned_prior_kl.items()
if "obj" not in k}
)
self.losses.update(
learned_prior_obj_kl=self.kl_weight * tf_mean_sum(prop_learned_prior_kl["d_obj_kl"]),
)
if cfg.background_cfg.mode in ("learn_and_transform", "learn"):
self.losses.update(
bg_attr_kl=self.kl_weight * tf_mean_sum(self._tensors["bg_attr_kl"]),
)
if cfg.background_cfg.mode == "learn_and_transform":
self.losses.update(
bg_transform_kl=self.kl_weight * tf_mean_sum(self._tensors["bg_transform_kl"]),
)
# --- other evaluation metrics ---
if "n_annotations" in self._tensors:
count_1norm = tf.to_float(
tf.abs(
tf.to_int32(self._tensors["pred_n_objects_hard"])
- self._tensors["n_valid_annotations"]))
count_1norm_relative = (
count_1norm / tf.maximum(tf.cast(self._tensors["n_valid_annotations"], tf.float32), 1e-6))
self.record_tensors(
pred_n_objects=self._tensors["pred_n_objects"],
pred_n_objects_hard=self._tensors["pred_n_objects_hard"],
count_1norm_relative=count_1norm_relative,
count_1norm=count_1norm,
count_error=count_1norm > 0.5,
)
class SILOT_RenderHook(RenderHook):
N = 4
linewidth = 2
on_color = np.array(to_rgb("xkcd:azure"))
off_color = np.array(to_rgb("xkcd:red"))
selected_color = np.array(to_rgb("xkcd:neon green"))
unselected_color = np.array(to_rgb("xkcd:fire engine red"))
gt_color = "xkcd:yellow"
glimpse_color = "xkcd:green"
cutoff = 0.5
dpi = 50
def build_fetches(self, updater):
prop_names = (
"d_obj xs_logit d_xt_logit ys_logit d_yt_logit d_z_logit xs xt ys yt "
"glimpse normalized_box obj glimpse_prime z appearance glimpse_prime_box"
).split()
if updater.network.use_sqair_prop:
prop_names.extend(['glimpse_prime_mask', 'glimpse_mask'])
disc_names = "obj z appearance normalized_box glimpse".split()
select_names = "obj z normalized_box final_weights yt xt ys xs".split()
render_names = "output".split()
_fetches = Config(
post=Config(
disc=Config(**{n: 0 for n in disc_names}),
prop=Config(**{n: 0 for n in prop_names}),
select=Config(**{n: 0 for n in select_names}),
render=Config(**{n: 0 for n in render_names}),
),
)
fetches = ' '.join(list(_fetches.keys()))
fetches += " inp background offset"
network = updater.network
if "n_annotations" in network._tensors:
fetches += " annotations n_annotations"
if 'prediction' in network._tensors:
fetches += " prediction targets"
if "actions" in network._tensors:
fetches += " actions"
if "bg_y" in network._tensors:
fetches += " bg_y bg_x bg_h bg_w bg_raw"
return fetches
def __call__(self, updater):
fetched = self._fetch(updater)
fetched = Config(fetched)
self._prepare_fetched(updater, fetched)
self._plot(updater, fetched, post=True)
if updater.network.learn_prior and cfg.plot_prior:
extra_feed_dict = {updater.network._prior_start_step: updater.network.eval_prior_start_step}
fetched = self._fetch(updater, extra_feed_dict)
self._prepare_fetched(updater, fetched)
self._plot(updater, fetched, post=False)
def _prepare_fetched(self, updater, fetched):
inp = fetched['inp']
prediction = fetched.get("prediction", None)
targets = fetched.get("targets", None)
N, T, image_height, image_width, _ = inp.shape
background = fetched['background']
modes = "post"
for mode in modes.split():
for kind in "disc prop select".split():
yt, xt, ys, xs = np.split(fetched[mode][kind].normalized_box, 4, axis=-1)
pixel_space_box = coords_to_pixel_space(
yt, xt, ys, xs, (image_height, image_width), updater.network.anchor_box, top_left=True)
fetched[mode][kind].pixel_space_box = np.concatenate(pixel_space_box, axis=-1)
g_yt, g_xt, g_ys, g_xs = np.split(fetched[mode]["prop"].glimpse_prime_box, 4, axis=-1)
glimpse_prime_pixel_space_box = coords_to_pixel_space(
g_yt, g_xt, g_ys, g_xs, (image_height, image_width), updater.network.anchor_box, top_left=True)
fetched[mode]["prop"].glimpse_prime_pixel_space_box = np.concatenate(glimpse_prime_pixel_space_box, axis=-1)
output = fetched[mode].render.output
fetched[mode].render.diff = self.normalize_images(np.abs(inp - output).mean(axis=-1, keepdims=True))
fetched[mode].render.xent = self.normalize_images(
xent_loss(pred=output, label=inp, tf=False).mean(axis=-1, keepdims=True))
n_annotations = fetched.get("n_annotations", np.zeros(N, dtype='i'))
annotations = fetched.get("annotations", None)
# actions = fetched.get("actions", None)
learned_bg = "bg_y" in fetched
bg_y = fetched.get("bg_y", None)
bg_x = fetched.get("bg_x", None)
bg_h = fetched.get("bg_h", None)
bg_w = fetched.get("bg_w", None)
bg_raw = fetched.get("bg_raw", None)
fetched.update(
prediction=prediction,
targets=targets,
background=background,
n_annotations=n_annotations,
annotations=annotations,
learned_bg=learned_bg,
bg_y=bg_y,
bg_x=bg_x,
bg_h=bg_h,
bg_w=bg_w,
bg_raw=bg_raw,
)
def _plot(self, updater, fetched, post):
# Create a plot showing what each object is generating
def flt(main=None, **floats):
if main is not None:
s = main + ": "
else:
s = ''
s += ', '.join("{}={:.2f}".format(k, v) for k, v in floats.items())
return s
N, T, image_height, image_width, _ = fetched['inp'].shape
H, W, B = updater.network.H, updater.network.W, updater.network.B
fig_unit_size = 3
n_other_plots = 10
# number of plot columns drawn per object
M = 5
if updater.network.use_sqair_prop:
M += 2 # for masks
fig_width = max(M*W, n_other_plots)
n_prop_objects = updater.network.prop_layer.n_prop_objects
n_prop_rows = int(np.ceil(n_prop_objects / W))
fig_height = B * H + 4 + 2*n_prop_rows + 2
for idx in range(N):
# --- set up figure and axes ---
fig = plt.figure(figsize=(fig_unit_size*fig_width, fig_unit_size*fig_height))
time_text = fig.suptitle('', fontsize=20, fontweight='bold')
gs = gridspec.GridSpec(fig_height, fig_width, figure=fig)
post_disc_axes = np.array([[fig.add_subplot(gs[i, j]) for j in range(M*W)] for i in range(B*H)])
post_prop_axes = np.array([
[fig.add_subplot(gs[B*H+4+i, j]) for j in range(M*W)]
for i in range(n_prop_rows)])
post_prop_axes = post_prop_axes.flatten()
post_select_axes = np.array([
[fig.add_subplot(gs[B*H+4+n_prop_rows+i, j]) for j in range(M*W)]
for i in range(n_prop_rows)])
post_select_axes = post_select_axes.flatten()
post_other_axes = []
for i in range(2):
for j in range(int(fig_width/2)):
start_y = B*H + 2*i
end_y = start_y + 2
start_x = 2*j
end_x = start_x + 2
ax = fig.add_subplot(gs[start_y:end_y, start_x:end_x])
post_other_axes.append(ax)
post_other_axes = np.array(post_other_axes)
post_axes = np.concatenate(
[post_disc_axes.flatten(), post_prop_axes.flatten(),
post_select_axes.flatten(), post_other_axes.flatten()],
axis=0)
axes_sets = [
('post', post_disc_axes, post_prop_axes, post_select_axes, post_other_axes)
]
bottom_axes = np.array([fig.add_subplot(gs[-2:, 2*i:2*(i+1)]) for i in range(int(fig_width/2))])
all_axes = np.concatenate([post_axes, bottom_axes], axis=0)
for ax in all_axes.flatten():
ax.set_axis_off()
# --- plot data ---
lw = self.linewidth
print("Plotting {} for {}...".format(idx, "posterior" if post else "prior"))
def func(t):
print("timestep {}".format(t))
time_text.set_text('t={},offset={}'.format(t, fetched.offset[idx]))
ax_inp = bottom_axes[0]
self.imshow(ax_inp, fetched.inp[idx, t])
if t == 0:
ax_inp.set_title('input')
ax = bottom_axes[1]
self.imshow(ax, fetched.background[idx, t])
if t == 0:
ax.set_title('background')
if fetched.learned_bg:
ax = bottom_axes[2]
bg_y, bg_x, bg_h, bg_w = fetched.bg_y, fetched.bg_x, fetched.bg_h, fetched.bg_w
self.imshow(ax, fetched.bg_raw[idx])
if t == 0:
title = flt('bg_raw', y=bg_y[idx, t, 0], x=bg_x[idx, t, 0], h=bg_h[idx, t, 0], w=bg_w[idx, t, 0])
ax.set_title(title)
height = bg_h[idx, t, 0] * image_height
top = (bg_y[idx, t, 0] + 1) / 2 * image_height - height / 2
width = bg_w[idx, t, 0] * image_width
left = (bg_x[idx, t, 0] + 1) / 2 * image_width - width / 2
rect = patches.Rectangle(
(left, top), width, height, linewidth=lw, edgecolor="xkcd:green", facecolor='none')
ax.add_patch(rect)
for i, (name, disc_axes, prop_axes, select_axes, other_axes) in enumerate(axes_sets):
_fetched = getattr(fetched, name)
final_weights = _fetched.select.final_weights[idx, t].sum(axis=0)
obj_idx = 0
# --- disc objects ---
for h, w, b in product(range(H), range(W), range(B)):
obj = _fetched.disc.obj[idx, t, obj_idx, 0]
z = _fetched.disc.z[idx, t, obj_idx, 0]
ax = disc_axes[h * B + b, M * w]
color = obj * self.on_color + (1-obj) * self.off_color
self.imshow(ax, _fetched.disc.glimpse[idx, t, obj_idx, :, :, :])
obj_rect = patches.Rectangle(
(1., 0), 0.2, 1, clip_on=False, transform=ax.transAxes, facecolor=color)
ax.add_patch(obj_rect)
ax = disc_axes[h * B + b, M * w + 1]
self.imshow(ax, _fetched.disc.appearance[idx, t, obj_idx, :, :, :3])
fw = final_weights[n_prop_objects + obj_idx]
color = fw * self.selected_color + (1-fw) * self.unselected_color
obj_rect = patches.Rectangle(
(1., 0), 0.2, 1, clip_on=False, transform=ax.transAxes, facecolor=color)
ax.add_patch(obj_rect)
yt, xt, ys, xs = _fetched.disc.normalized_box[idx, t, obj_idx]
nbox = "bx={:.2f},{:.2f},{:.2f},{:.2f}".format(yt, xt, ys, xs)
ax.set_title(flt(nbox, obj=obj, z=z, final_weight=fw))
ax = disc_axes[h * B + b, M * w + 2]
self.imshow(ax, _fetched.disc.appearance[idx, t, obj_idx, :, :, 3], cmap="gray")
obj_idx += 1
# --- prop objects ---
for k in range(n_prop_objects):
obj = _fetched.prop.obj[idx, t, k, 0]
z = _fetched.prop.z[idx, t, k, 0]
d_obj = _fetched.prop.d_obj[idx, t, k, 0]
xs_logit = _fetched.prop.xs_logit[idx, t, k, 0]
ys_logit = _fetched.prop.ys_logit[idx, t, k, 0]
d_xt_logit = _fetched.prop.d_xt_logit[idx, t, k, 0]
d_yt_logit = _fetched.prop.d_yt_logit[idx, t, k, 0]
xs = _fetched.prop.xs[idx, t, k, 0]
ys = _fetched.prop.ys[idx, t, k, 0]
xt = _fetched.prop.xt[idx, t, k, 0]
yt = _fetched.prop.yt[idx, t, k, 0]
# --- object location superimposed on reconstruction ---
ax_idx = M*k
ax = prop_axes[ax_idx]
self.imshow(ax, _fetched.render.output[idx, t])
color = obj * self.on_color + (1-obj) * self.off_color
top, left, height, width = _fetched.prop.pixel_space_box[idx, t, k]
rect = patches.Rectangle(
(left, top), width, height, linewidth=lw, edgecolor=color, facecolor='none')
ax.add_patch(rect)
top, left, height, width = _fetched.prop.glimpse_prime_pixel_space_box[idx, t, k]
rect = patches.Rectangle(
(left, top), width, height, linewidth=lw, edgecolor=self.glimpse_color, facecolor='none')
ax.add_patch(rect)
# --- glimpse ---
ax_idx += 1
ax = prop_axes[ax_idx]
self.imshow(ax, _fetched.prop.glimpse[idx, t, k, :, :, :])
color = obj * self.on_color + (1-obj) * self.off_color
obj_rect = patches.Rectangle(
(1., 0), 0.2, 1, clip_on=False, transform=ax.transAxes, facecolor=color)
ax.add_patch(obj_rect)
# --- glimpse mask ---
if updater.network.use_sqair_prop:
ax_idx += 1
ax = prop_axes[ax_idx]
self.imshow(ax, _fetched.prop.glimpse_mask[idx, t, k, :, :, 0], cmap="gray")
# --- glimpse_prime ---
ax_idx += 1
ax = prop_axes[ax_idx]
self.imshow(ax, _fetched.prop.glimpse_prime[idx, t, k, :, :, :])
fw = final_weights[k]
color = fw * self.selected_color + (1-fw) * self.unselected_color
obj_rect = patches.Rectangle(
(1., 0), 0.2, 1, clip_on=False, transform=ax.transAxes, facecolor=color)
ax.add_patch(obj_rect)
# --- glimpse_prime mask ---
if updater.network.use_sqair_prop:
ax_idx += 1
ax = prop_axes[ax_idx]
self.imshow(ax, _fetched.prop.glimpse_prime_mask[idx, t, k, :, :, 0], cmap="gray")
# --- appearance ---
ax_idx += 1
ax = prop_axes[ax_idx]
self.imshow(ax, _fetched.prop.appearance[idx, t, k, :, :, :3])
nbox = "bx={:.2f},{:.2f},{:.2f},{:.2f}".format(yt, xt, ys, xs)
d_nbox = "dbxl={:.2f},{:.2f},{:.2f},{:.2f}".format(d_yt_logit, d_xt_logit, ys_logit, xs_logit)
ax.set_title(flt(nbox + ", " + d_nbox, dobj=d_obj, obj=obj, z=z,))
# --- alpha ---
ax_idx += 1
ax = prop_axes[ax_idx]
self.imshow(ax, _fetched.prop.appearance[idx, t, k, :, :, 3], cmap="gray")
# --- select object ---
prop_weight_images = None
prop_weight_images = _fetched.select.final_weights[idx, t, :, :n_prop_objects]
_H = int(np.ceil(n_prop_objects / W))
padding = W * _H - n_prop_objects
prop_weight_images = np.pad(prop_weight_images, ((0, 0), (0, padding)), 'constant')
prop_weight_images = prop_weight_images.reshape(n_prop_objects, _H, W, 1)
prop_weight_images = (
prop_weight_images * self.selected_color + (1-prop_weight_images) * self.unselected_color)
final_weight_images = _fetched.select.final_weights[idx, t, :, n_prop_objects:]
final_weight_images = final_weight_images.reshape(n_prop_objects, H, W, 1)
final_weight_images = (
final_weight_images * self.selected_color + (1-final_weight_images) * self.unselected_color)
for k in range(n_prop_objects):
obj = _fetched.select.obj[idx, t, k, 0]
z = _fetched.select.z[idx, t, k, 0]
xs = _fetched.select.xs[idx, t, k, 0]
ys = _fetched.select.ys[idx, t, k, 0]
xt = _fetched.select.xt[idx, t, k, 0]
yt = _fetched.select.yt[idx, t, k, 0]
ax = select_axes[M*k]
self.imshow(ax, prop_weight_images[k])
ax = select_axes[M*k+1]
ax.set_title(flt(obj=obj, z=z, xs=xs, ys=ys, xt=xt, yt=yt))
self.imshow(ax, final_weight_images[k])
color = obj * self.on_color + (1-obj) * self.off_color
obj_rect = patches.Rectangle(
(-0.2, 0), 0.2, 1, clip_on=False, transform=ax.transAxes, facecolor=color)
ax.add_patch(obj_rect)
# --- other ---
ax = other_axes[6]
self.imshow(ax, fetched.inp[idx, t])
if t == 0:
ax.set_title('input')
ax = other_axes[7]
self.imshow(ax, _fetched.render.output[idx, t])
if t == 0:
ax.set_title('reconstruction')
ax = other_axes[8]
self.imshow(ax, _fetched.render.diff[idx, t])
if t == 0:
ax.set_title('abs error')
ax = other_axes[9]
self.imshow(ax, _fetched.render.xent[idx, t])
if t == 0:
ax.set_title('xent')
gt_axes = []
axis_idx = 0
names = ('select', 'disc', 'prop')
for name in names:
ax_all_bb = other_axes[axis_idx]
self.imshow(ax_all_bb, _fetched.render.output[idx, t])
if t == 0:
ax_all_bb.set_title('{} all bb'.format(name))
ax_on_bb = other_axes[axis_idx+1]
self.imshow(ax_on_bb, _fetched.render.output[idx, t])
if t == 0:
ax_on_bb.set_title('{} on bb'.format(name))
axis_idx += 2
gt_axes.extend([ax_all_bb, ax_on_bb])
flat_obj = getattr(_fetched, name).obj[idx, t].flatten()
flat_box = getattr(_fetched, name).pixel_space_box[idx, t].reshape(-1, 4)
# Plot proposed bounding boxes
for o, (top, left, height, width) in zip(flat_obj, flat_box):
color = o * self.on_color + (1-o) * self.off_color
rect = patches.Rectangle(
(left, top), width, height, linewidth=lw, edgecolor=color, facecolor='none')
ax_all_bb.add_patch(rect)
if o > self.cutoff:
rect = patches.Rectangle(
(left, top), width, height, linewidth=lw, edgecolor=color, facecolor='none')
ax_on_bb.add_patch(rect)
# Plot true bounding boxes
for k in range(fetched.n_annotations[idx]):
valid, _, _, top, bottom, left, right = fetched.annotations[idx, t, k]
if not valid:
continue
height = bottom - top
width = right - left
for ax in gt_axes:
rect = patches.Rectangle(
(left, top), width, height, linewidth=lw, edgecolor=self.gt_color, facecolor='none')
ax.add_patch(rect)
plt.subplots_adjust(left=0.02, right=.98, top=.95, bottom=0.02, wspace=0.1, hspace=0.12)
anim = animation.FuncAnimation(fig, func, frames=T, interval=500)
prefix = "post" if post else "prior"
path = self.path_for('{}/{}'.format(prefix, idx), updater, ext="mp4")
anim.save(path, writer='ffmpeg', codec='hevc', extra_args=['-preset', 'ultrafast'], dpi=self.dpi)
plt.close(fig)
shutil.copyfile(
path,
os.path.join(
os.path.dirname(path),
'latest_stage{:0>4}.mp4'.format(updater.stage_idx)))
class SimpleSILOT_RenderHook(SILOT_RenderHook):
N = 32
discovery_color = np.array(to_rgb("xkcd:neon green"))
propagation_color = np.array(to_rgb("xkcd:azure"))
def build_fetches(self, updater):
select_names = "is_new obj normalized_box".split()
_fetches = Config(
post=Config(
select=Config(**{n: 0 for n in select_names}),
),
)
fetches = list(_fetches.keys())
fetches.extend("output inp background offset".split())
return fetches
def __call__(self, updater):
fetched = self._fetch(updater)
fetched = Config(fetched)
self._prepare_fetched(updater, fetched)
self._plot(updater, fetched, post=True)
if updater.network.learn_prior and cfg.plot_prior:
extra_feed_dict = {updater.network._prior_start_step: updater.network.eval_prior_start_step}
fetched = self._fetch(updater, extra_feed_dict)
self._prepare_fetched(updater, fetched)
self._plot(updater, fetched, post=False)
def _prepare_fetched(self, updater, fetched):
inp = fetched['inp']
N, T, image_height, image_width, _ = inp.shape
yt, xt, ys, xs = np.split(fetched.post.select.normalized_box, 4, axis=-1)
pixel_space_box = coords_to_pixel_space(
yt, xt, ys, xs, (image_height, image_width), updater.network.anchor_box, top_left=True)
fetched.post.select.pixel_space_box = np.concatenate(pixel_space_box, axis=-1)
return fetched
def _plot(self, updater, fetched, post):
N, T, image_height, image_width, _ = fetched['inp'].shape
lw = self.linewidth
for idx in range(N):
fig, axes = plt.subplots(1, 2, figsize=(20, 10))
def func(t):
ax_inp = axes[0]
if t == 0:
ax_inp.set_title('inp')
self.imshow(ax_inp, fetched['inp'][idx, t])
ax_outp = axes[1]
if t == 0:
ax_outp.set_title('output')
self.imshow(ax_outp, fetched['output'][idx, t])
flat_box = fetched.post.select.pixel_space_box[idx, t].reshape(-1, 4)
flat_is_new = fetched.post.select.is_new[idx, t].flatten()
flat_obj = fetched.post.select.obj[idx, t].flatten()
# Plot proposed bounding boxes
for o, is_new, (top, left, height, width) in zip(flat_obj, flat_is_new, flat_box):
if is_new:
color = self.discovery_color
else:
color = self.propagation_color
if o > cfg.obj_threshold:
rect = patches.Rectangle(
(left, top), width, height, linewidth=lw, edgecolor=color, facecolor='none')
ax_inp.add_patch(rect)
if o > cfg.obj_threshold:
rect = patches.Rectangle(
(left, top), width, height, linewidth=lw, edgecolor=color, facecolor='none')
ax_outp.add_patch(rect)
plt.subplots_adjust(left=0.02, right=.98, top=.95, bottom=0.02, wspace=0.1, hspace=0.12)
anim = animation.FuncAnimation(fig, func, frames=T, interval=500)
prefix = "post" if post else "prior"
path = self.path_for('{}/{}'.format(prefix, idx), updater, ext="mp4")
anim.save(path, writer='ffmpeg', codec='hevc', extra_args=['-preset', 'ultrafast'], dpi=self.dpi)
plt.close(fig)
shutil.copyfile(
path,
os.path.join(
os.path.dirname(path),
'latest_stage{:0>4}.mp4'.format(updater.stage_idx)))
class PaperSILOT_RenderHook(SILOT_RenderHook):
""" A more compact rendering, suitable for inclusion in papers. """
N = 16
def build_fetches(self, updater):
select_names = "is_new obj z appearance normalized_box".split()
render_names = "output".split()
_fetches = Config(
post=Config(
select=Config(**{n: 0 for n in select_names}),
render=Config(**{n: 0 for n in render_names}),
),
)
fetches = ' '.join(list(_fetches.keys()))
fetches += " inp"
network = updater.network
if "n_annotations" in network._tensors:
fetches += " annotations n_annotations"
return fetches
def _prepare_fetched(self, updater, fetched):
inp = fetched['inp']
N, T, image_height, image_width, _ = inp.shape
yt, xt, ys, xs = np.split(fetched.post.select.normalized_box, 4, axis=-1)
pixel_space_box = coords_to_pixel_space(
yt, xt, ys, xs, (image_height, image_width), updater.network.anchor_box, top_left=True)
fetched.post.select.pixel_space_box = np.concatenate(pixel_space_box, axis=-1)
def _plot(self, updater, fetched, post):
N, T, image_height, image_width, _ = fetched['inp'].shape
obj = fetched.post.select.obj
is_new = fetched.post.select.is_new
threshold = cfg.obj_threshold
tracking_ids = get_object_ids(obj, is_new, threshold, on_only=False)
n_prop_objects = updater.network.prop_layer.n_prop_objects
n_cells = int(np.sqrt(n_prop_objects))
fig_unit_size = 0.5
fig_width = T * n_cells
colors = list(plt.get_cmap('Dark2').colors)
# colors = list(plt.get_cmap('Set3').colors)
for idx in range(N):
n_visible_objects = int((obj[idx] > threshold).sum((1, 2)).max())
n_visible_rows = int(np.ceil(n_visible_objects / n_cells))
fig_height = 2 * n_cells + n_visible_rows
# fig_height = 2 * n_prop_objects + n_visible_objects
# --- set up figure and axes ---
fig = plt.figure(figsize=(fig_unit_size*fig_width, fig_unit_size*fig_height))
gs = gridspec.GridSpec(fig_height, fig_width, figure=fig)
ground_truth_axes = [fig.add_subplot(gs[0:n_cells, t*n_cells:(t+1)*n_cells]) for t in range(T)]
gta = ground_truth_axes[0]
gta.text(-0.1, 0.5, 'input', transform=gta.transAxes, ha='center', va='center', rotation=90)
dummy_axis = fig.add_subplot(gs[n_cells:n_cells + n_visible_rows, :n_cells])
dummy_axis.set_axis_off()
dummy_axis.text(-0.1, 0.5, 'objects', transform=dummy_axis.transAxes, ha='center', va='center', rotation=90)
grid_axes = np.array([
[fig.add_subplot(gs[n_cells+i, t*n_cells+j])
for i, j in itertools.product(range(n_visible_rows), range(n_cells))]
for t in range(T)])
reconstruction_axes = [
fig.add_subplot(gs[n_cells + n_visible_rows:2*n_cells + n_visible_rows, t*n_cells:(t+1)*n_cells])
for t in range(T)]
ra = reconstruction_axes[0]
ra.text(-0.1, 0.5, 'reconstruction', transform=ra.transAxes, ha='center', va='center', rotation=90)
all_axes = np.concatenate(
[ground_truth_axes,
grid_axes.flatten(),
reconstruction_axes], axis=0)
for ax in all_axes.flatten():
ax.set_axis_off()
# --- plot data ---
lw = self.linewidth
print("Plotting {} for {}...".format(idx, "posterior" if post else "prior"))
for t in range(T):
ax_inp = ground_truth_axes[t]
self.imshow(ax_inp, fetched.inp[idx, t])
ax_outp = reconstruction_axes[t]
self.imshow(ax_outp, fetched.post.render.output[idx, t])
old = []
new = []
for i in range(n_prop_objects):
if obj[idx, t, i, 0] > threshold:
if is_new[idx, t, i]:
new.append(i)
else:
old.append(i)
flat_box = fetched.post.select.pixel_space_box[idx, t].reshape(-1, 4)
axis_idx = 0
for i in old + new:
if axis_idx < len(old):
ls = '-'
else:
ls = '--'
ax = grid_axes[t, axis_idx]
app = fetched.post.select.appearance[idx, t, i, :, :, :3]
alpha = fetched.post.select.appearance[idx, t, i, :, :, 3:]
masked_appearance = app * alpha
self.imshow(ax, masked_appearance)
tracking_id = tracking_ids[idx, t][i]
color = colors[tracking_id % len(colors)]
rect = patches.Rectangle(
(0.0, 0.0), 1.0, 1.0, clip_on=False, linewidth=3, ls=ls,
transform=ax.transAxes, edgecolor=color, facecolor='none')
ax.add_patch(rect)
top, left, height, width = flat_box[i]
rect = patches.Rectangle(
(left, top), width, height, linewidth=lw, ls=ls, edgecolor=color, facecolor='none')
ax_inp.add_patch(rect)
rect = patches.Rectangle(
(left, top), width, height, linewidth=lw, ls=ls, edgecolor=color, facecolor='none')
ax_outp.add_patch(rect)
axis_idx += 1
plt.subplots_adjust(left=0.02, right=.98, top=.98, bottom=0.02, wspace=0.1, hspace=0.1)
prefix = "post" if post else "prior"
self.savefig("{}/{}".format(prefix, idx), fig, updater)
class LongVideoSILOT_RenderHook(SILOT_RenderHook):
""" Render a long video.
Let's assume the dataset uses some inherent batch size, equal to the number of parallel videos it contains.
Call this B. So the first B observations of the dataset are the first cfg.n_frames of each of the videos.
The next B observations are the next n_frames of each of the videos, etc.
B should be the same as batch_size. n_batches should be in the config. Assume that each video is padded with 0 frames
when the videos stop.
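Illustration of the assumed layout (hypothetical numbers, not from any particular dataset):
with B = 2 parallel videos and cfg.n_frames = 8, observation 0 holds video 0 frames 0-7,
observation 1 holds video 1 frames 0-7, observation 2 holds video 0 frames 8-15, and so on.
Batch i of the cfg.n_batches runs therefore covers frames [i*n_frames, (i+1)*n_frames) of every
video, and the final object state of batch i seeds batch i+1 (see _fetch below).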
"""
linewidth = 4
def build_fetches(self, updater):
select_names = "is_new obj normalized_box".split()
_fetches = Config(
post=Config(
select=Config(**{n: 0 for n in select_names}),
),
)
fetches = list(_fetches.keys())
fetches.extend("output inp background offset".split())
return fetches
def start_stage(self, training_loop, updater, stage_idx):
to_fetch = {'select': updater.network._tensors['selected_objects'].copy()}
try:
tensors = updater.tensors
except AttributeError:
tensors = updater._tensors
to_fetch['output'] = tensors['output']
to_fetch['inp'] = tensors['inp']
self.to_fetch = to_fetch
def __call__(self, updater):
fetched = self._fetch(updater, True)
self._plot(updater, fetched, True)
# if updater.network.learn_prior and cfg.plot_prior:
# fetched = self._fetch(updater, False)
# self._plot(updater, fetched, False)
def _fetch(self, updater, post):
sess = tf.get_default_session()
fetched = []
objects = None
feed_dict = self.get_feed_dict(updater)
for i in range(cfg.n_batches):
print("Working on batch {}".format(i))
if not post:
if objects:
feed_dict[updater.network._prior_start_step] = 0
else:
feed_dict[updater.network._prior_start_step] = updater.network.eval_prior_start_step
if objects:
object_feed_dict = updater.network.build_initial_object_feed_dict(objects)
feed_dict.update(object_feed_dict)
_fetched = sess.run(self.to_fetch, feed_dict=feed_dict)
objects = nest.map_structure(lambda s: s[:, -1], _fetched['select'])
fetched.append(Config(_fetched))
fetched = {
k: np.concatenate([d[k] for d in fetched], axis=1)
for k, v in fetched[0].items()
}
fetched = Config(fetched)
_, _, image_height, image_width, _ = fetched.inp.shape
yt, xt, ys, xs = np.split(fetched.select.normalized_box, 4, axis=-1)
pixel_space_box = coords_to_pixel_space(
yt, xt, ys, xs, (image_height, image_width), updater.network.anchor_box, top_left=True)
fetched.select.pixel_space_box = np.concatenate(pixel_space_box, axis=-1)
return fetched
def _plot(self, updater, fetched, post):
N, T, image_height, image_width, _ = fetched['inp'].shape
height_scale = image_height / image_width
lw = self.linewidth
obj = fetched.select.obj
is_new = fetched.select.is_new
tracking_ids = get_object_ids(obj, is_new, cfg.obj_threshold, on_only=False)
colors = list(plt.get_cmap('Dark2').colors)
for idx in range(N):
fig_unit_size = 10
figsize = (2*fig_unit_size, height_scale*fig_unit_size)
fig, axes = plt.subplots(1, 2, figsize=figsize)
for ax in axes.flatten():
ax.set_axis_off()
def func(t):
ax_inp = axes[0]
self.imshow(ax_inp, fetched['inp'][idx, t])
ax_outp = axes[1]
self.imshow(ax_outp, fetched['output'][idx, t])
flat_box = fetched.select.pixel_space_box[idx, t].reshape(-1, 4)
flat_is_new = fetched.select.is_new[idx, t].flatten()
flat_obj = fetched.select.obj[idx, t].flatten()
flat_tracking_ids = tracking_ids[idx, t]
# Plot proposed bounding boxes
for o, is_new, _id, (top, left, height, width) in zip(flat_obj, flat_is_new, flat_tracking_ids, flat_box):
color = colors[_id % len(colors)]
ls = '--' if is_new else '-'
if o > cfg.obj_threshold:
rect = patches.Rectangle(
(left, top), width, height, linewidth=lw, edgecolor=color, facecolor='none', ls=ls)
ax_inp.add_patch(rect)
if o > cfg.obj_threshold:
rect = patches.Rectangle(
(left, top), width, height, linewidth=lw, edgecolor=color, facecolor='none', ls=ls)
ax_outp.add_patch(rect)
plt.subplots_adjust(left=0.01, right=.99, top=.99, bottom=0.01, wspace=0.01, hspace=0.12)
anim = animation.FuncAnimation(fig, func, frames=T, interval=500)
prefix = "post" if post else "prior"
path = self.path_for('{}/{}'.format(prefix, idx), updater, ext="mp4")
anim.save(path, writer='ffmpeg', codec='hevc', extra_args=['-preset', 'ultrafast'], dpi=self.dpi)
plt.close(fig)
latest = os.path.join(os.path.dirname(path), 'latest_stage{:0>4}.mp4'.format(updater.stage_idx))
shutil.copyfile(path, latest)
to_gif_command = (
'ffmpeg -i latest_stage0000.mp4 -vf '
'"fps=10,scale=1000:-1:flags=lanczos,split[s0][s1];[s0]palettegen[p];[s1][p]paletteuse" '
'-loop 0 output.gif'
)
with cd(os.path.dirname(path)):
execute_command(to_gif_command, output="loud")
|
the-stack_106_24141 | # -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import six
import string
import random
try:
import h5py
H5PY_SUPPORTED = True
except Exception as e:
print("hdf5 is not supported on this machine (please install/reinstall h5py for optimal experience)")
H5PY_SUPPORTED = False
import numpy as np
import tensorflow as tf
import tflearn.variables as vs
def get_from_module(identifier, module_params, module_name, instantiate=False, kwargs=None):
if isinstance(identifier, six.string_types):
res = module_params.get(identifier)
if not res:
res = module_params.get(identifier.lower())
if not res:
raise Exception('Invalid ' + str(module_name) + ': ' + str(identifier))
if instantiate and not kwargs:
return res()
elif instantiate and kwargs:
return res(**kwargs)
else:
return res
return identifier
# ------------------
# Ops utils
# ------------------
def get_layer_by_name(name_or_scope):
""" get_layer.
Retrieve the output tensor of a layer with the given name or scope.
Arguments:
name_or_scope: `str`. The name (or scope) given to the layer to
retrieve.
Returns:
A Tensor.
"""
# Track output tensor.
c = tf.get_collection(tf.GraphKeys.LAYER_TENSOR + '/' + name_or_scope)
if len(c) == 0:
raise Exception("No layer found for this name.")
if len(c) > 1:
return c
return c[0]
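# Hedged usage sketch (layer names depend on how the graph was built; "FullyConnected"
# below is only an example of a tflearn auto-generated scope name):
#
#     fc_out = get_layer_by_name("FullyConnected")
#
# If several tensors were registered under the same name, the full list is returned
# instead of a single tensor; if none were registered, an Exception is raised.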
def get_incoming_shape(incoming):
""" Returns the incoming data shape """
if isinstance(incoming, tf.Tensor):
return incoming.get_shape().as_list()
elif type(incoming) in [np.array, list, tuple]:
return np.shape(incoming)
else:
raise Exception("Invalid incoming layer.")
def get_tensor_parents_placeholders(tensor):
""" Get all placeholders that the given tensor depends on. """
placeholders_list = []
if tensor.op.type == 'Placeholder':
placeholders_list.append(tensor)
if tensor.op:
for t in tensor.op.inputs:
if not 'read:0' in t.name:
placeholders_list += get_tensor_parents_placeholders(t)
return list(set(placeholders_list))
def get_tensor_parents(tensor):
""" Get all calculation and data parent tensors (Not read). """
parents_list = []
parents_list.append(tensor)
if tensor.op:
for t in tensor.op.inputs:
if not 'read:0' in t.name:
parents_list += get_tensor_parents(t)
return parents_list
def get_all_tensor_parents(tensor):
""" Get all parent tensors. """
parents_list = []
parents_list.append(tensor)
if tensor.op:
for t in tensor.op.inputs:
parents_list += get_tensor_parents(t)
return list(set(parents_list))
def get_tensor_children_placeholders(tensor):
""" Get all placeholders that depend on the given tensor. """
placeholders_list = []
if tensor.op.type == 'Placeholder':
placeholders_list.append(tensor)
if tensor.op:
for t in tensor.op.outputs:
if not 'read:0' in t.name:
placeholders_list += get_tensor_children_placeholders(t)
return list(set(placeholders_list))
def get_tensor_children(tensor):
""" Get all calculation and data child tensors (not read). """
children_list = []
children_list.append(tensor)
if tensor.op:
for t in tensor.op.outputs:
if not 'read:0' in t.name:
children_list += get_tensor_children(t)
return list(set(children_list))
def get_all_tensor_children(tensor):
""" Get all child tensors. """
children_list = []
children_list.append(tensor)
if tensor.op:
for t in tensor.op.outputs:
children_list += get_all_tensor_children(t)
return list(set(children_list))
# ------------------
# Other utils
# ------------------
def to_list(data):
if data is None: return None
if type(data) in [tuple, list]:
return data
return [data]
def standarize_data(data):
if data is None: return None
if type(data) in [tuple, list]:
return [np.asarray(x) for x in data]
if type(data) is dict:
return data
return [np.asarray(data)]
def standarize_dict(d):
for key in d:
if isinstance(d[key], list):
d[key] = np.asarray(d[key])
def del_duplicated(l):
res = []
for e in l:
if e not in res:
res.append(e)
return res
#return list(np.unique(np.array(l)))
def make_batches(samples_size, batch_size):
nb_batch = int(np.ceil(samples_size/float(batch_size)))
return [(i*batch_size, min(samples_size, (i+1)*batch_size)) for i in range(0, nb_batch)]
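# Worked example: make_batches(10, 3) == [(0, 3), (3, 6), (6, 9), (9, 10)];
# the final batch is simply truncated to the number of remaining samples.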
def slice_array(X, start=None, stop=None):
if type(X) == list:
if hasattr(start, '__len__'):
return [x[start] for x in X]
else:
return [x[start:stop] for x in X]
if H5PY_SUPPORTED:
if type(X) == h5py.Dataset:
return [X[i] for i in start]
if hasattr(start, '__len__'):
return X[start]
else:
return X[start:stop]
def get_dict_first_element(input_dict):
for key in input_dict:
return input_dict[key]
def get_tensor_with_parent_name(tensor):
""" Get a tensor name with its parent tensor's name as prefix. """
tensor_name = tensor.name
if tensor.op.inputs[0].name is not None:
return tensor.op.inputs[0].name + "_" + tensor_name
return tensor_name
def format_scope_name(scope_name, prefix, suffix):
""" Add a prefix and a suffix to a scope name. """
if prefix != "":
if not prefix[-1] == "/":
prefix += "/"
if suffix != "":
if not suffix[0] == "/":
suffix = "/" + suffix
return prefix + scope_name + suffix
def check_scope_path(scope_name):
scope_name = scope_name.replace("//", "/")
return scope_name
def feed_dict_builder(X, Y, net_inputs, net_targets):
""" Format provided data to a dictionary format compatible with
Tensorflow data feeding. It matches all X and Y data provided with
the net_inputs and net_targets placeholders provided. In the case of a
list of input data, matching is done positionally.
Examples:
```python
# Building feed dictionary
>> feed_dict = feed_dict_builder(X, Y, input1, output1)
>> {input1: X, output1: Y}
>> feed_dict = feed_dict_builder({input1: X}, Y, input1, output1)
>> {input1: X, output1: Y}
>> feed_dict = feed_dict_builder([X1, X2], Y, [in1, in2], out1)
>> {in1: X1, in2: X2, output1: Y}
# For validation split:
>> val_feed_dict = feed_dict_builder(0.1, 0.1, input1, output1)
>> {input1: 0.1, output1: 0.1}
```
Arguments:
X: `array` or `dict`. The input data.
Y: `array`, `dict` or `float`. The targets (labels).
net_inputs: `list`. The network data inputs `Placeholders`.
net_targets: `list`. The network targets `Placeholders`.
Returns:
`dict`. A Tensorflow-ready dictionary to feed data.
Raises:
Exception if X and net_inputs or Y and net_targets list length doesn't
match.
"""
feed_dict = {}
if not (is_none(X) or is_none(net_inputs)):
# If input data are not a dict, we match them by creation order
if not isinstance(X, dict):
# If validation split, copy that value to the whole placeholders
if isinstance(X, float):
X = [X for _i in net_inputs]
elif len(net_inputs) > 1:
try: #TODO: Fix broadcast issue if different
if np.ndim(X) < 2:
raise ValueError("Multiple inputs but only one data "
"feeded. Please verify number of "
"inputs and data provided match.")
elif len(X) != len(net_inputs):
raise Exception(str(len(X)) + " inputs fed, "
"but expected: " + str(len(net_inputs)) +
". If you are using notebooks, please "
"make sure that you didn't run graph "
"construction cell multiple time, "
"or try to enclose your graph within "
"`with tf.Graph().as_default():` or "
"use `tf.reset_default_graph()`")
except Exception:
# Skip verif
pass
else:
X = [X]
for i, x in enumerate(X):
feed_dict[net_inputs[i]] = x
else:
# If a dict is provided
for key, val in X.items():
# Do nothing if dict already fits {placeholder: data} template
if isinstance(key, tf.Tensor):
continue
else: # Else retrieve placeholder with its name
var = vs.get_inputs_placeholder_by_name(key)
if var is None:
raise Exception("Feed dict asks for variable named '%s' but no "
"such variable is known to exist" % key)
feed_dict[var] = val
if not (is_none(Y) or is_none(net_targets)):
if not isinstance(Y, dict):
# Verify network has targets
if len(net_targets) == 0:
return feed_dict
# If validation split, copy that value to every target placeholder.
if isinstance(Y, float):
Y = [Y for _t in net_targets]
elif len(net_targets) > 1:
try: #TODO: Fix broadcast issue if different
if np.ndim(Y) < 2:
raise ValueError("Multiple outputs but only one data "
"feeded. Please verify number of outputs "
"and data provided match.")
elif len(Y) != len(net_targets):
raise Exception(str(len(Y)) + " outputs fed, "
"but expected: " + str(len(net_targets)))
except Exception:
# skip verif
pass
else:
Y = [Y]
for i, y in enumerate(Y):
feed_dict[net_targets[i]] = y
else:
# If a dict is provided
for key, val in Y.items():
# Do nothing if dict already fits {placeholder: data} template
if isinstance(key, tf.Tensor):
continue
else: # Else retrieve placeholder with its name
var = vs.get_targets_placeholder_by_name(key)
if var is None:
raise Exception("Feed dict asks for variable named '%s' but no "
"such variable is known to exist" % key)
feed_dict[var] = val
return feed_dict
def is_none(val):
# Check if a value is None or not, required because comparing a np.ndarray
# with `== None` is ambiguous and raises an Exception.
if isinstance(val, np.ndarray):
return False
else:
return val is None
def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
def check_dir_name(dir_path):
if isinstance(dir_path, str):
if len(dir_path) > 0:
if dir_path[-1] != "/":
dir_path += "/"
return dir_path
else:
raise ValueError("Incorrect string format for directory path.")
def check_restore_tensor(tensor_to_check, exclvars):
for exclvar in exclvars:
if isinstance(exclvar, str):
if exclvar.split(':')[0] in tensor_to_check.name:
return False
elif exclvar.name.split(':')[0] in tensor_to_check.name:
return False
return True
# ----------------------------
# Parameter formatting helpers
# ----------------------------
# Auto format kernel
def autoformat_kernel_2d(strides):
if isinstance(strides, int):
return [1, strides, strides, 1]
elif isinstance(strides, (tuple, list)):
if len(strides) == 2:
return [1, strides[0], strides[1], 1]
elif len(strides) == 4:
return [strides[0], strides[1], strides[2], strides[3]]
else:
raise Exception("strides length error: " + str(len(strides))
+ ", only a length of 2 or 4 is supported.")
else:
raise Exception("strides format error: " + str(type(strides)))
# Auto format filter size
# Output shape: (rows, cols, input_depth, out_depth)
def autoformat_filter_conv2d(fsize, in_depth, out_depth):
if isinstance(fsize,int):
return [fsize, fsize, in_depth, out_depth]
elif isinstance(fsize, (tuple, list)):
if len(fsize) == 2:
return [fsize[0], fsize[1], in_depth, out_depth]
else:
raise Exception("filter length error: " + str(len(fsize))
+ ", only a length of 2 is supported.")
else:
raise Exception("filter format error: " + str(type(fsize)))
# Auto format padding
def autoformat_padding(padding):
if padding in ['same', 'SAME', 'valid', 'VALID']:
return str.upper(padding)
else:
raise Exception("Unknown padding! Accepted values: 'same', 'valid'.")
# Auto format filter size
# Output shape: (rows, cols, input_depth, out_depth)
def autoformat_filter_conv3d(fsize, in_depth, out_depth):
if isinstance(fsize, int):
return [fsize, fsize, fsize, in_depth, out_depth]
elif isinstance(fsize, (tuple, list)):
if len(fsize) == 3:
return [fsize[0], fsize[1],fsize[2], in_depth, out_depth]
else:
raise Exception("filter length error: " + str(len(fsize))
+ ", only a length of 3 is supported.")
else:
raise Exception("filter format error: " + str(type(fsize)))
# Auto format stride for 3d convolution
def autoformat_stride_3d(strides):
if isinstance(strides, int):
return [1, strides, strides, strides, 1]
elif isinstance(strides, (tuple, list)):
if len(strides) == 3:
return [1, strides[0], strides[1],strides[2], 1]
elif len(strides) == 5:
assert strides[0] == strides[4] == 1, "Must have strides[0] = strides[4] = 1"
return [strides[0], strides[1], strides[2], strides[3], strides[4]]
else:
raise Exception("strides length error: " + str(len(strides))
+ ", only a length of 3 or 5 is supported.")
else:
raise Exception("strides format error: " + str(type(strides)))
# Auto format kernel for 3d convolution
def autoformat_kernel_3d(kernel):
if isinstance(kernel, int):
return [1, kernel, kernel, kernel, 1]
elif isinstance(kernel, (tuple, list)):
if len(kernel) == 3:
return [1, kernel[0], kernel[1], kernel[2], 1]
elif len(kernel) == 5:
assert kernel[0] == kernel[4] == 1, "Must have kernel_size[0] = kernel_size[4] = 1"
return [kernel[0], kernel[1], kernel[2], kernel[3], kernel[4]]
else:
raise Exception("kernel length error: " + str(len(kernel))
+ ", only a length of 3 or 5 is supported.")
else:
raise Exception("kernel format error: " + str(type(kernel)))
def repeat(inputs, repetitions, layer, *args, **kwargs):
outputs = inputs
for i in range(repetitions):
outputs = layer(outputs, *args, **kwargs)
return outputs
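# Hedged usage sketch (conv_2d stands in for any callable that takes the running
# tensor as its first argument):
#
#     net = repeat(net, 3, conv_2d, 64, 3, activation='relu')
#
# which is equivalent to calling conv_2d three times, feeding each output into the
# next call.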
def fix_saver(collection_lists=None):
# Workaround to prevent serialization warning by removing objects
if collection_lists is None:
try:
# Try latest api
l = tf.get_collection_ref("summary_tags")
l4 = tf.get_collection_ref(tf.GraphKeys.GRAPH_CONFIG)
except Exception:
l = tf.get_collection("summary_tags")
l4 = tf.get_collection(tf.GraphKeys.GRAPH_CONFIG)
l_stags = list(l)
l4_stags = list(l4)
del l[:]
del l4[:]
try:
# Try latest api
l1 = tf.get_collection_ref(tf.GraphKeys.DATA_PREP)
l2 = tf.get_collection_ref(tf.GraphKeys.DATA_AUG)
except Exception:
l1 = tf.get_collection(tf.GraphKeys.DATA_PREP)
l2 = tf.get_collection(tf.GraphKeys.DATA_AUG)
l1_dtags = list(l1)
l2_dtags = list(l2)
del l1[:]
del l2[:]
try: # Do not save exclude variables
l3 = tf.get_collection_ref(tf.GraphKeys.EXCL_RESTORE_VARS)
except Exception:
l3 = tf.get_collection(tf.GraphKeys.EXCL_RESTORE_VARS)
l3_tags = list(l3)
del l3[:]
return [l_stags, l1_dtags, l2_dtags, l3_tags, l4_stags]
else:
# 0.7+ workaround, restore values
for t in collection_lists[0]:
tf.add_to_collection("summary_tags", t)
for t in collection_lists[4]:
tf.add_to_collection(tf.GraphKeys.GRAPH_CONFIG, t)
for t in collection_lists[1]:
tf.add_to_collection(tf.GraphKeys.DATA_PREP, t)
for t in collection_lists[2]:
tf.add_to_collection(tf.GraphKeys.DATA_AUG, t)
for t in collection_lists[3]:
tf.add_to_collection(tf.GraphKeys.EXCL_RESTORE_VARS, t)
|
the-stack_106_24142 | import dash
from dash.dependencies import Output, Event
import dash_core_components as dcc
import dash_html_components as html
import plotly
import random
import plotly.graph_objs as go
from collections import deque
X = deque(maxlen=20)
X.append(1)
Y = deque(maxlen=20)
Y.append(1)
app = dash.Dash(__name__)
app.layout = html.Div(
[
dcc.Graph(id='live-graph', animate=True),
dcc.Interval(
id='graph-update',
interval=1*1000
),
]
)
@app.callback(Output('live-graph', 'figure'),
events=[Event('graph-update', 'interval')])
def update_graph_scatter():
X.append(X[-1]+1)
Y.append(Y[-1]+Y[-1]*random.uniform(-0.1,0.1))
data = plotly.graph_objs.Scatter(
x=list(X),
y=list(Y),
name='Scatter',
mode= 'lines+markers'
)
return {'data': [data],'layout' : go.Layout(xaxis=dict(range=[min(X),max(X)]),
yaxis=dict(range=[min(Y),max(Y)]),)}
if __name__ == '__main__':
app.run_server(debug=True)
|
the-stack_106_24147 | # Copyright 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Dict, Optional
if TYPE_CHECKING:
from monai.deploy.core import Application
from monai.deploy.core.datastores import Datastore, DatastoreFactory
class Executor(ABC):
"""This is the base class that enables execution of an application."""
def __init__(self, app: "Application", datastore: Optional[Datastore] = None, **kwargs: Dict):
"""Constructor of the class.
Given an application it invokes the compose method on the app, which
in turn creates the necessary operators and links them up.
Args:
app: An application that needs to be executed.
datastore: A data store that is used to store the data.
"""
self._app = app
if datastore:
self._datastore = datastore
else:
self._datastore = DatastoreFactory.create(DatastoreFactory.DEFAULT)
@property
def app(self) -> "Application":
"""Returns the application that is executed by the executor."""
return self._app
@property
def datastore(self) -> Datastore:
"""Returns the data store that is used to store the data."""
return self._datastore
@abstractmethod
def run(self):
"""Run the app.
It is called to execute an application.
This method needs to be implemented by specific concrete subclasses
of `Executor`.
"""
pass
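# Minimal sketch of a concrete executor, for illustration only. It assumes the
# application exposes a `compose()` method (as described in the constructor docstring);
# the class name and the print-based reporting are hypothetical, not part of this package.
class _SketchExecutor(Executor):
    """Illustrative executor that composes the app before pretending to run it."""

    def run(self):
        # Build the operator graph; a real implementation would then walk the graph,
        # executing each operator and passing intermediate results via the datastore.
        self.app.compose()
        print(f"Would run {self.app!r} with datastore {self.datastore!r}")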
|
the-stack_106_24148 | import functools
import torch
from scipy.linalg import lapack as scll
from falkon.la_helpers import potrf
from falkon.options import FalkonOptions
from falkon.utils.helpers import choose_fn
__all__ = ("check_init", "inplace_set_diag_th", "inplace_add_diag_th",
"lauum_wrapper", "potrf_wrapper")
def check_init(*none_check):
def _checker(fun):
@functools.wraps(fun)
def wrapper(self, *args, **kwargs):
is_init = True
for el in none_check:
if getattr(self, el, None) is None:
is_init = False
break
if not is_init:
raise RuntimeError(
"FALKON preconditioner is not initialized. Please run "
"`init` before any other method on the "
"preconditioner.")
return fun(self, *args, **kwargs)
return wrapper
return _checker
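# Hedged usage sketch (attribute names are illustrative; each preconditioner lists
# the attributes that must exist before its methods may be called):
#
#     class MyPrecond:
#         @check_init("fC", "dT")
#         def apply(self, v):
#             ...
#
# Calling `apply` before `self.fC` and `self.dT` are set raises the RuntimeError
# above, pointing the user at `init`.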
def inplace_set_diag_th(A: torch.Tensor, k: torch.Tensor) -> torch.Tensor:
A.diagonal().copy_(k)
return A
def inplace_add_diag_th(A: torch.Tensor, k: float) -> torch.Tensor:
# Assumes A is square (or wide also works).
# Need to use .diagonal() as .diag() makes a copy
A.diagonal().add_(k)
return A
def lauum_wrapper(A: torch.Tensor, upper: bool, use_cuda: bool, opt: FalkonOptions) -> torch.Tensor:
if use_cuda:
from falkon.ooc_ops.ooc_lauum import gpu_lauum
return gpu_lauum(A, upper=upper, write_opposite=True, overwrite=True, opt=opt)
else:
Anp = A.numpy()
lauum = choose_fn(Anp.dtype, scll.dlauum, scll.slauum, "LAUUM")
sol, info = lauum(Anp, lower=int(not upper), overwrite_c=1)
if info != 0:
raise RuntimeError(f"Lapack LAUUM failed with error code {info}.")
return torch.from_numpy(sol)
def potrf_wrapper(A: torch.Tensor, clean: bool, upper: bool, use_cuda: bool, opt: FalkonOptions) -> torch.Tensor:
if use_cuda:
from falkon.ooc_ops.ooc_potrf import gpu_cholesky
return gpu_cholesky(A, upper=upper, clean=clean, overwrite=True, opt=opt)
else:
return potrf(A, upper=upper, clean=clean, overwrite=True, cuda=False)
|
the-stack_106_24150 | import argparse
from app.conf.yaml_conf import read_conf
parser = argparse.ArgumentParser(description='Sentrix server.')
parser.add_argument('--conf', dest='conf', action='store', default='configuration.yaml',
help='Path to the server configuration yaml file')
subparsers = parser.add_subparsers()
# server command
server_parser = subparsers.add_parser('server', help='Run server')
args = parser.parse_args()
conf = read_conf(args.conf) |
the-stack_106_24151 | import matplotlib.pyplot as plt
import numpy as np
import multiprocessing
#multiprocessing.freeze_support() # <- may be required on windows
def plot(datax, datay, name):
x = datax
y = datay**2
plt.scatter(x, y, label=name)
plt.legend()
plt.show()
def multiP():
for i in range(2):
p = multiprocessing.Process(target=plot, args=(i, i, i))
p.start()
if __name__ == "__main__":
input('Value: ')
multiP() |
the-stack_106_24154 | from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import GDALException
from django.contrib.gis.gdal.field import Field
from django.contrib.gis.gdal.geometries import OGRGeometry, OGRGeomType
from django.contrib.gis.gdal.prototypes import ds as capi, geom as geom_api
from django.utils.encoding import force_bytes, force_str
# For more information, see the OGR C API source code:
# https://www.gdal.org/ogr__api_8h.html
#
# The OGR_F_* routines are relevant here.
class Feature(GDALBase):
"""
This class wraps an OGR Feature and needs to be instantiated
from a Layer object.
"""
destructor = capi.destroy_feature
def __init__(self, feat, layer):
"""
Initialize Feature from a pointer and its Layer object.
"""
if not feat:
raise GDALException("Cannot create OGR Feature, invalid pointer given.")
self.ptr = feat
self._layer = layer
def __getitem__(self, index):
"""
Get the Field object at the specified index, which may be either
an integer or the Field's string label. Note that the Field object
is not the field's _value_ -- use the `get` method instead to
retrieve the value (e.g. an integer) instead of a Field instance.
"""
if isinstance(index, str):
i = self.index(index)
elif 0 <= index < self.num_fields:
i = index
else:
raise IndexError(
"Index out of range when accessing field in a feature: %s." % index
)
return Field(self, i)
def __len__(self):
"Return the count of fields in this feature."
return self.num_fields
def __str__(self):
"The string name of the feature."
return "Feature FID %d in Layer<%s>" % (self.fid, self.layer_name)
def __eq__(self, other):
"Do equivalence testing on the features."
return bool(capi.feature_equal(self.ptr, other._ptr))
# #### Feature Properties ####
@property
def encoding(self):
return self._layer._ds.encoding
@property
def fid(self):
"Return the feature identifier."
return capi.get_fid(self.ptr)
@property
def layer_name(self):
"Return the name of the layer for the feature."
name = capi.get_feat_name(self._layer._ldefn)
return force_str(name, self.encoding, strings_only=True)
@property
def num_fields(self):
"Return the number of fields in the Feature."
return capi.get_feat_field_count(self.ptr)
@property
def fields(self):
"Return a list of fields in the Feature."
return [
force_str(
capi.get_field_name(capi.get_field_defn(self._layer._ldefn, i)),
self.encoding,
strings_only=True,
)
for i in range(self.num_fields)
]
@property
def geom(self):
"Return the OGR Geometry for this Feature."
# Retrieving the geometry pointer for the feature.
geom_ptr = capi.get_feat_geom_ref(self.ptr)
return OGRGeometry(geom_api.clone_geom(geom_ptr))
@property
def geom_type(self):
"Return the OGR Geometry Type for this Feature."
return OGRGeomType(capi.get_fd_geom_type(self._layer._ldefn))
# #### Feature Methods ####
def get(self, field):
"""
Return the value of the field, instead of an instance of the Field
object. May take a string of the field name or a Field object as
parameters.
"""
field_name = getattr(field, "name", field)
return self[field_name].value
def index(self, field_name):
"Return the index of the given field name."
i = capi.get_field_index(self.ptr, force_bytes(field_name))
if i < 0:
raise IndexError("Invalid OFT field name given: %s." % field_name)
return i
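# Hedged convenience sketch (not part of Django's public API): collect a Feature's
# attribute values into a plain dict, using the `fields` property and `get()` above.
def _feature_as_dict(feature):
    """Return a {field name: value} mapping for the given Feature (illustration only)."""
    return {name: feature.get(name) for name in feature.fields}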
|
the-stack_106_24156 | """Common test functions."""
from unittest.mock import MagicMock, PropertyMock, patch
from uuid import uuid4
from aiohttp import web
from aiohttp.test_utils import TestClient
import pytest
from supervisor.api import RestAPI
from supervisor.bootstrap import initialize_coresys
from supervisor.coresys import CoreSys
from supervisor.dbus.const import DBUS_NAME_NM, DBUS_OBJECT_BASE
from supervisor.dbus.network import NetworkManager
from supervisor.dbus.network.interface import NetworkInterface
from supervisor.docker import DockerAPI
from supervisor.utils.gdbus import DBus
from tests.common import load_fixture, load_json_fixture
# pylint: disable=redefined-outer-name, protected-access
@pytest.fixture
def docker() -> DockerAPI:
"""Mock DockerAPI."""
images = [MagicMock(tags=["homeassistant/amd64-hassio-supervisor:latest"])]
with patch("docker.DockerClient", return_value=MagicMock()), patch(
"supervisor.docker.DockerAPI.images", return_value=MagicMock()
), patch("supervisor.docker.DockerAPI.containers", return_value=MagicMock()), patch(
"supervisor.docker.DockerAPI.api", return_value=MagicMock()
), patch(
"supervisor.docker.DockerAPI.images.list", return_value=images
), patch(
"supervisor.docker.DockerAPI.info",
return_value=MagicMock(),
), patch(
"supervisor.docker.DockerConfig",
return_value=MagicMock(),
):
docker_obj = DockerAPI()
docker_obj.info.logging = "journald"
docker_obj.info.storage = "overlay2"
docker_obj.info.version = "1.0.0"
docker_obj.config.registries = {}
yield docker_obj
@pytest.fixture
def dbus() -> DBus:
"""Mock DBUS."""
async def mock_get_properties(_, interface):
return load_json_fixture(f"{interface.replace('.', '_')}.json")
async def mock_send(_, command, silent=False):
if silent:
return ""
filetype = "xml" if "--xml" in command else "fixture"
fixture = f"{command[6].replace('/', '_')[1:]}.{filetype}"
return load_fixture(fixture)
with patch("supervisor.utils.gdbus.DBus._send", new=mock_send), patch(
"supervisor.dbus.interface.DBusInterface.is_connected",
return_value=True,
), patch("supervisor.utils.gdbus.DBus.get_properties", new=mock_get_properties):
dbus_obj = DBus(DBUS_NAME_NM, DBUS_OBJECT_BASE)
yield dbus_obj
@pytest.fixture
async def network_manager(dbus) -> NetworkManager:
"""Mock NetworkManager."""
async def dns_update():
pass
with patch("supervisor.dbus.network.NetworkManager.dns", return_value=MagicMock()):
nm_obj = NetworkManager()
nm_obj.dns.update = dns_update
nm_obj.dbus = dbus
await nm_obj.connect()
await nm_obj.update()
yield nm_obj
@pytest.fixture
async def coresys(loop, docker, dbus, network_manager, aiohttp_client) -> CoreSys:
"""Create a CoreSys Mock."""
with patch("supervisor.bootstrap.initialize_system_data"), patch(
"supervisor.bootstrap.setup_diagnostics"
), patch(
"supervisor.bootstrap.fetch_timezone",
return_value="Europe/Zurich",
), patch(
"aiohttp.ClientSession",
return_value=TestClient.session,
):
coresys_obj = await initialize_coresys()
# Mock save json
coresys_obj.ingress.save_data = MagicMock()
# Mock test client
coresys_obj.arch._default_arch = "amd64"
coresys_obj._machine = "qemux86-64"
coresys_obj._machine_id = uuid4()
# Mock host communication
coresys_obj._dbus = dbus
coresys_obj._dbus.network = network_manager
# Mock docker
coresys_obj._docker = docker
yield coresys_obj
@pytest.fixture
def sys_machine():
"""Mock sys_machine."""
with patch("supervisor.coresys.CoreSys.machine", new_callable=PropertyMock) as mock:
yield mock
@pytest.fixture
def sys_supervisor():
"""Mock sys_supervisor."""
with patch(
"supervisor.coresys.CoreSys.supervisor", new_callable=PropertyMock
) as mock:
mock.return_value = MagicMock()
yield MagicMock
@pytest.fixture
async def api_client(aiohttp_client, coresys: CoreSys):
"""Fixture for RestAPI client."""
api = RestAPI(coresys)
api.webapp = web.Application()
await api.load()
yield await aiohttp_client(api.webapp)
@pytest.fixture
async def network_interface(dbus):
"""Fixture for a network interface."""
interface = NetworkInterface()
await interface.connect(dbus, "/org/freedesktop/NetworkManager/ActiveConnection/1")
await interface.connection.update_information()
yield interface
@pytest.fixture
def store_manager(coresys: CoreSys):
"""Fixture for the store manager."""
sm_obj = coresys.store
sm_obj.repositories = set(coresys.config.addons_repositories)
with patch("supervisor.store.data.StoreData.update", return_value=MagicMock()):
yield sm_obj
|
the-stack_106_24157 | import collections
import logging
from typing import Dict, List, Optional, Set, Tuple, Union, Callable
from blspy import AugSchemeMPL, G1Element
from Dortbip158 import PyBIP158
from clvm.casts import int_from_bytes
from Dort.consensus.block_record import BlockRecord
from Dort.consensus.block_rewards import calculate_base_farmer_reward, calculate_pool_reward
from Dort.consensus.block_root_validation import validate_block_merkle_roots
from Dort.full_node.mempool_check_conditions import mempool_check_conditions_dict
from Dort.consensus.blockchain_interface import BlockchainInterface
from Dort.consensus.coinbase import create_farmer_coin, create_pool_coin
from Dort.consensus.constants import ConsensusConstants
from Dort.consensus.cost_calculator import NPCResult, calculate_cost_of_program
from Dort.consensus.find_fork_point import find_fork_point_in_chain
from Dort.full_node.block_store import BlockStore
from Dort.full_node.coin_store import CoinStore
from Dort.full_node.mempool_check_conditions import get_name_puzzle_conditions
from Dort.types.blockchain_format.coin import Coin
from Dort.types.blockchain_format.sized_bytes import bytes32
from Dort.types.coin_record import CoinRecord
from Dort.types.condition_opcodes import ConditionOpcode
from Dort.types.condition_with_args import ConditionWithArgs
from Dort.types.full_block import FullBlock
from Dort.types.generator_types import BlockGenerator
from Dort.types.name_puzzle_condition import NPC
from Dort.types.unfinished_block import UnfinishedBlock
from Dort.util.condition_tools import (
pkm_pairs_for_conditions_dict,
coin_announcements_names_for_npc,
puzzle_announcements_names_for_npc,
)
from Dort.util.errors import Err
from Dort.util.generator_tools import (
additions_for_npc,
tx_removals_and_additions,
)
from Dort.util.hash import std_hash
from Dort.util.ints import uint32, uint64, uint128
log = logging.getLogger(__name__)
async def validate_block_body(
constants: ConsensusConstants,
blocks: BlockchainInterface,
block_store: BlockStore,
coin_store: CoinStore,
peak: Optional[BlockRecord],
block: Union[FullBlock, UnfinishedBlock],
height: uint32,
npc_result: Optional[NPCResult],
fork_point_with_peak: Optional[uint32],
get_block_generator: Callable,
) -> Tuple[Optional[Err], Optional[NPCResult]]:
"""
This assumes the header block has been completely validated.
Validates the transactions and body of the block. Returns None for the first value if everything
validates correctly, or an Err if something does not validate. For the second value, returns an NPCResult
only if validation succeeded, and there are transactions. In other cases it returns None. The NPC result is
the result of running the generator with the previous generators' refs. It is only present for transaction
blocks which have spent coins.
"""
if isinstance(block, FullBlock):
assert height == block.height
prev_transaction_block_height: uint32 = uint32(0)
# 1. For non-transaction blocks: foliage block, transaction filter, transactions info, and generator must
# be empty. If it is a block but not a transaction block, there is no body to validate. Check that all fields are
# None
if block.foliage.foliage_transaction_block_hash is None:
if (
block.foliage_transaction_block is not None
or block.transactions_info is not None
or block.transactions_generator is not None
):
return Err.NOT_BLOCK_BUT_HAS_DATA, None
prev_tb: BlockRecord = blocks.block_record(block.prev_header_hash)
while not prev_tb.is_transaction_block:
prev_tb = blocks.block_record(prev_tb.prev_hash)
assert prev_tb.timestamp is not None
if (
prev_tb.timestamp > constants.INITIAL_FREEZE_END_TIMESTAMP
and len(block.transactions_generator_ref_list) > 0
):
return Err.NOT_BLOCK_BUT_HAS_DATA, None
return None, None # This means the block is valid
# All checks below this point correspond to transaction blocks
# 2. For blocks, foliage block, transactions info must not be empty
if block.foliage_transaction_block is None or block.transactions_info is None:
return Err.IS_TRANSACTION_BLOCK_BUT_NO_DATA, None
assert block.foliage_transaction_block is not None
# keeps track of the reward coins that need to be incorporated
expected_reward_coins: Set[Coin] = set()
# 3. The transaction info hash in the Foliage block must match the transaction info
if block.foliage_transaction_block.transactions_info_hash != std_hash(block.transactions_info):
return Err.INVALID_TRANSACTIONS_INFO_HASH, None
# 4. The foliage block hash in the foliage block must match the foliage block
if block.foliage.foliage_transaction_block_hash != std_hash(block.foliage_transaction_block):
return Err.INVALID_FOLIAGE_BLOCK_HASH, None
# 5. The reward claims must be valid for the previous blocks, and current block fees
# If height == 0, expected_reward_coins will be left empty
if height > 0:
# Add reward claims for all blocks from the prev prev block, until the prev block (including the latter)
prev_transaction_block = blocks.block_record(block.foliage_transaction_block.prev_transaction_block_hash)
prev_transaction_block_height = prev_transaction_block.height
assert prev_transaction_block.fees is not None
pool_coin = create_pool_coin(
prev_transaction_block_height,
prev_transaction_block.pool_puzzle_hash,
calculate_pool_reward(prev_transaction_block.height),
constants.GENESIS_CHALLENGE,
)
farmer_coin = create_farmer_coin(
prev_transaction_block_height,
prev_transaction_block.farmer_puzzle_hash,
uint64(calculate_base_farmer_reward(prev_transaction_block.height) + prev_transaction_block.fees),
constants.GENESIS_CHALLENGE,
)
# Adds the previous block
expected_reward_coins.add(pool_coin)
expected_reward_coins.add(farmer_coin)
# For the second block in the chain, don't go back further
if prev_transaction_block.height > 0:
curr_b = blocks.block_record(prev_transaction_block.prev_hash)
while not curr_b.is_transaction_block:
expected_reward_coins.add(
create_pool_coin(
curr_b.height,
curr_b.pool_puzzle_hash,
calculate_pool_reward(curr_b.height),
constants.GENESIS_CHALLENGE,
)
)
expected_reward_coins.add(
create_farmer_coin(
curr_b.height,
curr_b.farmer_puzzle_hash,
calculate_base_farmer_reward(curr_b.height),
constants.GENESIS_CHALLENGE,
)
)
curr_b = blocks.block_record(curr_b.prev_hash)
if set(block.transactions_info.reward_claims_incorporated) != expected_reward_coins:
return Err.INVALID_REWARD_COINS, None
if block.foliage_transaction_block.timestamp > constants.INITIAL_FREEZE_END_TIMESTAMP:
if len(block.transactions_info.reward_claims_incorporated) != len(expected_reward_coins):
# No duplicates after the transaction freeze period. Duplicates cause no issues because
# we filter them out anyway.
return Err.INVALID_REWARD_COINS, None
removals: List[bytes32] = []
coinbase_additions: List[Coin] = list(expected_reward_coins)
additions: List[Coin] = []
coin_announcement_names: Set[bytes32] = set()
puzzle_announcement_names: Set[bytes32] = set()
npc_list: List[NPC] = []
removals_puzzle_dic: Dict[bytes32, bytes32] = {}
cost: uint64 = uint64(0)
# We check in header validation that the timestamp is not more than 10 minutes into the future
if (
block.foliage_transaction_block.timestamp <= constants.INITIAL_FREEZE_END_TIMESTAMP
and block.transactions_generator is not None
):
# 6. No transactions before INITIAL_TRANSACTION_FREEZE timestamp
return Err.INITIAL_TRANSACTION_FREEZE, None
else:
# 7a. The generator root must be the hash of the serialized bytes of
# the generator for this block (or zeroes if no generator)
if block.transactions_generator is not None:
if std_hash(bytes(block.transactions_generator)) != block.transactions_info.generator_root:
return Err.INVALID_TRANSACTIONS_GENERATOR_HASH, None
else:
if block.transactions_info.generator_root != bytes([0] * 32):
return Err.INVALID_TRANSACTIONS_GENERATOR_HASH, None
# 8a. The generator_refs_root must be the hash of the serialized bytes of
# the generator ref list for this block (or 32 bytes of 0x01 if the ref list is empty)
# 8b. The generator ref list length must be less than or equal to MAX_GENERATOR_REF_LIST_SIZE entries
# 8c. The generator ref list must not point to a height >= this block's height
if block.transactions_generator_ref_list in (None, []):
if block.transactions_info.generator_refs_root != bytes([1] * 32):
return Err.INVALID_TRANSACTIONS_GENERATOR_REFS_ROOT, None
else:
# If we have a generator reference list, we must have a generator
if block.transactions_generator is None:
return Err.INVALID_TRANSACTIONS_GENERATOR_REFS_ROOT, None
# The generator_refs_root must be the hash of the concatenation of the List[uint32]
generator_refs_hash = std_hash(b"".join([bytes(i) for i in block.transactions_generator_ref_list]))
if block.transactions_info.generator_refs_root != generator_refs_hash:
return Err.INVALID_TRANSACTIONS_GENERATOR_REFS_ROOT, None
if len(block.transactions_generator_ref_list) > constants.MAX_GENERATOR_REF_LIST_SIZE:
return Err.TOO_MANY_GENERATOR_REFS, None
if any([index >= height for index in block.transactions_generator_ref_list]):
return Err.FUTURE_GENERATOR_REFS, None
if block.transactions_generator is not None:
# Get List of names removed, puzzles hashes for removed coins and conditions created
assert npc_result is not None
cost = calculate_cost_of_program(block.transactions_generator, npc_result, constants.COST_PER_BYTE)
npc_list = npc_result.npc_list
# 7. Check that cost <= MAX_BLOCK_COST_CLVM
log.debug(
f"Cost: {cost} max: {constants.MAX_BLOCK_COST_CLVM} "
f"percent full: {round(100 * (cost / constants.MAX_BLOCK_COST_CLVM), 2)}%"
)
if cost > constants.MAX_BLOCK_COST_CLVM:
return Err.BLOCK_COST_EXCEEDS_MAX, None
# 8. The CLVM program must not return any errors
if npc_result.error is not None:
return Err(npc_result.error), None
for npc in npc_list:
removals.append(npc.coin_name)
removals_puzzle_dic[npc.coin_name] = npc.puzzle_hash
additions = additions_for_npc(npc_list)
coin_announcement_names = coin_announcements_names_for_npc(npc_list)
puzzle_announcement_names = puzzle_announcements_names_for_npc(npc_list)
else:
assert npc_result is None
# 9. Check that the correct cost is in the transactions info
if block.transactions_info.cost != cost:
return Err.INVALID_BLOCK_COST, None
additions_dic: Dict[bytes32, Coin] = {}
# 10. Check additions for max coin amount
# Be careful to check for 64 bit overflows in other languages. This is the max 64 bit unsigned integer
# We will not even reach here because Coins do type checking (uint64)
for coin in additions + coinbase_additions:
additions_dic[coin.name()] = coin
if coin.amount < 0:
return Err.COIN_AMOUNT_NEGATIVE, None
if coin.amount > constants.MAX_COIN_AMOUNT:
return Err.COIN_AMOUNT_EXCEEDS_MAXIMUM, None
# 11. Validate addition and removal roots
root_error = validate_block_merkle_roots(
block.foliage_transaction_block.additions_root,
block.foliage_transaction_block.removals_root,
additions + coinbase_additions,
removals,
)
if root_error:
return root_error, None
# 12. The additions and removals must result in the correct filter
byte_array_tx: List[bytearray] = []
for coin in additions + coinbase_additions:
byte_array_tx.append(bytearray(coin.puzzle_hash))
for coin_name in removals:
byte_array_tx.append(bytearray(coin_name))
bip158: PyBIP158 = PyBIP158(byte_array_tx)
encoded_filter = bytes(bip158.GetEncoded())
filter_hash = std_hash(encoded_filter)
if filter_hash != block.foliage_transaction_block.filter_hash:
return Err.INVALID_TRANSACTIONS_FILTER_HASH, None
# 13. Check for duplicate outputs in additions
addition_counter = collections.Counter(_.name() for _ in additions + coinbase_additions)
for k, v in addition_counter.items():
if v > 1:
return Err.DUPLICATE_OUTPUT, None
# 14. Check for duplicate spends inside block
removal_counter = collections.Counter(removals)
for k, v in removal_counter.items():
if v > 1:
return Err.DOUBLE_SPEND, None
# 15. Check if removals exist and were not previously spent. (unspent_db + diff_store + this_block)
# The fork point is the last block in common between the peak chain and the chain of `block`
if peak is None or height == 0:
fork_h: int = -1
elif fork_point_with_peak is not None:
fork_h = fork_point_with_peak
else:
fork_h = find_fork_point_in_chain(blocks, peak, blocks.block_record(block.prev_header_hash))
# Get additions and removals since (after) fork_h but not including this block
# The values include: the coin that was added, the height of the block in which it was confirmed, and the
# timestamp of the block in which it was confirmed
additions_since_fork: Dict[bytes32, Tuple[Coin, uint32, uint64]] = {} # This includes coinbase additions
removals_since_fork: Set[bytes32] = set()
# For height 0, there are no additions and removals before this block, so we can skip
if height > 0:
# First, get all the blocks in the fork > fork_h, < block.height
prev_block: Optional[FullBlock] = await block_store.get_full_block(block.prev_header_hash)
reorg_blocks: Dict[uint32, FullBlock] = {}
curr: Optional[FullBlock] = prev_block
assert curr is not None
while curr.height > fork_h:
if curr.height == 0:
break
curr = await block_store.get_full_block(curr.prev_header_hash)
assert curr is not None
reorg_blocks[curr.height] = curr
if fork_h != -1:
assert len(reorg_blocks) == height - fork_h - 1
curr = prev_block
assert curr is not None
while curr.height > fork_h:
# The coin store doesn't contain coins from the fork, so we have to run the generator for each block in the fork
if curr.transactions_generator is not None:
# These blocks are in the past and therefore assumed to be valid, so get_block_generator won't raise
curr_block_generator: Optional[BlockGenerator] = await get_block_generator(curr)
assert curr_block_generator is not None and curr.transactions_info is not None
curr_npc_result = get_name_puzzle_conditions(
curr_block_generator,
min(constants.MAX_BLOCK_COST_CLVM, curr.transactions_info.cost),
cost_per_byte=constants.COST_PER_BYTE,
safe_mode=False,
)
removals_in_curr, additions_in_curr = tx_removals_and_additions(curr_npc_result.npc_list)
else:
removals_in_curr = []
additions_in_curr = []
for c_name in removals_in_curr:
assert c_name not in removals_since_fork
removals_since_fork.add(c_name)
for c in additions_in_curr:
assert c.name() not in additions_since_fork
assert curr.foliage_transaction_block is not None
additions_since_fork[c.name()] = (c, curr.height, curr.foliage_transaction_block.timestamp)
for coinbase_coin in curr.get_included_reward_coins():
assert coinbase_coin.name() not in additions_since_fork
assert curr.foliage_transaction_block is not None
additions_since_fork[coinbase_coin.name()] = (
coinbase_coin,
curr.height,
curr.foliage_transaction_block.timestamp,
)
if curr.height == 0:
break
curr = reorg_blocks[curr.height - 1]
assert curr is not None
removal_coin_records: Dict[bytes32, CoinRecord] = {}
for rem in removals:
if rem in additions_dic:
# Ephemeral coin
rem_coin: Coin = additions_dic[rem]
new_unspent: CoinRecord = CoinRecord(
rem_coin,
height,
height,
True,
False,
block.foliage_transaction_block.timestamp,
)
removal_coin_records[new_unspent.name] = new_unspent
else:
unspent = await coin_store.get_coin_record(rem)
if unspent is not None and unspent.confirmed_block_index <= fork_h:
# Spending something in the current chain, confirmed before fork
# (We ignore all coins confirmed after fork)
if unspent.spent == 1 and unspent.spent_block_index <= fork_h:
# Check for coins spent in an ancestor block
return Err.DOUBLE_SPEND, None
removal_coin_records[unspent.name] = unspent
else:
# This coin is not in the current heaviest chain, so it must be in the fork
if rem not in additions_since_fork:
# Check for spending a coin that does not exist in this fork
log.error(f"Err.UNKNOWN_UNSPENT: COIN ID: {rem} NPC RESULT: {npc_result}")
return Err.UNKNOWN_UNSPENT, None
new_coin, confirmed_height, confirmed_timestamp = additions_since_fork[rem]
new_coin_record: CoinRecord = CoinRecord(
new_coin,
confirmed_height,
uint32(0),
False,
False,
confirmed_timestamp,
)
removal_coin_records[new_coin_record.name] = new_coin_record
# This check applies to both coins created before fork (pulled from coin_store),
# and coins created after fork (additions_since_fork)
if rem in removals_since_fork:
# This coin was spent in the fork
return Err.DOUBLE_SPEND_IN_FORK, None
removed = 0
for unspent in removal_coin_records.values():
removed += unspent.coin.amount
added = 0
for coin in additions:
added += coin.amount
# 16. Check that the total amount added is <= the total amount removed (no coin minting)
if removed < added:
return Err.MINTING_COIN, None
fees = removed - added
assert fees >= 0
assert_fee_sum: uint128 = uint128(0)
for npc in npc_list:
if ConditionOpcode.RESERVE_FEE in npc.condition_dict:
fee_list: List[ConditionWithArgs] = npc.condition_dict[ConditionOpcode.RESERVE_FEE]
for cvp in fee_list:
fee = int_from_bytes(cvp.vars[0])
if fee < 0:
return Err.RESERVE_FEE_CONDITION_FAILED, None
assert_fee_sum = uint128(assert_fee_sum + fee)
# 17. Check that the assert fee sum <= fees, and that each reserved fee is non-negative
if fees < assert_fee_sum:
return Err.RESERVE_FEE_CONDITION_FAILED, None
# 18. Check that the fee amount + farmer reward < maximum coin amount
if fees + calculate_base_farmer_reward(height) > constants.MAX_COIN_AMOUNT:
return Err.COIN_AMOUNT_EXCEEDS_MAXIMUM, None
# 19. Check that the computed fees are equal to the fees in the block header
if block.transactions_info.fees != fees:
return Err.INVALID_BLOCK_FEE_AMOUNT, None
# 20. Verify that removed coin puzzle_hashes match with calculated puzzle_hashes
for unspent in removal_coin_records.values():
if unspent.coin.puzzle_hash != removals_puzzle_dic[unspent.name]:
return Err.WRONG_PUZZLE_HASH, None
# 21. Verify conditions
# create hash_key list for aggsig check
pairs_pks: List[G1Element] = []
pairs_msgs: List[bytes] = []
for npc in npc_list:
assert height is not None
unspent = removal_coin_records[npc.coin_name]
error = mempool_check_conditions_dict(
unspent,
coin_announcement_names,
puzzle_announcement_names,
npc.condition_dict,
prev_transaction_block_height,
block.foliage_transaction_block.timestamp,
)
if error:
return error, None
for pk, m in pkm_pairs_for_conditions_dict(
npc.condition_dict, npc.coin_name, constants.AGG_SIG_ME_ADDITIONAL_DATA
):
pairs_pks.append(pk)
pairs_msgs.append(m)
# 22. Verify aggregated signature
# TODO: move this to pre_validate_blocks_multiprocessing so we can sync faster
if not block.transactions_info.aggregated_signature:
return Err.BAD_AGGREGATE_SIGNATURE, None
# noinspection PyTypeChecker
if not AugSchemeMPL.aggregate_verify(pairs_pks, pairs_msgs, block.transactions_info.aggregated_signature):
return Err.BAD_AGGREGATE_SIGNATURE, None
return None, npc_result
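# Illustrative sketch (not used by the consensus validation above): step 12 recomputes the
# BIP158-style transaction filter from a block's additions and removals and compares its
# hash against the header. The same computation can be reproduced in isolation as shown
# below; `addition_coins` and `removal_coin_ids` are assumed to be plain lists of Coin
# objects and 32-byte coin ids supplied by the caller.
def _example_filter_hash(addition_coins: List[Coin], removal_coin_ids: List[bytes32]):
    # Puzzle hashes of created coins plus ids of spent coins, exactly as in step 12.
    elements = [bytearray(coin.puzzle_hash) for coin in addition_coins]
    elements += [bytearray(coin_id) for coin_id in removal_coin_ids]
    encoded_filter = bytes(PyBIP158(elements).GetEncoded())
    # A block is only valid if this hash equals foliage_transaction_block.filter_hash.
    return std_hash(encoded_filter)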
|
the-stack_106_24159 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: Image2Image.py
# Author: Yuxin Wu
import cv2
import numpy as np
import tensorflow as tf
import glob
import os
import argparse
from tensorpack import *
from tensorpack.utils.gpu import get_num_gpu
from tensorpack.utils.viz import stack_patches
from tensorpack.tfutils.summary import add_moving_summary
from tensorpack.tfutils.scope_utils import auto_reuse_variable_scope
from GAN import GANTrainer, GANModelDesc
"""
To train Image-to-Image translation model with image pairs:
./Image2Image.py --data /path/to/datadir --mode {AtoB,BtoA}
# datadir should contain jpg images of shape 2s x s, formed by A and B
# you can download some data from the original authors:
# https://people.eecs.berkeley.edu/~tinghuiz/projects/pix2pix/datasets/
Training visualizations will appear in TensorBoard.
To visualize on test set:
./Image2Image.py --sample --data /path/to/test/datadir --mode {AtoB,BtoA} --load model
"""
BATCH = 1
IN_CH = 3
OUT_CH = 3
LAMBDA = 100
NF = 64 # number of filters
def BNLReLU(x, name=None):
x = BatchNorm('bn', x)
return tf.nn.leaky_relu(x, alpha=0.2, name=name)
def visualize_tensors(name, imgs, scale_func=lambda x: (x + 1.) * 128., max_outputs=1):
"""Generate tensor for TensorBoard (casting, clipping)
Args:
name: name for visualization operation
*imgs: multiple tensors as list
scale_func: scale input tensors to fit range [0, 255]
Example:
visualize_tensors('viz1', [img1])
visualize_tensors('viz2', [img1, img2, img3], max_outputs=max(30, BATCH))
"""
xy = scale_func(tf.concat(imgs, axis=2))
xy = tf.cast(tf.clip_by_value(xy, 0, 255), tf.uint8, name='viz')
tf.summary.image(name, xy, max_outputs=max_outputs)
class Model(GANModelDesc):
def inputs(self):
SHAPE = 256
return [tf.placeholder(tf.float32, (None, SHAPE, SHAPE, IN_CH), 'input'),
tf.placeholder(tf.float32, (None, SHAPE, SHAPE, OUT_CH), 'output')]
def generator(self, imgs):
# imgs: input: 256x256xch
# U-Net structure; it differs slightly from the original in the placement of relu/lrelu
with argscope(BatchNorm, training=True), \
argscope(Dropout, is_training=True):
# always use local stat for BN, and apply dropout even in testing
with argscope(Conv2D, kernel_size=4, strides=2, activation=BNLReLU):
e1 = Conv2D('conv1', imgs, NF, activation=tf.nn.leaky_relu)
e2 = Conv2D('conv2', e1, NF * 2)
e3 = Conv2D('conv3', e2, NF * 4)
e4 = Conv2D('conv4', e3, NF * 8)
e5 = Conv2D('conv5', e4, NF * 8)
e6 = Conv2D('conv6', e5, NF * 8)
e7 = Conv2D('conv7', e6, NF * 8)
e8 = Conv2D('conv8', e7, NF * 8, activation=BNReLU) # 1x1
with argscope(Conv2DTranspose, activation=BNReLU, kernel_size=4, strides=2):
return (LinearWrap(e8)
.Conv2DTranspose('deconv1', NF * 8)
.Dropout()
.ConcatWith(e7, 3)
.Conv2DTranspose('deconv2', NF * 8)
.Dropout()
.ConcatWith(e6, 3)
.Conv2DTranspose('deconv3', NF * 8)
.Dropout()
.ConcatWith(e5, 3)
.Conv2DTranspose('deconv4', NF * 8)
.ConcatWith(e4, 3)
.Conv2DTranspose('deconv5', NF * 4)
.ConcatWith(e3, 3)
.Conv2DTranspose('deconv6', NF * 2)
.ConcatWith(e2, 3)
.Conv2DTranspose('deconv7', NF * 1)
.ConcatWith(e1, 3)
.Conv2DTranspose('deconv8', OUT_CH, activation=tf.tanh)())
@auto_reuse_variable_scope
def discriminator(self, inputs, outputs):
""" return a (b, 1) logits"""
l = tf.concat([inputs, outputs], 3)
with argscope(Conv2D, kernel_size=4, strides=2, activation=BNLReLU):
l = (LinearWrap(l)
.Conv2D('conv0', NF, activation=tf.nn.leaky_relu)
.Conv2D('conv1', NF * 2)
.Conv2D('conv2', NF * 4)
.Conv2D('conv3', NF * 8, strides=1, padding='VALID')
.Conv2D('convlast', 1, strides=1, padding='VALID', activation=tf.identity)())
return l
def build_graph(self, input, output):
input, output = input / 128.0 - 1, output / 128.0 - 1
with argscope([Conv2D, Conv2DTranspose], kernel_initializer=tf.truncated_normal_initializer(stddev=0.02)):
with tf.variable_scope('gen'):
fake_output = self.generator(input)
with tf.variable_scope('discrim'):
real_pred = self.discriminator(input, output)
fake_pred = self.discriminator(input, fake_output)
self.build_losses(real_pred, fake_pred)
errL1 = tf.reduce_mean(tf.abs(fake_output - output), name='L1_loss')
self.g_loss = tf.add(self.g_loss, LAMBDA * errL1, name='total_g_loss')
add_moving_summary(errL1, self.g_loss)
# tensorboard visualization
if IN_CH == 1:
input = tf.image.grayscale_to_rgb(input)
if OUT_CH == 1:
output = tf.image.grayscale_to_rgb(output)
fake_output = tf.image.grayscale_to_rgb(fake_output)
visualize_tensors('input,output,fake', [input, output, fake_output], max_outputs=max(30, BATCH))
self.collect_variables()
def optimizer(self):
lr = tf.get_variable('learning_rate', initializer=2e-4, trainable=False)
return tf.train.AdamOptimizer(lr, beta1=0.5, epsilon=1e-3)
def split_input(img):
"""
img: an RGB image of shape (s, 2s, 3).
:return: [input, output]
"""
# split the image into left + right pairs
s = img.shape[0]
assert img.shape[1] == 2 * s
input, output = img[:, :s, :], img[:, s:, :]
if args.mode == 'BtoA':
input, output = output, input
if IN_CH == 1:
input = cv2.cvtColor(input, cv2.COLOR_RGB2GRAY)[:, :, np.newaxis]
if OUT_CH == 1:
output = cv2.cvtColor(output, cv2.COLOR_RGB2GRAY)[:, :, np.newaxis]
return [input, output]
def get_data():
datadir = args.data
imgs = glob.glob(os.path.join(datadir, '*.jpg'))
ds = ImageFromFile(imgs, channel=3, shuffle=True)
ds = MapData(ds, lambda dp: split_input(dp[0]))
augs = [imgaug.Resize(286), imgaug.RandomCrop(256)]
ds = AugmentImageComponents(ds, augs, (0, 1))
ds = BatchData(ds, BATCH)
ds = PrefetchData(ds, 100, 1)
return ds
def sample(datadir, model_path):
pred = PredictConfig(
session_init=get_model_loader(model_path),
model=Model(),
input_names=['input', 'output'],
output_names=['viz'])
imgs = glob.glob(os.path.join(datadir, '*.jpg'))
ds = ImageFromFile(imgs, channel=3, shuffle=True)
ds = MapData(ds, lambda dp: split_input(dp[0]))
ds = AugmentImageComponents(ds, [imgaug.Resize(256)], (0, 1))
ds = BatchData(ds, 6)
pred = SimpleDatasetPredictor(pred, ds)
for o in pred.get_result():
o = o[0][:, :, :, ::-1]
stack_patches(o, nr_row=3, nr_col=2, viz=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
parser.add_argument('--load', help='load model')
parser.add_argument('--sample', action='store_true', help='run sampling')
parser.add_argument('--data', help='Image directory', required=True)
parser.add_argument('--mode', choices=['AtoB', 'BtoA'], default='AtoB')
parser.add_argument('-b', '--batch', type=int, default=1)
args = parser.parse_args()
if args.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
BATCH = args.batch
if args.sample:
assert args.load
sample(args.data, args.load)
else:
logger.auto_set_dir()
data = QueueInput(get_data())
trainer = GANTrainer(data, Model(), get_num_gpu())
trainer.train_with_defaults(
callbacks=[
PeriodicTrigger(ModelSaver(), every_k_epochs=3),
ScheduledHyperParamSetter('learning_rate', [(200, 1e-4)])
],
steps_per_epoch=data.size(),
max_epoch=300,
session_init=SaverRestore(args.load) if args.load else None
)
|
the-stack_106_24160 | import math
from .EmbConstant import *
from .EmbThreadShv import get_thread_set
from .ReadHelper import (
read_int_8,
read_int_16be,
read_int_32be,
read_string_8,
signed8,
signed16,
)
def read(f, out, settings=None):
in_jump = False
f.seek(0x56, 1) # header text
length = read_int_8(f)
out.metadata("name", read_string_8(f, length))
design_width = read_int_8(f)
design_height = read_int_8(f)
skip = math.ceil(design_height / 2.0) * design_width
f.seek(4 + int(skip), 1)
color_count = read_int_8(f)
f.seek(18, 1)
threads = get_thread_set()
stitch_per_color = {}
for i in range(color_count):
stitch_count = read_int_32be(f)
color_code = read_int_8(f)
thread = threads[color_code % len(threads)]
out.add_thread(thread)
stitch_per_color[i] = stitch_count
f.seek(9, 1)
f.seek(-2, 1)
stitches_since_stop = 0
current_color_index = 0
try:
max_stitches = stitch_per_color[current_color_index]
except IndexError:
max_stitches = 0
while True:
flags = STITCH
if in_jump:
flags = JUMP
b0 = read_int_8(f)
b1 = read_int_8(f)
if b1 is None:
break
if stitches_since_stop >= max_stitches:
out.color_change()
stitches_since_stop = 0
current_color_index += 1
try:
max_stitches = stitch_per_color[current_color_index]
except KeyError:
max_stitches = 0xFFFFFFFF
if b0 == 0x80:
stitches_since_stop += 1
if b1 == 3:
continue
elif b1 == 2:
in_jump = False
continue
elif b1 == 1:
stitches_since_stop += 2
sx = signed16(read_int_16be(f))
sy = signed16(read_int_16be(f))
in_jump = True
out.move(sx, sy)
continue
dx = signed8(b0)
dy = signed8(b1)
stitches_since_stop += 1
out.add_stitch_relative(flags, dx, dy)
out.end()
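# Illustrative usage sketch: `read` expects a binary file object positioned at the start of
# an .shv design and a pattern-writer object exposing the methods called above
# (metadata, add_thread, move, add_stitch_relative, color_change, end). The 0x80 byte acts
# as a control prefix in this format reader: b1 == 1 starts a jump with 16-bit big-endian
# deltas, b1 == 2 ends the jump, and b1 == 3 is skipped. The names below are placeholders,
# not defined in this module:
#
# pattern = EmbPattern()  # hypothetical: any object implementing the writer methods above
# with open("design.shv", "rb") as f:
#     read(f, pattern)
# # `pattern` now holds the decoded threads, jumps and stitches.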
|
the-stack_106_24161 | import logging
import os
import shutil
from services import dump_asc_12, dump_asc_16, dump_hzk_16, dump_hzk_12, make_font
logging.basicConfig(level=logging.DEBUG)
outputs_dir = 'outputs/'
docs_dir = 'docs/'
releases_dir = 'releases/'
def main():
if os.path.exists(outputs_dir):
shutil.rmtree(outputs_dir)
dump_asc_12.run()
dump_asc_16.run()
dump_hzk_12.run()
dump_hzk_16.run()
make_font.run()
shutil.copy(os.path.join(outputs_dir, 'hzk-pixel-12px.woff2'), os.path.join(docs_dir, 'hzk-pixel-12px.woff2'))
shutil.copy(os.path.join(outputs_dir, 'hzk-pixel-16px.woff2'), os.path.join(docs_dir, 'hzk-pixel-16px.woff2'))
shutil.copy(os.path.join(outputs_dir, 'preview-12px.png'), os.path.join(docs_dir, 'preview-12px.png'))
shutil.copy(os.path.join(outputs_dir, 'preview-16px.png'), os.path.join(docs_dir, 'preview-16px.png'))
if not os.path.exists(releases_dir):
os.makedirs(releases_dir)
shutil.copy(os.path.join(outputs_dir, 'hzk-pixel-12px.otf'), os.path.join(releases_dir, 'hzk-pixel-12px.otf'))
shutil.copy(os.path.join(outputs_dir, 'hzk-pixel-12px.woff2'), os.path.join(releases_dir, 'hzk-pixel-12px.woff2'))
shutil.copy(os.path.join(outputs_dir, 'hzk-pixel-12px.ttf'), os.path.join(releases_dir, 'hzk-pixel-12px.ttf'))
shutil.copy(os.path.join(outputs_dir, 'hzk-pixel-16px.otf'), os.path.join(releases_dir, 'hzk-pixel-16px.otf'))
shutil.copy(os.path.join(outputs_dir, 'hzk-pixel-16px.woff2'), os.path.join(releases_dir, 'hzk-pixel-16px.woff2'))
shutil.copy(os.path.join(outputs_dir, 'hzk-pixel-16px.ttf'), os.path.join(releases_dir, 'hzk-pixel-16px.ttf'))
if __name__ == '__main__':
main()
|
the-stack_106_24162 | # coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Copyright (c) 2021, Knowledge Engineering Group (KEG), Tsinghua University
# Modified by Jiezhong Qiu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pretrain MSA"""
import torch
import torch.nn.functional as F
from megatron import get_args, get_tokenizer
from megatron import print_rank_0
from megatron import get_timers
from megatron import mpu
from megatron.data.msa_dataset import build_train_valid_test_datasets
from megatron.model import MSAModel, MSAModelFirstStage, MSAModelIntermediateStage, MSAModelLastStage
from megatron.model.transformer import Collector
from megatron.training import pretrain
from megatron.utils import average_losses_across_data_parallel_group
from megatron.utils import get_msa_masks_and_position_ids
from megatron.model.msa_model import bert_extended_attention_mask
from megatron import IterCounter
def model_provider():
"""Build the model."""
print_rank_0('building MSA model ...')
args = get_args()
if mpu.get_pipeline_model_parallel_world_size() > 1:
# Determine model based on position of stage in pipeline.
if mpu.is_pipeline_first_stage():
model = MSAModelFirstStage(
num_tokentypes=0)
elif mpu.is_pipeline_last_stage():
model = MSAModelLastStage(
num_tokentypes=0,
add_binary_head=False,
parallel_output=True)
else:
model = MSAModelIntermediateStage(
num_tokentypes=0)
else:
model = MSAModel(
num_tokentypes=0,
add_binary_head=False,
parallel_output=True)
return model
def tokens_to_seq(alig):
msa_vocab = {0: '[PAD]', 1: '[MASK]', 2: '[CLS]', 3: '[SEP]', 4: '[UNK]', 5: 'A', 6: 'B', 7: 'C', 8: 'D', 9: 'E', 10: 'F', 11: 'G', 12: 'H', 13: 'I', 14: 'K', 15: 'L', 16: 'M', 17: 'N', 18: 'O', 19: 'P', 20: 'Q', 21: 'R', 22: 'S', 23: 'T', 24: 'U', 25: 'V', 26: 'W', 27: 'X', 28: 'Y', 29: 'Z', 30: '-', 31: '|'}
# seq = [''.join([msa_vocab[idx.item()] for idx in alig]) for alig in raw_msa_sample]
seq = ''.join([msa_vocab[idx.item()] for idx in alig])
return seq
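# Illustrative example of the mapping above (indices taken from msa_vocab):
# tokens_to_seq(torch.tensor([2, 5, 7, 30, 3])) == '[CLS]AC-[SEP]'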
def get_batch(data_iterator):
"""Build the batch."""
args = get_args()
tokenizer = get_tokenizer()
# Items and their type.
frags_train = args.frags_train
if frags_train != 0:
keys = ['frags_text', 'frags_labels', 'frags_loss_mask', 'frags_offset', 'frags_msa_aligns', 'frags_msa_length', 'raw_msa_sample'] # , 'padding_mask']
else:
keys = ['text', 'labels', 'loss_mask', 'offset', 'msa_aligns', 'msa_length', 'raw_msa_sample'] # , 'padding_mask']
datatype = torch.int64
# Broadcast data.
if data_iterator is not None:
data = next(data_iterator)
else:
data = None
# TODO: support protein string return
# data, seq = data
# data, msa_shape, seq = data
data_b = mpu.broadcast_data(keys, data, datatype)
# Unpack.
if frags_train != 0:
tokens = data_b['frags_text'].long()[0]
loss_mask = data_b['frags_loss_mask'].float()[0]
lm_labels = data_b['frags_labels'].long()[0]
offset = data_b['frags_offset'].long()[0]
msa_aligns = data_b['frags_msa_aligns'].long()[0]
msa_length = data_b['frags_msa_length'].long()[0]
else:
tokens = data_b['text'].long()[0]
loss_mask = data_b['loss_mask'].float()[0]
lm_labels = data_b['labels'].long()[0]
offset = data_b['offset'].long()[0]
msa_aligns = data_b['msa_aligns'].long()[0]
msa_length = data_b['msa_length'].long()[0]
raw_msa_sample = data_b['raw_msa_sample'].long()[0]
msa_shape = (msa_aligns, msa_length)
# padding_mask = data_b['padding_mask'].long()[0]
# Get the masks and postition ids.
# micro_batch_size, seq_length = data.size()
# Attention mask (lower triangular).
# if reset_attention_mask:
# att_mask_batch = micro_batch_size
# else:
# att_mask_batch = 1
# attention_mask = torch.ones(
# (att_mask_batch, seq_length, seq_length), device=data.device).view(
# att_mask_batch, 1, seq_length, seq_length)
# Position ids.
# seq_aligns, seq_length = msa_shape
# TODO: debugging note: the off-by-one in offset is caused by the inserted [CLS]; max_offset should be 256, not 257
# print(f'{msa_shape[1].item()=}, {offset=}')
position_ids = torch.arange(msa_shape[1].item(), dtype=torch.long,
device=tokens.device) + offset
position_ids[0] = 0
# print(f'{position_ids=}')
if args.fake_input:
position_ids += 2
position_ids = position_ids.unsqueeze(0).expand_as(tokens)
# position_ids = position_ids
# TODO: position_ids + 2
# if get_args().fake_input:
# position_ids += 2
# position_ids = (torch.arange(msa_shape[1].item(), dtype=torch.long,
# device=tokens.device) + 2).unsqueeze(0).expand_as(tokens)
# return tokens, loss_mask, lm_labels, padding_mask, attention_mask, position_ids # , seq
# print(f'{tokens=}, {loss_mask=}, {lm_labels=}, {position_ids=}')
seq = tokens_to_seq(raw_msa_sample[0]) if args.attention_save else []
return tokens, loss_mask, lm_labels, position_ids, seq
def forward_step(data_iterator, model, input_tensor):
"""Forward step."""
args = get_args()
timers = get_timers()
# Get the batch.
timers('batch-generator').start()
# TODO: support protein string return
# tokens, loss_mask, lm_labels, padding_mask, attention_mask, position_ids, seq \
tokens, loss_mask, lm_labels, position_ids, seq \
= get_batch(data_iterator)
timers('batch-generator').stop()
# print_rank_0('in-pretrain_msa.py get... {}'.format(IterCounter.get_iter()))
# extended_attention_mask = bert_extended_attention_mask(padding_mask) + attention_mask
# Forward pass through the model.
if mpu.is_pipeline_first_stage():
assert input_tensor is None
if mpu.is_pipeline_last_stage():
if args.attention_save:
# if tokens.shape[1] > 1023:
eval_max_length = args.eval_max_length
print(f'len={tokens.shape[1]}')
if tokens.shape[1] > eval_max_length:
print(f'skipping one sample longer than {eval_max_length}, len={tokens.shape[1]}')
return 0, {'lm loss': 0}
# NOTICE: remember to change return function of `get_batch` function
Collector.append(seq)
output_tensor = model(tokens, tokentype_ids=None,
lm_labels=lm_labels, position_ids=position_ids)
else:
output_tensor = model(tokens, tokentype_ids=None)
elif mpu.is_pipeline_last_stage():
assert input_tensor is not None
output_tensor = model(input_tensor, lm_labels=lm_labels)
else:
assert input_tensor is not None
output_tensor = model(input_tensor, position_ids=position_ids)
if mpu.is_pipeline_last_stage():
lm_loss_, _ = output_tensor
lm_loss_ = lm_loss_.float()
loss_mask = loss_mask.float()
lm_loss = torch.sum(
lm_loss_.view(-1) * loss_mask.reshape(-1)) / loss_mask.sum()
loss = lm_loss
averaged_losses = average_losses_across_data_parallel_group([lm_loss,])
return loss, {'lm loss': averaged_losses[0]}
return output_tensor
def train_valid_test_datasets_provider(train_val_test_num_samples):
"""Build train, valid, and test datasets."""
args = get_args()
print_rank_0('> building train, validation, and test datasets '
'for MSA ...')
train_ds, valid_ds, test_ds = build_train_valid_test_datasets(
data_prefix=args.data_path,
data_impl=args.data_impl,
splits_string=args.split,
train_valid_test_num_samples=train_val_test_num_samples,
seq_length=args.seq_length,
masked_lm_prob=args.mask_prob,
seed=args.seed,
skip_warmup=(not args.mmap_warmup))
print_rank_0("> finished creating MSA datasets ...")
return train_ds, valid_ds, test_ds
if __name__ == "__main__":
pretrain(train_valid_test_datasets_provider, model_provider, forward_step,
args_defaults={'tokenizer_type': 'BertWordPieceLowerCase'})
if get_args().attention_save:
Collector.dump('./data/attention')
|
the-stack_106_24164 | """
Derived module from dmdbase.py for forward/backward dmd.
"""
import numpy as np
from scipy.linalg import sqrtm
from .dmdbase import DMDBase
class FbDMD(DMDBase):
"""
Forward/backward DMD class.
:param svd_rank: the rank for the truncation; if 0, the method computes the
optimal rank and uses it for truncation; if positive integer, the
method uses the argument for the truncation; if float between 0 and 1,
the rank is the number of the biggest singular values that are needed
to reach the 'energy' specified by `svd_rank`; if -1, the method does
not compute truncation.
:type svd_rank: int or float
:param int tlsq_rank: rank truncation computing Total Least Square. Default
is 0, that means TLSQ is not applied.
:param bool exact: flag to compute either exact DMD or projected DMD.
Default is False.
:param bool opt: flag to compute optimal amplitudes. See :class:`DMDBase`.
Default is False.
:param rescale_mode: Scale Atilde as shown in
10.1016/j.jneumeth.2015.10.010 (section 2.4) before computing its
eigendecomposition. None means no rescaling, 'auto' means automatic
rescaling using singular values, otherwise the scaling factors.
:type rescale_mode: {'auto'} or None or numpy.ndarray
Reference: Dawson et al. https://arxiv.org/abs/1507.02264
"""
def fit(self, X):
"""
Compute the Dynamic Mode Decomposition of the input data.
:param X: the input snapshots.
:type X: numpy.ndarray or iterable
"""
self._snapshots, self._snapshots_shape = self._col_major_2darray(X)
n_samples = self._snapshots.shape[1]
X = self._snapshots[:, :-1]
Y = self._snapshots[:, 1:]
X, Y = self._compute_tlsq(X, Y, self.tlsq_rank)
Uy, sy, Vy = self._compute_svd(Y, self.svd_rank)
Ux, sx, Vx = self._compute_svd(X, self.svd_rank)
if len(sy) != len(sx):
raise ValueError(
'The optimal truncation produced different number of singular'
'values for the X and Y matrix, please specify different'
'svd_rank')
# Backward operator
bAtilde = self._build_lowrank_op(Uy, sy, Vy, X)
# Forward operator
fAtilde = self._build_lowrank_op(Ux, sx, Vx, Y)
self._Atilde = sqrtm(fAtilde.dot(np.linalg.inv(bAtilde)))
self._eigs, self._modes = self._eig_from_lowrank_op(
self._Atilde, Y, Ux, sx, Vx, self.exact)
self.original_time = {'t0': 0, 'tend': n_samples - 1, 'dt': 1}
self.dmd_time = {'t0': 0, 'tend': n_samples - 1, 'dt': 1}
self._b = self._compute_amplitudes(self._modes, self._snapshots,
self._eigs, self.opt)
return self
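# Illustrative usage sketch (assumes this module is used as part of the PyDMD package and
# that the DMDBase constructor accepts the parameters documented in the class docstring):
#
# import numpy as np
# snapshots = np.random.rand(64, 100)   # 64 state dimensions, 100 time steps
# fbdmd = FbDMD(svd_rank=10, exact=True)
# fbdmd.fit(snapshots)
# # fit() stores the eigendecomposition of the averaged operator
# # Atilde = sqrtm(fAtilde @ inv(bAtilde)), built from the forward and backward
# # low-rank operators, in self._eigs / self._modes / self._b.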
|
the-stack_106_24165 | import hashlib
import hmac
from buidl.helper import (
big_endian_to_int,
encode_base58_checksum,
hash160,
hash256,
int_to_big_endian,
raw_decode_base58,
)
from buidl._libsec import ffi, lib
GLOBAL_CTX = ffi.gc(
lib.secp256k1_context_create(
lib.SECP256K1_CONTEXT_SIGN | lib.SECP256K1_CONTEXT_VERIFY
),
lib.secp256k1_context_destroy,
)
P = 2 ** 256 - 2 ** 32 - 977
N = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141
class S256Point:
def __init__(self, csec=None, usec=None):
if usec:
self.usec = usec
self.csec = None
sec_cache = usec
elif csec:
self.csec = csec
self.usec = None
sec_cache = csec
else:
raise RuntimeError("need a serialization")
self.c = ffi.new("secp256k1_pubkey *")
if not lib.secp256k1_ec_pubkey_parse(
GLOBAL_CTX, self.c, sec_cache, len(sec_cache)
):
raise ValueError("libsecp256k1 produced error")
def __eq__(self, other):
return self.sec() == other.sec()
def __repr__(self):
return "S256Point({})".format(self.sec(compressed=False).hex())
def __rmul__(self, coefficient):
coef = coefficient % N
new_key = ffi.new("secp256k1_pubkey *")
s = self.sec(compressed=False)
lib.secp256k1_ec_pubkey_parse(GLOBAL_CTX, new_key, s, len(s))
lib.secp256k1_ec_pubkey_tweak_mul(GLOBAL_CTX, new_key, coef.to_bytes(32, "big"))
serialized = ffi.new("unsigned char [65]")
output_len = ffi.new("size_t *", 65)
lib.secp256k1_ec_pubkey_serialize(
GLOBAL_CTX, serialized, output_len, new_key, lib.SECP256K1_EC_UNCOMPRESSED
)
return self.__class__(usec=bytes(serialized))
def __add__(self, scalar):
"""Multiplies scalar by generator, adds result to current point"""
coef = scalar % N
new_key = ffi.new("secp256k1_pubkey *")
s = self.sec(compressed=False)
lib.secp256k1_ec_pubkey_parse(GLOBAL_CTX, new_key, s, len(s))
lib.secp256k1_ec_pubkey_tweak_add(GLOBAL_CTX, new_key, coef.to_bytes(32, "big"))
serialized = ffi.new("unsigned char [65]")
output_len = ffi.new("size_t *", 65)
lib.secp256k1_ec_pubkey_serialize(
GLOBAL_CTX, serialized, output_len, new_key, lib.SECP256K1_EC_UNCOMPRESSED
)
return self.__class__(usec=bytes(serialized))
def verify(self, z, sig):
msg = z.to_bytes(32, "big")
sig_data = sig.cdata()
return lib.secp256k1_ecdsa_verify(GLOBAL_CTX, sig_data, msg, self.c)
def sec(self, compressed=True):
"""returns the binary version of the SEC format"""
if compressed:
if not self.csec:
serialized = ffi.new("unsigned char [33]")
output_len = ffi.new("size_t *", 33)
lib.secp256k1_ec_pubkey_serialize(
GLOBAL_CTX,
serialized,
output_len,
self.c,
lib.SECP256K1_EC_COMPRESSED,
)
self.csec = bytes(ffi.buffer(serialized, 33))
return self.csec
else:
if not self.usec:
serialized = ffi.new("unsigned char [65]")
output_len = ffi.new("size_t *", 65)
lib.secp256k1_ec_pubkey_serialize(
GLOBAL_CTX,
serialized,
output_len,
self.c,
lib.SECP256K1_EC_UNCOMPRESSED,
)
self.usec = bytes(ffi.buffer(serialized, 65))
return self.usec
def hash160(self, compressed=True):
# get the sec
sec = self.sec(compressed)
# hash160 the sec
return hash160(sec)
def p2pkh_script(self, compressed=True):
"""Returns the p2pkh Script object"""
h160 = self.hash160(compressed)
# avoid circular dependency
from buidl.script import P2PKHScriptPubKey
return P2PKHScriptPubKey(h160)
def p2wpkh_script(self):
"""Returns the p2wpkh Script object"""
h160 = self.hash160(True)
# avoid circular dependency
from buidl.script import P2WPKHScriptPubKey
return P2WPKHScriptPubKey(h160)
def p2sh_p2wpkh_redeem_script(self):
"""Returns the RedeemScript for a p2sh-p2wpkh redemption"""
return self.p2wpkh_script().redeem_script()
def address(self, compressed=True, network="mainnet"):
"""Returns the p2pkh address string"""
return self.p2pkh_script(compressed).address(network)
def bech32_address(self, network="mainnet"):
"""Returns the p2wpkh bech32 address string"""
return self.p2wpkh_script().address(network)
def p2sh_p2wpkh_address(self, network="mainnet"):
"""Returns the p2sh-p2wpkh base58 address string"""
return self.p2wpkh_script().p2sh_address(network)
def verify_message(self, message, sig):
"""Verify a message in the form of bytes. Assumes that the z
is calculated using hash256 interpreted as a big-endian integer"""
# calculate the hash256 of the message
h256 = hash256(message)
# z is the big-endian interpretation. use big_endian_to_int
z = big_endian_to_int(h256)
# verify the message using the self.verify method
return self.verify(z, sig)
@classmethod
def parse(cls, sec_bin):
"""returns a Point object from a SEC binary (not hex)"""
if sec_bin[0] == 4:
return S256Point(usec=sec_bin)
else:
return S256Point(csec=sec_bin)
G = S256Point(
usec=bytes.fromhex(
"0479be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8"
)
)
class Signature:
def __init__(self, der=None, c=None):
if der:
self.der_cache = der
self.c = ffi.new("secp256k1_ecdsa_signature *")
if not lib.secp256k1_ecdsa_signature_parse_der(
GLOBAL_CTX, self.c, der, len(der)
):
raise RuntimeError("badly formatted signature {}".format(der.hex()))
elif c:
self.c = c
self.der_cache = None
else:
raise RuntimeError("need der or c object")
def __eq__(self, other):
return self.der() == other.der()
def __repr__(self):
return "Signature{}".format(self.der().hex())
def der(self):
if not self.der_cache:
der = ffi.new("unsigned char[72]")
der_length = ffi.new("size_t *", 72)
lib.secp256k1_ecdsa_signature_serialize_der(
GLOBAL_CTX, der, der_length, self.c
)
self.der_cache = bytes(ffi.buffer(der, der_length[0]))
return self.der_cache
def cdata(self):
return self.c
@classmethod
def parse(cls, der):
return cls(der=der)
class PrivateKey:
def __init__(self, secret, network="mainnet", compressed=True):
self.secret = secret
self.point = secret * G
self.network = network
self.compressed = compressed
def hex(self):
return "{:x}".format(self.secret).zfill(64)
def sign(self, z):
secret = self.secret.to_bytes(32, "big")
msg = z.to_bytes(32, "big")
csig = ffi.new("secp256k1_ecdsa_signature *")
if not lib.secp256k1_ecdsa_sign(
GLOBAL_CTX, csig, msg, secret, ffi.NULL, ffi.NULL
):
raise RuntimeError("something went wrong with c signing")
sig = Signature(c=csig)
if not self.point.verify(z, sig):
raise RuntimeError("something went wrong with signing")
return sig
def deterministic_k(self, z):
k = b"\x00" * 32
v = b"\x01" * 32
if z > N:
z -= N
z_bytes = int_to_big_endian(z, 32)
secret_bytes = int_to_big_endian(self.secret, 32)
s256 = hashlib.sha256
k = hmac.new(k, v + b"\x00" + secret_bytes + z_bytes, s256).digest()
v = hmac.new(k, v, s256).digest()
k = hmac.new(k, v + b"\x01" + secret_bytes + z_bytes, s256).digest()
v = hmac.new(k, v, s256).digest()
while True:
v = hmac.new(k, v, s256).digest()
candidate = big_endian_to_int(v)
if candidate >= 1 and candidate < N:
return candidate
k = hmac.new(k, v + b"\x00", s256).digest()
v = hmac.new(k, v, s256).digest()
def sign_message(self, message):
"""Sign a message in the form of bytes instead of the z. The z should
be assumed to be the hash256 of the message interpreted as a big-endian
integer."""
# compute the hash256 of the message
h256 = hash256(message)
# z is the big-endian interpretation. use big_endian_to_int
z = big_endian_to_int(h256)
# sign the message using the self.sign method
return self.sign(z)
@classmethod
def parse(cls, wif):
"""Converts WIF to a PrivateKey object"""
raw = raw_decode_base58(wif)
if len(raw) == 34:
compressed = True
if raw[-1] != 1:
raise ValueError("Invalid WIF")
raw = raw[:-1]
else:
compressed = False
secret = big_endian_to_int(raw[1:])
if raw[0] == 0xEF:
network = "testnet"
elif raw[0] == 0x80:
network = "mainnet"
else:
raise ValueError("Invalid WIF")
return cls(secret, network=network, compressed=compressed)
def wif(self, compressed=True):
# convert the secret from integer to a 32-bytes in big endian using num.to_bytes(32, 'big')
secret_bytes = self.secret.to_bytes(32, "big")
# prepend b'\xef' on testnet, b'\x80' on mainnet
if self.network == "mainnet":
prefix = b"\x80"
else:
prefix = b"\xef"
# append b'\x01' if compressed
if compressed:
suffix = b"\x01"
else:
suffix = b""
# encode_base58_checksum the whole thing
return encode_base58_checksum(prefix + secret_bytes + suffix)
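# Illustrative end-to-end sketch (not part of the library's public surface): derive a key,
# sign a message, verify it, and render a testnet address using only the classes above.
# The secret value and message are arbitrary demo inputs.
if __name__ == "__main__":
    demo_key = PrivateKey(secret=12345, network="testnet")
    demo_sig = demo_key.sign_message(b"hello buidl")
    # Verification goes through S256Point.verify_message on the matching public key.
    assert demo_key.point.verify_message(b"hello buidl", demo_sig)
    print("testnet address:", demo_key.point.address(compressed=True, network="testnet"))
    print("WIF:", demo_key.wif())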
|
the-stack_106_24166 | #! /usr/bin/env python3
#
# Show a compact release note summary of a range of Git commits.
#
# Example use: release-notes.py --help
#
# Note: the first commit in the range is excluded!
#
# Requires:
# - GitPython https://pypi.python.org/pypi/GitPython/
# - You need to configure your local repo to pull the PR refs from
# GitHub. To do this, add a line like:
# fetch = +refs/pull/*/head:refs/pull/origin/*
# to the GitHub remote section of .git/config.
#
# Disclaimer: this program is provided without warranties of any kind,
# including suitability for any purpose. The author(s) will not be
# responsible if this script eats your left sock.
#
# Known limitations:
#
# - if different people with the same name contribute, this script
# will be confused. (it will merge their work under one entry).
# - the list of aliases below must be manually modified when
# contributors change their git name and/or email address.
#
# Note: there are unit tests in the release-notes subdirectory!
#
# pylint: disable=line-too-long, invalid-name, missing-function-docstring, too-many-branches, redefined-outer-name
import sys
import itertools
import re
import datetime
import time
from gitdb import exc
import subprocess
import os.path
from optparse import OptionParser
from git import Repo
from git.repo.fun import name_to_object
from git.util import Stats
#
# Global behavior constants
#
# minimum sha length to disambiguate
shamin = 9
# Basic mailmap functionality using the AUTHORS file.
mmre = re.compile(r'^(?P<name>.*?)\s+<(?P<addr>[^>]*)>(?P<aliases>(?:[^<]*<[^>]*>)*)$')
mmare = re.compile('(?P<alias>[^<]*)<(?P<addr>[^>]*)>')
crdb_folk = set()
class P:
def __init__(self, name, addr):
self.name = name
self.email = addr
self.aliases = [(name, addr)]
self.crdb = '@cockroachlabs.com' in addr
if self.crdb:
crdb_folk.add(self)
def __repr__(self):
return "%s <%s>" % (self.name, self.email)
def __lt__(self, other):
return self.name < other.name or (self.name == other.name and self.email < other.email)
mmap_bycanon = {}
mmap_byaddr = {}
mmap_byname = {}
def define_person(name, addr):
p = P(name, addr)
canon = (name, addr)
if canon in mmap_bycanon:
print('warning: duplicate person %r, ignoring' % (canon,), file=sys.stderr)
return None
mmap_bycanon[canon] = p
byaddr = mmap_byaddr.get(addr, [])
byaddr.append(p)
mmap_byaddr[addr] = byaddr
byname = mmap_byname.get(name, [])
byname.append(p)
mmap_byname[name] = byname
return p
if not os.path.exists('AUTHORS'):
print('warning: AUTHORS missing in current directory.', file=sys.stderr)
print('Maybe use "cd" to navigate to the working tree root.', file=sys.stderr)
else:
with open('AUTHORS', 'r') as f:
for line in f.readlines():
if line.strip().startswith('#'):
continue
m = mmre.match(line)
if m is None:
continue
p = define_person(m.group('name'), m.group('addr'))
if p is None:
continue
p.crdb = '@cockroachlabs.com' in line
if p.crdb:
crdb_folk.add(p)
aliases = m.group('aliases')
aliases = mmare.findall(aliases)
for alias, addr in aliases:
name = alias.strip()
byaddr = mmap_byaddr.get(addr, [])
if p not in byaddr:
byaddr.append(p)
mmap_byaddr[addr] = byaddr
if name == '':
name = p.name
canon = (name, addr)
if canon in mmap_bycanon:
print('warning: duplicate alias %r, ignoring' % (canon,), file=sys.stderr)
continue
mmap_bycanon[canon] = p
p.aliases.append(canon)
byname = mmap_byname.get(name, [])
if p not in byname:
byname.append(p)
mmap_byname[name] = byname
# lookup_person retrieves the main identity of a person given one of their
# names or email aliases in the mailmap.
def lookup_person(name, email):
key = (name, email)
if key in mmap_bycanon:
# lucky case.
return mmap_bycanon[key]
# Name+email didn't work.
# Let's see email next.
if email in mmap_byaddr:
candidates = mmap_byaddr[email]
if len(candidates) > 1:
print('warning: no direct name match for', (name, email),
'and addr', email, 'is ambiguous,',
'keeping as-is', file=sys.stderr)
return define_person(name, email)
return candidates[0]
# Email didn't work either. That's not great.
if name in mmap_byname:
candidates = mmap_byname[name]
if len(candidates) > 1:
print('warning: no direct name match for', (name, email),
'and name', name, 'is ambiguous,',
'keeping as-is', file=sys.stderr)
return define_person(name, email)
return candidates[0]
return define_person(name, email)
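# Illustrative example (hypothetical AUTHORS entry, not taken from a real file): given a line
# "Jane Doe <jane@cockroachlabs.com> jdoe <jdoe@users.noreply.github.com>",
# lookup_person("jdoe", "jdoe@users.noreply.github.com") resolves to the canonical
# P("Jane Doe", "jane@cockroachlabs.com"), while a (name, email) pair that matches nothing
# in the mailmap falls through to define_person() and is kept as-is.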
# Section titles for release notes.
relnotetitles = {
'cli change': "Command-line changes",
'sql change': "SQL language changes",
'admin ui change': "Admin UI changes",
'general change': "General changes",
'build change': "Build changes",
'enterprise change': "Enterprise edition changes",
'backward-incompatible change': "Backward-incompatible changes",
'performance improvement': "Performance improvements",
'bug fix': "Bug fixes",
'security update': "Security updates",
}
# Order in which to show the sections.
relnote_sec_order = [
'backward-incompatible change',
'security update',
'general change',
'enterprise change',
'sql change',
'cli change',
'admin ui change',
'bug fix',
'performance improvement',
'build change',
]
# Release note category common misspellings.
cat_misspells = {
'sql': 'sql change',
'general': 'general change',
'core change': 'general change',
'bugfix': 'bug fix',
'performance change': 'performance improvement',
'performance': 'performance improvement',
'ui': 'admin ui change',
'backwards-incompatible change': 'backward-incompatible change',
'enterprise': 'enterprise change',
'security': 'security update',
'security change': 'security update',
}
#
# Release note format
#
# The following release note formats have been seen in the wild:
#
# Release note (xxx): yyy <- canonical
# Release Notes: None
# Release note (xxx): yyy
# Release note (xxx) : yyy
# Release note: (xxx): yyy
# Release note: xxx: yyy
# Release note: (xxx) yyy
# Release note: yyy (no category)
# Release note (xxx, zzz): yyy
norelnote = re.compile(r'^[rR]elease [nN]otes?: *[Nn]one', flags=re.M)
# Captures :? (xxx) ?: yyy
form1 = r':? *\((?P<cat1>[^)]*)\) *:?'
# Captures : xxx: yyy - we must be careful not to capture too much, so we accept only one or two words
form2 = r': *(?P<cat2>[^ ]+(?: +[^ ]+)?) *:'
# Captures : yyy - no category
form3 = r':(?P<cat3>)'
relnote = re.compile(r'(?:^|[\n\r])[rR]elease [nN]otes? *(?:' + form1 + '|' + form2 + '|' + form3 + r') *(?P<note>.*)$', flags=re.S)
coauthor = re.compile(r'^Co-authored-by: (?P<name>[^<]*) <(?P<email>.*)>', flags=re.M)
fixannot = re.compile(r'^([fF]ix(es|ed)?|[cC]lose(d|s)?) #', flags=re.M)
#
# Merge commit format
#
# The following merge commits have been seen in the wild:
#
# Merge pull request #XXXXX from ... <- GitHub merges
# .... (#XXXX) <- GitHub merges (alt format)
# Merge #XXXXX #XXXXX #XXXXX <- Bors merges
merge_numbers = re.compile(r'^Merge( pull request)?(?P<numbers>( #[0-9]+)+)')
simple_merge = re.compile(r'.*\((?P<numbers>#[0-9]+)\)$', re.M)
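# Illustrative examples of what these patterns capture (PR numbers are made up):
# merge_numbers.match("Merge pull request #12345 from foo/bar").group("numbers") -> " #12345"
# merge_numbers.match("Merge #111 #222 #333").group("numbers") -> " #111 #222 #333"
# simple_merge.match("storage: fix race in compaction (#98765)").group("numbers") -> "#98765"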
#
# Initialization / option parsing
#
parser = OptionParser()
parser.add_option("-k", "--sort-key", dest="sort_key", default="title",
help="sort by KEY (pr, title, insertions, deletions, files, sha, date; default: title)", metavar="KEY")
parser.add_option("-r", "--reverse", action="store_true", dest="reverse_sort", default=False,
help="reverse sort")
parser.add_option("-f", "--from", dest="from_commit",
help="list history from COMMIT. Note: the first commit is excluded.", metavar="COMMIT")
parser.add_option("-t", "--until", dest="until_commit", default="HEAD",
help="list history up and until COMMIT (default: HEAD)", metavar="COMMIT")
parser.add_option("-p", "--pull-ref", dest="pull_ref_prefix", default="refs/pull/origin",
help="prefix for pull request refs (default: refs/pull/origin)", metavar="PREFIX")
parser.add_option("--hide-unambiguous-shas", action="store_true", dest="hide_shas", default=False,
help="omit commit SHAs from the release notes and per-contributor sections")
parser.add_option("--hide-per-contributor-section", action="store_true", dest="hide_per_contributor", default=False,
help="omit the per-contributor section")
parser.add_option("--hide-downloads-section", action="store_true", dest="hide_downloads", default=False,
help="omit the email sign-up and downloads section")
parser.add_option("--hide-header", action="store_true", dest="hide_header", default=False,
help="omit the title and date header")
parser.add_option("--exclude-from", dest="exclude_from_commit",
help="exclude history starting after COMMIT. Note: COMMIT itself is excluded.", metavar="COMMIT")
parser.add_option("--exclude-until", dest="exclude_until_commit",
help="exclude history ending at COMMIT", metavar="COMMIT")
parser.add_option("--one-line", dest="one_line", action="store_true", default=False,
help="unwrap release notes on a single line")
(options, args) = parser.parse_args()
sortkey = options.sort_key
revsort = options.reverse_sort
pull_ref_prefix = options.pull_ref_prefix
hideshas = options.hide_shas
hidepercontributor = options.hide_per_contributor
hidedownloads = options.hide_downloads
hideheader = options.hide_header
repo = Repo('.')
heads = repo.heads
def reformat_note(note_lines):
sep = '\n'
if options.one_line:
sep = ' '
return sep.join(note_lines).strip()
# Check that pull_ref_prefix is valid
testrefname = "%s/1" % pull_ref_prefix
try:
repo.commit(testrefname)
except exc.ODBError:
print("Unable to find pull request refs at %s." % pull_ref_prefix, file=sys.stderr)
print("Is your repo set up to fetch them? Try adding", file=sys.stderr)
print(" fetch = +refs/pull/*/head:%s/*" % pull_ref_prefix, file=sys.stderr)
print("to the GitHub remote section of .git/config.", file=sys.stderr)
sys.exit(1)
def find_commits(from_commit_ref, until_commit_ref):
try:
firstCommit = repo.commit(from_commit_ref)
except exc.ODBError:
print("Unable to find the first commit of the range.", file=sys.stderr)
print("No ref named %s." % from_commit_ref, file=sys.stderr)
sys.exit(1)
try:
finalCommit = repo.commit(until_commit_ref)
except exc.ODBError:
print("Unable to find the last commit of the range.", file=sys.stderr)
print("No ref named %s." % until_commit_ref, file=sys.stderr)
sys.exit(1)
return firstCommit, finalCommit
if not options.until_commit:
print("no value specified with --until, try --until=xxxxx (without space after =)", file=sys.stderr)
sys.exit(1)
if not options.from_commit:
print("no value specified with --from, try --from=xxxx (without space after =)", file=sys.stderr)
sys.exit(1)
firstCommit, commit = find_commits(options.from_commit, options.until_commit)
if commit == firstCommit:
print("Commit range is empty!", file=sys.stderr)
print(parser.get_usage(), file=sys.stderr)
print("Example use:", file=sys.stderr)
print(" %s --help" % sys.argv[0], file=sys.stderr)
print(" %s --from xxx >output.md" % sys.argv[0], file=sys.stderr)
print(" %s --from xxx --until yyy >output.md" % sys.argv[0], file=sys.stderr)
print("Note: the first commit is excluded. Use e.g.: --from <prev-release-tag> --until <new-release-candidate-sha>", file=sys.stderr)
sys.exit(0)
excludedFirst, excludedLast = None, None
if options.exclude_from_commit or options.exclude_until_commit:
if not options.exclude_from_commit or not options.exclude_until_commit:
print("Both -xf and -xt must be specified, or not at all.")
sys.exit(1)
excludedFirst, excludedLast = find_commits(options.exclude_from_commit, options.exclude_until_commit)
#
# Reading data from repository
#
def identify_commit(c):
return '%s ("%s", %s)' % (
c.hexsha, c.message.split('\n', 1)[0],
datetime.datetime.fromtimestamp(c.committed_date).ctime())
def check_reachability(start, end):
# Is the first commit reachable from the current one?
base = repo.merge_base(start, end)
if len(base) == 0:
print("error: %s:%s\nand %s:%s\nhave no common ancestor" % (
options.from_commit, identify_commit(start),
options.until_commit, identify_commit(end)), file=sys.stderr)
sys.exit(1)
commonParent = base[0]
if start != commonParent:
print("warning: %s:%s\nis not an ancestor of %s:%s!" % (
options.from_commit, identify_commit(start),
options.until_commit, identify_commit(end)), file=sys.stderr)
print(file=sys.stderr)
ageindays = int((start.committed_date - commonParent.committed_date) / 86400)
prevlen = sum((1 for x in repo.iter_commits(commonParent.hexsha + '...' + start.hexsha)))
print("The first common ancestor is %s" % identify_commit(commonParent), file=sys.stderr)
print("which is %d commits older than %s:%s\nand %d days older. Using that as origin." %\
(prevlen, options.from_commit, identify_commit(start), ageindays), file=sys.stderr)
print(file=sys.stderr)
start = commonParent
return start, end
firstCommit, commit = check_reachability(firstCommit, commit)
options.from_commit = firstCommit.hexsha
def extract_release_notes(currentCommit):
msglines = currentCommit.message.split('\n')
curnote = []
innote = False
foundnote = False
cat = None
notes = []
for line in msglines:
m = coauthor.search(line)
if m is not None:
# A Co-authored-by line finishes the parsing of the commit message,
# because it is only included at the end.
break
m = fixannot.search(line)
if m is not None:
# Fix/Close etc. Ignore.
continue
m = norelnote.search(line)
if m is not None:
# Release note: None
#
# Remember we found a note (so the commit is not marked as "missing
# a release note"), but we won't collect it.
foundnote = True
continue
m = relnote.search(line)
if m is None:
# Current line does not contain a release note separator.
# If we were already collecting a note, continue collecting it.
if innote:
curnote.append(line)
continue
# We have a release note boundary. If we were collecting a
# note already, complete it.
if innote:
notes.append((cat, reformat_note(curnote)))
curnote = []
innote = False
# Start a new release note.
firstline = m.group('note').strip()
if firstline.lower() == 'none':
# Release note: none - there's no note yet.
continue
foundnote = True
innote = True
# Capitalize the first line.
if firstline != "":
firstline = firstline[0].upper() + firstline[1:]
curnote = [firstline]
cat = m.group('cat1')
if cat is None:
cat = m.group('cat2')
if cat is None:
cat = 'missing category'
# Normalize to tolerate various capitalizations.
cat = cat.lower()
# If there are multiple categories separated by commas or slashes, use the first as grouping key.
cat = cat.split(',', 1)[0]
cat = cat.split('/', 1)[0]
# If there is any misspell, correct it.
if cat in cat_misspells:
cat = cat_misspells[cat]
if innote:
notes.append((cat, reformat_note(curnote)))
return foundnote, notes
spinner = itertools.cycle(['/', '-', '\\', '|'])
spin_counter = 0
def spin():
global spin_counter
# Display a progress spinner
spin_counter += 1
if spin_counter % 10 == 0:
if spin_counter % 100 == 0:
print("\b..", end='', file=sys.stderr)
print("\b", end='', file=sys.stderr)
print(next(spinner), end='', file=sys.stderr)
sys.stderr.flush()
def get_direct_history(startCommit, lastCommit):
history = []
for c in repo.iter_commits(startCommit.hexsha + '..' + lastCommit.hexsha, first_parent=True):
history.append(c)
return history
excluded_notes = set()
if excludedFirst is not None:
#
# Collect all the notes to exclude during collection below.
#
print("Collecting EXCLUDED release notes from\n%s\nuntil\n%s" %
(identify_commit(excludedFirst), identify_commit(excludedLast)), file=sys.stderr)
# First ensure that the loop below will terminate.
excludedFirst, excludedLast = check_reachability(excludedFirst, excludedLast)
# Collect all the merge points, so we can measure progress.
mergepoints = get_direct_history(excludedFirst, excludedLast)
# Now collect all commits.
print("Collecting EXCLUDED release notes...", file=sys.stderr)
i = 0
progress = 0
lastTime = time.time()
for c in repo.iter_commits(excludedFirst.hexsha + '..' + excludedLast.hexsha):
progress = int(100. * float(i) / len(mergepoints))
newTime = time.time()
if newTime >= lastTime + 5:
print("\b%d%%.." % progress, file=sys.stderr, end='')
lastTime = newTime
i += 1
spin()
# Collect the release notes in that commit.
_, notes = extract_release_notes(c)
for cat, note in notes:
excluded_notes.add((cat, note))
print("\b100%\n", file=sys.stderr)
print("Collecting release notes from\n%s\nuntil\n%s" % (identify_commit(firstCommit), identify_commit(commit)), file=sys.stderr)
release_notes = {}
missing_release_notes = []
def collect_authors(commit):
authors = set()
author = lookup_person(commit.author.name, commit.author.email)
if author.name != 'GitHub':
authors.add(author)
author = lookup_person(commit.committer.name, commit.committer.email)
if author.name != 'GitHub':
authors.add(author)
for m in coauthor.finditer(commit.message):
aname = m.group('name').strip()
amail = m.group('email').strip()
author = lookup_person(aname, amail)
authors.add(author)
return authors
def process_release_notes(pr, title, commit):
authors = collect_authors(commit)
foundnote, notes = extract_release_notes(commit)
# At the end the notes will be presented in reverse order, because
# we explore the commits in reverse order. However within 1 commit
# the notes are in the correct order. So reverse them upfront here,
# so that the 2nd reverse gets them in the right order again.
for cat, note in reversed(notes):
if (cat, note) not in excluded_notes:
completenote(commit, cat, note, authors, pr, title)
missing_item = None
if not foundnote:
# Missing release note. Keep track for later.
missing_item = makeitem(pr, title, commit.hexsha[:shamin], authors)
return missing_item, authors
def makeitem(pr, prtitle, sha, authors):
return {'authors': authors,
'sha': sha,
'pr': pr,
'title': prtitle,
'note': None}
def completenote(commit, cat, notemsg, authors, pr, title):
item = makeitem(pr, title, commit.hexsha[:shamin], authors)
item['note'] = notemsg
# Now collect per category.
catnotes = release_notes.get(cat, [])
catnotes.append(item)
release_notes[cat] = catnotes
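# (Illustrative) release_notes maps a category string to a list of item dicts
# as produced by makeitem(), e.g.
#     {'authors': {...}, 'sha': 'abc1234', 'pr': '#12345',
#      'title': 'sql: fix a crash', 'note': 'Fixed a crash when ...'}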
per_group_history = {}
individual_authors = set()
allprs = set()
# This function groups and counts all the commits that belong to a particular PR.
# Some description is in order regarding the logic here: it should visit all
# commits that are on the PR and only on the PR. If there's some secondary
# branch merge included on the PR, as long as those commits don't otherwise end
# up reachable from the target branch, they'll be included. If there's a back-
# merge from the target branch, that should be excluded.
#
# Examples:
#
# ### secondary branch merged into PR
#
# Dev branched off of K, made a commit J, made a commit G while someone else
# committed H, merged H from the secondary branch to the topic branch in E,
# made a final commit in C, then merged to master in A.
#
# A <-- master
# |\
# | \
# B C <-- PR tip
# | |
# | |
# D E <-- secondary merge
# | |\
# | | \
# F G H <-- secondary branch
# | | /
# | |/
# I J
# | /
# |/
# K <-- merge base
#
# C, E, G, H, and J will each be checked. None of them are ancestors of B,
# so they will all be visited. E will not be counted because the message
# starts with "Merge", so in the end C, G, H, and J will be included.
#
# ### back-merge from target branch
#
# Dev branched off H, made one commit G, merged the latest F from master in E,
# made one final commit in C, then merged the PR.
#
# A <-- master
# |\
# | \
# B C <-- PR tip
# | |
# | |
# D E <-- back-merge
# | /|
# |/ |
# F G
# | /
# |/
# H <-- merge base
#
# C, E, F, and G will each be checked. F is an ancestor of B, so it will be
# excluded. E starts with "Merge", so it will not be counted. Only C and G will
# have statistics included.
def analyze_pr(merge, pr):
allprs.add(pr)
refname = pull_ref_prefix + "/" + pr[1:]
tip = name_to_object(repo, refname)
noteexpr = re.compile("^%s: (?P<message>.*) r=.* a=.*" % pr[1:], flags=re.M)
m = noteexpr.search(merge.message)
title = ''
if m is None:
# GitHub merge
title = merge.message.split('\n', 3)[2]
else:
# Bors merge
title = m.group('message')
title = title.strip()
merge_base_result = repo.merge_base(merge.parents[0], tip)
if len(merge_base_result) == 0:
print("uh-oh! can't find merge base! pr", pr, file=sys.stderr)
sys.exit(-1)
merge_base = merge_base_result[0]
seen_commits = set()
missing_items = []
authors = set()
ncommits = 0
for commit in repo.iter_commits(merge_base.hexsha + '..' + tip.hexsha):
spin()
if commit in seen_commits:
# We may be seeing the same commit twice if a feature branch has
# been forked in sub-branches. Just skip over what we've seen
# already.
continue
seen_commits.add(commit)
if not commit.message.startswith("Merge"):
missing_item, prauthors = process_release_notes(pr, title, commit)
authors.update(prauthors)
ncommits += 1
if missing_item is not None:
missing_items.append(missing_item)
if ncommits == len(missing_items):
# None of the commits found had a release note. List them.
for item in missing_items:
missing_release_notes.append(item)
text = repo.git.diff(merge_base.hexsha, tip.hexsha, '--', numstat=True)
stats = Stats._list_from_string(repo, text)
collect_item(pr, title, merge.hexsha[:shamin], ncommits, authors, stats.total, merge.committed_date)
def collect_item(pr, prtitle, sha, ncommits, authors, stats, prts):
individual_authors.update(authors)
if len(authors) == 0:
authors.add("Unknown Author")
item = makeitem(pr, prtitle, sha, authors)
item.update({'ncommits': ncommits,
'insertions': stats['insertions'],
'deletions': stats['deletions'],
'files': stats['files'],
'lines': stats['lines'],
'date': datetime.date.fromtimestamp(prts).isoformat(),
})
al = item['authors']
k = str(sorted(al))
history = per_group_history.get(k, (al, []))
history[1].append(item)
per_group_history[k] = history
def analyze_standalone_commit(commit):
# Some random out-of-branch commit. Let's not forget them.
authors = collect_authors(commit)
title = commit.message.split('\n', 1)[0].strip()
item = makeitem('#unknown', title, commit.hexsha[:shamin], authors)
missing_release_notes.append(item)
collect_item('#unknown', title, commit.hexsha[:shamin], 1, authors, commit.stats.total, commit.committed_date)
# Collect all the merge points so we can report progress.
mergepoints = get_direct_history(firstCommit, commit)
i = 0
progress = 0
lastTime = time.time()
for commit in mergepoints:
progress = int(100. * float(i) / len(mergepoints))
newTime = time.time()
if newTime >= lastTime + 5:
        print("\b%d%%.." % progress, file=sys.stderr, end='')
lastTime = newTime
i += 1
spin()
ctime = datetime.datetime.fromtimestamp(commit.committed_date).ctime()
numbermatch = merge_numbers.search(commit.message)
if numbermatch is None:
# Try again with the alternate format.
firstline = commit.message.split('\n', 1)[0]
numbermatch = simple_merge.search(firstline)
# Analyze the commit
if numbermatch is not None:
prs = numbermatch.group("numbers").strip().split(" ")
for pr in prs:
print(" \r%s (%s) " % (pr, ctime), end='', file=sys.stderr)
analyze_pr(commit, pr)
else:
print(" \r%s (%s) " % (commit.hexsha[:shamin], ctime), end='', file=sys.stderr)
analyze_standalone_commit(commit)
print("\b\nAnalyzing authors...", file=sys.stderr)
sys.stderr.flush()
allgroups = list(per_group_history.keys())
allgroups.sort(key=lambda x: x.lower())
print("\b\nComputing first-time contributors...", end='', file=sys.stderr)
ext_contributors = individual_authors - crdb_folk
firsttime_contributors = []
for a in individual_authors:
# Find all aliases known for this person
aliases = a.aliases
# Collect the history for every alias
hist = b''
for al in aliases:
spin()
cmd = subprocess.run(["git", "log", "--author=%s <%s>" % al, options.from_commit, '-n', '1'], stdout=subprocess.PIPE, check=True)
hist += cmd.stdout
if len(hist) == 0:
# No commit from that author older than the first commit
# selected, so that's a first-time author.
firsttime_contributors.append(a)
print("\b\n", file=sys.stderr)
sys.stderr.flush()
#
# Presentation of results.
#
# Print the release notes.
# Start with known sections.
current_version = subprocess.check_output(["git", "describe", "--tags", "--match=v[0-9]*", options.until_commit], universal_newlines=True).strip()
previous_version = subprocess.check_output(["git", "describe", "--tags", "--match=v[0-9]*", options.from_commit], universal_newlines=True).strip()
if not hideheader:
print("---")
print("title: What's New in", current_version)
print("toc: true")
print("summary: Additions and changes in CockroachDB version", current_version, "since version", previous_version)
print("---")
print()
print("## " + time.strftime("%B %d, %Y"))
print()
# Print the release notes sign-up and Downloads section.
if not hidedownloads:
print("""Get future release notes emailed to you:
<div class="hubspot-install-form install-form-1 clearfix">
<script>
hbspt.forms.create({
css: '',
cssClass: 'install-form',
portalId: '1753393',
formId: '39686297-81d2-45e7-a73f-55a596a8d5ff',
formInstanceId: 1,
target: '.install-form-1'
});
</script>
</div>""")
print()
print("""### Downloads
<div id="os-tabs" class="clearfix">
<a href="https://binaries.cockroachdb.com/cockroach-""" + current_version + """.darwin-10.9-amd64.tgz"><button id="mac" data-eventcategory="mac-binary-release-notes">Mac</button></a>
<a href="https://binaries.cockroachdb.com/cockroach-""" + current_version + """.linux-amd64.tgz"><button id="linux" data-eventcategory="linux-binary-release-notes">Linux</button></a>
<a href="https://binaries.cockroachdb.com/cockroach-""" + current_version + """.src.tgz"><button id="source" data-eventcategory="source-release-notes">Source</button></a>
</div>
""")
print("""### Docker image
{% include copy-clipboard.html %}
~~~shell
$ docker pull cockroachdb/cockroach""" + ("-unstable:" if "-" in current_version else ":") + current_version + """
~~~
""")
print()
seenshas = set()
seenprs = set()
def renderlinks(item):
ret = '[%(pr)s][%(pr)s]' % item
seenprs.add(item['pr'])
if not hideshas:
ret += ' [%(sha)s][%(sha)s]' % item
seenshas.add(item['sha'])
return ret
for sec in relnote_sec_order:
r = release_notes.get(sec, None)
if r is None:
# No change in this section, nothing to print.
continue
sectitle = relnotetitles[sec]
print("###", sectitle)
print()
for item in reversed(r):
print("-", item['note'].replace('\n', '\n '), renderlinks(item))
print()
extrasec = set()
for sec in release_notes:
if sec in relnote_sec_order:
# already handled above, don't do anything.
continue
extrasec.add(sec)
if len(extrasec) > 0 or len(missing_release_notes) > 0:
print("### Miscellaneous")
print()
if len(extrasec) > 0:
extrasec_sorted = sorted(list(extrasec))
for extrasec in extrasec_sorted:
print("#### %s" % extrasec.capitalize())
print()
for item in release_notes[extrasec]:
print("-", item['note'].replace('\n', '\n '), renderlinks(item))
print()
if len(missing_release_notes) > 0:
print("#### Changes without release note annotation")
print()
for item in missing_release_notes:
authors = ', '.join(str(x) for x in sorted(item['authors']))
print("- [%(pr)s][%(pr)s] [%(sha)s][%(sha)s] %(title)s" % item, "(%s)" % authors)
seenshas.add(item['sha'])
seenprs.add(item['pr'])
print()
# Print the Doc Updates section.
print("### Doc updates")
print()
print("Docs team: Please add these manually.")
print()
# Print the Contributors section.
print("### Contributors")
print()
print("This release includes %d merged PR%s by %s author%s." %
(len(allprs), len(allprs) != 1 and "s" or "",
len(individual_authors), (len(individual_authors) != 1 and "s" or "")))
ext_contributors = individual_authors - crdb_folk
notified_authors = sorted(set(ext_contributors) | set(firsttime_contributors))
if len(notified_authors) > 0:
print("We would like to thank the following contributors from the CockroachDB community:")
print()
for person in notified_authors:
print("-", person.name, end='')
if person in firsttime_contributors:
annot = ""
if person.crdb:
annot = ", CockroachDB team member"
print(" (first-time contributor%s)" % annot, end='')
print()
print()
# Print the per-author contribution list.
if not hidepercontributor:
print("### PRs merged by contributors")
print()
if not hideshas:
fmt = " - %(date)s [%(pr)-6s][%(pr)-6s] [%(sha)s][%(sha)s] (+%(insertions)4d -%(deletions)4d ~%(lines)4d/%(files)2d) %(title)s"
else:
fmt = " - %(date)s [%(pr)-6s][%(pr)-6s] (+%(insertions)4d -%(deletions)4d ~%(lines)4d/%(files)2d) %(title)s"
for group in allgroups:
al, items = per_group_history[group]
items.sort(key=lambda x: x[sortkey], reverse=not revsort)
print("- %s:" % ', '.join(a.name for a in sorted(al)))
for item in items:
print(fmt % item, end='')
if not hideshas:
seenshas.add(item['sha'])
seenprs.add(item['pr'])
ncommits = item['ncommits']
if ncommits > 1:
print(" (", end='')
print("%d commits" % ncommits, end='')
print(")", end='')
print()
print()
print()
# Link the PRs and SHAs
for pr in sorted(seenprs):
print("[%s]: https://github.com/cockroachdb/cockroach/pull/%s" % (pr, pr[1:]))
for sha in sorted(seenshas):
print("[%s]: https://github.com/cockroachdb/cockroach/commit/%s" % (sha, sha))
print()
|
the-stack_106_24167 | ###############################################################
# pytest -v --capture=no tests/1_local/test_variables.py
# pytest -v tests/1_local/test_variables.py
# pytest -v --capture=no tests/1_local/test_variables.py::TestVariables::<METHODNAME>
###############################################################
import pytest
from cloudmesh.common.Shell import Shell
from cloudmesh.common.util import HEADING
from cloudmesh.common.util import path_expand
from cloudmesh.common.variables import Variables
def _run(command):
print()
print("Command:", command)
result = Shell.run(command)
print("Result:", result)
return result
@pytest.mark.incremental
class TestVariables(object):
def test_variables_assign(self):
HEADING("assign key=value")
v = Variables()
n = len(v)
v["gregor"] = "gregor"
assert (len(v) == n + 1)
assert "gregor" in v
v.close()
def test_variables_delete(self):
HEADING("delete")
v = Variables()
del v["gregor"]
assert "gregor" not in v
v.close()
def test_variables_add(self):
        HEADING("dictionary add")
d = {"a": "1", "b": "2"}
v = Variables()
v + d
print(v)
assert "a" in v and "b" in v
del v["a"]
del v["b"]
v + d
assert "a" in v and "b" in v
v - d
assert "a" not in v and "b" not in v
print(v)
v.close()
    def test_variables_remove(self):
        HEADING("dictionary and key subtract")
d = {"a": "1", "b": "2"}
v = Variables()
v + d
print(v)
assert "a" in v and "b" in v
v - d.keys()
assert "a" not in v and "b" not in v
print(v)
v.close()
def test_cli_set(self):
HEADING()
r = _run("cms var deleteme=abc")
print(r)
data = path_expand("~/.cloudmesh/variables.dat")
cat = _run(f"cat {data}")
print(cat)
assert "deleteme: abc" in cat
v = Variables()
print("Data", v.__dict__["data"].__dict__)
value = v['deleteme']
print("Value:", value)
assert value == 'abc'
def test_cli_get(self):
HEADING()
r = _run("cms var deleteme")
v = Variables()
print(r)
assert v['deleteme'] == 'abc'
def test_cli_list(self):
HEADING()
r = _run("cms var list")
v = Variables()
print(r)
assert v['deleteme'] == 'abc'
assert "deleteme='abc'" in r
def test_cli_delete(self):
HEADING()
r = _run("cms var delete deleteme")
v = Variables()
print("Result:", r)
print("Variable:", v)
assert v['deleteme'] != 'abc'
|
the-stack_106_24168 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""KFP DSL v2 compiler.
This is an experimental implementation of KFP compiler that compiles KFP
pipeline into Pipeline IR:
https://docs.google.com/document/d/1PUDuSQ8vmeKSBloli53mp7GIvzekaY7sggg6ywy35Dk/
"""
import collections
import inspect
import json
import uuid
import warnings
from typing import Any, Callable, Dict, List, Mapping, Optional, Set, Tuple, Union
import kfp
from kfp.compiler._k8s_helper import sanitize_k8s_name
from kfp.components import _python_op
from kfp import dsl
from kfp.dsl import _for_loop
from kfp.dsl import _pipeline_param
from kfp.v2.compiler import compiler_utils
from kfp.dsl import component_spec as dsl_component_spec
from kfp.dsl import dsl_utils
from kfp.dsl import importer_node
from kfp.dsl import type_utils
from kfp.pipeline_spec import pipeline_spec_pb2
from google.protobuf import json_format
_GroupOrOp = Union[dsl.OpsGroup, dsl.BaseOp]
class Compiler(object):
"""Experimental DSL compiler that targets the PipelineSpec IR.
It compiles pipeline function into PipelineSpec json string.
PipelineSpec is the IR protobuf message that defines a pipeline:
https://github.com/kubeflow/pipelines/blob/237795539f7b85bac77435e2464367226ee19391/api/v2alpha1/pipeline_spec.proto#L8
In this initial implementation, we only support components authored through
Component yaml spec. And we don't support advanced features like conditions,
static and dynamic loops, etc.
Example:
How to use the compiler to construct pipeline_spec json:
@dsl.pipeline(
name='name',
description='description'
)
def my_pipeline(a: int = 1, b: str = "default value"):
...
kfp.v2.compiler.Compiler().compile(my_pipeline, 'path/to/pipeline.json')
"""
def _get_groups_for_ops(
self, root_group: dsl.OpsGroup) -> Dict[str, List[dsl.OpsGroup]]:
"""Helper function to get groups that contain the specified ops.
Each pipeline has a root group. Each group has a list of operators (leaf)
and groups.
This function traverse the tree and get all ancestor groups for all
operators.
Args:
root_group: The root node of a ops tree or subtree.
Returns:
A dict. Key is the operator's name. Value is a list of ancestor groups
including the op itself. The list of a given operator is sorted in a way
that the farthest group is the first and operator itself is the last.
"""
def _get_op_groups_helper(
current_groups: List[dsl.OpsGroup],
ops_to_groups: Dict[str, List[dsl.OpsGroup]]) -> None:
root_group = current_groups[-1]
for g in root_group.groups:
        # Add the recursive opsgroup to ops_to_groups
        # so that the I/O dependency can be propagated to the ancestor opsgroups.
if g.recursive_ref:
ops_to_groups[g.name] = [x.name for x in current_groups] + [g.name]
continue
current_groups.append(g)
_get_op_groups_helper(current_groups, ops_to_groups)
del current_groups[-1]
for op in root_group.ops:
ops_to_groups[op.name] = [x.name for x in current_groups] + [op.name]
ops_to_groups = {}
current_groups = [root_group]
_get_op_groups_helper(current_groups, ops_to_groups)
return ops_to_groups
#TODO: combine with the _get_groups_for_ops
def _get_groups_for_opsgroups(
self, root_group: dsl.OpsGroup) -> Dict[str, List[dsl.OpsGroup]]:
"""Helper function to get groups that contain the specified opsgroup.
Each pipeline has a root group. Each group has a list of operators (leaf)
and groups.
This function traverse the tree and get all ancestor groups for all
opsgroups.
Args:
root_group: The root node of a groups tree or subtree.
Returns:
A dict. Key is the opsgroup's name. Value is a list of ancestor groups
including the opsgroup itself. The list of a given opsgroup is sorted in a
way that the farthest group is the first and opsgroup itself is the last.
"""
def _get_opsgroup_groups_helper(
current_groups: dsl.OpsGroup,
opsgroups_to_groups: Dict[str, List[dsl.OpsGroup]]) -> None:
root_group = current_groups[-1]
for g in root_group.groups:
        # Add the recursive opsgroup to ops_to_groups
        # so that the I/O dependency can be propagated to the ancestor opsgroups.
if g.recursive_ref:
continue
opsgroups_to_groups[g.name] = [x.name for x in current_groups
] + [g.name]
current_groups.append(g)
_get_opsgroup_groups_helper(current_groups, opsgroups_to_groups)
del current_groups[-1]
opsgroups_to_groups = {}
current_groups = [root_group]
_get_opsgroup_groups_helper(current_groups, opsgroups_to_groups)
return opsgroups_to_groups
def _get_groups(self, root_group: dsl.OpsGroup) -> Dict[str, dsl.OpsGroup]:
"""Helper function to get all groups (not including ops) in a pipeline."""
def _get_groups_helper(group):
groups = {group.name: group}
for g in group.groups:
# Skip the recursive opsgroup because no templates
# need to be generated for the recursive opsgroups.
if not g.recursive_ref:
groups.update(_get_groups_helper(g))
return groups
return _get_groups_helper(root_group)
def _get_uncommon_ancestors(
self,
op_groups: Dict[str, List[dsl.OpsGroup]],
opsgroup_groups: Dict[str, List[dsl.OpsGroup]],
op1: dsl.BaseOp,
op2: dsl.BaseOp,
) -> Tuple[List[_GroupOrOp], List[_GroupOrOp]]:
"""Helper function to get unique ancestors between two ops.
For example, op1's ancestor groups are [root, G1, G2, G3, op1], op2's
ancestor groups are
[root, G1, G4, op2], then it returns a tuple ([G2, G3, op1], [G4, op2]).
"""
#TODO: extract a function for the following two code module
if op1.name in op_groups:
op1_groups = op_groups[op1.name]
elif op1.name in opsgroup_groups:
op1_groups = opsgroup_groups[op1.name]
else:
raise ValueError(op1.name + ' does not exist.')
if op2.name in op_groups:
op2_groups = op_groups[op2.name]
elif op2.name in opsgroup_groups:
op2_groups = opsgroup_groups[op2.name]
else:
raise ValueError(op2.name + ' does not exist.')
both_groups = [op1_groups, op2_groups]
common_groups_len = sum(
1 for x in zip(*both_groups) if x == (x[0],) * len(x))
group1 = op1_groups[common_groups_len:]
group2 = op2_groups[common_groups_len:]
return (group1, group2)
def _get_condition_params_for_ops(
self, root_group: dsl.OpsGroup) -> Dict[str, dsl.PipelineParam]:
"""Get parameters referenced in conditions of ops."""
conditions = collections.defaultdict(set)
def _get_condition_params_for_ops_helper(group, current_conditions_params):
new_current_conditions_params = current_conditions_params
if group.type == 'condition':
new_current_conditions_params = list(current_conditions_params)
if isinstance(group.condition.operand1, dsl.PipelineParam):
new_current_conditions_params.append(group.condition.operand1)
if isinstance(group.condition.operand2, dsl.PipelineParam):
new_current_conditions_params.append(group.condition.operand2)
for op in group.ops:
for param in new_current_conditions_params:
conditions[op.name].add(param)
for g in group.groups:
# If the subgroup is a recursive opsgroup, propagate the pipelineparams
# in the condition expression, similar to the ops.
if g.recursive_ref:
for param in new_current_conditions_params:
conditions[g.name].add(param)
else:
_get_condition_params_for_ops_helper(g, new_current_conditions_params)
_get_condition_params_for_ops_helper(root_group, [])
return conditions
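  # (Illustrative) the returned mapping has the shape
  #     {'my-op': {PipelineParam('flag'), ...}, 'recursive-group': {...}}
  # i.e. every op (and recursive opsgroup) nested under a dsl.Condition records
  # the pipeline params referenced by that condition.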
def _get_next_group_or_op(self, to_visit: List, already_visited: Set):
"""Get next group or op to visit."""
if len(to_visit) == 0:
return None
next = to_visit.pop(0)
while next in already_visited:
next = to_visit.pop(0)
already_visited.add(next)
return next
def _get_for_loop_ops(self, new_root) -> Dict[str, dsl.ParallelFor]:
to_visit = self._get_all_subgroups_and_ops(new_root)
op_name_to_op = {}
already_visited = set()
while len(to_visit):
next_op = self._get_next_group_or_op(to_visit, already_visited)
if next_op is None:
break
to_visit.extend(self._get_all_subgroups_and_ops(next_op))
if isinstance(next_op, dsl.ParallelFor):
op_name_to_op[next_op.name] = next_op
return op_name_to_op
def _get_all_subgroups_and_ops(self, group: dsl.OpsGroup):
"""Get all ops and groups contained within this group."""
subgroups = []
if hasattr(group, 'ops'):
subgroups.extend(group.ops)
if hasattr(group, 'groups'):
subgroups.extend(group.groups)
return subgroups
def _get_inputs_outputs(
self,
pipeline: dsl.Pipeline,
args: List[dsl.PipelineParam],
root_group: dsl.OpsGroup,
op_groups: Dict[str, List[dsl.OpsGroup]],
opsgroup_groups: Dict[str, List[dsl.OpsGroup]],
condition_params: Dict[str, dsl.PipelineParam],
op_name_to_for_loop_op: Dict[str, dsl.ParallelFor],
) -> Tuple[Dict[str, List[Tuple[dsl.PipelineParam, str]]], Dict[
str, List[Tuple[dsl.PipelineParam, str]]]]:
"""Get inputs and outputs of each group and op.
Args:
pipeline: The instantiated pipeline object.
args: The list of pipeline function arguments as PipelineParam.
root_group: The root OpsGroup.
op_groups: The dict of op name to parent groups.
opsgroup_groups: The dict of opsgroup name to parent groups.
condition_params: The dict of group name to pipeline params referenced in
the conditions in that group.
op_name_to_for_loop_op: The dict of op name to loop ops.
Returns:
A tuple (inputs, outputs).
inputs and outputs are dicts with key being the group/op names and values
being list of tuples (param, producing_op_name). producing_op_name is the
name of the op that produces the param. If the param is a pipeline param
(no producer op), then producing_op_name is None.
"""
inputs = collections.defaultdict(set)
outputs = collections.defaultdict(set)
# Fix possible missing type -- PipelineParam parsed from command line args
# doesn't contain the type information, as the `.param_type` is not included
# during PipelineParam serialization.
all_params = {param.pattern: param for param in args}
for op in pipeline.ops.values():
for param in op.inputs + list(op.outputs.values()) + list(
condition_params[op.name]):
if param.pattern not in all_params:
all_params[param.pattern] = param
else:
param.param_type = param.param_type or all_params[
param.pattern].param_type
all_params[param.pattern].param_type = param.param_type
for op in pipeline.ops.values():
# op's inputs and all params used in conditions for that op are both
# considered.
for param in op.inputs + list(condition_params[op.name]):
# if the value is already provided (immediate value), then no need to
# expose it as input for its parent groups.
if param.value:
continue
if param.op_name:
upstream_op = pipeline.ops[param.op_name]
upstream_groups, downstream_groups = (
self._get_uncommon_ancestors(op_groups, opsgroup_groups,
upstream_op, op))
for i, group_name in enumerate(downstream_groups):
if i == 0:
# If it is the first uncommon downstream group, then the input
# comes from the first uncommon upstream group.
inputs[group_name].add((param, upstream_groups[0]))
else:
# If not the first downstream group, then the input is passed down
# from its ancestor groups so the upstream group is None.
inputs[group_name].add((param, None))
for i, group_name in enumerate(upstream_groups):
if i == len(upstream_groups) - 1:
# If last upstream group, it is an operator and output comes from container.
outputs[group_name].add((param, None))
else:
# If not last upstream group, output value comes from one of its child.
outputs[group_name].add((param, upstream_groups[i + 1]))
else:
if not op.is_exit_handler:
for group_name in op_groups[op.name][::-1]:
# if group is for loop group and param is that loop's param, then the param
# is created by that for loop ops_group and it shouldn't be an input to
# any of its parent groups.
inputs[group_name].add((param, None))
if group_name in op_name_to_for_loop_op:
# for example:
# loop_group.loop_args.name = 'loop-item-param-99ca152e'
# param.name = 'loop-item-param-99ca152e--a'
loop_group = op_name_to_for_loop_op[group_name]
if loop_group.loop_args.name in param.name:
break
# Generate the input/output for recursive opsgroups
    # It propagates the recursive opsgroups' I/O to their ancestor opsgroups.
def _get_inputs_outputs_recursive_opsgroup(group: dsl.OpsGroup):
#TODO: refactor the following codes with the above
if group.recursive_ref:
params = [(param, False) for param in group.inputs]
params.extend([
(param, True) for param in list(condition_params[group.name])
])
for param, is_condition_param in params:
if param.value:
continue
if param.op_name:
upstream_op = pipeline.ops[param.op_name]
upstream_groups, downstream_groups = \
self._get_uncommon_ancestors(op_groups, opsgroup_groups, upstream_op, group)
for i, g in enumerate(downstream_groups):
if i == 0:
inputs[g].add((param, upstream_groups[0]))
# There is no need to pass the condition param as argument to the downstream ops.
#TODO: this might also apply to ops. add a TODO here and think about it.
elif i == len(downstream_groups) - 1 and is_condition_param:
continue
else:
inputs[g].add((param, None))
for i, g in enumerate(upstream_groups):
if i == len(upstream_groups) - 1:
outputs[g].add((param, None))
else:
outputs[g].add((param, upstream_groups[i + 1]))
elif not is_condition_param:
for g in op_groups[group.name]:
inputs[g].add((param, None))
for subgroup in group.groups:
_get_inputs_outputs_recursive_opsgroup(subgroup)
_get_inputs_outputs_recursive_opsgroup(root_group)
# Generate the input for SubGraph along with parallelfor
for subgraph in opsgroup_groups:
if subgraph in op_name_to_for_loop_op:
        # The opsgroup list is sorted with the farthest group first and the opsgroup
        # itself last. Index -2 therefore gives the nearest enclosing opsgroup that is
        # not the opsgroup itself.
parent = opsgroup_groups[subgraph][-2]
if parent and parent.startswith('subgraph'):
# propagate only op's pipeline param from subgraph to parallelfor
loop_op = op_name_to_for_loop_op[subgraph]
pipeline_param = loop_op.loop_args.items_or_pipeline_param
if loop_op.items_is_pipeline_param and pipeline_param.op_name:
inputs[parent].add((pipeline_param, pipeline_param.op_name))
return inputs, outputs
def _get_dependencies(
self,
pipeline: dsl.Pipeline,
root_group: dsl.OpsGroup,
op_groups: Dict[str, dsl.OpsGroup],
opsgroups_groups: Dict[str, dsl.OpsGroup],
opsgroups: Dict[str, dsl.OpsGroup],
condition_params: Dict[str, dsl.PipelineParam],
) -> Dict[str, List[_GroupOrOp]]:
"""Get dependent groups and ops for all ops and groups.
Args:
pipeline: The instantiated pipeline object.
root_group: The root OpsGroup.
op_groups: The dict of op name to parent groups.
opsgroup_groups: The dict of opsgroup name to parent groups.
opsgroups: The dict of opsgroup name to opsgroup.
condition_params: The dict of group name to pipeline params referenced in
the conditions in that group.
Returns:
A dict. Key is group/op name, value is a list of dependent groups/ops.
The dependencies are calculated in the following way: if op2 depends on
op1, and their ancestors are [root, G1, G2, op1] and
[root, G1, G3, G4, op2], then G3 is dependent on G2. Basically dependency
      only exists between the first uncommon ancestors in their ancestor chains. Only
sibling groups/ops can have dependencies.
"""
dependencies = collections.defaultdict(set)
for op in pipeline.ops.values():
upstream_op_names = set()
for param in op.inputs + list(condition_params[op.name]):
if param.op_name:
upstream_op_names.add(param.op_name)
upstream_op_names |= set(op.dependent_names)
for upstream_op_name in upstream_op_names:
# the dependent op could be either a BaseOp or an opsgroup
if upstream_op_name in pipeline.ops:
upstream_op = pipeline.ops[upstream_op_name]
elif upstream_op_name in opsgroups:
upstream_op = opsgroups[upstream_op_name]
else:
raise ValueError('compiler cannot find the ' + upstream_op_name)
upstream_groups, downstream_groups = self._get_uncommon_ancestors(
op_groups, opsgroups_groups, upstream_op, op)
dependencies[downstream_groups[0]].add(upstream_groups[0])
# Generate dependencies based on the recursive opsgroups
#TODO: refactor the following codes with the above
def _get_dependency_opsgroup(
group: dsl.OpsGroup, dependencies: Dict[str, List[_GroupOrOp]]) -> None:
upstream_op_names = set(
[dependency.name for dependency in group.dependencies])
if group.recursive_ref:
for param in group.inputs + list(condition_params[group.name]):
if param.op_name:
upstream_op_names.add(param.op_name)
for op_name in upstream_op_names:
if op_name in pipeline.ops:
upstream_op = pipeline.ops[op_name]
elif op_name in opsgroups:
upstream_op = opsgroups[op_name]
else:
raise ValueError('compiler cannot find the ' + op_name)
upstream_groups, downstream_groups = (
self._get_uncommon_ancestors(op_groups, opsgroups_groups,
upstream_op, group))
dependencies[downstream_groups[0]].add(upstream_groups[0])
for subgroup in group.groups:
_get_dependency_opsgroup(subgroup, dependencies)
_get_dependency_opsgroup(root_group, dependencies)
return dependencies
def _resolve_value_or_reference(
self, value_or_reference: Union[str, dsl.PipelineParam]) -> str:
"""_resolve_value_or_reference resolves values and PipelineParams.
The values and PipelineParams could be task parameters or input parameters.
Args:
value_or_reference: value or reference to be resolved. It could be basic
python types or PipelineParam
"""
if isinstance(value_or_reference, dsl.PipelineParam):
input_name = dsl_component_spec.additional_input_name_for_pipelineparam(
value_or_reference)
if type_utils.is_parameter_type(value_or_reference.param_type):
return "inputs.parameters['{input_name}'].{value_field}".format(
input_name=input_name,
value_field=type_utils.get_parameter_type_field_name(
value_or_reference.param_type))
else:
raise NotImplementedError(
'Use artifact as dsl.Condition operand is not implemented yet.')
else:
if isinstance(value_or_reference, str):
return "'{}'".format(value_or_reference)
else:
return str(value_or_reference)
def _update_loop_specs(
self,
group: dsl.OpsGroup,
subgroup: _GroupOrOp,
group_component_spec: pipeline_spec_pb2.ComponentSpec,
subgroup_component_spec: pipeline_spec_pb2.ComponentSpec,
subgroup_task_spec: pipeline_spec_pb2.PipelineTaskSpec,
) -> None:
"""Update IR specs for loop.
Args:
group: The dsl.ParallelFor OpsGroup.
subgroup: One of the subgroups of dsl.ParallelFor.
group_component_spec: The component spec of the group to update in place.
subgroup_component_spec: The component spec of the subgroup to update.
subgroup_task_spec: The task spec of the subgroup to update.
"""
input_names = [
input_name for input_name in subgroup_task_spec.inputs.parameters
]
for input_name in input_names:
if subgroup_task_spec.inputs.parameters[input_name].HasField(
'component_input_parameter'):
loop_argument_name = subgroup_task_spec.inputs.parameters[
input_name].component_input_parameter
else:
producer_task_name = dsl_utils.remove_task_name_prefix(
subgroup_task_spec.inputs.parameters[input_name]
.task_output_parameter.producer_task)
producer_task_output_key = subgroup_task_spec.inputs.parameters[
input_name].task_output_parameter.output_parameter_key
loop_argument_name = '{}-{}'.format(producer_task_name,
producer_task_output_key)
# Loop arguments are from dynamic input: pipeline param or task output
if _for_loop.LoopArguments.name_is_withparams_loop_argument(
loop_argument_name):
arg_and_var_name = (
_for_loop.LoopArgumentVariable
.parse_loop_args_name_and_this_var_name(loop_argument_name))
# The current IR representation is insufficient for referencing a subvar
# which is a key in a list of dictionaries.
if arg_and_var_name:
raise NotImplementedError(
'Use subvar in dsl.ParallelFor with dynamic loop arguments is not '
'supported. Got subvar: {}'.format(arg_and_var_name[1]))
assert group.items_is_pipeline_param
pipeline_param = group.loop_args.items_or_pipeline_param
input_parameter_name = pipeline_param.full_name
# Correct loop argument input type in the parent component spec.
# The loop argument was categorized as an artifact due to its missing
# or non-primitive type annotation. But it should always be String
# typed, as its value is a serialized JSON string.
dsl_component_spec.pop_input_from_component_spec(
group_component_spec, input_parameter_name)
group_component_spec.input_definitions.parameters[
input_parameter_name].type = pipeline_spec_pb2.PrimitiveType.STRING
subgroup_task_spec.inputs.parameters[
input_parameter_name].component_input_parameter = (
input_parameter_name)
subgroup_task_spec.parameter_iterator.item_input = input_name
subgroup_task_spec.parameter_iterator.items.input_parameter = (
input_parameter_name)
      # Loop arguments come from static raw values known at compile time.
elif _for_loop.LoopArguments.name_is_withitems_loop_argument(
loop_argument_name):
# Prepare the raw values, either the whole list or the sliced list based
# on subvar_name.
subvar_name = None
if _for_loop.LoopArgumentVariable.name_is_loop_arguments_variable(
loop_argument_name):
subvar_name = _for_loop.LoopArgumentVariable.get_subvar_name(
loop_argument_name)
loop_args = group.loop_args.to_list_for_task_yaml()
if subvar_name:
raw_values = [loop_arg.get(subvar_name) for loop_arg in loop_args]
else:
raw_values = loop_args
# If the loop iterator component expects `str` or `int` typed items from
# the loop argument, make sure the item values are string values.
# This is because both integers and strings are assigned to protobuf
# [Value.string_value](https://github.com/protocolbuffers/protobuf/blob/133e5e75263be696c06599ab97614a1e1e6d9c66/src/google/protobuf/struct.proto#L70)
# Such a conversion is not needed for `float` type. which uses protobuf
# [Value.number_value](https://github.com/protocolbuffers/protobuf/blob/133e5e75263be696c06599ab97614a1e1e6d9c66/src/google/protobuf/struct.proto#L68)
if subgroup_component_spec.input_definitions.parameters[
input_name].type in [
pipeline_spec_pb2.PrimitiveType.STRING,
pipeline_spec_pb2.PrimitiveType.INT
]:
raw_values = [str(v) for v in raw_values]
if subgroup_component_spec.input_definitions.parameters[
input_name].type == pipeline_spec_pb2.PrimitiveType.INT:
warnings.warn(
'The loop iterator component is expecting an `int` value.'
'Consider changing the input type to either `str` or `float`.')
subgroup_task_spec.parameter_iterator.item_input = input_name
subgroup_task_spec.parameter_iterator.items.raw = json.dumps(raw_values)
else:
raise AssertionError(
'Unexpected loop argument: {}'.format(loop_argument_name))
# Clean up unused inputs from task spec and parent component spec.
dsl_component_spec.pop_input_from_task_spec(subgroup_task_spec,
input_name)
dsl_component_spec.pop_input_from_component_spec(group_component_spec,
loop_argument_name)
def _group_to_dag_spec(
self,
group: dsl.OpsGroup,
inputs: Dict[str, List[Tuple[dsl.PipelineParam, str]]],
outputs: Dict[str, List[Tuple[dsl.PipelineParam, str]]],
dependencies: Dict[str, List[_GroupOrOp]],
pipeline_spec: pipeline_spec_pb2.PipelineSpec,
deployment_config: pipeline_spec_pb2.PipelineDeploymentConfig,
rootgroup_name: str,
) -> None:
"""Generate IR spec given an OpsGroup.
Args:
group: The OpsGroup to generate spec for.
inputs: The inputs dictionary. The keys are group/op names and values are
lists of tuples (param, producing_op_name).
outputs: The outputs dictionary. The keys are group/op names and values
are lists of tuples (param, producing_op_name).
dependencies: The group dependencies dictionary. The keys are group/op
names, and the values are lists of dependent groups/ops.
pipeline_spec: The pipeline_spec to update in-place.
deployment_config: The deployment_config to hold all executors.
rootgroup_name: The name of the group root. Used to determine whether the
component spec for the current group should be the root dag.
"""
group_component_name = dsl_utils.sanitize_component_name(group.name)
if group.name == rootgroup_name:
group_component_spec = pipeline_spec.root
else:
group_component_spec = pipeline_spec.components[group_component_name]
# Generate task specs and component specs for the dag.
subgroups = group.groups + group.ops
for subgroup in subgroups:
subgroup_task_spec = getattr(subgroup, 'task_spec',
pipeline_spec_pb2.PipelineTaskSpec())
subgroup_component_spec = getattr(subgroup, 'component_spec',
pipeline_spec_pb2.ComponentSpec())
is_loop_subgroup = (isinstance(group, dsl.ParallelFor))
is_recursive_subgroup = (
isinstance(subgroup, dsl.OpsGroup) and subgroup.recursive_ref)
# Special handling for recursive subgroup: use the existing opsgroup name
if is_recursive_subgroup:
subgroup_key = subgroup.recursive_ref.name
else:
subgroup_key = subgroup.name
subgroup_task_spec.task_info.name = (
subgroup_task_spec.task_info.name or
dsl_utils.sanitize_task_name(subgroup_key))
# human_name exists for ops only, and is used to de-dupe component spec.
subgroup_component_name = (
subgroup_task_spec.component_ref.name or
dsl_utils.sanitize_component_name(
getattr(subgroup, 'human_name', subgroup_key)))
subgroup_task_spec.component_ref.name = subgroup_component_name
if isinstance(subgroup, dsl.OpsGroup) and subgroup.type == 'graph':
raise NotImplementedError(
'dsl.graph_component is not yet supported in KFP v2 compiler.')
if isinstance(subgroup, dsl.OpsGroup) and subgroup.type == 'exit_handler':
raise NotImplementedError(
'dsl.ExitHandler is not yet supported in KFP v2 compiler.')
importer_tasks = []
# Add importer node when applicable
for input_name in subgroup_task_spec.inputs.artifacts:
if not subgroup_task_spec.inputs.artifacts[
input_name].task_output_artifact.producer_task:
type_schema = type_utils.get_input_artifact_type_schema(
input_name, subgroup._metadata.inputs)
importer_name = importer_node.generate_importer_base_name(
dependent_task_name=subgroup_task_spec.task_info.name,
input_name=input_name)
importer_task_spec = importer_node.build_importer_task_spec(
importer_name)
importer_comp_spec = importer_node.build_importer_component_spec(
importer_base_name=importer_name,
input_name=input_name,
input_type_schema=type_schema)
importer_task_name = importer_task_spec.task_info.name
importer_comp_name = importer_task_spec.component_ref.name
importer_exec_label = importer_comp_spec.executor_label
group_component_spec.dag.tasks[importer_task_name].CopyFrom(
importer_task_spec)
pipeline_spec.components[importer_comp_name].CopyFrom(
importer_comp_spec)
subgroup_task_spec.inputs.artifacts[
input_name].task_output_artifact.producer_task = (
importer_task_name)
subgroup_task_spec.inputs.artifacts[
input_name].task_output_artifact.output_artifact_key = (
importer_node.OUTPUT_KEY)
# Retrieve the pre-built importer spec
importer_spec = subgroup.importer_specs[input_name]
deployment_config.executors[importer_exec_label].importer.CopyFrom(
importer_spec)
importer_tasks.append(importer_task_name)
subgroup_inputs = inputs.get(subgroup.name, [])
subgroup_params = [param for param, _ in subgroup_inputs]
tasks_in_current_dag = [
dsl_utils.sanitize_task_name(subgroup.name) for subgroup in subgroups
] + importer_tasks
input_parameters_in_current_dag = [
input_name
for input_name in group_component_spec.input_definitions.parameters
]
input_artifacts_in_current_dag = [
input_name
for input_name in group_component_spec.input_definitions.artifacts
]
is_parent_component_root = group_component_spec == pipeline_spec.root
# Additional spec modifications for dsl.ParallelFor's subgroups.
if is_loop_subgroup:
self._update_loop_specs(group, subgroup, group_component_spec,
subgroup_component_spec, subgroup_task_spec)
elif isinstance(subgroup, dsl.ContainerOp):
dsl_component_spec.update_task_inputs_spec(
subgroup_task_spec,
group_component_spec.input_definitions,
subgroup_params,
tasks_in_current_dag,
input_parameters_in_current_dag,
input_artifacts_in_current_dag,
)
if isinstance(subgroup, dsl.OpsGroup) and subgroup.type == 'condition':
# "punch the hole", adding inputs needed by its subgroup or tasks.
dsl_component_spec.build_component_inputs_spec(
component_spec=subgroup_component_spec,
pipeline_params=subgroup_params,
is_root_component=False,
)
dsl_component_spec.build_task_inputs_spec(
subgroup_task_spec,
subgroup_params,
tasks_in_current_dag,
is_parent_component_root,
)
condition = subgroup.condition
operand_values = []
for operand in [condition.operand1, condition.operand2]:
operand_values.append(self._resolve_value_or_reference(operand))
condition_string = '{} {} {}'.format(operand_values[0],
condition.operator,
operand_values[1])
subgroup_task_spec.trigger_policy.CopyFrom(
pipeline_spec_pb2.PipelineTaskSpec.TriggerPolicy(
condition=condition_string))
# Generate dependencies section for this task.
if dependencies.get(subgroup.name, None):
group_dependencies = list(dependencies[subgroup.name])
group_dependencies.sort()
subgroup_task_spec.dependent_tasks.extend(
[dsl_utils.sanitize_task_name(dep) for dep in group_dependencies])
if isinstance(subgroup, dsl.ParallelFor):
if subgroup.parallelism is not None:
warnings.warn(
'Setting parallelism in ParallelFor is not supported yet.'
'The setting is ignored.')
# Remove loop arguments related inputs from parent group component spec.
input_names = [param.full_name for param, _ in inputs[subgroup.name]]
for input_name in input_names:
if _for_loop.LoopArguments.name_is_loop_argument(input_name):
dsl_component_spec.pop_input_from_component_spec(
group_component_spec, input_name)
if subgroup.items_is_pipeline_param:
# These loop args are a 'withParam' rather than 'withItems'.
# i.e., rather than a static list, they are either the output of
# another task or were input as global pipeline parameters.
pipeline_param = subgroup.loop_args.items_or_pipeline_param
input_parameter_name = pipeline_param.full_name
if pipeline_param.op_name:
subgroup_task_spec.inputs.parameters[
input_parameter_name].task_output_parameter.producer_task = (
dsl_utils.sanitize_task_name(pipeline_param.op_name))
subgroup_task_spec.inputs.parameters[
input_parameter_name].task_output_parameter.output_parameter_key = (
pipeline_param.name)
else:
subgroup_task_spec.inputs.parameters[
input_parameter_name].component_input_parameter = (
input_parameter_name)
if pipeline_param.op_name is None:
# Input parameter is from pipeline func rather than component output.
# Correct loop argument input type in the parent component spec.
# The loop argument was categorized as an artifact due to its missing
# or non-primitive type annotation. But it should always be String
# typed, as its value is a serialized JSON string.
dsl_component_spec.pop_input_from_component_spec(
group_component_spec, input_parameter_name)
group_component_spec.input_definitions.parameters[
input_parameter_name].type = pipeline_spec_pb2.PrimitiveType.STRING
# Add component spec if not exists
if subgroup_component_name not in pipeline_spec.components:
pipeline_spec.components[subgroup_component_name].CopyFrom(
subgroup_component_spec)
# Add task spec
group_component_spec.dag.tasks[
subgroup_task_spec.task_info.name].CopyFrom(subgroup_task_spec)
# Add executor spec, if applicable.
container_spec = getattr(subgroup, 'container_spec', None)
if container_spec:
if compiler_utils.is_v2_component(subgroup):
compiler_utils.refactor_v2_container_spec(container_spec)
executor_label = subgroup_component_spec.executor_label
if executor_label not in deployment_config.executors:
deployment_config.executors[executor_label].container.CopyFrom(
container_spec)
# Add AIPlatformCustomJobSpec, if applicable.
custom_job_spec = getattr(subgroup, 'custom_job_spec', None)
if custom_job_spec:
executor_label = subgroup_component_spec.executor_label
if executor_label not in deployment_config.executors:
deployment_config.executors[
executor_label].custom_job.custom_job.update(custom_job_spec)
pipeline_spec.deployment_spec.update(
json_format.MessageToDict(deployment_config))
def _create_pipeline_spec(
self,
args: List[dsl.PipelineParam],
pipeline: dsl.Pipeline,
) -> pipeline_spec_pb2.PipelineSpec:
"""Creates the pipeline spec object.
Args:
args: The list of pipeline arguments.
pipeline: The instantiated pipeline object.
Returns:
A PipelineSpec proto representing the compiled pipeline.
Raises:
NotImplementedError if the argument is of unsupported types.
"""
compiler_utils.validate_pipeline_name(pipeline.name)
deployment_config = pipeline_spec_pb2.PipelineDeploymentConfig()
pipeline_spec = pipeline_spec_pb2.PipelineSpec()
pipeline_spec.pipeline_info.name = pipeline.name
pipeline_spec.sdk_version = 'kfp-{}'.format(kfp.__version__)
# Schema version 2.0.0 is required for kfp-pipeline-spec>0.1.3.1
pipeline_spec.schema_version = '2.0.0'
dsl_component_spec.build_component_inputs_spec(
component_spec=pipeline_spec.root,
pipeline_params=args,
is_root_component=True)
root_group = pipeline.groups[0]
opsgroups = self._get_groups(root_group)
op_name_to_parent_groups = self._get_groups_for_ops(root_group)
opgroup_name_to_parent_groups = self._get_groups_for_opsgroups(root_group)
condition_params = self._get_condition_params_for_ops(root_group)
op_name_to_for_loop_op = self._get_for_loop_ops(root_group)
inputs, outputs = self._get_inputs_outputs(
pipeline,
args,
root_group,
op_name_to_parent_groups,
opgroup_name_to_parent_groups,
condition_params,
op_name_to_for_loop_op,
)
dependencies = self._get_dependencies(
pipeline,
root_group,
op_name_to_parent_groups,
opgroup_name_to_parent_groups,
opsgroups,
condition_params,
)
for opsgroup_name in opsgroups.keys():
self._group_to_dag_spec(
opsgroups[opsgroup_name],
inputs,
outputs,
dependencies,
pipeline_spec,
deployment_config,
root_group.name,
)
return pipeline_spec
# TODO: Sanitizing beforehand, so that we don't need to sanitize here.
def _sanitize_and_inject_artifact(self, pipeline: dsl.Pipeline) -> None:
"""Sanitize operator/param names and inject pipeline artifact location. """
# Sanitize operator names and param names
sanitized_ops = {}
for op in pipeline.ops.values():
sanitized_name = sanitize_k8s_name(op.name)
op.name = sanitized_name
for param in op.outputs.values():
param.name = sanitize_k8s_name(param.name, True)
if param.op_name:
param.op_name = sanitize_k8s_name(param.op_name)
if op.output is not None and not isinstance(
op.output, dsl._container_op._MultipleOutputsError):
op.output.name = sanitize_k8s_name(op.output.name, True)
op.output.op_name = sanitize_k8s_name(op.output.op_name)
if op.dependent_names:
op.dependent_names = [
sanitize_k8s_name(name) for name in op.dependent_names
]
if isinstance(op, dsl.ContainerOp) and op.file_outputs is not None:
sanitized_file_outputs = {}
for key in op.file_outputs.keys():
sanitized_file_outputs[sanitize_k8s_name(key,
True)] = op.file_outputs[key]
op.file_outputs = sanitized_file_outputs
elif isinstance(op, dsl.ResourceOp) and op.attribute_outputs is not None:
sanitized_attribute_outputs = {}
for key in op.attribute_outputs.keys():
sanitized_attribute_outputs[sanitize_k8s_name(key, True)] = \
op.attribute_outputs[key]
op.attribute_outputs = sanitized_attribute_outputs
if isinstance(op, dsl.ContainerOp):
if op.input_artifact_paths:
op.input_artifact_paths = {
sanitize_k8s_name(key, True): value
for key, value in op.input_artifact_paths.items()
}
if op.artifact_arguments:
op.artifact_arguments = {
sanitize_k8s_name(key, True): value
for key, value in op.artifact_arguments.items()
}
sanitized_ops[sanitized_name] = op
pipeline.ops = sanitized_ops
# The name of this method is used to check if compiling for v2.
# See `is_compiling_for_v2` in `kfp/dsl/_component_bridge.py`
def _create_pipeline_v2(
self,
pipeline_func: Callable[..., Any],
pipeline_root: Optional[str] = None,
pipeline_name: Optional[str] = None,
pipeline_parameters_override: Optional[Mapping[str, Any]] = None,
) -> pipeline_spec_pb2.PipelineJob:
"""Creates a pipeline instance and constructs the pipeline spec from it.
Args:
pipeline_func: Pipeline function with @dsl.pipeline decorator.
pipeline_root: The root of the pipeline outputs. Optional.
pipeline_name: The name of the pipeline. Optional.
pipeline_parameters_override: The mapping from parameter names to values.
Optional.
Returns:
A PipelineJob proto representing the compiled pipeline.
"""
# Create the arg list with no default values and call pipeline function.
# Assign type information to the PipelineParam
pipeline_meta = _python_op._extract_component_interface(pipeline_func)
pipeline_name = pipeline_name or pipeline_meta.name
pipeline_root = pipeline_root or getattr(pipeline_func, 'output_directory',
None)
if not pipeline_root:
warnings.warn('pipeline_root is None or empty. A valid pipeline_root '
'must be provided at job submission.')
args_list = []
signature = inspect.signature(pipeline_func)
for arg_name in signature.parameters:
arg_type = None
for pipeline_input in pipeline_meta.inputs or []:
if arg_name == pipeline_input.name:
arg_type = pipeline_input.type
break
args_list.append(
dsl.PipelineParam(
sanitize_k8s_name(arg_name, True), param_type=arg_type))
with dsl.Pipeline(pipeline_name) as dsl_pipeline:
pipeline_func(*args_list)
self._sanitize_and_inject_artifact(dsl_pipeline)
# Fill in the default values.
args_list_with_defaults = []
if pipeline_meta.inputs:
args_list_with_defaults = [
dsl.PipelineParam(
sanitize_k8s_name(input_spec.name, True),
param_type=input_spec.type,
value=input_spec.default) for input_spec in pipeline_meta.inputs
]
# Making the pipeline group name unique to prevent name clashes with templates
pipeline_group = dsl_pipeline.groups[0]
temp_pipeline_group_name = uuid.uuid4().hex
pipeline_group.name = temp_pipeline_group_name
pipeline_spec = self._create_pipeline_spec(
args_list_with_defaults,
dsl_pipeline,
)
pipeline_parameters = {
param.name: param for param in args_list_with_defaults
}
# Update pipeline parameters override if there were any.
pipeline_parameters_override = pipeline_parameters_override or {}
for k, v in pipeline_parameters_override.items():
if k not in pipeline_parameters:
raise ValueError('Pipeline parameter {} does not match any known '
'pipeline argument.'.format(k))
pipeline_parameters[k].value = v
runtime_config = compiler_utils.build_runtime_config_spec(
output_directory=pipeline_root, pipeline_parameters=pipeline_parameters)
pipeline_job = pipeline_spec_pb2.PipelineJob(runtime_config=runtime_config)
pipeline_job.pipeline_spec.update(json_format.MessageToDict(pipeline_spec))
return pipeline_job
def compile(self,
pipeline_func: Callable[..., Any],
output_path: str,
pipeline_root: Optional[str] = None,
pipeline_name: Optional[str] = None,
pipeline_parameters: Optional[Mapping[str, Any]] = None,
type_check: bool = True) -> None:
"""Compile the given pipeline function into pipeline job json.
Args:
pipeline_func: Pipeline function with @dsl.pipeline decorator.
output_path: The output pipeline job .json file path. for example,
"~/pipeline_job.json"
pipeline_root: The root of the pipeline outputs. Optional. The
pipeline_root value can be specified either from this `compile()` method
or through the `@dsl.pipeline` decorator. If it's specified in both
places, the value provided here prevails.
pipeline_name: The name of the pipeline. Optional.
pipeline_parameters: The mapping from parameter names to values. Optional.
type_check: Whether to enable the type check or not, default: True.
"""
type_check_old_value = kfp.TYPE_CHECK
try:
kfp.TYPE_CHECK = type_check
pipeline_job = self._create_pipeline_v2(
pipeline_func=pipeline_func,
pipeline_root=pipeline_root,
pipeline_name=pipeline_name,
pipeline_parameters_override=pipeline_parameters)
self._write_pipeline(pipeline_job, output_path)
finally:
kfp.TYPE_CHECK = type_check_old_value
def _write_pipeline(self, pipeline_job: pipeline_spec_pb2.PipelineJob,
output_path: str) -> None:
"""Dump pipeline spec into json file.
Args:
pipeline_job: IR pipeline job spec.
      output_path: The file path to be written.
Raises:
      ValueError: if the specified output path doesn't end with the expected
        ".json" extension.
"""
json_text = json_format.MessageToJson(pipeline_job)
if output_path.endswith('.json'):
with open(output_path, 'w') as json_file:
json_file.write(json_text)
else:
raise ValueError(
          'The output path {} should end with ".json".'.format(output_path))
|
the-stack_106_24169 | #!/usr/bin/env python3
# Copyright (c) 2014-2017 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
            raise ValueError('Invalid onion %s' % addr)
return pchOnionCat + vchAddr
elif '.' in addr: # IPv4
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'): # IPv4-in-little-endian
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1: # ipv6, no port
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
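# Illustrative, uncalled helper (not in the original script) showing the
# expected parse_spec() output for a couple of hypothetical seed entries.
def _example_parse_spec():
    # IPv4 with an explicit port: 16-byte IPv6-mapped address plus that port.
    assert parse_spec('1.2.3.4:22556', 17777) == (pchIPv4 + bytearray([1, 2, 3, 4]), 22556)
    # Bare IPv6 with no port falls back to the default port.
    host, port = parse_spec('2001:db8::1', 17777)
    assert len(host) == 16 and port == 17777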
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('#define BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the bitcoin network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_main', 17777)
g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
|
the-stack_106_24170 | """
Test selectors level 2.
```
*
:first-child
E > F
E + F
[foo]
[foo='bar']
[foo~='bar']
[foo|='en']
:hover
:focus
:lang(en)
::pseudo-element (not implemented)
@at-rule (not implemented)
/* comments */
```
We will currently fail on pseudo-elements `::pseudo-element` as they are not real elements.
At the time of CSS2, they were known as `:pseudo-element`. Soup Sieve will raise an error about
an unknown pseudo-class when single `:` is used.
We currently fail on at-rules `@at-rule` as they are not applicable in the Soup Sieve environment.
"""
from __future__ import unicode_literals
from . import util
import soupsieve as sv
class TestLevel2(util.TestCase):
"""Test level 2 selectors."""
def test_direct_child(self):
"""Test direct child."""
markup = """
<div>
<p id="0">Some text <span id="1"> in a paragraph</span>.</p>
<a id="2" href="http://google.com">Link</a>
<span id="3">Direct child</span>
<pre>
<span id="4">Child 1</span>
<span id="5">Child 2</span>
<span id="6">Child 3</span>
</pre>
</div>
"""
# Spaces
self.assert_selector(
markup,
"div > span",
["3"],
flags=util.HTML5
)
# No spaces
self.assert_selector(
markup,
"div>span",
["3"],
flags=util.HTML5
)
def test_direct_sibling(self):
"""Test direct sibling."""
markup = """
<div>
<p id="0">Some text <span id="1"> in a paragraph</span>.</p>
<a id="2" href="http://google.com">Link</a>
<span id="3">Direct child</span>
<pre>
<span id="4">Child 1</span>
<span id="5">Child 2</span>
<span id="6">Child 3</span>
</pre>
</div>
"""
# Spaces
self.assert_selector(
markup,
"span + span",
["5", "6"],
flags=util.HTML5
)
# No spaces
self.assert_selector(
markup,
"span+span",
["5", "6"],
flags=util.HTML5
)
# Complex
self.assert_selector(
markup,
"span#\\34 + span#\\35",
["5"],
flags=util.HTML5
)
def test_wild_tag(self):
"""Test wild tag."""
self.assert_selector(
"""
<div id="div">
<p id="0">Some text <span id="1"> in a paragraph</span>.</p>
<a id="2" href="http://google.com">Link</a>
<span id="3">Direct child</span>
<pre id="pre">
<span id="4">Child 1</span>
<span id="5">Child 2</span>
<span id="6">Child 3</span>
</pre>
</div>
""",
"body *",
["0", "1", "2", "3", "4", "5", "6", "div", "pre"],
flags=util.HTML5
)
def test_attribute(self):
"""Test attribute."""
markup = """
<div id="div">
<p id="0">Some text <span id="1"> in a paragraph</span>.</p>
<a id="2" href="http://google.com">Link</a>
<span id="3">Direct child</span>
<pre id="pre">
<span id="4">Child 1</span>
<span id="5">Child 2</span>
<span id="6">Child 3</span>
</pre>
</div>
"""
self.assert_selector(
markup,
"[href]",
["2"],
flags=util.HTML5
)
# With spaces
self.assert_selector(
markup,
"[ href ]",
["2"],
flags=util.HTML5
)
def test_multi_attribute(self):
"""Test multiple attribute."""
self.assert_selector(
"""
<div id="div">
<p id="0">Some text <span id="1"> in a paragraph</span>.</p>
<a id="2" href="http://google.com">Link</a>
<span id="3">Direct child</span>
<pre id="pre">
<span id="4" class="test">Child 1</span>
<span id="5" class="test" data-test="test">Child 2</span>
<span id="6">Child 3</span>
<span id="6">Child 3</span>
</pre>
</div>
""",
"span[id].test[data-test=test]",
["5"],
flags=util.HTML5
)
def test_attribute_equal(self):
"""Test attribute with value that equals specified value."""
markup = """
<div id="div">
<p id="0">Some text <span id="1"> in a paragraph</span>.</p>
<a id="2" href="http://google.com">Link</a>
<span id="3">Direct child</span>
<pre id="pre">
<span id="4">Child 1</span>
<span id="5">Child 2</span>
<span id="6">Child 3</span>
</pre>
</div>
"""
# No quotes
self.assert_selector(
markup,
'[id=\\35]',
["5"],
flags=util.HTML5
)
# Single quoted
self.assert_selector(
markup,
"[id='5']",
["5"],
flags=util.HTML5
)
# Double quoted
self.assert_selector(
markup,
'[id="5"]',
["5"],
flags=util.HTML5
)
# With spaces
self.assert_selector(
markup,
'[ id = "5" ]',
["5"],
flags=util.HTML5
)
self.assert_selector(
markup,
'[ID="5"]',
["5"],
flags=util.HTML5
)
self.assert_selector(
markup,
'[ id = "5" ]',
["5"],
flags=util.HTML
)
self.assert_selector(
markup,
'[ID="5"]',
["5"],
flags=util.HTML
)
self.assert_selector(
'<span bad="5"></span>',
'[ id = "5" ]',
[],
flags=util.HTML
)
def test_attribute_type(self):
"""Type is treated as case insensitive in HTML."""
markup = """
<html>
<body>
<div id="div">
<p type="TEST" id="0">Some text <span id="1"> in a paragraph</span>.</p>
<a type="test" id="2" href="http://google.com">Link</a>
<span id="3">Direct child</span>
<pre id="pre">
<span id="4">Child 1</span>
<span id="5">Child 2</span>
<span id="6">Child 3</span>
</pre>
</div>
</body>
</html>
"""
self.assert_selector(
markup,
'[type="test"]',
["0", '2'],
flags=util.HTML5
)
self.assert_selector(
markup,
'[type="test"]',
['2'],
flags=util.XML
)
def test_attribute_start_dash(self):
"""Test attribute whose dash separated value starts with the specified value."""
self.assert_selector(
"""
<div id="div">
<p id="0" lang="en-us">Some text <span id="1"> in a paragraph</span>.</p>
<a id="2" href="http://google.com">Link</a>
<span id="3">Direct child</span>
<pre id="pre">
<span id="4">Child 1</span>
<span id="5">Child 2</span>
<span id="6">Child 3</span>
</pre>
</div>
""",
"[lang|=en]",
["0"],
flags=util.HTML5
)
def test_attribute_contains_space(self):
"""Test attribute whose space separated list contains the specified value."""
markup = """
<div id="div">
<p id="0" class="test1 test2 test3">Some text <span id="1"> in a paragraph</span>.</p>
<a id="2" href="http://google.com">Link</a>
<span id="3">Direct child</span>
<pre id="pre" class="test-a test-b">
<span id="4">Child 1</span>
<span id="5">Child 2</span>
<span id="6">Child 3</span>
</pre>
</div>
"""
# Middle of list
self.assert_selector(
markup,
"[class~=test2]",
["0"],
flags=util.HTML5
)
# Shouldn't match anything
self.assert_selector(
markup,
'[class~="test1 test2"]',
[],
flags=util.HTML5
)
self.assert_selector(
markup,
'[class~=""]',
[],
flags=util.HTML5
)
self.assert_selector(
markup,
'[class~="test1\\ test2"]',
[],
flags=util.HTML5
)
# Start of list
self.assert_selector(
markup,
"[class~=test-a]",
["pre"],
flags=util.HTML5
)
# End of list
self.assert_selector(
markup,
"[class~=test-b]",
["pre"],
flags=util.HTML5
)
def test_first_child(self):
"""Test first child."""
self.assert_selector(
"""
<div id="div">
<p id="0">Some text <span id="1"> in a paragraph</span>.</p>
<a id="2" href="http://google.com">Link</a>
<span id="3">Direct child</span>
<pre id="pre">
<span id="4">Child 1</span>
<span id="5">Child 2</span>
<span id="6">Child 3</span>
</pre>
</div>
""",
"span:first-child",
["1", "4"],
flags=util.HTML5
)
def test_hover(self):
"""Test hover."""
markup = """
<div>
<p>Some text <span id="1" class="foo:bar:foobar"> in a paragraph</span>.
<a id="2" class="bar" href="http://google.com">Link</a>
<a id="3">Placeholder text.</a>
</p>
</div>
"""
self.assert_selector(
markup,
"a:hover",
[],
flags=util.HTML5
)
def test_focus(self):
"""Test focus."""
markup = """
<form action="#">
<fieldset id='a' disabled>
<legend>
Simple fieldset <input type="radio" id="1" checked>
<fieldset id='b' disabled>
<legend>Simple fieldset <input type="radio" id="2" checked></legend>
<input type="radio" id="3" checked>
<label for="radio">radio</label>
</fieldset>
</legend>
<fieldset id='c' disabled>
<legend>Simple fieldset <input type="radio" id="4" checked></legend>
<input type="radio" id="5" checked>
<label for="radio">radio</label>
</fieldset>
<input type="radio" id="6" checked>
<label for="radio">radio</label>
</fieldset>
<optgroup id="opt-enable">
<option id="7" disabled>option</option>
</optgroup>
<optgroup id="8" disabled>
<option id="9">option</option>
</optgroup>
<a href="" id="link">text</a>
</form>
"""
self.assert_selector(
markup,
"input:focus",
[],
flags=util.HTML5
)
self.assert_selector(
markup,
"input:not(:focus)",
["1", "2", "3", "4", "5", "6"],
flags=util.HTML5
)
def test_lang(self):
"""Test language."""
markup = """
<div lang="de-DE">
<p id="1"></p>
</div>
<div lang="de-DE-1996">
<p id="2"></p>
</div>
<div lang="de-Latn-DE">
<p id="3"></p>
</div>
<div lang="de-Latf-DE">
<p id="4"></p>
</div>
<div lang="de-Latn-DE-1996">
<p id="5"></p>
</div>
<p id="6" lang="de-DE"></p>
"""
self.assert_selector(
markup,
"p:lang(de)",
['1', '2', '3', '4', '5', '6'],
flags=util.HTML5
)
def test_pseudo_element(self):
"""Test pseudo element."""
with self.assertRaises(NotImplementedError):
sv.compile(':first-line')
with self.assertRaises(NotImplementedError):
sv.compile('::first-line')
def test_at_rule(self):
"""Test at-rule (not supported)."""
with self.assertRaises(NotImplementedError):
sv.compile('@page :left')
def test_comments(self):
"""Test comments."""
markup = """
<div>
<p id="0">Some text <span id="1"> in a paragraph</span>.</p>
<a id="2" href="http://google.com">Link</a>
<span id="3">Direct child</span>
<pre>
<span id="4">Child 1</span>
<span id="5">Child 2</span>
<span id="6">Child 3</span>
</pre>
</div>
"""
self.assert_selector(
markup,
"""
/* Start comment */
div
/* This still works as new lines and whitespace count as descendant combiner.
This comment won't be seen. */
span#\\33
/* End comment */
""",
['3'],
flags=util.HTML5
)
self.assert_selector(
markup,
"""
span:not(
/* Comments should basically work like they do in real CSS. */
span#\\33 /* Don't select id 3 */
)
""",
['1', '4', '5', '6'],
flags=util.HTML5
)
|
the-stack_106_24173 | from click import Group
from django.contrib.auth import authenticate # , login
from django.shortcuts import get_object_or_404, render
from rest_framework.response import Response
from rest_framework.viewsets import ModelViewSet, mixins
from django.contrib.auth.models import User
from rest_framework import generics, status, viewsets
from rest_framework.views import APIView
from . import models, serializers, permissions
from rest_framework.permissions import IsAdminUser
from django.contrib.auth.models import Group
class Permission1Class:
permission_classes = (IsAdminUser | permissions.IsUserPermission,)
class FoodViewSet(Permission1Class, ModelViewSet):
serializer_class = serializers.FoodSerializer
queryset = models.Food.objects.all()
class MeatViewSet(Permission1Class, ModelViewSet):
serializer_class = serializers.MeatSerializer
queryset = models.Meat.objects.all()
class CategoryListView(generics.ListCreateAPIView):
serializer_class = serializers.CategorySerializer
queryset = models.Category.objects.all()
permission_classes = [IsAdminUser | permissions.IsVendor | permissions.IsReadOnly]
class CategoryDetailView(generics.RetrieveUpdateDestroyAPIView):
serializer_class = serializers.CategorySerializer
queryset = models.Category.objects.all()
permission_classes = [permissions.IsVendorAndOwner | permissions.IsReadOnly]
class Permission2Class:
permission_classes = (permissions.IsAccountOwner,)
class CustomerViewSet(Permission2Class, ModelViewSet):
serializer_class = serializers.CustomerSerializer
queryset = models.Customer.objects.all()
class VendorViewSet(Permission2Class, ModelViewSet):
serializer_class = serializers.VendorSerializer
queryset = models.Vendor.objects.all()
class FoodImageViewSet(Permission1Class, ModelViewSet):
serializer_class = serializers.FoodImageSerializer
queryset = models.FoodImage.objects.all()
class MeatImageViewSet(Permission1Class, ModelViewSet):
serializer_class = serializers.MeatImageSerializer
queryset = models.MeatImage.objects.all()
class SignUpView(generics.CreateAPIView):
authentication_classes = ()
permission_classes = ()
serializer_class = serializers.UserSerializer
queryset = User.objects.all()
def get_object(self):
g = Group.objects.get(name="customers")
g.user_set.add(self.request.user)
return super().get_object()
class LoginView(APIView):
permission_classes = ()
def post(self, request):
print(request.user)
username = request.data.get("username")
password = request.data.get("password")
user = authenticate(username=username, password=password)
if user:
return Response({"token": user.auth_token.key})
else:
return Response(
{"error": "Wrong Credentials"}, status=status.HTTP_400_BAD_REQUEST
)
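# Illustrative request/response sketch (the endpoint path is hypothetical, not
# taken from this project's URL conf):
#   POST /login/  {"username": "alice", "password": "secret"}
#   -> 200 {"token": "<auth token>"} on success, 400 {"error": "Wrong Credentials"} otherwise.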
class OrderStatusCodeViewSet(ModelViewSet):
permission_classes = (IsAdminUser,)
serializer_class = serializers.OrderStatusCodeSerializer
queryset = models.OrderStatusCode.objects.all()
class CartItemViewSet(ModelViewSet):
serializer_class = serializers.CartItemSerializer
queryset = models.CartItem.objects.all()
def get_queryset(self):
qs = super().get_queryset()
if self.request.user.is_superuser:
pass
elif self.request.user.groups.filter(name="customers"):
qs = qs.filter(cart__user=self.request.user)
else:
qs = []
return qs
class CartViewSet(ModelViewSet):
serializer_class = serializers.CartSerializer
queryset = models.Cart.objects.all()
def get_queryset(self):
qs = super().get_queryset()
if not (self.request.user.is_superuser):
qs = qs.filter(user=self.request.user)
return qs
def perform_create(self, serializer):
try:
serializer.save(user=models.Customer.objects.get(id=self.request.user.id))
except models.Customer.DoesNotExist:
serializer.save()
|
the-stack_106_24174 | #!/usr/bin/env python3
# Copyright (c) 2010 ArtForz -- public domain half-a-node
# Copyright (c) 2012 Jeff Garzik
# Copyright (c) 2010-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Bitcoin P2P network half-a-node.
This python code was modified from ArtForz' public domain half-a-node, as
found in the mini-node branch of http://github.com/jgarzik/pynode.
NodeConn: an object which manages p2p connectivity to a bitcoin node
NodeConnCB: a base class that describes the interface for receiving
callbacks with network messages from a NodeConn
CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....:
data structures that should map to corresponding structures in
bitcoin/primitives
msg_block, msg_tx, msg_headers, etc.:
data structures that represent network messages
ser_*, deser_*: functions that handle serialization/deserialization
"""
import asyncore
from codecs import encode
from collections import defaultdict
import copy
import hashlib
from io import BytesIO
import logging
import random
import socket
import struct
import sys
import time
from threading import RLock, Thread
import gamecoin_scrypt
from test_framework.siphash import siphash256
from test_framework.util import hex_str_to_bytes, bytes_to_hex_str, wait_until
BIP0031_VERSION = 60000
MY_VERSION = 80014 # past bip-31 for ping/pong
MY_SUBVERSION = b"/python-mininode-tester:0.0.3/"
MY_RELAY = 1 # from version 70001 onwards, fRelay should be appended to version messages (BIP37)
MAX_INV_SZ = 50000
MAX_BLOCK_BASE_SIZE = 1000000
COIN = 100000000 # 1 btc in satoshis
NODE_NETWORK = (1 << 0)
NODE_GETUTXO = (1 << 1)
NODE_BLOOM = (1 << 2)
NODE_WITNESS = (1 << 3)
NODE_UNSUPPORTED_SERVICE_BIT_5 = (1 << 5)
NODE_UNSUPPORTED_SERVICE_BIT_7 = (1 << 7)
logger = logging.getLogger("TestFramework.mininode")
# Keep our own socket map for asyncore, so that we can track disconnects
# ourselves (to workaround an issue with closing an asyncore socket when
# using select)
mininode_socket_map = dict()
# One lock for synchronizing all data access between the networking thread (see
# NetworkThread below) and the thread running the test logic. For simplicity,
# NodeConn acquires this lock whenever delivering a message to a NodeConnCB,
# and whenever adding anything to the send buffer (in send_message()). This
# lock should be acquired in the thread running the test logic to synchronize
# access to any data shared with the NodeConnCB or NodeConn.
mininode_lock = RLock()
# Serialization/deserialization tools
def sha256(s):
return hashlib.new('sha256', s).digest()
def ripemd160(s):
return hashlib.new('ripemd160', s).digest()
def hash256(s):
return sha256(sha256(s))
def ser_compact_size(l):
r = b""
if l < 253:
r = struct.pack("B", l)
elif l < 0x10000:
r = struct.pack("<BH", 253, l)
elif l < 0x100000000:
r = struct.pack("<BI", 254, l)
else:
r = struct.pack("<BQ", 255, l)
return r
def deser_compact_size(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
return nit
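# Illustrative, uncalled helper (not in the original module): compact sizes
# are 1, 3, 5 or 9 bytes long depending on the value being encoded.
def _example_compact_size():
    assert ser_compact_size(252) == b'\xfc'                    # single byte
    assert ser_compact_size(253) == b'\xfd\xfd\x00'            # 0xfd + uint16
    assert ser_compact_size(2**16) == b'\xfe\x00\x00\x01\x00'  # 0xfe + uint32
    assert deser_compact_size(BytesIO(ser_compact_size(2**32))) == 2**32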
def deser_string(f):
nit = deser_compact_size(f)
return f.read(nit)
def ser_string(s):
return ser_compact_size(len(s)) + s
def deser_uint256(f):
r = 0
for i in range(8):
t = struct.unpack("<I", f.read(4))[0]
r += t << (i * 32)
return r
def ser_uint256(u):
rs = b""
for i in range(8):
rs += struct.pack("<I", u & 0xFFFFFFFF)
u >>= 32
return rs
def uint256_from_str(s):
r = 0
t = struct.unpack("<IIIIIIII", s[:32])
for i in range(8):
r += t[i] << (i * 32)
return r
def uint256_from_compact(c):
nbytes = (c >> 24) & 0xFF
v = (c & 0xFFFFFF) << (8 * (nbytes - 3))
return v
def deser_vector(f, c):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = c()
t.deserialize(f)
r.append(t)
return r
# ser_function_name: Allow for an alternate serialization function on the
# entries in the vector (we use this for serializing the vector of transactions
# for a witness block).
def ser_vector(l, ser_function_name=None):
r = ser_compact_size(len(l))
for i in l:
if ser_function_name:
r += getattr(i, ser_function_name)()
else:
r += i.serialize()
return r
def deser_uint256_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_uint256(f)
r.append(t)
return r
def ser_uint256_vector(l):
r = ser_compact_size(len(l))
for i in l:
r += ser_uint256(i)
return r
def deser_string_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_string(f)
r.append(t)
return r
def ser_string_vector(l):
r = ser_compact_size(len(l))
for sv in l:
r += ser_string(sv)
return r
def deser_int_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = struct.unpack("<i", f.read(4))[0]
r.append(t)
return r
def ser_int_vector(l):
r = ser_compact_size(len(l))
for i in l:
r += struct.pack("<i", i)
return r
# Deserialize from a hex string representation (eg from RPC)
def FromHex(obj, hex_string):
obj.deserialize(BytesIO(hex_str_to_bytes(hex_string)))
return obj
# Convert a binary-serializable object to hex (eg for submission via RPC)
def ToHex(obj):
return bytes_to_hex_str(obj.serialize())
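# Illustrative sketch (not in the original module): these helpers round-trip
# the serializable objects below to/from the hex strings used by the RPC, e.g.
#   tx = FromHex(CTransaction(), node.getrawtransaction(txid))  # txid/node hypothetical
#   node.sendrawtransaction(ToHex(tx))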
# Objects that map to bitcoind objects, which can be serialized/deserialized
class CAddress(object):
def __init__(self):
self.nServices = 1
self.pchReserved = b"\x00" * 10 + b"\xff" * 2
self.ip = "0.0.0.0"
self.port = 0
def deserialize(self, f):
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.pchReserved = f.read(12)
self.ip = socket.inet_ntoa(f.read(4))
self.port = struct.unpack(">H", f.read(2))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nServices)
r += self.pchReserved
r += socket.inet_aton(self.ip)
r += struct.pack(">H", self.port)
return r
def __repr__(self):
return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices,
self.ip, self.port)
MSG_WITNESS_FLAG = 1<<30
class CInv(object):
typemap = {
0: "Error",
1: "TX",
2: "Block",
1|MSG_WITNESS_FLAG: "WitnessTx",
2|MSG_WITNESS_FLAG : "WitnessBlock",
4: "CompactBlock"
}
def __init__(self, t=0, h=0):
self.type = t
self.hash = h
def deserialize(self, f):
self.type = struct.unpack("<i", f.read(4))[0]
self.hash = deser_uint256(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.type)
r += ser_uint256(self.hash)
return r
def __repr__(self):
return "CInv(type=%s hash=%064x)" \
% (self.typemap[self.type], self.hash)
class CBlockLocator(object):
def __init__(self):
self.nVersion = MY_VERSION
self.vHave = []
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vHave = deser_uint256_vector(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256_vector(self.vHave)
return r
def __repr__(self):
return "CBlockLocator(nVersion=%i vHave=%s)" \
% (self.nVersion, repr(self.vHave))
class COutPoint(object):
def __init__(self, hash=0, n=0):
self.hash = hash
self.n = n
def deserialize(self, f):
self.hash = deser_uint256(f)
self.n = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += ser_uint256(self.hash)
r += struct.pack("<I", self.n)
return r
def __repr__(self):
return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n)
class CTxIn(object):
def __init__(self, outpoint=None, scriptSig=b"", nSequence=0):
if outpoint is None:
self.prevout = COutPoint()
else:
self.prevout = outpoint
self.scriptSig = scriptSig
self.nSequence = nSequence
def deserialize(self, f):
self.prevout = COutPoint()
self.prevout.deserialize(f)
self.scriptSig = deser_string(f)
self.nSequence = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += self.prevout.serialize()
r += ser_string(self.scriptSig)
r += struct.pack("<I", self.nSequence)
return r
def __repr__(self):
return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \
% (repr(self.prevout), bytes_to_hex_str(self.scriptSig),
self.nSequence)
class CTxOut(object):
def __init__(self, nValue=0, scriptPubKey=b""):
self.nValue = nValue
self.scriptPubKey = scriptPubKey
def deserialize(self, f):
self.nValue = struct.unpack("<q", f.read(8))[0]
self.scriptPubKey = deser_string(f)
def serialize(self):
r = b""
r += struct.pack("<q", self.nValue)
r += ser_string(self.scriptPubKey)
return r
def __repr__(self):
return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" \
% (self.nValue // COIN, self.nValue % COIN,
bytes_to_hex_str(self.scriptPubKey))
class CScriptWitness(object):
def __init__(self):
# stack is a vector of strings
self.stack = []
def __repr__(self):
return "CScriptWitness(%s)" % \
(",".join([bytes_to_hex_str(x) for x in self.stack]))
def is_null(self):
if self.stack:
return False
return True
class CTxInWitness(object):
def __init__(self):
self.scriptWitness = CScriptWitness()
def deserialize(self, f):
self.scriptWitness.stack = deser_string_vector(f)
def serialize(self):
return ser_string_vector(self.scriptWitness.stack)
def __repr__(self):
return repr(self.scriptWitness)
def is_null(self):
return self.scriptWitness.is_null()
class CTxWitness(object):
def __init__(self):
self.vtxinwit = []
def deserialize(self, f):
for i in range(len(self.vtxinwit)):
self.vtxinwit[i].deserialize(f)
def serialize(self):
r = b""
# This is different than the usual vector serialization --
# we omit the length of the vector, which is required to be
# the same length as the transaction's vin vector.
for x in self.vtxinwit:
r += x.serialize()
return r
def __repr__(self):
return "CTxWitness(%s)" % \
(';'.join([repr(x) for x in self.vtxinwit]))
def is_null(self):
for x in self.vtxinwit:
if not x.is_null():
return False
return True
class CTransaction(object):
def __init__(self, tx=None):
if tx is None:
self.nVersion = 1
self.vin = []
self.vout = []
self.wit = CTxWitness()
self.nLockTime = 0
self.sha256 = None
self.hash = None
else:
self.nVersion = tx.nVersion
self.vin = copy.deepcopy(tx.vin)
self.vout = copy.deepcopy(tx.vout)
self.nLockTime = tx.nLockTime
self.sha256 = tx.sha256
self.hash = tx.hash
self.wit = copy.deepcopy(tx.wit)
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vin = deser_vector(f, CTxIn)
flags = 0
if len(self.vin) == 0:
flags = struct.unpack("<B", f.read(1))[0]
# Not sure why flags can't be zero, but this
# matches the implementation in bitcoind
if (flags != 0):
self.vin = deser_vector(f, CTxIn)
self.vout = deser_vector(f, CTxOut)
else:
self.vout = deser_vector(f, CTxOut)
if flags != 0:
self.wit.vtxinwit = [CTxInWitness() for i in range(len(self.vin))]
self.wit.deserialize(f)
self.nLockTime = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
def serialize_without_witness(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
r += struct.pack("<I", self.nLockTime)
return r
# Only serialize with witness when explicitly called for
def serialize_with_witness(self):
flags = 0
if not self.wit.is_null():
flags |= 1
r = b""
r += struct.pack("<i", self.nVersion)
if flags:
dummy = []
r += ser_vector(dummy)
r += struct.pack("<B", flags)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
if flags & 1:
if (len(self.wit.vtxinwit) != len(self.vin)):
# vtxinwit must have the same length as vin
self.wit.vtxinwit = self.wit.vtxinwit[:len(self.vin)]
for i in range(len(self.wit.vtxinwit), len(self.vin)):
self.wit.vtxinwit.append(CTxInWitness())
r += self.wit.serialize()
r += struct.pack("<I", self.nLockTime)
return r
# Regular serialization is without witness -- must explicitly
# call serialize_with_witness to include witness data.
def serialize(self):
return self.serialize_without_witness()
# Recalculate the txid (transaction hash without witness)
def rehash(self):
self.sha256 = None
self.calc_sha256()
# We will only cache the serialization without witness in
# self.sha256 and self.hash -- those are expected to be the txid.
def calc_sha256(self, with_witness=False):
if with_witness:
# Don't cache the result, just return it
return uint256_from_str(hash256(self.serialize_with_witness()))
if self.sha256 is None:
self.sha256 = uint256_from_str(hash256(self.serialize_without_witness()))
self.hash = encode(hash256(self.serialize())[::-1], 'hex_codec').decode('ascii')
def is_valid(self):
self.calc_sha256()
for tout in self.vout:
if tout.nValue < 0 or tout.nValue > 21000000 * COIN:
return False
return True
def __repr__(self):
return "CTransaction(nVersion=%i vin=%s vout=%s wit=%s nLockTime=%i)" \
% (self.nVersion, repr(self.vin), repr(self.vout), repr(self.wit), self.nLockTime)
class CBlockHeader(object):
def __init__(self, header=None):
if header is None:
self.set_null()
else:
self.nVersion = header.nVersion
self.hashPrevBlock = header.hashPrevBlock
self.hashMerkleRoot = header.hashMerkleRoot
self.nTime = header.nTime
self.nBits = header.nBits
self.nNonce = header.nNonce
self.sha256 = header.sha256
self.hash = header.hash
self.scrypt256 = header.scrypt256
self.calc_sha256()
def set_null(self):
self.nVersion = 1
self.hashPrevBlock = 0
self.hashMerkleRoot = 0
self.nTime = 0
self.nBits = 0
self.nNonce = 0
self.sha256 = None
self.hash = None
self.scrypt256 = None
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.hashPrevBlock = deser_uint256(f)
self.hashMerkleRoot = deser_uint256(f)
self.nTime = struct.unpack("<I", f.read(4))[0]
self.nBits = struct.unpack("<I", f.read(4))[0]
self.nNonce = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
self.scrypt256 = None
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
return r
def calc_sha256(self):
if self.sha256 is None:
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
self.sha256 = uint256_from_str(hash256(r))
self.hash = encode(hash256(r)[::-1], 'hex_codec').decode('ascii')
self.scrypt256 = uint256_from_str(gamecoin_scrypt.getPoWHash(r))
def rehash(self):
self.sha256 = None
self.scrypt256 = None
self.calc_sha256()
return self.sha256
def __repr__(self):
return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce)
class CBlock(CBlockHeader):
def __init__(self, header=None):
super(CBlock, self).__init__(header)
self.vtx = []
def deserialize(self, f):
super(CBlock, self).deserialize(f)
self.vtx = deser_vector(f, CTransaction)
def serialize(self, with_witness=False):
r = b""
r += super(CBlock, self).serialize()
if with_witness:
r += ser_vector(self.vtx, "serialize_with_witness")
else:
r += ser_vector(self.vtx)
return r
# Calculate the merkle root given a vector of transaction hashes
@classmethod
def get_merkle_root(cls, hashes):
while len(hashes) > 1:
newhashes = []
for i in range(0, len(hashes), 2):
i2 = min(i+1, len(hashes)-1)
newhashes.append(hash256(hashes[i] + hashes[i2]))
hashes = newhashes
return uint256_from_str(hashes[0])
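# Illustrative sketch (not in the original): with two leaves a and b the root
# is hash256(a + b); with three leaves the odd one is paired with itself,
# i.e. hash256(hash256(a + b) + hash256(c + c)). Typical call:
#   root = CBlock.get_merkle_root([ser_uint256(tx.sha256) for tx in block.vtx])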
def calc_merkle_root(self):
hashes = []
for tx in self.vtx:
tx.calc_sha256()
hashes.append(ser_uint256(tx.sha256))
return self.get_merkle_root(hashes)
def calc_witness_merkle_root(self):
# For witness root purposes, the hash of the
# coinbase, with witness, is defined to be 0...0
hashes = [ser_uint256(0)]
for tx in self.vtx[1:]:
# Calculate the hashes with witness data
hashes.append(ser_uint256(tx.calc_sha256(True)))
return self.get_merkle_root(hashes)
def is_valid(self):
self.calc_sha256()
target = uint256_from_compact(self.nBits)
if self.scrypt256 > target:
return False
for tx in self.vtx:
if not tx.is_valid():
return False
if self.calc_merkle_root() != self.hashMerkleRoot:
return False
return True
def solve(self):
self.rehash()
target = uint256_from_compact(self.nBits)
while self.scrypt256 > target:
self.nNonce += 1
self.rehash()
def __repr__(self):
return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx))
class CUnsignedAlert(object):
def __init__(self):
self.nVersion = 1
self.nRelayUntil = 0
self.nExpiration = 0
self.nID = 0
self.nCancel = 0
self.setCancel = []
self.nMinVer = 0
self.nMaxVer = 0
self.setSubVer = []
self.nPriority = 0
self.strComment = b""
self.strStatusBar = b""
self.strReserved = b""
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.nRelayUntil = struct.unpack("<q", f.read(8))[0]
self.nExpiration = struct.unpack("<q", f.read(8))[0]
self.nID = struct.unpack("<i", f.read(4))[0]
self.nCancel = struct.unpack("<i", f.read(4))[0]
self.setCancel = deser_int_vector(f)
self.nMinVer = struct.unpack("<i", f.read(4))[0]
self.nMaxVer = struct.unpack("<i", f.read(4))[0]
self.setSubVer = deser_string_vector(f)
self.nPriority = struct.unpack("<i", f.read(4))[0]
self.strComment = deser_string(f)
self.strStatusBar = deser_string(f)
self.strReserved = deser_string(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<q", self.nRelayUntil)
r += struct.pack("<q", self.nExpiration)
r += struct.pack("<i", self.nID)
r += struct.pack("<i", self.nCancel)
r += ser_int_vector(self.setCancel)
r += struct.pack("<i", self.nMinVer)
r += struct.pack("<i", self.nMaxVer)
r += ser_string_vector(self.setSubVer)
r += struct.pack("<i", self.nPriority)
r += ser_string(self.strComment)
r += ser_string(self.strStatusBar)
r += ser_string(self.strReserved)
return r
def __repr__(self):
return "CUnsignedAlert(nVersion %d, nRelayUntil %d, nExpiration %d, nID %d, nCancel %d, nMinVer %d, nMaxVer %d, nPriority %d, strComment %s, strStatusBar %s, strReserved %s)" \
% (self.nVersion, self.nRelayUntil, self.nExpiration, self.nID,
self.nCancel, self.nMinVer, self.nMaxVer, self.nPriority,
self.strComment, self.strStatusBar, self.strReserved)
class CAlert(object):
def __init__(self):
self.vchMsg = b""
self.vchSig = b""
def deserialize(self, f):
self.vchMsg = deser_string(f)
self.vchSig = deser_string(f)
def serialize(self):
r = b""
r += ser_string(self.vchMsg)
r += ser_string(self.vchSig)
return r
def __repr__(self):
return "CAlert(vchMsg.sz %d, vchSig.sz %d)" \
% (len(self.vchMsg), len(self.vchSig))
class PrefilledTransaction(object):
def __init__(self, index=0, tx = None):
self.index = index
self.tx = tx
def deserialize(self, f):
self.index = deser_compact_size(f)
self.tx = CTransaction()
self.tx.deserialize(f)
def serialize(self, with_witness=False):
r = b""
r += ser_compact_size(self.index)
if with_witness:
r += self.tx.serialize_with_witness()
else:
r += self.tx.serialize_without_witness()
return r
def serialize_with_witness(self):
return self.serialize(with_witness=True)
def __repr__(self):
return "PrefilledTransaction(index=%d, tx=%s)" % (self.index, repr(self.tx))
# This is what we send on the wire, in a cmpctblock message.
class P2PHeaderAndShortIDs(object):
def __init__(self):
self.header = CBlockHeader()
self.nonce = 0
self.shortids_length = 0
self.shortids = []
self.prefilled_txn_length = 0
self.prefilled_txn = []
def deserialize(self, f):
self.header.deserialize(f)
self.nonce = struct.unpack("<Q", f.read(8))[0]
self.shortids_length = deser_compact_size(f)
for i in range(self.shortids_length):
# shortids are defined to be 6 bytes in the spec, so append
# two zero bytes and read it in as an 8-byte number
self.shortids.append(struct.unpack("<Q", f.read(6) + b'\x00\x00')[0])
self.prefilled_txn = deser_vector(f, PrefilledTransaction)
self.prefilled_txn_length = len(self.prefilled_txn)
# When using version 2 compact blocks, we must serialize with_witness.
def serialize(self, with_witness=False):
r = b""
r += self.header.serialize()
r += struct.pack("<Q", self.nonce)
r += ser_compact_size(self.shortids_length)
for x in self.shortids:
# We only want the first 6 bytes
r += struct.pack("<Q", x)[0:6]
if with_witness:
r += ser_vector(self.prefilled_txn, "serialize_with_witness")
else:
r += ser_vector(self.prefilled_txn)
return r
def __repr__(self):
return "P2PHeaderAndShortIDs(header=%s, nonce=%d, shortids_length=%d, shortids=%s, prefilled_txn_length=%d, prefilledtxn=%s" % (repr(self.header), self.nonce, self.shortids_length, repr(self.shortids), self.prefilled_txn_length, repr(self.prefilled_txn))
# P2P version of the above that will use witness serialization (for compact
# block version 2)
class P2PHeaderAndShortWitnessIDs(P2PHeaderAndShortIDs):
def serialize(self):
return super(P2PHeaderAndShortWitnessIDs, self).serialize(with_witness=True)
# Calculate the BIP 152-compact blocks shortid for a given transaction hash
def calculate_shortid(k0, k1, tx_hash):
expected_shortid = siphash256(k0, k1, tx_hash)
expected_shortid &= 0x0000ffffffffffff
return expected_shortid
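# Illustrative sketch (not in the original): the shortid is the low 48 bits of
# SipHash-2-4 over the (w)txid, keyed from the sha256 of header||nonce, e.g.
#   k0, k1 = header_and_shortids.get_siphash_keys()
#   assert calculate_shortid(k0, k1, tx.sha256) < (1 << 48)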
# This version gets rid of the array lengths, and reinterprets the differential
# encoding into indices that can be used for lookup.
class HeaderAndShortIDs(object):
def __init__(self, p2pheaders_and_shortids = None):
self.header = CBlockHeader()
self.nonce = 0
self.shortids = []
self.prefilled_txn = []
self.use_witness = False
if p2pheaders_and_shortids != None:
self.header = p2pheaders_and_shortids.header
self.nonce = p2pheaders_and_shortids.nonce
self.shortids = p2pheaders_and_shortids.shortids
last_index = -1
for x in p2pheaders_and_shortids.prefilled_txn:
self.prefilled_txn.append(PrefilledTransaction(x.index + last_index + 1, x.tx))
last_index = self.prefilled_txn[-1].index
def to_p2p(self):
if self.use_witness:
ret = P2PHeaderAndShortWitnessIDs()
else:
ret = P2PHeaderAndShortIDs()
ret.header = self.header
ret.nonce = self.nonce
ret.shortids_length = len(self.shortids)
ret.shortids = self.shortids
ret.prefilled_txn_length = len(self.prefilled_txn)
ret.prefilled_txn = []
last_index = -1
for x in self.prefilled_txn:
ret.prefilled_txn.append(PrefilledTransaction(x.index - last_index - 1, x.tx))
last_index = x.index
return ret
def get_siphash_keys(self):
header_nonce = self.header.serialize()
header_nonce += struct.pack("<Q", self.nonce)
hash_header_nonce_as_str = sha256(header_nonce)
key0 = struct.unpack("<Q", hash_header_nonce_as_str[0:8])[0]
key1 = struct.unpack("<Q", hash_header_nonce_as_str[8:16])[0]
return [ key0, key1 ]
# Version 2 compact blocks use wtxid in shortids (rather than txid)
def initialize_from_block(self, block, nonce=0, prefill_list = [0], use_witness = False):
self.header = CBlockHeader(block)
self.nonce = nonce
self.prefilled_txn = [ PrefilledTransaction(i, block.vtx[i]) for i in prefill_list ]
self.shortids = []
self.use_witness = use_witness
[k0, k1] = self.get_siphash_keys()
for i in range(len(block.vtx)):
if i not in prefill_list:
tx_hash = block.vtx[i].sha256
if use_witness:
tx_hash = block.vtx[i].calc_sha256(with_witness=True)
self.shortids.append(calculate_shortid(k0, k1, tx_hash))
def __repr__(self):
return "HeaderAndShortIDs(header=%s, nonce=%d, shortids=%s, prefilledtxn=%s" % (repr(self.header), self.nonce, repr(self.shortids), repr(self.prefilled_txn))
class BlockTransactionsRequest(object):
def __init__(self, blockhash=0, indexes = None):
self.blockhash = blockhash
self.indexes = indexes if indexes != None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
indexes_length = deser_compact_size(f)
for i in range(indexes_length):
self.indexes.append(deser_compact_size(f))
def serialize(self):
r = b""
r += ser_uint256(self.blockhash)
r += ser_compact_size(len(self.indexes))
for x in self.indexes:
r += ser_compact_size(x)
return r
# helper to set the differentially encoded indexes from absolute ones
def from_absolute(self, absolute_indexes):
self.indexes = []
last_index = -1
for x in absolute_indexes:
self.indexes.append(x-last_index-1)
last_index = x
def to_absolute(self):
absolute_indexes = []
last_index = -1
for x in self.indexes:
absolute_indexes.append(x+last_index+1)
last_index = absolute_indexes[-1]
return absolute_indexes
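# Illustrative sketch (not in the original): absolute indexes [1, 3, 4] are
# stored differentially as [1, 1, 0] (each entry is the gap to the previous
# absolute index minus one), so the two helpers round-trip:
#   req = BlockTransactionsRequest(blockhash=0)
#   req.from_absolute([1, 3, 4])       # req.indexes == [1, 1, 0]
#   assert req.to_absolute() == [1, 3, 4]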
def __repr__(self):
return "BlockTransactionsRequest(hash=%064x indexes=%s)" % (self.blockhash, repr(self.indexes))
class BlockTransactions(object):
def __init__(self, blockhash=0, transactions = None):
self.blockhash = blockhash
self.transactions = transactions if transactions != None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
self.transactions = deser_vector(f, CTransaction)
def serialize(self, with_witness=False):
r = b""
r += ser_uint256(self.blockhash)
if with_witness:
r += ser_vector(self.transactions, "serialize_with_witness")
else:
r += ser_vector(self.transactions)
return r
def __repr__(self):
return "BlockTransactions(hash=%064x transactions=%s)" % (self.blockhash, repr(self.transactions))
# Objects that correspond to messages on the wire
class msg_version(object):
command = b"version"
def __init__(self):
self.nVersion = MY_VERSION
self.nServices = 1
self.nTime = int(time.time())
self.addrTo = CAddress()
self.addrFrom = CAddress()
self.nNonce = random.getrandbits(64)
self.strSubVer = MY_SUBVERSION
self.nStartingHeight = -1
self.nRelay = MY_RELAY
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
if self.nVersion == 10300:
self.nVersion = 300
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.nTime = struct.unpack("<q", f.read(8))[0]
self.addrTo = CAddress()
self.addrTo.deserialize(f)
if self.nVersion >= 106:
self.addrFrom = CAddress()
self.addrFrom.deserialize(f)
self.nNonce = struct.unpack("<Q", f.read(8))[0]
self.strSubVer = deser_string(f)
else:
self.addrFrom = None
self.nNonce = None
self.strSubVer = None
self.nStartingHeight = None
if self.nVersion >= 209:
self.nStartingHeight = struct.unpack("<i", f.read(4))[0]
else:
self.nStartingHeight = None
if self.nVersion >= 70001:
# Relay field is optional for version 70001 onwards
try:
self.nRelay = struct.unpack("<b", f.read(1))[0]
except:
self.nRelay = 0
else:
self.nRelay = 0
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<Q", self.nServices)
r += struct.pack("<q", self.nTime)
r += self.addrTo.serialize()
r += self.addrFrom.serialize()
r += struct.pack("<Q", self.nNonce)
r += ser_string(self.strSubVer)
r += struct.pack("<i", self.nStartingHeight)
r += struct.pack("<b", self.nRelay)
return r
def __repr__(self):
return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i nRelay=%i)' \
% (self.nVersion, self.nServices, time.ctime(self.nTime),
repr(self.addrTo), repr(self.addrFrom), self.nNonce,
self.strSubVer, self.nStartingHeight, self.nRelay)
class msg_verack(object):
command = b"verack"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_verack()"
class msg_addr(object):
command = b"addr"
def __init__(self):
self.addrs = []
def deserialize(self, f):
self.addrs = deser_vector(f, CAddress)
def serialize(self):
return ser_vector(self.addrs)
def __repr__(self):
return "msg_addr(addrs=%s)" % (repr(self.addrs))
class msg_alert(object):
command = b"alert"
def __init__(self):
self.alert = CAlert()
def deserialize(self, f):
self.alert = CAlert()
self.alert.deserialize(f)
def serialize(self):
r = b""
r += self.alert.serialize()
return r
def __repr__(self):
return "msg_alert(alert=%s)" % (repr(self.alert), )
class msg_inv(object):
command = b"inv"
def __init__(self, inv=None):
if inv is None:
self.inv = []
else:
self.inv = inv
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_inv(inv=%s)" % (repr(self.inv))
class msg_getdata(object):
command = b"getdata"
def __init__(self, inv=None):
self.inv = inv if inv != None else []
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_getdata(inv=%s)" % (repr(self.inv))
class msg_getblocks(object):
command = b"getblocks"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getblocks(locator=%s hashstop=%064x)" \
% (repr(self.locator), self.hashstop)
class msg_tx(object):
command = b"tx"
def __init__(self, tx=None):
# Use a fresh transaction per message to avoid a shared mutable default.
self.tx = tx if tx is not None else CTransaction()
def deserialize(self, f):
self.tx.deserialize(f)
def serialize(self):
return self.tx.serialize_without_witness()
def __repr__(self):
return "msg_tx(tx=%s)" % (repr(self.tx))
class msg_witness_tx(msg_tx):
def serialize(self):
return self.tx.serialize_with_witness()
class msg_block(object):
command = b"block"
def __init__(self, block=None):
if block is None:
self.block = CBlock()
else:
self.block = block
def deserialize(self, f):
self.block.deserialize(f)
def serialize(self):
return self.block.serialize()
def __repr__(self):
return "msg_block(block=%s)" % (repr(self.block))
# for cases where a user needs tighter control over what is sent over the wire
# note that the user must supply the name of the command, and the data
class msg_generic(object):
def __init__(self, command, data=None):
self.command = command
self.data = data
def serialize(self):
return self.data
def __repr__(self):
return "msg_generic()"
class msg_witness_block(msg_block):
def serialize(self):
r = self.block.serialize(with_witness=True)
return r
class msg_getaddr(object):
command = b"getaddr"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_getaddr()"
class msg_ping_prebip31(object):
command = b"ping"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_ping() (pre-bip31)"
class msg_ping(object):
command = b"ping"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_ping(nonce=%08x)" % self.nonce
class msg_pong(object):
command = b"pong"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_pong(nonce=%08x)" % self.nonce
class msg_mempool(object):
command = b"mempool"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_mempool()"
class msg_sendheaders(object):
command = b"sendheaders"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_sendheaders()"
# getheaders message has
# number of entries
# vector of hashes
# hash_stop (hash of last desired block header, 0 to get as many as possible)
class msg_getheaders(object):
command = b"getheaders"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getheaders(locator=%s, stop=%064x)" \
% (repr(self.locator), self.hashstop)
# headers message has
# <count> <vector of block headers>
class msg_headers(object):
command = b"headers"
def __init__(self):
self.headers = []
def deserialize(self, f):
# comment in bitcoind indicates these should be deserialized as blocks
blocks = deser_vector(f, CBlock)
for x in blocks:
self.headers.append(CBlockHeader(x))
def serialize(self):
blocks = [CBlock(x) for x in self.headers]
return ser_vector(blocks)
def __repr__(self):
return "msg_headers(headers=%s)" % repr(self.headers)
class msg_reject(object):
command = b"reject"
REJECT_MALFORMED = 1
def __init__(self):
self.message = b""
self.code = 0
self.reason = b""
self.data = 0
def deserialize(self, f):
self.message = deser_string(f)
self.code = struct.unpack("<B", f.read(1))[0]
self.reason = deser_string(f)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
self.data = deser_uint256(f)
def serialize(self):
r = ser_string(self.message)
r += struct.pack("<B", self.code)
r += ser_string(self.reason)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
r += ser_uint256(self.data)
return r
def __repr__(self):
return "msg_reject: %s %d %s [%064x]" \
% (self.message, self.code, self.reason, self.data)
class msg_feefilter(object):
command = b"feefilter"
def __init__(self, feerate=0):
self.feerate = feerate
def deserialize(self, f):
self.feerate = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.feerate)
return r
def __repr__(self):
return "msg_feefilter(feerate=%08x)" % self.feerate
class msg_sendcmpct(object):
command = b"sendcmpct"
def __init__(self):
self.announce = False
self.version = 1
def deserialize(self, f):
self.announce = struct.unpack("<?", f.read(1))[0]
self.version = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<?", self.announce)
r += struct.pack("<Q", self.version)
return r
def __repr__(self):
return "msg_sendcmpct(announce=%s, version=%lu)" % (self.announce, self.version)
class msg_cmpctblock(object):
command = b"cmpctblock"
def __init__(self, header_and_shortids = None):
self.header_and_shortids = header_and_shortids
def deserialize(self, f):
self.header_and_shortids = P2PHeaderAndShortIDs()
self.header_and_shortids.deserialize(f)
def serialize(self):
r = b""
r += self.header_and_shortids.serialize()
return r
def __repr__(self):
return "msg_cmpctblock(HeaderAndShortIDs=%s)" % repr(self.header_and_shortids)
class msg_getblocktxn(object):
command = b"getblocktxn"
def __init__(self):
self.block_txn_request = None
def deserialize(self, f):
self.block_txn_request = BlockTransactionsRequest()
self.block_txn_request.deserialize(f)
def serialize(self):
r = b""
r += self.block_txn_request.serialize()
return r
def __repr__(self):
return "msg_getblocktxn(block_txn_request=%s)" % (repr(self.block_txn_request))
class msg_blocktxn(object):
command = b"blocktxn"
def __init__(self):
self.block_transactions = BlockTransactions()
def deserialize(self, f):
self.block_transactions.deserialize(f)
def serialize(self):
r = b""
r += self.block_transactions.serialize()
return r
def __repr__(self):
return "msg_blocktxn(block_transactions=%s)" % (repr(self.block_transactions))
class msg_witness_blocktxn(msg_blocktxn):
def serialize(self):
r = b""
r += self.block_transactions.serialize(with_witness=True)
return r
class NodeConnCB(object):
"""Callback and helper functions for P2P connection to a bitcoind node.
Individual testcases should subclass this and override the on_* methods
if they want to alter message handling behaviour.
"""
def __init__(self):
# Track whether we have a P2P connection open to the node
self.connected = False
self.connection = None
# Track number of messages of each type received and the most recent
# message of each type
self.message_count = defaultdict(int)
self.last_message = {}
# A count of the number of ping messages we've sent to the node
self.ping_counter = 1
# deliver_sleep_time is helpful for debugging race conditions in p2p
# tests; it causes message delivery to sleep for the specified time
# before acquiring the global lock and delivering the next message.
self.deliver_sleep_time = None
# Remember the services our peer has advertised
self.peer_services = None
# Message receiving methods
def deliver(self, conn, message):
"""Receive message and dispatch message to appropriate callback.
We keep a count of how many of each message type has been received
and the most recent message of each type.
Optionally waits for deliver_sleep_time before dispatching message.
"""
deliver_sleep = self.get_deliver_sleep_time()
if deliver_sleep is not None:
time.sleep(deliver_sleep)
with mininode_lock:
try:
command = message.command.decode('ascii')
self.message_count[command] += 1
self.last_message[command] = message
getattr(self, 'on_' + command)(conn, message)
except:
print("ERROR delivering %s (%s)" % (repr(message),
sys.exc_info()[0]))
raise
def set_deliver_sleep_time(self, value):
with mininode_lock:
self.deliver_sleep_time = value
def get_deliver_sleep_time(self):
with mininode_lock:
return self.deliver_sleep_time
# Callback methods. Can be overridden by subclasses in individual test
# cases to provide custom message handling behaviour.
def on_open(self, conn):
self.connected = True
def on_close(self, conn):
self.connected = False
self.connection = None
def on_addr(self, conn, message): pass
def on_alert(self, conn, message): pass
def on_block(self, conn, message): pass
def on_blocktxn(self, conn, message): pass
def on_cmpctblock(self, conn, message): pass
def on_feefilter(self, conn, message): pass
def on_getaddr(self, conn, message): pass
def on_getblocks(self, conn, message): pass
def on_getblocktxn(self, conn, message): pass
def on_getdata(self, conn, message): pass
def on_getheaders(self, conn, message): pass
def on_headers(self, conn, message): pass
def on_mempool(self, conn): pass
def on_pong(self, conn, message): pass
def on_reject(self, conn, message): pass
def on_sendcmpct(self, conn, message): pass
def on_sendheaders(self, conn, message): pass
def on_tx(self, conn, message): pass
def on_inv(self, conn, message):
want = msg_getdata()
for i in message.inv:
if i.type != 0:
want.inv.append(i)
if len(want.inv):
conn.send_message(want)
def on_ping(self, conn, message):
if conn.ver_send > BIP0031_VERSION:
conn.send_message(msg_pong(message.nonce))
def on_verack(self, conn, message):
conn.ver_recv = conn.ver_send
self.verack_received = True
def on_version(self, conn, message):
if message.nVersion >= 209:
conn.send_message(msg_verack())
conn.ver_send = min(MY_VERSION, message.nVersion)
if message.nVersion < 209:
conn.ver_recv = conn.ver_send
conn.nServices = message.nServices
# Connection helper methods
def add_connection(self, conn):
self.connection = conn
def wait_for_disconnect(self, timeout=60):
test_function = lambda: not self.connected
wait_until(test_function, timeout=timeout, lock=mininode_lock)
# Message receiving helper methods
def wait_for_block(self, blockhash, timeout=60):
test_function = lambda: self.last_message.get("block") and self.last_message["block"].block.rehash() == blockhash
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_getdata(self, timeout=60):
test_function = lambda: self.last_message.get("getdata")
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_getheaders(self, timeout=60):
test_function = lambda: self.last_message.get("getheaders")
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_inv(self, expected_inv, timeout=60):
"""Waits for an INV message and checks that the first inv object in the message was as expected."""
if len(expected_inv) > 1:
raise NotImplementedError("wait_for_inv() will only verify the first inv object")
test_function = lambda: self.last_message.get("inv") and \
self.last_message["inv"].inv[0].type == expected_inv[0].type and \
self.last_message["inv"].inv[0].hash == expected_inv[0].hash
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_verack(self, timeout=60):
test_function = lambda: self.message_count["verack"]
wait_until(test_function, timeout=timeout, lock=mininode_lock)
# Message sending helper functions
def send_message(self, message):
if self.connection:
self.connection.send_message(message)
else:
logger.error("Cannot send message. No connection to node!")
def send_and_ping(self, message):
self.send_message(message)
self.sync_with_ping()
# Sync up with the node
def sync_with_ping(self, timeout=60):
self.send_message(msg_ping(nonce=self.ping_counter))
test_function = lambda: self.last_message.get("pong") and self.last_message["pong"].nonce == self.ping_counter
wait_until(test_function, timeout=timeout, lock=mininode_lock)
self.ping_counter += 1
# The actual NodeConn class
# This class provides an interface for a p2p connection to a specified node
class NodeConn(asyncore.dispatcher):
messagemap = {
b"version": msg_version,
b"verack": msg_verack,
b"addr": msg_addr,
b"alert": msg_alert,
b"inv": msg_inv,
b"getdata": msg_getdata,
b"getblocks": msg_getblocks,
b"tx": msg_tx,
b"block": msg_block,
b"getaddr": msg_getaddr,
b"ping": msg_ping,
b"pong": msg_pong,
b"headers": msg_headers,
b"getheaders": msg_getheaders,
b"reject": msg_reject,
b"mempool": msg_mempool,
b"feefilter": msg_feefilter,
b"sendheaders": msg_sendheaders,
b"sendcmpct": msg_sendcmpct,
b"cmpctblock": msg_cmpctblock,
b"getblocktxn": msg_getblocktxn,
b"blocktxn": msg_blocktxn
}
MAGIC_BYTES = {
"mainnet": b"\xfb\xc0\xb6\xdb", # mainnet
"testnet3": b"\xfc\xc1\xb7\xdc", # testnet3
"regtest": b"\xfa\xbf\xb5\xda", # regtest
}
def __init__(self, dstaddr, dstport, rpc, callback, net="regtest", services=NODE_NETWORK, send_version=True):
asyncore.dispatcher.__init__(self, map=mininode_socket_map)
self.dstaddr = dstaddr
self.dstport = dstport
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.sendbuf = b""
self.recvbuf = b""
self.ver_send = 209
self.ver_recv = 209
self.last_sent = 0
self.state = "connecting"
self.network = net
self.cb = callback
self.disconnect = False
self.nServices = 0
if send_version:
# stuff version msg into sendbuf
vt = msg_version()
vt.nServices = services
vt.addrTo.ip = self.dstaddr
vt.addrTo.port = self.dstport
vt.addrFrom.ip = "0.0.0.0"
vt.addrFrom.port = 0
self.send_message(vt, True)
logger.info('Connecting to Gamecoin Node: %s:%d' % (self.dstaddr, self.dstport))
try:
self.connect((dstaddr, dstport))
except:
self.handle_close()
self.rpc = rpc
def handle_connect(self):
if self.state != "connected":
logger.debug("Connected & Listening: %s:%d" % (self.dstaddr, self.dstport))
self.state = "connected"
self.cb.on_open(self)
def handle_close(self):
logger.debug("Closing connection to: %s:%d" % (self.dstaddr, self.dstport))
self.state = "closed"
self.recvbuf = b""
self.sendbuf = b""
try:
self.close()
except:
pass
self.cb.on_close(self)
def handle_read(self):
t = self.recv(8192)
if len(t) > 0:
self.recvbuf += t
self.got_data()
def readable(self):
return True
def writable(self):
with mininode_lock:
pre_connection = self.state == "connecting"
length = len(self.sendbuf)
return (length > 0 or pre_connection)
def handle_write(self):
with mininode_lock:
# asyncore does not expose socket connection, only the first read/write
# event, thus we must check connection manually here to know when we
# actually connect
if self.state == "connecting":
self.handle_connect()
if not self.writable():
return
try:
sent = self.send(self.sendbuf)
except:
self.handle_close()
return
self.sendbuf = self.sendbuf[sent:]
def got_data(self):
try:
while True:
if len(self.recvbuf) < 4:
return
if self.recvbuf[:4] != self.MAGIC_BYTES[self.network]:
raise ValueError("got garbage %s" % repr(self.recvbuf))
if self.ver_recv < 209:
if len(self.recvbuf) < 4 + 12 + 4:
return
command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = None
if len(self.recvbuf) < 4 + 12 + 4 + msglen:
return
msg = self.recvbuf[4+12+4:4+12+4+msglen]
self.recvbuf = self.recvbuf[4+12+4+msglen:]
else:
if len(self.recvbuf) < 4 + 12 + 4 + 4:
return
command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = self.recvbuf[4+12+4:4+12+4+4]
if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen:
return
msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen]
th = sha256(msg)
h = sha256(th)
if checksum != h[:4]:
raise ValueError("got bad checksum " + repr(self.recvbuf))
self.recvbuf = self.recvbuf[4+12+4+4+msglen:]
if command in self.messagemap:
f = BytesIO(msg)
t = self.messagemap[command]()
t.deserialize(f)
self.got_message(t)
else:
logger.warning("Received unknown command from %s:%d: '%s' %s" % (self.dstaddr, self.dstport, command, repr(msg)))
raise ValueError("Unknown command: '%s'" % (command))
except Exception as e:
            logger.exception('got_data: %s', repr(e))
raise
def send_message(self, message, pushbuf=False):
if self.state != "connected" and not pushbuf:
raise IOError('Not connected, no pushbuf')
self._log_message("send", message)
command = message.command
data = message.serialize()
tmsg = self.MAGIC_BYTES[self.network]
tmsg += command
tmsg += b"\x00" * (12 - len(command))
tmsg += struct.pack("<I", len(data))
if self.ver_send >= 209:
th = sha256(data)
h = sha256(th)
tmsg += h[:4]
tmsg += data
with mininode_lock:
self.sendbuf += tmsg
self.last_sent = time.time()
def got_message(self, message):
if message.command == b"version":
if message.nVersion <= BIP0031_VERSION:
self.messagemap[b'ping'] = msg_ping_prebip31
if self.last_sent + 30 * 60 < time.time():
self.send_message(self.messagemap[b'ping']())
self._log_message("receive", message)
self.cb.deliver(self, message)
def _log_message(self, direction, msg):
if direction == "send":
log_message = "Send message to "
elif direction == "receive":
log_message = "Received message from "
log_message += "%s:%d: %s" % (self.dstaddr, self.dstport, repr(msg)[:500])
if len(log_message) > 500:
log_message += "... (msg truncated)"
logger.debug(log_message)
def disconnect_node(self):
self.disconnect = True
class NetworkThread(Thread):
def run(self):
while mininode_socket_map:
# We check for whether to disconnect outside of the asyncore
# loop to workaround the behavior of asyncore when using
# select
disconnected = []
for fd, obj in mininode_socket_map.items():
if obj.disconnect:
disconnected.append(obj)
[ obj.handle_close() for obj in disconnected ]
asyncore.loop(0.1, use_poll=True, map=mininode_socket_map, count=1)
logger.debug("Network thread closing")
# An exception we can raise if we detect a potential disconnect
# (p2p or rpc) before the test is complete
class EarlyDisconnectError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
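# Illustrative sketch (not part of the original file): how NodeConn.send_message frames
# a payload for the post-209 protocol on regtest. It reuses the module's sha256()
# helper and the struct import that the methods above already rely on.
def _example_frame(command, payload):
    header = NodeConn.MAGIC_BYTES["regtest"]            # 4-byte network magic
    header += command + b"\x00" * (12 - len(command))   # 12-byte NUL-padded command
    header += struct.pack("<I", len(payload))           # little-endian payload length
    header += sha256(sha256(payload))[:4]               # first 4 bytes of double SHA-256
    return header + payload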
|
the-stack_106_24175 | # Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pipeline-level operations."""
import copy
import functools
import threading
import time
import typing
from typing import Callable, List, Mapping, Optional
from absl import logging
import attr
from tfx import types
from tfx.orchestration import metadata
from tfx.orchestration.experimental.core import async_pipeline_task_gen
from tfx.orchestration.experimental.core import constants
from tfx.orchestration.experimental.core import mlmd_state
from tfx.orchestration.experimental.core import pipeline_state as pstate
from tfx.orchestration.experimental.core import service_jobs
from tfx.orchestration.experimental.core import sync_pipeline_task_gen
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.experimental.core import task_gen_utils
from tfx.orchestration.experimental.core import task_queue as tq
from tfx.orchestration.experimental.core.task_schedulers import manual_task_scheduler
from tfx.orchestration.portable import partial_run_utils
from tfx.orchestration.portable.mlmd import execution_lib
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import status as status_lib
from ml_metadata.proto import metadata_store_pb2
# A coarse grained lock is used to ensure serialization of pipeline operations
# since there isn't a suitable MLMD transaction API.
_PIPELINE_OPS_LOCK = threading.RLock()
def _pipeline_ops_lock(fn):
"""Decorator to run `fn` within `_PIPELINE_OPS_LOCK` context."""
@functools.wraps(fn)
def _wrapper(*args, **kwargs):
with _PIPELINE_OPS_LOCK:
return fn(*args, **kwargs)
return _wrapper
def _to_status_not_ok_error(fn):
"""Decorator to catch exceptions and re-raise a `status_lib.StatusNotOkError`."""
@functools.wraps(fn)
def _wrapper(*args, **kwargs):
try:
return fn(*args, **kwargs)
except Exception as e: # pylint: disable=broad-except
logging.exception('Error raised by `%s`:', fn.__name__)
if isinstance(e, status_lib.StatusNotOkError):
raise
raise status_lib.StatusNotOkError(
code=status_lib.Code.UNKNOWN,
message=f'`{fn.__name__}` error: {str(e)}')
return _wrapper
@_to_status_not_ok_error
@_pipeline_ops_lock
def initiate_pipeline_start(
mlmd_handle: metadata.Metadata,
pipeline: pipeline_pb2.Pipeline,
pipeline_run_metadata: Optional[Mapping[str, types.Property]] = None,
partial_run_option: Optional[pipeline_pb2.PartialRun] = None
) -> pstate.PipelineState:
"""Initiates a pipeline start operation.
Upon success, MLMD is updated to signal that the pipeline must be started.
Args:
mlmd_handle: A handle to the MLMD db.
pipeline: IR of the pipeline to start.
pipeline_run_metadata: Pipeline run metadata.
partial_run_option: Options for partial pipeline run.
Returns:
The `PipelineState` object upon success.
Raises:
status_lib.StatusNotOkError: Failure to initiate pipeline start. With code
      `INVALID_ARGUMENT` if it's a sync pipeline without `pipeline_run_id`
provided.
"""
logging.info('Received request to start pipeline; pipeline uid: %s',
task_lib.PipelineUid.from_pipeline(pipeline))
pipeline = copy.deepcopy(pipeline)
if pipeline.execution_mode == pipeline_pb2.Pipeline.SYNC and not (
pipeline.runtime_spec.pipeline_run_id.HasField('field_value') and
pipeline.runtime_spec.pipeline_run_id.field_value.string_value):
raise status_lib.StatusNotOkError(
code=status_lib.Code.INVALID_ARGUMENT,
message='Sync pipeline IR must specify pipeline_run_id.')
if partial_run_option:
if pipeline.execution_mode != pipeline_pb2.Pipeline.SYNC:
raise status_lib.StatusNotOkError(
code=status_lib.Code.FAILED_PRECONDITION,
message=(
f'Partial run is only supported for SYNC pipeline execution modes; '
f'found pipeline with execution mode: {pipeline.execution_mode}'))
def node_fn(nodes):
return lambda node: node in nodes
# Mark nodes using partial pipeline run lib.
pipeline = partial_run_utils.mark_pipeline(
pipeline,
from_nodes=node_fn(partial_run_option.from_nodes),
to_nodes=node_fn(partial_run_option.to_nodes),
skip_nodes=node_fn(partial_run_option.skip_nodes),
snapshot_settings=partial_run_option.snapshot_settings)
if pipeline.runtime_spec.HasField('snapshot_settings'):
partial_run_utils.snapshot(mlmd_handle, pipeline)
return pstate.PipelineState.new(mlmd_handle, pipeline, pipeline_run_metadata)
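# Illustrative usage sketch (assumption, not part of the original module): a caller
# holding an MLMD handle and a compiled pipeline IR would typically start a run with
#   pipeline_state = initiate_pipeline_start(
#       mlmd_handle, pipeline,
#       partial_run_option=pipeline_pb2.PartialRun(from_nodes=['Trainer']))
# where 'Trainer' is a hypothetical node id of the pipeline being started.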
@_to_status_not_ok_error
def stop_pipeline(mlmd_handle: metadata.Metadata,
pipeline_uid: task_lib.PipelineUid,
timeout_secs: Optional[float] = None) -> None:
"""Stops a pipeline.
Initiates a pipeline stop operation and waits for the pipeline execution to be
gracefully stopped in the orchestration loop.
Args:
mlmd_handle: A handle to the MLMD db.
pipeline_uid: Uid of the pipeline to be stopped.
timeout_secs: Amount of time in seconds to wait for pipeline to stop. If
`None`, waits indefinitely.
Raises:
status_lib.StatusNotOkError: Failure to initiate pipeline stop.
"""
logging.info('Received request to stop pipeline; pipeline uid: %s',
pipeline_uid)
with _PIPELINE_OPS_LOCK:
with pstate.PipelineState.load(mlmd_handle, pipeline_uid) as pipeline_state:
pipeline_state.initiate_stop(
status_lib.Status(
code=status_lib.Code.CANCELLED,
message='Cancellation requested by client.'))
logging.info('Waiting for pipeline to be stopped; pipeline uid: %s',
pipeline_uid)
_wait_for_inactivation(
mlmd_handle, pipeline_state.execution_id, timeout_secs=timeout_secs)
logging.info('Done waiting for pipeline to be stopped; pipeline uid: %s',
pipeline_uid)
@_to_status_not_ok_error
@_pipeline_ops_lock
def initiate_node_start(mlmd_handle: metadata.Metadata,
node_uid: task_lib.NodeUid) -> pstate.PipelineState:
"""Initiates a node start operation for a pipeline node.
Args:
mlmd_handle: A handle to the MLMD db.
node_uid: Uid of the node to be started.
Returns:
The `PipelineState` object upon success.
Raises:
status_lib.StatusNotOkError: Failure to initiate node start operation.
"""
logging.info('Received request to start node; node uid: %s', node_uid)
with pstate.PipelineState.load(mlmd_handle,
node_uid.pipeline_uid) as pipeline_state:
with pipeline_state.node_state_update_context(node_uid) as node_state:
if node_state.is_startable():
node_state.update(pstate.NodeState.STARTING)
return pipeline_state
@_to_status_not_ok_error
def stop_node(mlmd_handle: metadata.Metadata,
node_uid: task_lib.NodeUid,
timeout_secs: Optional[float] = None) -> None:
"""Stops a node.
Initiates a node stop operation and waits for the node execution to become
inactive.
Args:
mlmd_handle: A handle to the MLMD db.
node_uid: Uid of the node to be stopped.
timeout_secs: Amount of time in seconds to wait for node to stop. If `None`,
waits indefinitely.
Raises:
status_lib.StatusNotOkError: Failure to stop the node.
"""
logging.info('Received request to stop node; node uid: %s', node_uid)
with _PIPELINE_OPS_LOCK:
with pstate.PipelineState.load(mlmd_handle,
node_uid.pipeline_uid) as pipeline_state:
nodes = pstate.get_all_pipeline_nodes(pipeline_state.pipeline)
filtered_nodes = [n for n in nodes if n.node_info.id == node_uid.node_id]
if len(filtered_nodes) != 1:
raise status_lib.StatusNotOkError(
code=status_lib.Code.INTERNAL,
message=(
f'`stop_node` operation failed, unable to find node to stop: '
f'{node_uid}'))
node = filtered_nodes[0]
with pipeline_state.node_state_update_context(node_uid) as node_state:
if node_state.is_stoppable():
node_state.update(
pstate.NodeState.STOPPING,
status_lib.Status(
code=status_lib.Code.CANCELLED,
message='Cancellation requested by client.'))
# Wait until the node is stopped or time out.
_wait_for_node_inactivation(
pipeline_state, node_uid, timeout_secs=timeout_secs)
@_to_status_not_ok_error
@_pipeline_ops_lock
def resume_manual_node(mlmd_handle: metadata.Metadata,
node_uid: task_lib.NodeUid) -> None:
"""Resumes a manual node.
Args:
mlmd_handle: A handle to the MLMD db.
node_uid: Uid of the manual node to be resumed.
Raises:
status_lib.StatusNotOkError: Failure to resume a manual node.
"""
logging.info('Received request to resume manual node; node uid: %s', node_uid)
with pstate.PipelineState.load(mlmd_handle,
node_uid.pipeline_uid) as pipeline_state:
nodes = pstate.get_all_pipeline_nodes(pipeline_state.pipeline)
filtered_nodes = [n for n in nodes if n.node_info.id == node_uid.node_id]
if len(filtered_nodes) != 1:
raise status_lib.StatusNotOkError(
code=status_lib.Code.NOT_FOUND,
message=(f'Unable to find manual node to resume: {node_uid}'))
node = filtered_nodes[0]
node_type = node.node_info.type.name
if node_type != constants.MANUAL_NODE_TYPE:
raise status_lib.StatusNotOkError(
code=status_lib.Code.INVALID_ARGUMENT,
message=('Unable to resume a non-manual node. '
f'Got non-manual node id: {node_uid}'))
executions = task_gen_utils.get_executions(mlmd_handle, node)
active_executions = [
e for e in executions if execution_lib.is_execution_active(e)
]
if not active_executions:
raise status_lib.StatusNotOkError(
code=status_lib.Code.NOT_FOUND,
message=(f'Unable to find active manual node to resume: {node_uid}'))
if len(active_executions) > 1:
raise status_lib.StatusNotOkError(
code=status_lib.Code.INTERNAL,
message=(f'Unexpected multiple active executions for manual node: '
f'{node_uid}'))
with mlmd_state.mlmd_execution_atomic_op(
mlmd_handle=mlmd_handle,
execution_id=active_executions[0].id) as execution:
completed_state = manual_task_scheduler.ManualNodeState(
state=manual_task_scheduler.ManualNodeState.COMPLETED)
completed_state.set_mlmd_value(
execution.custom_properties.get_or_create(
manual_task_scheduler.NODE_STATE_PROPERTY_KEY))
@_to_status_not_ok_error
@_pipeline_ops_lock
def _initiate_pipeline_update(
mlmd_handle: metadata.Metadata,
pipeline: pipeline_pb2.Pipeline) -> pstate.PipelineState:
"""Initiates pipeline update."""
pipeline_uid = task_lib.PipelineUid.from_pipeline(pipeline)
with pstate.PipelineState.load(mlmd_handle, pipeline_uid) as pipeline_state:
pipeline_state.initiate_update(pipeline)
return pipeline_state
@_to_status_not_ok_error
def update_pipeline(mlmd_handle: metadata.Metadata,
pipeline: pipeline_pb2.Pipeline,
timeout_secs: Optional[float] = None) -> None:
"""Updates an active pipeline with a new pipeline IR.
Initiates a pipeline update operation and waits for it to finish.
Args:
mlmd_handle: A handle to the MLMD db.
pipeline: New pipeline IR to be applied.
timeout_secs: Timeout in seconds to wait for the update to finish. If
`None`, waits indefinitely.
Raises:
status_lib.StatusNotOkError: Failure to update the pipeline.
"""
pipeline_uid = task_lib.PipelineUid.from_pipeline(pipeline)
logging.info('Received request to update pipeline; pipeline uid: %s',
pipeline_uid)
pipeline_state = _initiate_pipeline_update(mlmd_handle, pipeline)
def _is_update_applied() -> bool:
with pipeline_state:
if pipeline_state.is_active():
return not pipeline_state.is_update_initiated()
# If the pipeline is no longer active, whether or not the update is
# applied is irrelevant.
return True
logging.info('Waiting for pipeline update; pipeline uid: %s', pipeline_uid)
_wait_for_predicate(_is_update_applied, 'pipeline update', timeout_secs)
logging.info('Done waiting for pipeline update; pipeline uid: %s',
pipeline_uid)
def _wait_for_inactivation(mlmd_handle: metadata.Metadata,
execution_id: metadata_store_pb2.Execution,
timeout_secs: Optional[float]) -> None:
"""Waits for the given execution to become inactive.
Args:
mlmd_handle: A handle to the MLMD db.
execution_id: Id of the execution whose inactivation is awaited.
timeout_secs: Amount of time in seconds to wait. If `None`, waits
indefinitely.
Raises:
StatusNotOkError: With error code `DEADLINE_EXCEEDED` if execution is not
inactive after waiting approx. `timeout_secs`.
"""
def _is_inactivated() -> bool:
[execution] = mlmd_handle.store.get_executions_by_id([execution_id])
return not execution_lib.is_execution_active(execution)
return _wait_for_predicate(_is_inactivated, 'execution inactivation',
timeout_secs)
def _wait_for_node_inactivation(pipeline_state: pstate.PipelineState,
node_uid: task_lib.NodeUid,
timeout_secs: Optional[float]) -> None:
"""Waits for the given node to become inactive.
Args:
pipeline_state: Pipeline state.
node_uid: Uid of the node whose inactivation is awaited.
timeout_secs: Amount of time in seconds to wait. If `None`, waits
indefinitely.
Raises:
StatusNotOkError: With error code `DEADLINE_EXCEEDED` if node is not
inactive after waiting approx. `timeout_secs`.
"""
def _is_inactivated() -> bool:
with pipeline_state:
node_state = pipeline_state.get_node_state(node_uid)
return node_state.state in (pstate.NodeState.COMPLETE,
pstate.NodeState.FAILED,
pstate.NodeState.SKIPPED,
pstate.NodeState.STOPPED)
return _wait_for_predicate(_is_inactivated, 'node inactivation', timeout_secs)
@_to_status_not_ok_error
@_pipeline_ops_lock
def resume_pipeline(mlmd_handle: metadata.Metadata,
pipeline: pipeline_pb2.Pipeline) -> pstate.PipelineState:
"""Resumes a pipeline run from previously failed nodes.
Upon success, MLMD is updated to signal that the pipeline must be started.
Args:
mlmd_handle: A handle to the MLMD db.
pipeline: IR of the pipeline to resume.
Returns:
The `PipelineState` object upon success.
Raises:
status_lib.StatusNotOkError: Failure to resume pipeline. With code
`ALREADY_EXISTS` if a pipeline is already running. With code
`status_lib.Code.FAILED_PRECONDITION` if a previous pipeline run
is not found for resuming.
"""
logging.info('Received request to resume pipeline; pipeline uid: %s',
task_lib.PipelineUid.from_pipeline(pipeline))
if pipeline.execution_mode != pipeline_pb2.Pipeline.SYNC:
raise status_lib.StatusNotOkError(
code=status_lib.Code.FAILED_PRECONDITION,
message=(
f'Only SYNC pipeline execution modes supported; '
f'found pipeline with execution mode: {pipeline.execution_mode}'))
latest_pipeline_view = None
pipeline_uid = task_lib.PipelineUid.from_pipeline(pipeline)
views = pstate.PipelineView.load_all(mlmd_handle, pipeline_uid)
for view in views:
execution = view.execution
if execution_lib.is_execution_active(execution):
raise status_lib.StatusNotOkError(
code=status_lib.Code.ALREADY_EXISTS,
message=(f'Can not resume pipeline. An active pipeline is already '
f'running with uid {pipeline_uid}.'))
if (not latest_pipeline_view or execution.create_time_since_epoch >
latest_pipeline_view.execution.create_time_since_epoch):
latest_pipeline_view = view
if not latest_pipeline_view:
raise status_lib.StatusNotOkError(
code=status_lib.Code.NOT_FOUND,
message='Pipeline failed to resume. No previous pipeline run found.')
if latest_pipeline_view.pipeline.execution_mode != pipeline_pb2.Pipeline.SYNC:
raise status_lib.StatusNotOkError(
code=status_lib.Code.FAILED_PRECONDITION,
message=(
f'Only SYNC pipeline execution modes supported; previous pipeline '
f'run has execution mode: '
f'{latest_pipeline_view.pipeline.execution_mode}'))
# Get succeeded nodes in latest pipeline run.
latest_pipeline_node_states = latest_pipeline_view.get_node_states_dict()
previously_succeeded_nodes = []
for node, node_state in latest_pipeline_node_states.items():
if node_state.is_success():
previously_succeeded_nodes.append(node)
pipeline_nodes = [
node.node_info.id for node in pstate.get_all_pipeline_nodes(pipeline)
]
latest_pipeline_snapshot_settings = pipeline_pb2.SnapshotSettings()
latest_pipeline_snapshot_settings.latest_pipeline_run_strategy.SetInParent()
partial_run_option = pipeline_pb2.PartialRun(
from_nodes=pipeline_nodes,
to_nodes=pipeline_nodes,
skip_nodes=previously_succeeded_nodes,
snapshot_settings=latest_pipeline_snapshot_settings)
return initiate_pipeline_start(
mlmd_handle, pipeline, partial_run_option=partial_run_option)
_POLLING_INTERVAL_SECS = 10.0
def _wait_for_predicate(predicate_fn: Callable[[], bool], waiting_for_desc: str,
timeout_secs: Optional[float]) -> None:
"""Waits for `predicate_fn` to return `True` or until timeout seconds elapse."""
if timeout_secs is None:
while not predicate_fn():
time.sleep(_POLLING_INTERVAL_SECS)
return
polling_interval_secs = min(_POLLING_INTERVAL_SECS, timeout_secs / 4)
end_time = time.time() + timeout_secs
while end_time - time.time() > 0:
if predicate_fn():
return
time.sleep(max(0, min(polling_interval_secs, end_time - time.time())))
raise status_lib.StatusNotOkError(
code=status_lib.Code.DEADLINE_EXCEEDED,
message=(
f'Timed out ({timeout_secs} secs) waiting for {waiting_for_desc}.'))
@_to_status_not_ok_error
@_pipeline_ops_lock
def orchestrate(mlmd_handle: metadata.Metadata, task_queue: tq.TaskQueue,
service_job_manager: service_jobs.ServiceJobManager) -> None:
"""Performs a single iteration of the orchestration loop.
Embodies the core functionality of the main orchestration loop that scans MLMD
pipeline execution states, generates and enqueues the tasks to be performed.
Args:
mlmd_handle: A handle to the MLMD db.
task_queue: A `TaskQueue` instance into which any tasks will be enqueued.
service_job_manager: A `ServiceJobManager` instance for handling service
jobs.
Raises:
status_lib.StatusNotOkError: If error generating tasks.
"""
pipeline_states = _get_pipeline_states(mlmd_handle)
if not pipeline_states:
logging.info('No active pipelines to run.')
return
active_pipeline_states = []
stop_initiated_pipeline_states = []
update_initiated_pipeline_states = []
for pipeline_state in pipeline_states:
with pipeline_state:
if pipeline_state.is_stop_initiated():
stop_initiated_pipeline_states.append(pipeline_state)
elif pipeline_state.is_update_initiated():
update_initiated_pipeline_states.append(pipeline_state)
elif pipeline_state.is_active():
active_pipeline_states.append(pipeline_state)
else:
raise status_lib.StatusNotOkError(
code=status_lib.Code.INTERNAL,
message=(f'Found pipeline (uid: {pipeline_state.pipeline_uid}) '
                     f'which is neither active, stop-initiated, nor update-initiated.'))
for pipeline_state in stop_initiated_pipeline_states:
logging.info('Orchestrating stop-initiated pipeline: %s',
pipeline_state.pipeline_uid)
_orchestrate_stop_initiated_pipeline(mlmd_handle, task_queue,
service_job_manager, pipeline_state)
for pipeline_state in update_initiated_pipeline_states:
logging.info('Orchestrating update-initiated pipeline: %s',
pipeline_state.pipeline_uid)
_orchestrate_update_initiated_pipeline(mlmd_handle, task_queue,
service_job_manager, pipeline_state)
for pipeline_state in active_pipeline_states:
logging.info('Orchestrating pipeline: %s', pipeline_state.pipeline_uid)
_orchestrate_active_pipeline(mlmd_handle, task_queue, service_job_manager,
pipeline_state)
def _get_pipeline_states(
mlmd_handle: metadata.Metadata) -> List[pstate.PipelineState]:
"""Scans MLMD and returns pipeline states."""
contexts = pstate.get_orchestrator_contexts(mlmd_handle)
result = []
for context in contexts:
try:
pipeline_state = pstate.PipelineState.load_from_orchestrator_context(
mlmd_handle, context)
except status_lib.StatusNotOkError as e:
if e.code == status_lib.Code.NOT_FOUND:
# Ignore any old contexts with no associated active pipelines.
logging.info(e.message)
continue
else:
raise
result.append(pipeline_state)
return result
def _cancel_nodes(mlmd_handle: metadata.Metadata, task_queue: tq.TaskQueue,
service_job_manager: service_jobs.ServiceJobManager,
pipeline_state: pstate.PipelineState, pause: bool) -> bool:
"""Cancels pipeline nodes and returns `True` if any node is currently active."""
pipeline = pipeline_state.pipeline
is_active = False
for node in pstate.get_all_pipeline_nodes(pipeline):
if service_job_manager.is_pure_service_node(pipeline_state,
node.node_info.id):
if not service_job_manager.stop_node_services(pipeline_state,
node.node_info.id):
is_active = True
elif _maybe_enqueue_cancellation_task(
mlmd_handle, pipeline, node, task_queue, pause=pause):
is_active = True
elif service_job_manager.is_mixed_service_node(pipeline_state,
node.node_info.id):
if not service_job_manager.stop_node_services(pipeline_state,
node.node_info.id):
is_active = True
return is_active
def _orchestrate_stop_initiated_pipeline(
mlmd_handle: metadata.Metadata, task_queue: tq.TaskQueue,
service_job_manager: service_jobs.ServiceJobManager,
pipeline_state: pstate.PipelineState) -> None:
"""Orchestrates stop initiated pipeline."""
with pipeline_state:
stop_reason = pipeline_state.stop_initiated_reason()
assert stop_reason is not None
is_active = _cancel_nodes(
mlmd_handle, task_queue, service_job_manager, pipeline_state, pause=False)
if not is_active:
with pipeline_state:
# Update pipeline execution state in MLMD.
pipeline_state.set_pipeline_execution_state(
_mlmd_execution_code(stop_reason))
def _orchestrate_update_initiated_pipeline(
mlmd_handle: metadata.Metadata, task_queue: tq.TaskQueue,
service_job_manager: service_jobs.ServiceJobManager,
pipeline_state: pstate.PipelineState) -> None:
"""Orchestrates an update-initiated pipeline."""
is_active = _cancel_nodes(
mlmd_handle, task_queue, service_job_manager, pipeline_state, pause=True)
if not is_active:
with pipeline_state:
pipeline_state.apply_pipeline_update()
@attr.s(auto_attribs=True, kw_only=True)
class _NodeInfo:
"""A convenience container of pipeline node and its state."""
node: pipeline_pb2.PipelineNode
state: pstate.NodeState
def _orchestrate_active_pipeline(
mlmd_handle: metadata.Metadata, task_queue: tq.TaskQueue,
service_job_manager: service_jobs.ServiceJobManager,
pipeline_state: pstate.PipelineState) -> None:
"""Orchestrates active pipeline."""
pipeline = pipeline_state.pipeline
with pipeline_state:
assert pipeline_state.is_active()
if pipeline_state.get_pipeline_execution_state() != (
metadata_store_pb2.Execution.RUNNING):
pipeline_state.set_pipeline_execution_state(
metadata_store_pb2.Execution.RUNNING)
orchestration_options = pipeline_state.get_orchestration_options()
logging.info('Orchestration options: %s', orchestration_options)
deadline_secs = orchestration_options.deadline_secs
if (pipeline.execution_mode == pipeline_pb2.Pipeline.SYNC and
deadline_secs > 0 and
time.time() - pipeline_state.pipeline_creation_time_secs_since_epoch() >
deadline_secs):
logging.error(
'Aborting pipeline due to exceeding deadline (%s secs); '
'pipeline uid: %s', deadline_secs, pipeline_state.pipeline_uid)
pipeline_state.initiate_stop(
status_lib.Status(
code=status_lib.Code.DEADLINE_EXCEEDED,
message=('Pipeline aborted due to exceeding deadline '
f'({deadline_secs} secs)')))
return
def _filter_by_state(node_infos: List[_NodeInfo],
state_str: str) -> List[_NodeInfo]:
return [n for n in node_infos if n.state.state == state_str]
node_infos = _get_node_infos(pipeline_state)
stopping_node_infos = _filter_by_state(node_infos, pstate.NodeState.STOPPING)
# Tracks nodes stopped in the current iteration.
stopped_node_infos: List[_NodeInfo] = []
# Create cancellation tasks for nodes in state STOPPING.
for node_info in stopping_node_infos:
if service_job_manager.is_pure_service_node(pipeline_state,
node_info.node.node_info.id):
if service_job_manager.stop_node_services(pipeline_state,
node_info.node.node_info.id):
stopped_node_infos.append(node_info)
elif _maybe_enqueue_cancellation_task(mlmd_handle, pipeline, node_info.node,
task_queue):
pass
elif service_job_manager.is_mixed_service_node(pipeline_state,
node_info.node.node_info.id):
if service_job_manager.stop_node_services(pipeline_state,
node_info.node.node_info.id):
stopped_node_infos.append(node_info)
else:
stopped_node_infos.append(node_info)
# Change the state of stopped nodes from STOPPING to STOPPED.
if stopped_node_infos:
with pipeline_state:
for node_info in stopped_node_infos:
node_uid = task_lib.NodeUid.from_pipeline_node(pipeline, node_info.node)
with pipeline_state.node_state_update_context(node_uid) as node_state:
node_state.update(pstate.NodeState.STOPPED, node_state.status)
# Initialize task generator for the pipeline.
if pipeline.execution_mode == pipeline_pb2.Pipeline.SYNC:
generator = sync_pipeline_task_gen.SyncPipelineTaskGenerator(
mlmd_handle,
task_queue.contains_task_id,
service_job_manager,
fail_fast=orchestration_options.fail_fast)
elif pipeline.execution_mode == pipeline_pb2.Pipeline.ASYNC:
generator = async_pipeline_task_gen.AsyncPipelineTaskGenerator(
mlmd_handle, task_queue.contains_task_id, service_job_manager)
else:
raise status_lib.StatusNotOkError(
code=status_lib.Code.FAILED_PRECONDITION,
message=(
f'Only SYNC and ASYNC pipeline execution modes supported; '
f'found pipeline with execution mode: {pipeline.execution_mode}'))
tasks = generator.generate(pipeline_state)
with pipeline_state:
# Handle all the UpdateNodeStateTasks by updating node states.
for task in tasks:
if task_lib.is_update_node_state_task(task):
task = typing.cast(task_lib.UpdateNodeStateTask, task)
with pipeline_state.node_state_update_context(
task.node_uid) as node_state:
node_state.update(task.state, task.status)
tasks = [t for t in tasks if not task_lib.is_update_node_state_task(t)]
# If there are still nodes in state STARTING, change them to STARTED.
for node in pstate.get_all_pipeline_nodes(pipeline_state.pipeline):
node_uid = task_lib.NodeUid.from_pipeline_node(pipeline_state.pipeline,
node)
with pipeline_state.node_state_update_context(node_uid) as node_state:
if node_state.state == pstate.NodeState.STARTING:
node_state.update(pstate.NodeState.STARTED)
for task in tasks:
if task_lib.is_exec_node_task(task):
task = typing.cast(task_lib.ExecNodeTask, task)
task_queue.enqueue(task)
else:
assert task_lib.is_finalize_pipeline_task(task)
assert pipeline.execution_mode == pipeline_pb2.Pipeline.SYNC
assert len(tasks) == 1
task = typing.cast(task_lib.FinalizePipelineTask, task)
if task.status.code == status_lib.Code.OK:
logging.info('Pipeline run successful; pipeline uid: %s',
pipeline_state.pipeline_uid)
else:
logging.info('Pipeline run failed; pipeline uid: %s',
pipeline_state.pipeline_uid)
pipeline_state.initiate_stop(task.status)
def _get_node_infos(pipeline_state: pstate.PipelineState) -> List[_NodeInfo]:
"""Returns a list of `_NodeInfo` object for each node in the pipeline."""
nodes = pstate.get_all_pipeline_nodes(pipeline_state.pipeline)
result: List[_NodeInfo] = []
with pipeline_state:
for node in nodes:
node_uid = task_lib.NodeUid.from_pipeline_node(pipeline_state.pipeline,
node)
result.append(
_NodeInfo(node=node, state=pipeline_state.get_node_state(node_uid)))
return result
def _maybe_enqueue_cancellation_task(mlmd_handle: metadata.Metadata,
pipeline: pipeline_pb2.Pipeline,
node: pipeline_pb2.PipelineNode,
task_queue: tq.TaskQueue,
pause: bool = False) -> bool:
"""Enqueues a node cancellation task if not already stopped.
If the node has an ExecNodeTask in the task queue, issue a cancellation.
Otherwise, when pause=False, if the node has an active execution in MLMD but
no ExecNodeTask enqueued, it may be due to orchestrator restart after stopping
was initiated but before the schedulers could finish. So, enqueue an
ExecNodeTask with is_cancelled set to give a chance for the scheduler to
finish gracefully.
Args:
mlmd_handle: A handle to the MLMD db.
pipeline: The pipeline containing the node to cancel.
node: The node to cancel.
task_queue: A `TaskQueue` instance into which any cancellation tasks will be
enqueued.
pause: Whether the cancellation is to pause the node rather than cancelling
the execution.
Returns:
`True` if a cancellation task was enqueued. `False` if node is already
stopped or no cancellation was required.
"""
exec_node_task_id = task_lib.exec_node_task_id_from_pipeline_node(
pipeline, node)
if task_queue.contains_task_id(exec_node_task_id):
task_queue.enqueue(
task_lib.CancelNodeTask(
node_uid=task_lib.NodeUid.from_pipeline_node(pipeline, node),
pause=pause))
return True
if not pause:
executions = task_gen_utils.get_executions(mlmd_handle, node)
exec_node_task = task_gen_utils.generate_task_from_active_execution(
mlmd_handle, pipeline, node, executions, is_cancelled=True)
if exec_node_task:
task_queue.enqueue(exec_node_task)
return True
return False
def _mlmd_execution_code(
status: status_lib.Status) -> metadata_store_pb2.Execution.State:
if status.code == status_lib.Code.OK:
return metadata_store_pb2.Execution.COMPLETE
elif status.code == status_lib.Code.CANCELLED:
return metadata_store_pb2.Execution.CANCELED
return metadata_store_pb2.Execution.FAILED
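# Illustrative sketch (assumption, not part of the original module): an orchestration
# loop built on this module would typically look like
#   with metadata.Metadata(connection_config=...) as mlmd_handle:
#       task_queue = tq.TaskQueue()
#       while True:
#           orchestrate(mlmd_handle, task_queue, service_job_manager)
#           time.sleep(_POLLING_INTERVAL_SECS)
# where connection_config and service_job_manager (a service_jobs.ServiceJobManager
# implementation) are supplied by the surrounding deployment.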
|
the-stack_106_24176 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import tir
from tvm import relax as rx
from tvm.script import tir as T
@tvm.register_func("test.op.identity")
def identity_packed(a):
return tvm.nd.array(a.asnumpy())
@T.prim_func
def identity_tir(a: T.handle, b: T.handle) -> None:
A = T.match_buffer(a, [54, 96])
B = T.match_buffer(b, [54, 96])
for i, j in T.grid(54, 96):
with T.block("compute"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj]
def test_call_dps() -> None:
shape_anno = [54, 96]
type_anno = rx.DynTensorType(2, "float32")
v0 = rx.Var("v0", shape_anno, type_anno)
v1 = rx.call_dps([54, 96], rx.extern("test.op.identity"), [v0])
v1 = rx.call_dps([54, 96], identity_tir, [v0])
if __name__ == "__main__":
test_call_dps()
|
the-stack_106_24177 | program = open("/share/Ecoli/GCA_000005845.2_ASM584v2_genomic.fna", "r")
genes = program.readlines()[1:]
genome = []
for i in genes:
    i = i.rstrip('\n')
    genome.append(i)
# Join the sequence lines into one string so individual bases can be indexed.
genome1 = "".join(genome)
genome2 = ""
i = 0
while i < len(genome1):
    # Look for GAC followed five bases later by GTC, i.e. an 11-base GACNNNNNGTC site.
    if genome1[i:i+3] == "GAC" and genome1[i+8:i+11] == "GTC":
        # Split the site between its 6th and 7th base by inserting a space.
        genome2 = genome2 + genome1[i:i+6] + " " + genome1[i+6:i+11]
        # print(genome2)
        i += 11
    else:
        genome2 = genome2 + genome1[i]
        i += 1
genome3 = genome2.split(" ")
lengths = []
for i in genome3:
lengths.append(len(i))
#print (genome3)
print (lengths)
|
the-stack_106_24178 | # -*- coding: utf-8 -*-
'''Code imported from ``textblob-fr`` sample extension.
:repo: `https://github.com/sloria/textblob-fr`_
:source: run_tests.py
:version: 2013-10-28 (5c6329d209)
:modified: July 2014 <[email protected]>
'''
import sys
import subprocess
import re
from setuptools import setup
packages = ['textblob_de']
requires = ["textblob>=0.9.0"]
PUBLISH_CMD = "python setup.py register sdist bdist_wheel upload"
TEST_PUBLISH_CMD = 'python setup.py register -r test sdist bdist_wheel upload -r test'
TEST_CMD = 'python run_tests.py'
def find_version(fname):
"""Attempts to find the version number in the file names fname.
Raises RuntimeError if not found.
"""
version = ''
with open(fname, 'r') as fp:
reg = re.compile(r'__version__ = [\'"]([^\'"]*)[\'"]')
for line in fp:
m = reg.match(line)
if m:
version = m.group(1)
break
if not version:
raise RuntimeError('Cannot find version information')
return version
__version__ = find_version("textblob_de/__init__.py")
if 'publish' in sys.argv:
try:
__import__('wheel')
except ImportError:
print("wheel required. Run `pip install wheel`.")
sys.exit(1)
status = subprocess.call(PUBLISH_CMD, shell=True)
sys.exit(status)
if 'publish_test' in sys.argv:
try:
__import__('wheel')
except ImportError:
print("wheel required. Run `pip install wheel`.")
sys.exit(1)
status = subprocess.call(TEST_PUBLISH_CMD, shell=True)
sys.exit()
if 'run_tests' in sys.argv:
try:
__import__('nose')
except ImportError:
print('nose required. Run `pip install nose`.')
sys.exit(1)
status = subprocess.call(TEST_CMD, shell=True)
sys.exit(status)
def read(fname):
with open(fname) as fp:
content = fp.read()
return content
setup(
name='textblob-de',
version=__version__,
description='German language support for TextBlob.',
long_description=(
read("README.rst") + '\n\n' + read("HISTORY.rst")),
author='Markus Killer',
author_email='[email protected]',
url='https://github.com/markuskiller/textblob-de',
packages=packages,
package_dir={
'textblob_de': 'textblob_de'},
include_package_data=True,
package_data={
"textblob_de": [
"data/*.*",
"ext/*.*",
"ext/_pattern/*.*",
"ext/_pattern/text/*.*",
"ext/_pattern/text/de/*.*",
]},
install_requires=requires,
license='\n\n' + read("LICENSE") + '\n\n',
zip_safe=False,
keywords=[
'textblob',
'textblob_de',
'nlp',
'linguistics',
'nltk',
'pattern'],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Information Technology',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: German',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Text Processing',
'Topic :: Text Processing :: Linguistic',
],
test_suite='tests',
tests_require=['nose'],
)
|
the-stack_106_24179 | import unittest
from collections import OrderedDict
from unittest import mock
from data import api_caller
class TestLegal(unittest.TestCase):
@mock.patch.object(api_caller, '_call_api')
def test_load_legal_mur(self, call_api):
call_api.return_value = {
'docs': [{
'no': '1234',
'mur_type': 'current',
'participants': [
{
'role': 'Complainant',
'name': 'Gollum',
'citations': []
},
{
'role': 'Respondent',
'name': 'Bilbo Baggins',
'citations': []
},
],
'commission_votes': [],
'dispositions': [
{
'disposition': 'Conciliation-PC',
'penalty': 100.0
},
{
'disposition': 'Conciliation-PC',
'penalty': 0.0
},
],
'documents': []
}]
}
mur = api_caller.load_legal_mur('1234')
assert mur.get('no') == '1234'
assert mur['participants_by_type'] == OrderedDict([
('Respondent', ['Bilbo Baggins']),
('Complainant', ['Gollum']),
])
assert mur['collated_dispositions'] == OrderedDict([
('Conciliation-PC', OrderedDict([
(100.0, [{'penalty': 100.0, 'disposition': 'Conciliation-PC'}]),
(0.0, [{'penalty': 0.0, 'disposition': 'Conciliation-PC'}])
]))
])
|
the-stack_106_24180 | import pandas as pd
import pytest
from evalml.data_checks import (
DataCheckMessageCode,
DataCheckWarning,
SparsityDataCheck
)
sparsity_data_check_name = SparsityDataCheck.name
def test_sparsity_data_check_init():
sparsity_check = SparsityDataCheck("multiclass", threshold=4 / 15)
assert sparsity_check.threshold == 4 / 15
sparsity_check = SparsityDataCheck("multiclass", threshold=0.2)
assert sparsity_check.unique_count_threshold == 10
sparsity_check = SparsityDataCheck("multiclass", threshold=.1, unique_count_threshold=5)
assert sparsity_check.unique_count_threshold == 5
with pytest.raises(ValueError, match="Threshold must be a float between 0 and 1, inclusive."):
SparsityDataCheck("multiclass", threshold=-0.1)
with pytest.raises(ValueError, match="Threshold must be a float between 0 and 1, inclusive."):
SparsityDataCheck("multiclass", threshold=1.1)
with pytest.raises(ValueError, match="Sparsity is only defined for multiclass problem types."):
SparsityDataCheck("binary", threshold=.5)
with pytest.raises(ValueError, match="Sparsity is only defined for multiclass problem types."):
SparsityDataCheck("time series binary", threshold=.5)
with pytest.raises(ValueError, match="Sparsity is only defined for multiclass problem types."):
SparsityDataCheck("regression", threshold=.5)
with pytest.raises(ValueError, match="Sparsity is only defined for multiclass problem types."):
SparsityDataCheck("time series regression", threshold=.5)
with pytest.raises(ValueError, match="Unique count threshold must be positive integer."):
SparsityDataCheck("multiclass", threshold=.5, unique_count_threshold=-1)
with pytest.raises(ValueError, match="Unique count threshold must be positive integer."):
SparsityDataCheck("multiclass", threshold=.5, unique_count_threshold=2.3)
def test_sparsity_data_check_sparsity_score():
# Application to a Series
    # Here, only 0 exceeds the count_threshold of 3. 0 is 1 of the 3 unique values, so the score is 1/3.
data = pd.Series([x % 3 for x in range(10)]) # [0,1,2,0,1,2,0,1,2,0]
scores = SparsityDataCheck.sparsity_score(data, count_threshold=3)
assert round(scores, 6) == round(1 / 3, 6), "Sparsity Series check failed."
# Another application to a Series
# Here, 1 exceeds the count_threshold of 3. 1 is 1/1 unique values, so the score is 1.
data = pd.Series([1, 1, 1, 1, 1, 1, 1, 1])
scores = SparsityDataCheck.sparsity_score(data, count_threshold=3)
assert scores == 1
# Another application to a Series
# Here, 1 does not exceed the count_threshold of 10. 1 is 1/1 unique values, so the score is 0.
data = pd.Series([1, 1, 1, 1, 1, 1, 1, 1])
scores = SparsityDataCheck.sparsity_score(data, count_threshold=10)
assert scores == 0
# Application to an entire DataFrame
data = pd.DataFrame({'most_sparse': [float(x) for x in range(10)], # [0,1,2,3,4,5,6,7,8,9]
'more_sparse': [x % 5 for x in range(10)], # [0,1,2,3,4,0,1,2,3,4]
'sparse': [x % 3 for x in range(10)], # [0,1,2,0,1,2,0,1,2,0]
'less_sparse': [x % 2 for x in range(10)], # [0,1,0,1,0,1,0,1,0,1]
'not_sparse': [float(1) for x in range(10)]}) # [1,1,1,1,1,1,1,1,1,1]
sparsity_score = SparsityDataCheck.sparsity_score
scores = data.apply(sparsity_score, count_threshold=3)
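    # With count_threshold=3, a unique value only counts toward the score once it
    # appears more than 3 times, so 'sparse' ([0,1,2,0,1,2,0,1,2,0]) has 1 of its
    # 3 unique values (the 0, seen 4 times) qualifying, giving the 1/3 expected below.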
ans = pd.Series({'most_sparse': 0.000000,
'more_sparse': 0.000000,
'sparse': 0.333333,
'less_sparse': 1.000000,
'not_sparse': 1.000000})
assert scores.round(6).equals(ans), "Sparsity DataFrame check failed."
def test_sparsity_data_check_warnings():
data = pd.DataFrame({'most_sparse': [float(x) for x in range(10)], # [0,1,2,3,4,5,6,7,8,9]
'more_sparse': [x % 5 for x in range(10)], # [0,1,2,3,4,0,1,2,3,4]
'sparse': [x % 3 for x in range(10)], # [0,1,2,0,1,2,0,1,2,0]
'less_sparse': [x % 2 for x in range(10)], # [0,1,0,1,0,1,0,1,0,1]
'not_sparse': [float(1) for x in range(10)]}) # [1,1,1,1,1,1,1,1,1,1]
sparsity_check = SparsityDataCheck(problem_type="multiclass",
threshold=.4,
unique_count_threshold=3)
assert sparsity_check.validate(data) == {
"warnings": [DataCheckWarning(
message="Input columns (most_sparse) for multiclass problem type are too sparse.",
data_check_name=sparsity_data_check_name,
message_code=DataCheckMessageCode.TOO_SPARSE,
details={"column": "most_sparse",
'sparsity_score': 0}).to_dict(),
DataCheckWarning(
message="Input columns (more_sparse) for multiclass problem type are too sparse.",
data_check_name=sparsity_data_check_name,
message_code=DataCheckMessageCode.TOO_SPARSE,
details={"column": "more_sparse",
'sparsity_score': 0}).to_dict(),
DataCheckWarning(
message="Input columns (sparse) for multiclass problem type are too sparse.",
data_check_name=sparsity_data_check_name,
message_code=DataCheckMessageCode.TOO_SPARSE,
details={"column": "sparse",
'sparsity_score': 0.3333333333333333}).to_dict()],
"errors": [],
"actions": []
}
|
the-stack_106_24182 | import pathlib
import unittest
from jinja2 import FileSystemLoader
from jinja2 import Template
from jinja2 import Environment
from datetime import datetime, timedelta
from airflow import DAG
from airflow_kjo import KubernetesJobOperator
class KubeLauncherMock:
def __init__(self):
pass
def apply(self, **kwargs):
pass
def watch(self, **kwargs):
pass
def delete(self, **kwargs):
pass
class TaskInstanceMock:
def __init__(self):
self.try_number = 1
class TestKubernetesJobOperator(unittest.TestCase):
def setUp(self):
super().setUp()
default_args = {
"owner": "airflow",
"depends_on_past": False,
"email": ["[email protected]"],
"email_on_failure": False,
"email_on_retry": False,
"retries": 1,
"retry_delay": timedelta(minutes=5),
"start_date": datetime(2021, 2, 24, 12, 0),
}
path = str(pathlib.Path(__file__).parent.absolute())
fixture_path = path + "/fixtures"
self.dag = DAG(
"test_kubernetes_job_op_dag",
default_args=default_args,
template_searchpath=fixture_path,
)
self.task_instance = TaskInstanceMock()
self.kube_launcher = KubeLauncherMock()
self.fixture_path = fixture_path
def test_single_jinja_rendered(self):
"""
Test that a templated yaml file will be properly rendered by the operator
"""
yaml_file_name = "test_single_yaml.tmpl"
task_num = 1
command = "sleep 60; for i in 5 4 3 2 1 ; do echo $i ; done"
with open(self.fixture_path + f"/{yaml_file_name}", "r") as read_file_obj:
yaml_content = read_file_obj.read()
template = Template(yaml_content)
expected_rendered = template.render(command=command, task_num=task_num)
task = KubernetesJobOperator(
task_id=yaml_file_name,
yaml_file_name=yaml_file_name,
yaml_template_fields={"command": command, "task_num": task_num},
kube_launcher=self.kube_launcher,
)
rendered_result = task.execute(
{
"dag": self.dag,
"ti": self.task_instance,
"task_instance": self.task_instance,
}
)
assert rendered_result == expected_rendered
def test_multiple_jinja_rendered(self):
"""
        Test that multiple templated yaml files composed through Jinja templating are properly rendered
"""
base_yaml_file_name = "test_multiple_yaml_core.tmpl"
command = "sleep 60; for i in 5 4 3 2 1 ; do echo $i ; done"
jinja_env = Environment(loader=FileSystemLoader(searchpath=self.fixture_path))
template = jinja_env.get_template(base_yaml_file_name)
expected_rendered = template.render(command=command)
task = KubernetesJobOperator(
task_id=base_yaml_file_name,
yaml_file_name=base_yaml_file_name,
yaml_template_fields={"command": command},
kube_launcher=self.kube_launcher,
)
rendered_result = task.execute(
{
"dag": self.dag,
"ti": self.task_instance,
"task_instance": self.task_instance,
}
)
assert rendered_result == expected_rendered
|
the-stack_106_24184 | import tensorflow as tf
from tensorflow.keras import layers
from model.ops import MultiChannelEmbedding, ConvolutionLayer, MaxPooling
from gluonnlp import Vocab
class SenCNN(tf.keras.Model):
def __init__(self, num_classes: int, vocab: Vocab) -> None:
super(SenCNN, self).__init__()
self._embedding = MultiChannelEmbedding(vocab)
self._convolution = ConvolutionLayer(300)
self._pooling = MaxPooling()
self._dropout = layers.Dropout(0.5)
        self._fc = layers.Dense(units=num_classes)  # softmax is not applied here; raw logits are returned
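        # Added note (assumption about the surrounding training code): because the model
        # returns raw logits, the loss is expected to be configured with from_logits=True,
        # e.g. tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True).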
def call(self, x: tf.Tensor) -> tf.Tensor:
fmap = self._embedding(x)
fmap = self._convolution(fmap)
feature = self._pooling(fmap)
feature = self._dropout(feature)
score = self._fc(feature)
return score |
the-stack_106_24186 | from .config import *
import shodan
import time
def get_device(device_name, json_output=False):
try:
api = shodan.Shodan(shodan_api_key)
results = api.search(device_name)
time.sleep(5)
if json_output:
print(results)
return
print(f"Results Found: {results['total']}")
for result in results['matches']:
print(f"Ip:: {result['ip_str']} | Organization:: {result['org']} |"
f" Domain:: {result['domains']} | ISP:: {result['isp']}")
except shodan.APIError as e:
print(e)
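# Illustrative usage sketch (assumption, not part of the original module):
#   get_device("apache")                    # prints one summary line per match
#   get_device("apache", json_output=True)  # prints the raw Shodan response instead
# shodan_api_key is expected to come from the wildcard import of .config above.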
|
the-stack_106_24192 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
dependencies = [
('orchestra', '0003_auto_20141229_1610'),
]
operations = [
migrations.AddField(
model_name='taskassignment',
name='task',
field=models.ForeignKey(
on_delete=models.CASCADE, default=0, to='orchestra.Task'),
preserve_default=False,
),
]
|
the-stack_106_24194 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from code import Code
from model import PropertyType
import cpp_util
import schema_util
import util_cc_helper
class CCGenerator(object):
def __init__(self, type_generator, cpp_namespace):
self._type_generator = type_generator
self._cpp_namespace = cpp_namespace
def Generate(self, namespace):
return _Generator(namespace,
self._type_generator,
self._cpp_namespace).Generate()
class _Generator(object):
"""A .cc generator for a namespace.
"""
def __init__(self, namespace, cpp_type_generator, cpp_namespace):
self._namespace = namespace
self._type_helper = cpp_type_generator
self._cpp_namespace = cpp_namespace
self._target_namespace = (
self._type_helper.GetCppNamespaceName(self._namespace))
self._util_cc_helper = (
util_cc_helper.UtilCCHelper(self._type_helper))
self._generate_error_messages = namespace.compiler_options.get(
'generate_error_messages', False)
def Generate(self):
"""Generates a Code object with the .cc for a single namespace.
"""
c = Code()
(c.Append(cpp_util.CHROMIUM_LICENSE)
.Append()
.Append(cpp_util.GENERATED_FILE_MESSAGE % self._namespace.source_file)
.Append()
.Append(self._util_cc_helper.GetIncludePath())
.Append('#include "base/logging.h"')
.Append('#include "base/strings/string_number_conversions.h"')
.Append('#include "base/strings/utf_string_conversions.h"')
.Append('#include "%s/%s.h"' %
(self._namespace.source_file_dir, self._namespace.short_filename))
.Cblock(self._type_helper.GenerateIncludes(include_soft=True))
.Append()
.Concat(cpp_util.OpenNamespace(self._cpp_namespace))
.Cblock(self._type_helper.GetNamespaceStart())
)
if self._namespace.properties:
(c.Append('//')
.Append('// Properties')
.Append('//')
.Append()
)
for property in self._namespace.properties.values():
property_code = self._type_helper.GeneratePropertyValues(
property,
'const %(type)s %(name)s = %(value)s;',
nodoc=True)
if property_code:
c.Cblock(property_code)
if self._namespace.types:
(c.Append('//')
.Append('// Types')
.Append('//')
.Append()
.Cblock(self._GenerateTypes(None, self._namespace.types.values()))
)
if self._namespace.functions:
(c.Append('//')
.Append('// Functions')
.Append('//')
.Append()
)
for function in self._namespace.functions.values():
c.Cblock(self._GenerateFunction(function))
if self._namespace.events:
(c.Append('//')
.Append('// Events')
.Append('//')
.Append()
)
for event in self._namespace.events.values():
c.Cblock(self._GenerateEvent(event))
(c.Concat(self._type_helper.GetNamespaceEnd())
.Cblock(cpp_util.CloseNamespace(self._cpp_namespace))
)
c.Append()
return c
def _GenerateType(self, cpp_namespace, type_):
"""Generates the function definitions for a type.
"""
classname = cpp_util.Classname(schema_util.StripNamespace(type_.name))
c = Code()
if type_.functions:
# Wrap functions within types in the type's namespace.
(c.Append('namespace %s {' % classname)
.Append())
for function in type_.functions.values():
c.Cblock(self._GenerateFunction(function))
c.Append('} // namespace %s' % classname)
elif type_.property_type == PropertyType.ARRAY:
c.Cblock(self._GenerateType(cpp_namespace, type_.item_type))
elif type_.property_type in (PropertyType.CHOICES,
PropertyType.OBJECT):
if cpp_namespace is None:
classname_in_namespace = classname
else:
classname_in_namespace = '%s::%s' % (cpp_namespace, classname)
if type_.property_type == PropertyType.OBJECT:
c.Cblock(self._GeneratePropertyFunctions(classname_in_namespace,
type_.properties.values()))
else:
c.Cblock(self._GenerateTypes(classname_in_namespace, type_.choices))
(c.Append('%s::%s()' % (classname_in_namespace, classname))
.Cblock(self._GenerateInitializersAndBody(type_))
.Append('%s::~%s() {}' % (classname_in_namespace, classname))
.Append()
)
if type_.origin.from_json:
c.Cblock(self._GenerateTypePopulate(classname_in_namespace, type_))
if cpp_namespace is None: # only generate for top-level types
c.Cblock(self._GenerateTypeFromValue(classname_in_namespace, type_))
if type_.origin.from_client:
c.Cblock(self._GenerateTypeToValue(classname_in_namespace, type_))
elif type_.property_type == PropertyType.ENUM:
(c.Cblock(self._GenerateEnumToString(cpp_namespace, type_))
.Cblock(self._GenerateEnumFromString(cpp_namespace, type_))
)
return c
def _GenerateInitializersAndBody(self, type_):
items = []
for prop in type_.properties.values():
if prop.optional:
continue
t = prop.type_
if t.property_type == PropertyType.INTEGER:
items.append('%s(0)' % prop.unix_name)
elif t.property_type == PropertyType.DOUBLE:
items.append('%s(0.0)' % prop.unix_name)
elif t.property_type == PropertyType.BOOLEAN:
items.append('%s(false)' % prop.unix_name)
elif (t.property_type == PropertyType.ANY or
t.property_type == PropertyType.ARRAY or
t.property_type == PropertyType.BINARY or # mapped to std::string
t.property_type == PropertyType.CHOICES or
t.property_type == PropertyType.ENUM or
t.property_type == PropertyType.OBJECT or
t.property_type == PropertyType.FUNCTION or
t.property_type == PropertyType.REF or
t.property_type == PropertyType.STRING):
# TODO(miket): It would be nice to initialize CHOICES and ENUM, but we
# don't presently have the semantics to indicate which one of a set
# should be the default.
continue
else:
raise TypeError(t)
if items:
s = ': %s' % (', '.join(items))
else:
s = ''
s = s + ' {}'
return Code().Append(s)
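  # Illustrative sketch (assumption): for a type whose required properties are an
  # integer `id` and a boolean `active`, the code above emits an initializer list
  # of the form
  #   : id(0), active(false) {}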
def _GenerateTypePopulate(self, cpp_namespace, type_):
"""Generates the function for populating a type given a pointer to it.
E.g for type "Foo", generates Foo::Populate()
"""
classname = cpp_util.Classname(schema_util.StripNamespace(type_.name))
c = Code()
(c.Append('// static')
.Append('bool %(namespace)s::Populate(')
.Sblock(' %s) {' % self._GenerateParams(
('const base::Value& value', '%(name)s* out'))))
if type_.property_type == PropertyType.CHOICES:
for choice in type_.choices:
(c.Sblock('if (%s) {' % self._GenerateValueIsTypeExpression('value',
choice))
.Concat(self._GeneratePopulateVariableFromValue(
choice,
'(&value)',
'out->as_%s' % choice.unix_name,
'false',
is_ptr=True))
.Append('return true;')
.Eblock('}')
)
(c.Concat(self._GenerateError(
'"expected %s, got " + %s' %
(" or ".join(choice.name for choice in type_.choices),
self._util_cc_helper.GetValueTypeString('value'))))
.Append('return false;'))
elif type_.property_type == PropertyType.OBJECT:
(c.Sblock('if (!value.IsType(base::Value::TYPE_DICTIONARY)) {')
.Concat(self._GenerateError(
'"expected dictionary, got " + ' +
self._util_cc_helper.GetValueTypeString('value')))
.Append('return false;')
.Eblock('}'))
if type_.properties or type_.additional_properties is not None:
c.Append('const base::DictionaryValue* dict = '
'static_cast<const base::DictionaryValue*>(&value);')
for prop in type_.properties.values():
c.Concat(self._InitializePropertyToDefault(prop, 'out'))
for prop in type_.properties.values():
c.Concat(self._GenerateTypePopulateProperty(prop, 'dict', 'out'))
if type_.additional_properties is not None:
if type_.additional_properties.property_type == PropertyType.ANY:
c.Append('out->additional_properties.MergeDictionary(dict);')
else:
cpp_type = self._type_helper.GetCppType(type_.additional_properties,
is_in_container=True)
(c.Append('for (base::DictionaryValue::Iterator it(*dict);')
.Sblock(' !it.IsAtEnd(); it.Advance()) {')
.Append('%s tmp;' % cpp_type)
.Concat(self._GeneratePopulateVariableFromValue(
type_.additional_properties,
'(&it.value())',
'tmp',
'false'))
.Append('out->additional_properties[it.key()] = tmp;')
.Eblock('}')
)
c.Append('return true;')
(c.Eblock('}')
.Substitute({'namespace': cpp_namespace, 'name': classname}))
return c
def _GenerateValueIsTypeExpression(self, var, type_):
real_type = self._type_helper.FollowRef(type_)
if real_type.property_type is PropertyType.CHOICES:
return '(%s)' % ' || '.join(self._GenerateValueIsTypeExpression(var,
choice)
for choice in real_type.choices)
return '%s.IsType(%s)' % (var, cpp_util.GetValueType(real_type))
def _GenerateTypePopulateProperty(self, prop, src, dst):
"""Generate the code to populate a single property in a type.
src: base::DictionaryValue*
dst: Type*
"""
c = Code()
value_var = prop.unix_name + '_value'
c.Append('const base::Value* %(value_var)s = NULL;')
if prop.optional:
(c.Sblock(
'if (%(src)s->GetWithoutPathExpansion("%(key)s", &%(value_var)s)) {')
.Concat(self._GeneratePopulatePropertyFromValue(
prop, value_var, dst, 'false')))
underlying_type = self._type_helper.FollowRef(prop.type_)
if underlying_type.property_type == PropertyType.ENUM:
(c.Append('} else {')
.Append('%%(dst)s->%%(name)s = %s;' %
self._type_helper.GetEnumNoneValue(prop.type_)))
c.Eblock('}')
else:
(c.Sblock(
'if (!%(src)s->GetWithoutPathExpansion("%(key)s", &%(value_var)s)) {')
.Concat(self._GenerateError('"\'%%(key)s\' is required"'))
.Append('return false;')
.Eblock('}')
.Concat(self._GeneratePopulatePropertyFromValue(
prop, value_var, dst, 'false'))
)
c.Append()
c.Substitute({
'value_var': value_var,
'key': prop.name,
'src': src,
'dst': dst,
'name': prop.unix_name
})
return c
def _GenerateTypeFromValue(self, cpp_namespace, type_):
classname = cpp_util.Classname(schema_util.StripNamespace(type_.name))
c = Code()
(c.Append('// static')
.Append('scoped_ptr<%s> %s::FromValue(%s) {' % (classname,
cpp_namespace, self._GenerateParams(('const base::Value& value',))))
.Append(' scoped_ptr<%s> out(new %s());' % (classname, classname))
.Append(' if (!Populate(%s))' % self._GenerateArgs(
('value', 'out.get()')))
.Append(' return scoped_ptr<%s>();' % classname)
.Append(' return out.Pass();')
.Append('}')
)
return c
def _GenerateTypeToValue(self, cpp_namespace, type_):
"""Generates a function that serializes the type into a base::Value.
E.g. for type "Foo" generates Foo::ToValue()
"""
if type_.property_type == PropertyType.OBJECT:
return self._GenerateObjectTypeToValue(cpp_namespace, type_)
elif type_.property_type == PropertyType.CHOICES:
return self._GenerateChoiceTypeToValue(cpp_namespace, type_)
else:
raise ValueError("Unsupported property type %s" % type_.type_)
def _GenerateObjectTypeToValue(self, cpp_namespace, type_):
"""Generates a function that serializes an object-representing type
into a base::DictionaryValue.
"""
c = Code()
(c.Sblock('scoped_ptr<base::DictionaryValue> %s::ToValue() const {' %
cpp_namespace)
.Append('scoped_ptr<base::DictionaryValue> value('
'new base::DictionaryValue());')
.Append()
)
for prop in type_.properties.values():
if prop.optional:
# Optional enum values are generated with a NONE enum value.
underlying_type = self._type_helper.FollowRef(prop.type_)
if underlying_type.property_type == PropertyType.ENUM:
c.Sblock('if (%s != %s) {' %
(prop.unix_name,
self._type_helper.GetEnumNoneValue(prop.type_)))
else:
c.Sblock('if (%s.get()) {' % prop.unix_name)
# ANY is a base::Value which is abstract and cannot be a direct member, so
# it will always be a pointer.
is_ptr = prop.optional or prop.type_.property_type == PropertyType.ANY
c.Append('value->SetWithoutPathExpansion("%s", %s);' % (
prop.name,
self._CreateValueFromType(prop.type_,
'this->%s' % prop.unix_name,
is_ptr=is_ptr)))
if prop.optional:
c.Eblock('}')
if type_.additional_properties is not None:
if type_.additional_properties.property_type == PropertyType.ANY:
c.Append('value->MergeDictionary(&additional_properties);')
else:
# Non-copyable types will be wrapped in a linked_ptr for inclusion in
# maps, so we need to unwrap them.
needs_unwrap = (
not self._type_helper.IsCopyable(type_.additional_properties))
cpp_type = self._type_helper.GetCppType(type_.additional_properties,
is_in_container=True)
(c.Sblock('for (std::map<std::string, %s>::const_iterator it =' %
cpp_util.PadForGenerics(cpp_type))
.Append(' additional_properties.begin();')
.Append(' it != additional_properties.end(); ++it) {')
.Append('value->SetWithoutPathExpansion(it->first, %s);' %
self._CreateValueFromType(
type_.additional_properties,
'%sit->second' % ('*' if needs_unwrap else '')))
.Eblock('}')
)
return (c.Append()
.Append('return value.Pass();')
.Eblock('}'))
def _GenerateChoiceTypeToValue(self, cpp_namespace, type_):
"""Generates a function that serializes a choice-representing type
into a base::Value.
"""
c = Code()
c.Sblock('scoped_ptr<base::Value> %s::ToValue() const {' % cpp_namespace)
c.Append('scoped_ptr<base::Value> result;')
for choice in type_.choices:
choice_var = 'as_%s' % choice.unix_name
(c.Sblock('if (%s) {' % choice_var)
.Append('DCHECK(!result) << "Cannot set multiple choices for %s";' %
type_.unix_name)
.Append('result.reset(%s);' %
self._CreateValueFromType(choice, '*%s' % choice_var))
.Eblock('}')
)
(c.Append('DCHECK(result) << "Must set at least one choice for %s";' %
type_.unix_name)
.Append('return result.Pass();')
.Eblock('}')
)
return c
def _GenerateFunction(self, function):
"""Generates the definitions for function structs.
"""
c = Code()
# TODO(kalman): use function.unix_name not Classname.
function_namespace = cpp_util.Classname(function.name)
# Windows has a #define for SendMessage, so to avoid any issues, we need
# to not use the name.
if function_namespace == 'SendMessage':
function_namespace = 'PassMessage'
(c.Append('namespace %s {' % function_namespace)
.Append()
)
# Params::Populate function
if function.params:
c.Concat(self._GeneratePropertyFunctions('Params', function.params))
(c.Append('Params::Params() {}')
.Append('Params::~Params() {}')
.Append()
.Cblock(self._GenerateFunctionParamsCreate(function))
)
# Results::Create function
if function.callback:
c.Concat(self._GenerateCreateCallbackArguments('Results',
function.callback))
c.Append('} // namespace %s' % function_namespace)
return c
def _GenerateEvent(self, event):
# TODO(kalman): use event.unix_name not Classname.
c = Code()
event_namespace = cpp_util.Classname(event.name)
(c.Append('namespace %s {' % event_namespace)
.Append()
.Cblock(self._GenerateEventNameConstant(None, event))
.Cblock(self._GenerateCreateCallbackArguments(None, event))
.Append('} // namespace %s' % event_namespace)
)
return c
def _CreateValueFromType(self, type_, var, is_ptr=False):
"""Creates a base::Value given a type. Generated code passes ownership
to caller.
var: variable or variable*
    E.g. for std::string, generate new base::StringValue(var).
"""
underlying_type = self._type_helper.FollowRef(type_)
if (underlying_type.property_type == PropertyType.CHOICES or
underlying_type.property_type == PropertyType.OBJECT):
if is_ptr:
return '(%s)->ToValue().release()' % var
else:
return '(%s).ToValue().release()' % var
elif (underlying_type.property_type == PropertyType.ANY or
underlying_type.property_type == PropertyType.FUNCTION):
if is_ptr:
vardot = '(%s)->' % var
else:
vardot = '(%s).' % var
return '%sDeepCopy()' % vardot
elif underlying_type.property_type == PropertyType.ENUM:
return 'new base::StringValue(ToString(%s))' % var
elif underlying_type.property_type == PropertyType.BINARY:
if is_ptr:
vardot = var + '->'
else:
vardot = var + '.'
return ('base::BinaryValue::CreateWithCopiedBuffer(%sdata(), %ssize())' %
(vardot, vardot))
elif underlying_type.property_type == PropertyType.ARRAY:
return '%s.release()' % self._util_cc_helper.CreateValueFromArray(
underlying_type,
var,
is_ptr)
elif underlying_type.property_type.is_fundamental:
if is_ptr:
var = '*%s' % var
if underlying_type.property_type == PropertyType.STRING:
return 'new base::StringValue(%s)' % var
else:
return 'new base::FundamentalValue(%s)' % var
else:
raise NotImplementedError('Conversion of %s to base::Value not '
'implemented' % repr(type_.type_))
def _GenerateParamsCheck(self, function, var):
"""Generates a check for the correct number of arguments when creating
Params.
"""
c = Code()
num_required = 0
for param in function.params:
if not param.optional:
num_required += 1
if num_required == len(function.params):
c.Sblock('if (%(var)s.GetSize() != %(total)d) {')
elif not num_required:
c.Sblock('if (%(var)s.GetSize() > %(total)d) {')
else:
c.Sblock('if (%(var)s.GetSize() < %(required)d'
' || %(var)s.GetSize() > %(total)d) {')
(c.Concat(self._GenerateError(
'"expected %%(total)d arguments, got " '
'+ base::IntToString(%%(var)s.GetSize())'))
.Append('return scoped_ptr<Params>();')
.Eblock('}')
.Substitute({
'var': var,
'required': num_required,
'total': len(function.params),
}))
return c
def _GenerateFunctionParamsCreate(self, function):
"""Generate function to create an instance of Params. The generated
function takes a base::ListValue of arguments.
    E.g. for function "Bar", generate Bar::Params::Create().
"""
c = Code()
(c.Append('// static')
.Sblock('scoped_ptr<Params> Params::Create(%s) {' % self._GenerateParams(
['const base::ListValue& args']))
.Concat(self._GenerateParamsCheck(function, 'args'))
.Append('scoped_ptr<Params> params(new Params());'))
for param in function.params:
c.Concat(self._InitializePropertyToDefault(param, 'params'))
for i, param in enumerate(function.params):
# Any failure will cause this function to return. If any argument is
# incorrect or missing, those following it are not processed. Note that
# for optional arguments, we allow missing arguments and proceed because
# there may be other arguments following it.
failure_value = 'scoped_ptr<Params>()'
c.Append()
value_var = param.unix_name + '_value'
(c.Append('const base::Value* %(value_var)s = NULL;')
.Append('if (args.Get(%(i)s, &%(value_var)s) &&')
.Sblock(' !%(value_var)s->IsType(base::Value::TYPE_NULL)) {')
.Concat(self._GeneratePopulatePropertyFromValue(
param, value_var, 'params', failure_value))
.Eblock('}')
)
if not param.optional:
(c.Sblock('else {')
.Concat(self._GenerateError('"\'%%(key)s\' is required"'))
.Append('return %s;' % failure_value)
.Eblock('}'))
c.Substitute({'value_var': value_var, 'i': i, 'key': param.name})
(c.Append()
.Append('return params.Pass();')
.Eblock('}')
.Append()
)
return c
def _GeneratePopulatePropertyFromValue(self,
prop,
src_var,
dst_class_var,
failure_value):
"""Generates code to populate property |prop| of |dst_class_var| (a
pointer) from a Value*. See |_GeneratePopulateVariableFromValue| for
semantics.
"""
return self._GeneratePopulateVariableFromValue(prop.type_,
src_var,
'%s->%s' % (dst_class_var,
prop.unix_name),
failure_value,
is_ptr=prop.optional)
def _GeneratePopulateVariableFromValue(self,
type_,
src_var,
dst_var,
failure_value,
is_ptr=False):
"""Generates code to populate a variable |dst_var| of type |type_| from a
Value* at |src_var|. The Value* is assumed to be non-NULL. In the generated
code, if |dst_var| fails to be populated then Populate will return
|failure_value|.
"""
c = Code()
underlying_type = self._type_helper.FollowRef(type_)
if underlying_type.property_type.is_fundamental:
if is_ptr:
(c.Append('%(cpp_type)s temp;')
.Sblock('if (!%s) {' % cpp_util.GetAsFundamentalValue(
self._type_helper.FollowRef(type_), src_var, '&temp'))
.Concat(self._GenerateError(
'"\'%%(key)s\': expected ' + '%s, got " + %s' % (
type_.name,
self._util_cc_helper.GetValueTypeString(
'%%(src_var)s', True))))
.Append('return %(failure_value)s;')
.Eblock('}')
.Append('%(dst_var)s.reset(new %(cpp_type)s(temp));')
)
else:
(c.Sblock('if (!%s) {' % cpp_util.GetAsFundamentalValue(
self._type_helper.FollowRef(type_),
src_var,
'&%s' % dst_var))
.Concat(self._GenerateError(
'"\'%%(key)s\': expected ' + '%s, got " + %s' % (
type_.name,
self._util_cc_helper.GetValueTypeString(
'%%(src_var)s', True))))
.Append('return %(failure_value)s;')
.Eblock('}')
)
elif underlying_type.property_type == PropertyType.OBJECT:
if is_ptr:
(c.Append('const base::DictionaryValue* dictionary = NULL;')
.Sblock('if (!%(src_var)s->GetAsDictionary(&dictionary)) {')
.Concat(self._GenerateError(
'"\'%%(key)s\': expected dictionary, got " + ' +
self._util_cc_helper.GetValueTypeString('%%(src_var)s', True)))
.Append('return %(failure_value)s;')
.Eblock('}')
.Append('scoped_ptr<%(cpp_type)s> temp(new %(cpp_type)s());')
.Append('if (!%%(cpp_type)s::Populate(%s)) {' % self._GenerateArgs(
('*dictionary', 'temp.get()')))
.Append(' return %(failure_value)s;')
.Append('}')
.Append('%(dst_var)s = temp.Pass();')
)
else:
(c.Append('const base::DictionaryValue* dictionary = NULL;')
.Sblock('if (!%(src_var)s->GetAsDictionary(&dictionary)) {')
.Concat(self._GenerateError(
'"\'%%(key)s\': expected dictionary, got " + ' +
self._util_cc_helper.GetValueTypeString('%%(src_var)s', True)))
.Append('return %(failure_value)s;')
.Eblock('}')
.Append('if (!%%(cpp_type)s::Populate(%s)) {' % self._GenerateArgs(
('*dictionary', '&%(dst_var)s')))
.Append(' return %(failure_value)s;')
.Append('}')
)
elif underlying_type.property_type == PropertyType.FUNCTION:
if is_ptr:
c.Append('%(dst_var)s.reset(new base::DictionaryValue());')
elif underlying_type.property_type == PropertyType.ANY:
c.Append('%(dst_var)s.reset(%(src_var)s->DeepCopy());')
elif underlying_type.property_type == PropertyType.ARRAY:
# util_cc_helper deals with optional and required arrays
(c.Append('const base::ListValue* list = NULL;')
.Sblock('if (!%(src_var)s->GetAsList(&list)) {')
.Concat(self._GenerateError(
'"\'%%(key)s\': expected list, got " + ' +
self._util_cc_helper.GetValueTypeString('%%(src_var)s', True)))
.Append('return %(failure_value)s;')
.Eblock('}'))
item_type = self._type_helper.FollowRef(underlying_type.item_type)
if item_type.property_type == PropertyType.ENUM:
c.Concat(self._GenerateListValueToEnumArrayConversion(
item_type,
'list',
dst_var,
failure_value,
is_ptr=is_ptr))
else:
(c.Sblock('if (!%s) {' % self._util_cc_helper.PopulateArrayFromList(
underlying_type,
'list',
dst_var,
is_ptr))
.Concat(self._GenerateError(
'"unable to populate array \'%%(parent_key)s\'"'))
.Append('return %(failure_value)s;')
.Eblock('}')
)
elif underlying_type.property_type == PropertyType.CHOICES:
if is_ptr:
(c.Append('scoped_ptr<%(cpp_type)s> temp(new %(cpp_type)s());')
.Append('if (!%%(cpp_type)s::Populate(%s))' % self._GenerateArgs(
('*%(src_var)s', 'temp.get()')))
.Append(' return %(failure_value)s;')
.Append('%(dst_var)s = temp.Pass();')
)
else:
(c.Append('if (!%%(cpp_type)s::Populate(%s))' % self._GenerateArgs(
('*%(src_var)s', '&%(dst_var)s')))
.Append(' return %(failure_value)s;'))
elif underlying_type.property_type == PropertyType.ENUM:
c.Concat(self._GenerateStringToEnumConversion(type_,
src_var,
dst_var,
failure_value))
elif underlying_type.property_type == PropertyType.BINARY:
(c.Sblock('if (!%(src_var)s->IsType(base::Value::TYPE_BINARY)) {')
.Concat(self._GenerateError(
'"\'%%(key)s\': expected binary, got " + ' +
self._util_cc_helper.GetValueTypeString('%%(src_var)s', True)))
.Append('return %(failure_value)s;')
.Eblock('}')
.Append('const base::BinaryValue* binary_value =')
.Append(' static_cast<const base::BinaryValue*>(%(src_var)s);')
)
if is_ptr:
(c.Append('%(dst_var)s.reset(')
.Append(' new std::string(binary_value->GetBuffer(),')
.Append(' binary_value->GetSize()));')
)
else:
(c.Append('%(dst_var)s.assign(binary_value->GetBuffer(),')
.Append(' binary_value->GetSize());')
)
else:
raise NotImplementedError(type_)
if c.IsEmpty():
return c
return Code().Sblock('{').Concat(c.Substitute({
'cpp_type': self._type_helper.GetCppType(type_),
'src_var': src_var,
'dst_var': dst_var,
'failure_value': failure_value,
'key': type_.name,
'parent_key': type_.parent.name
})).Eblock('}')
def _GenerateListValueToEnumArrayConversion(self,
item_type,
src_var,
dst_var,
failure_value,
is_ptr=False):
"""Returns Code that converts a ListValue of string constants from
|src_var| into an array of enums of |type_| in |dst_var|. On failure,
returns |failure_value|.
"""
c = Code()
accessor = '.'
if is_ptr:
accessor = '->'
cpp_type = self._type_helper.GetCppType(item_type, is_in_container=True)
c.Append('%s.reset(new std::vector<%s>);' %
(dst_var, cpp_util.PadForGenerics(cpp_type)))
(c.Sblock('for (base::ListValue::const_iterator it = %s->begin(); '
'it != %s->end(); ++it) {' % (src_var, src_var))
.Append('%s tmp;' % self._type_helper.GetCppType(item_type))
.Concat(self._GenerateStringToEnumConversion(item_type,
'(*it)',
'tmp',
failure_value))
.Append('%s%spush_back(tmp);' % (dst_var, accessor))
.Eblock('}')
)
return c
def _GenerateStringToEnumConversion(self,
type_,
src_var,
dst_var,
failure_value):
"""Returns Code that converts a string type in |src_var| to an enum with
type |type_| in |dst_var|. In the generated code, if |src_var| is not
a valid enum name then the function will return |failure_value|.
"""
c = Code()
enum_as_string = '%s_as_string' % type_.unix_name
(c.Append('std::string %s;' % enum_as_string)
.Sblock('if (!%s->GetAsString(&%s)) {' % (src_var, enum_as_string))
.Concat(self._GenerateError(
'"\'%%(key)s\': expected string, got " + ' +
self._util_cc_helper.GetValueTypeString('%%(src_var)s', True)))
.Append('return %s;' % failure_value)
.Eblock('}')
.Append('%s = Parse%s(%s);' % (dst_var,
self._type_helper.GetCppType(type_),
enum_as_string))
.Sblock('if (%s == %s) {' % (dst_var,
self._type_helper.GetEnumNoneValue(type_)))
.Concat(self._GenerateError(
'\"\'%%(key)s\': expected \\"' +
'\\" or \\"'.join(
enum_value.name
for enum_value in self._type_helper.FollowRef(type_).enum_values) +
'\\", got \\"" + %s + "\\""' % enum_as_string))
.Append('return %s;' % failure_value)
.Eblock('}')
.Substitute({'src_var': src_var, 'key': type_.name})
)
return c
def _GeneratePropertyFunctions(self, namespace, params):
"""Generates the member functions for a list of parameters.
"""
return self._GenerateTypes(namespace, (param.type_ for param in params))
def _GenerateTypes(self, namespace, types):
"""Generates the member functions for a list of types.
"""
c = Code()
for type_ in types:
c.Cblock(self._GenerateType(namespace, type_))
return c
def _GenerateEnumToString(self, cpp_namespace, type_):
"""Generates ToString() which gets the string representation of an enum.
"""
c = Code()
classname = cpp_util.Classname(schema_util.StripNamespace(type_.name))
if cpp_namespace is not None:
c.Append('// static')
maybe_namespace = '' if cpp_namespace is None else '%s::' % cpp_namespace
c.Sblock('std::string %sToString(%s enum_param) {' %
(maybe_namespace, classname))
c.Sblock('switch (enum_param) {')
for enum_value in self._type_helper.FollowRef(type_).enum_values:
(c.Append('case %s: ' % self._type_helper.GetEnumValue(type_, enum_value))
.Append(' return "%s";' % enum_value.name))
(c.Append('case %s:' % self._type_helper.GetEnumNoneValue(type_))
.Append(' return "";')
.Eblock('}')
.Append('NOTREACHED();')
.Append('return "";')
.Eblock('}')
)
return c
def _GenerateEnumFromString(self, cpp_namespace, type_):
"""Generates FromClassNameString() which gets an enum from its string
representation.
"""
c = Code()
classname = cpp_util.Classname(schema_util.StripNamespace(type_.name))
if cpp_namespace is not None:
c.Append('// static')
maybe_namespace = '' if cpp_namespace is None else '%s::' % cpp_namespace
c.Sblock('%s%s %sParse%s(const std::string& enum_string) {' %
(maybe_namespace, classname, maybe_namespace, classname))
for i, enum_value in enumerate(
self._type_helper.FollowRef(type_).enum_values):
# This is broken up into all ifs with no else ifs because we get
# "fatal error C1061: compiler limit : blocks nested too deeply"
# on Windows.
(c.Append('if (enum_string == "%s")' % enum_value.name)
.Append(' return %s;' %
self._type_helper.GetEnumValue(type_, enum_value)))
(c.Append('return %s;' % self._type_helper.GetEnumNoneValue(type_))
.Eblock('}')
)
return c
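  # Illustrative only (not emitted verbatim by this file): for an enum type
  # "State" with values "active" and "idle", the parser generated above looks
  # roughly like
  #
  #   State ParseState(const std::string& enum_string) {
  #     if (enum_string == "active")
  #       return <enum value constant for "active">;
  #     if (enum_string == "idle")
  #       return <enum value constant for "idle">;
  #     return <the NONE enum value>;
  #   }
  #
  # The exact constant names come from the type helper's GetEnumValue and
  # GetEnumNoneValue, which are defined elsewhere.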
def _GenerateCreateCallbackArguments(self, function_scope, callback):
"""Generate all functions to create Value parameters for a callback.
    E.g. for function "Bar", generate Bar::Results::Create
    E.g. for event "Baz", generate Baz::Create
function_scope: the function scope path, e.g. Foo::Bar for the function
Foo::Bar::Baz(). May be None if there is no function scope.
callback: the Function object we are creating callback arguments for.
"""
c = Code()
params = callback.params
c.Concat(self._GeneratePropertyFunctions(function_scope, params))
(c.Sblock('scoped_ptr<base::ListValue> %(function_scope)s'
'Create(%(declaration_list)s) {')
.Append('scoped_ptr<base::ListValue> create_results('
'new base::ListValue());')
)
declaration_list = []
for param in params:
declaration_list.append(cpp_util.GetParameterDeclaration(
param, self._type_helper.GetCppType(param.type_)))
c.Append('create_results->Append(%s);' %
self._CreateValueFromType(param.type_, param.unix_name))
c.Append('return create_results.Pass();')
c.Eblock('}')
c.Substitute({
'function_scope': ('%s::' % function_scope) if function_scope else '',
'declaration_list': ', '.join(declaration_list),
'param_names': ', '.join(param.unix_name for param in params)
})
return c
def _GenerateEventNameConstant(self, function_scope, event):
"""Generates a constant string array for the event name.
"""
c = Code()
c.Append('const char kEventName[] = "%s.%s";' % (
self._namespace.name, event.name))
return c
def _InitializePropertyToDefault(self, prop, dst):
"""Initialize a model.Property to its default value inside an object.
    E.g. for optional enum "state", generate dst->state = STATE_NONE;
dst: Type*
"""
c = Code()
underlying_type = self._type_helper.FollowRef(prop.type_)
if (underlying_type.property_type == PropertyType.ENUM and
prop.optional):
c.Append('%s->%s = %s;' % (
dst,
prop.unix_name,
self._type_helper.GetEnumNoneValue(prop.type_)))
return c
def _GenerateError(self, body):
"""Generates an error message pertaining to population failure.
    E.g. 'expected bool, got int'
"""
c = Code()
if not self._generate_error_messages:
return c
(c.Append('if (error)')
.Append(' *error = base::UTF8ToUTF16(' + body + ');'))
return c
def _GenerateParams(self, params):
"""Builds the parameter list for a function, given an array of parameters.
"""
if self._generate_error_messages:
params = list(params) + ['base::string16* error']
return ', '.join(str(p) for p in params)
def _GenerateArgs(self, args):
"""Builds the argument list for a function, given an array of arguments.
"""
if self._generate_error_messages:
args = list(args) + ['error']
return ', '.join(str(a) for a in args)
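  # Illustrative sketch (not part of the generator): when error-message
  # generation is enabled, _GenerateParams and _GenerateArgs append a trailing
  # error parameter/argument so generated signatures and call sites stay in
  # sync, e.g.
  #
  #   _GenerateParams(('const base::Value& value', 'Foo* out'))
  #     -> 'const base::Value& value, Foo* out, base::string16* error'
  #   _GenerateArgs(('value', 'out.get()'))
  #     -> 'value, out.get(), error'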
|
the-stack_106_24195 | from crawler import crawler_51_job
import schedule
import time
import argparse
import sys
def run_one():
keys=['python','java']
c=crawler_51_job()
for k in keys:
c.set_search_key(k)
c.run()
def schedule_run(t):
schedule.every().day.at(t).do(run_one)
while True:
schedule.run_pending()
time.sleep(1)
if __name__ == "__main__":
print(sys.argv)
    print('Starting the program')
# schedule_run()
run_one()
|
the-stack_106_24196 | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import sphinx_bootstrap_theme
# sys.path.insert(0, os.path.abspath("../packages/python/plotly"))
# -- Project information -----------------------------------------------------
project = ""
copyright = "2019, plotly team"
author = "plotly team"
# The short X.Y version
version = ""
# The full version, including alpha/beta/rc tags
release = "4.6.0"
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.napoleon",
]
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
source_suffix = [".rst", ".md"]
# source_suffix = '.rst'
source_parsers = {".md": "recommonmark.parser.CommonMarkParser"}
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
default_role = "literal"
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "bootstrap"
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
html_logo = "_static/logo.png"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
"analytics_id": "UA-39373211-1", # not supported by this theme
"bootswatch_theme": "flatly",
"source_link_position": "no",
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
html_favicon = "_static/favicon.ico"
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "plotlydoc"
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "plotly.tex", "plotly Documentation", "plotly team", "manual")
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "plotly", "plotly Documentation", [author], 1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"plotly",
"plotly Documentation",
author,
"plotly",
"One line description of project.",
"Miscellaneous",
)
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ["search.html"]
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {"https://docs.python.org/": None}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
def setup(app):
app.add_stylesheet("plotly-style.css") # also can be a full URL
|
the-stack_106_24197 | #! /usr/bin/env python3
import os
from steves_utils.ORACLE.utils_v2 import (ALL_DISTANCES_FEET, ALL_RUNS,
ALL_SERIAL_NUMBERS,
serial_number_to_id)
from steves_utils.papermill_support import run_trials_with_papermill
###########################################
# papermill parameters
###########################################
TRIALS_PATH = "./trials"
NOTEBOOK_OUT_NAME = "trial.ipynb"
NOTEBOOK_TEMPLATE_PATH = os.path.realpath("../../../templates/cnn_template.ipynb")
BEST_MODEL_PATH = "./best_model.pth"
SAVE_BEST_MODEL=False
###########################################
# Build all experiment json parameters
###########################################
base_parameters = {}
base_parameters["experiment_name"] = "cnn_2:oracle.run1_limited"
base_parameters["labels"] = ALL_SERIAL_NUMBERS
base_parameters["domains_source"] = [8,32,50,14,20,26,38,44,]
base_parameters["domains_target"] = [8,32,50,14,20,26,38,44,]
base_parameters["pickle_name_source"] = "oracle.Run1_10kExamples_stratified_ds.2022A.pkl"
base_parameters["pickle_name_target"] = "oracle.Run1_10kExamples_stratified_ds.2022A.pkl"
base_parameters["device"] = "cuda"
base_parameters["lr"] = 0.0001
base_parameters["batch_size"] = 128
base_parameters["normalize_source"] = False
base_parameters["normalize_target"] = False
base_parameters["num_examples_per_domain_per_label_source"]=2000
base_parameters["num_examples_per_domain_per_label_target"]=2000
base_parameters["torch_default_dtype"] = "torch.float32"
base_parameters["n_epoch"] = 50
base_parameters["patience"] = 3
base_parameters["criteria_for_best"] = "target_accuracy"
base_parameters["x_net"] = [
{"class": "nnReshape", "kargs": {"shape":[-1, 1, 2, 256]}},
{"class": "Conv2d", "kargs": { "in_channels":1, "out_channels":256, "kernel_size":[1,7], "bias":False, "padding":[0,3], },},
{"class": "ReLU", "kargs": {"inplace": True}},
{"class": "BatchNorm2d", "kargs": {"num_features":256}},
{"class": "Conv2d", "kargs": { "in_channels":256, "out_channels":80, "kernel_size":[2,7], "bias":True, "padding":[0,3], },},
{"class": "ReLU", "kargs": {"inplace": True}},
{"class": "BatchNorm2d", "kargs": {"num_features":80}},
{"class": "Flatten", "kargs": {}},
{"class": "Linear", "kargs": {"in_features": 80*256, "out_features": 256}}, # 80 units per IQ pair
{"class": "ReLU", "kargs": {"inplace": True}},
{"class": "BatchNorm1d", "kargs": {"num_features":256}},
{"class": "Linear", "kargs": {"in_features": 256, "out_features": len(base_parameters["labels"])}},
]
# Parameters relevant to results
# These parameters will basically never need to change
base_parameters["NUM_LOGS_PER_EPOCH"] = 10
base_parameters["BEST_MODEL_PATH"] = BEST_MODEL_PATH
parameters = base_parameters
custom_parameters = []
for seed in [1337, 420, 154325, 7, 500]:
custom_parameters.extend([
{
"dataset_seed": seed,
"seed": seed
},
{
"dataset_seed": seed,
"seed": seed
},
{
"dataset_seed": seed,
"seed": seed
}
])
trials = []
import copy
for custom in custom_parameters:
parameters = copy.deepcopy(base_parameters)
for key,val in custom.items():
parameters[key] = val
trials.append(parameters)
import random
random.seed(1337)
random.shuffle(trials)
###########################################
# Run each experiment using papermill
###########################################
run_trials_with_papermill(
trials=trials,
trials_dir_path=TRIALS_PATH,
notebook_out_name=NOTEBOOK_OUT_NAME,
notebook_template_path=NOTEBOOK_TEMPLATE_PATH,
best_model_path=BEST_MODEL_PATH,
    save_best_model=SAVE_BEST_MODEL
)
|
the-stack_106_24199 | # coding=utf-8
# Copyright 2018 The Dopamine Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Gym-specific (non-Atari) utilities.
Some network specifications specific to certain Gym environments are provided
here.
Includes a wrapper class around Gym environments. This class makes general Gym
environments conformant with the API Dopamine is expecting.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import math
import gym
import numpy as np
import tensorflow as tf
import gin.tf
CARTPOLE_MIN_VALS = np.array([-2.4, -5., -math.pi/12., -math.pi*2.])
CARTPOLE_MAX_VALS = np.array([2.4, 5., math.pi/12., math.pi*2.])
ACROBOT_MIN_VALS = np.array([-1., -1., -1., -1., -5., -5.])
ACROBOT_MAX_VALS = np.array([1., 1., 1., 1., 5., 5.])
gin.constant('gym_lib.CARTPOLE_OBSERVATION_SHAPE', (4, 1))
gin.constant('gym_lib.CARTPOLE_OBSERVATION_DTYPE', tf.float64)
gin.constant('gym_lib.CARTPOLE_STACK_SIZE', 1)
gin.constant('gym_lib.ACROBOT_OBSERVATION_SHAPE', (6, 1))
gin.constant('gym_lib.ACROBOT_OBSERVATION_DTYPE', tf.float64)
gin.constant('gym_lib.ACROBOT_STACK_SIZE', 1)
@gin.configurable
def create_gym_environment(environment_name=None, version='v0'):
"""Wraps a Gym environment with some basic preprocessing.
Args:
environment_name: str, the name of the environment to run.
version: str, version of the environment to run.
Returns:
A Gym environment with some standard preprocessing.
"""
assert environment_name is not None
full_game_name = '{}-{}'.format(environment_name, version)
env = gym.make(full_game_name)
# Strip out the TimeLimit wrapper from Gym, which caps us at 200 steps.
env = env.env
# Wrap the returned environment in a class which conforms to the API expected
# by Dopamine.
env = GymPreprocessing(env)
return env
@gin.configurable
def _basic_discrete_domain_network(min_vals, max_vals, num_actions, state,
num_atoms=None):
"""Builds a basic network for discrete domains, rescaling inputs to [-1, 1].
Args:
min_vals: float, minimum attainable values (must be same shape as `state`).
max_vals: float, maximum attainable values (must be same shape as `state`).
num_actions: int, number of actions.
state: `tf.Tensor`, the state input.
num_atoms: int or None, if None will construct a DQN-style network,
otherwise will construct a Rainbow-style network.
Returns:
The Q-values for DQN-style agents or logits for Rainbow-style agents.
"""
net = tf.cast(state, tf.float32)
net = tf.contrib.slim.flatten(net)
net -= min_vals
net /= max_vals - min_vals
net = 2.0 * net - 1.0 # Rescale in range [-1, 1].
net = tf.contrib.slim.fully_connected(net, 512)
net = tf.contrib.slim.fully_connected(net, 512)
if num_atoms is None:
# We are constructing a DQN-style network.
return tf.contrib.slim.fully_connected(net, num_actions, activation_fn=None)
else:
# We are constructing a rainbow-style network.
return tf.contrib.slim.fully_connected(net, num_actions * num_atoms,
activation_fn=None)
@gin.configurable
def cartpole_dqn_network(num_actions, network_type, state):
"""Builds the deep network used to compute the agent's Q-values.
It rescales the input features to a range that yields improved performance.
Args:
num_actions: int, number of actions.
network_type: namedtuple, collection of expected values to return.
state: `tf.Tensor`, contains the agent's current state.
Returns:
net: _network_type object containing the tensors output by the network.
"""
q_values = _basic_discrete_domain_network(
CARTPOLE_MIN_VALS, CARTPOLE_MAX_VALS, num_actions, state)
return network_type(q_values)
class FourierBasis(object):
"""Fourier Basis linear function approximation.
Requires the ranges for each dimension, and is thus able to use only sine or
cosine (and uses cosine). So, this has half the coefficients that a full
Fourier approximation would use.
Many thanks to Will Dabney (wdabney@) for this implementation.
From the paper:
G.D. Konidaris, S. Osentoski and P.S. Thomas. (2011)
Value Function Approximation in Reinforcement Learning using the Fourier Basis
"""
def __init__(self, nvars, min_vals=0, max_vals=None, order=3):
self.order = order
self.min_vals = min_vals
self.max_vals = max_vals
terms = itertools.product(range(order + 1), repeat=nvars)
# Removing first iterate because it corresponds to the constant bias
self.multipliers = tf.constant(
[list(map(int, x)) for x in terms][1:], dtype=tf.float32)
def scale(self, values):
shifted = values - self.min_vals
if self.max_vals is None:
return shifted
return shifted / (self.max_vals - self.min_vals)
def compute_features(self, features):
# Important to rescale features to be between [0,1]
scaled = self.scale(features)
return tf.cos(np.pi * tf.matmul(scaled, self.multipliers, transpose_b=True))
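  # Rough numbers for intuition (illustrative, not used by the class): with
  # order 3 and the 4-dimensional CartPole state, itertools.product yields
  # (3 + 1) ** 4 = 256 coefficient vectors; dropping the constant term leaves
  # 255 cosine features, so self.multipliers has shape [255, 4].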
@gin.configurable
def fourier_dqn_network(min_vals,
max_vals,
num_actions,
state,
fourier_basis_order=3):
"""Builds the function approximator used to compute the agent's Q-values.
It uses FourierBasis features and a linear layer.
Args:
min_vals: float, minimum attainable values (must be same shape as `state`).
max_vals: float, maximum attainable values (must be same shape as `state`).
num_actions: int, number of actions.
state: `tf.Tensor`, contains the agent's current state.
fourier_basis_order: int, order of the Fourier basis functions.
Returns:
The Q-values for DQN-style agents or logits for Rainbow-style agents.
"""
net = tf.cast(state, tf.float32)
net = tf.contrib.slim.flatten(net)
# Feed state through Fourier basis.
feature_generator = FourierBasis(
net.get_shape().as_list()[-1],
min_vals,
max_vals,
order=fourier_basis_order)
net = feature_generator.compute_features(net)
# Q-values are always linear w.r.t. last layer.
q_values = tf.contrib.slim.fully_connected(
net, num_actions, activation_fn=None, biases_initializer=None)
return q_values
def cartpole_fourier_dqn_network(num_actions, network_type, state):
"""Builds the function approximator used to compute the agent's Q-values.
It uses the Fourier basis features and a linear function approximator.
Args:
num_actions: int, number of actions.
network_type: namedtuple, collection of expected values to return.
state: `tf.Tensor`, contains the agent's current state.
Returns:
net: _network_type object containing the tensors output by the network.
"""
q_values = fourier_dqn_network(CARTPOLE_MIN_VALS, CARTPOLE_MAX_VALS,
num_actions, state)
return network_type(q_values)
@gin.configurable
def cartpole_rainbow_network(num_actions, num_atoms, support, network_type,
state):
"""Build the deep network used to compute the agent's Q-value distributions.
Args:
num_actions: int, number of actions.
num_atoms: int, the number of buckets of the value function distribution.
support: tf.linspace, the support of the Q-value distribution.
network_type: `namedtuple`, collection of expected values to return.
state: `tf.Tensor`, contains the agent's current state.
Returns:
net: _network_type object containing the tensors output by the network.
"""
net = _basic_discrete_domain_network(
CARTPOLE_MIN_VALS, CARTPOLE_MAX_VALS, num_actions, state,
num_atoms=num_atoms)
logits = tf.reshape(net, [-1, num_actions, num_atoms])
probabilities = tf.contrib.layers.softmax(logits)
q_values = tf.reduce_sum(support * probabilities, axis=2)
return network_type(q_values, logits, probabilities)
@gin.configurable
def acrobot_dqn_network(num_actions, network_type, state):
"""Builds the deep network used to compute the agent's Q-values.
It rescales the input features to a range that yields improved performance.
Args:
num_actions: int, number of actions.
network_type: namedtuple, collection of expected values to return.
state: `tf.Tensor`, contains the agent's current state.
Returns:
net: _network_type object containing the tensors output by the network.
"""
q_values = _basic_discrete_domain_network(
ACROBOT_MIN_VALS, ACROBOT_MAX_VALS, num_actions, state)
return network_type(q_values)
@gin.configurable
def acrobot_fourier_dqn_network(num_actions, network_type, state):
"""Builds the function approximator used to compute the agent's Q-values.
It uses the Fourier basis features and a linear function approximator.
Args:
num_actions: int, number of actions.
network_type: namedtuple, collection of expected values to return.
state: `tf.Tensor`, contains the agent's current state.
Returns:
net: _network_type object containing the tensors output by the network.
"""
q_values = fourier_dqn_network(ACROBOT_MIN_VALS, ACROBOT_MAX_VALS,
num_actions, state)
return network_type(q_values)
@gin.configurable
def acrobot_rainbow_network(num_actions, num_atoms, support, network_type,
state):
"""Build the deep network used to compute the agent's Q-value distributions.
Args:
num_actions: int, number of actions.
num_atoms: int, the number of buckets of the value function distribution.
support: tf.linspace, the support of the Q-value distribution.
network_type: `namedtuple`, collection of expected values to return.
state: `tf.Tensor`, contains the agent's current state.
Returns:
net: _network_type object containing the tensors output by the network.
"""
net = _basic_discrete_domain_network(
ACROBOT_MIN_VALS, ACROBOT_MAX_VALS, num_actions, state,
num_atoms=num_atoms)
logits = tf.reshape(net, [-1, num_actions, num_atoms])
probabilities = tf.contrib.layers.softmax(logits)
q_values = tf.reduce_sum(support * probabilities, axis=2)
return network_type(q_values, logits, probabilities)
@gin.configurable
class GymPreprocessing(object):
"""A Wrapper class around Gym environments."""
def __init__(self, environment):
self.environment = environment
self.game_over = False
@property
def observation_space(self):
return self.environment.observation_space
@property
def action_space(self):
return self.environment.action_space
@property
def reward_range(self):
return self.environment.reward_range
@property
def metadata(self):
return self.environment.metadata
def reset(self):
return self.environment.reset()
def step(self, action):
observation, reward, game_over, info = self.environment.step(action)
self.game_over = game_over
return observation, reward, game_over, info
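# Usage sketch (illustrative, not part of the module):
#
#   env = create_gym_environment('CartPole', version='v0')
#   observation = env.reset()
#   observation, reward, game_over, info = env.step(env.action_space.sample())
#
# create_gym_environment strips gym's TimeLimit wrapper and returns the raw
# environment wrapped in GymPreprocessing, so `game_over` here mirrors
# env.game_over.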
|
the-stack_106_24203 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import io
import json
import os
import six
import unicodedata
from collections import OrderedDict, UserDict
from shutil import copyfile
from typing import Iterable, Iterator, Optional, List, Any, Callable, Union
from paddle.utils import try_import
from paddlenlp.utils.downloader import get_path_from_url, COMMUNITY_MODEL_PREFIX
from paddlenlp.utils.env import MODEL_HOME
from paddlenlp.utils.log import logger
from dataclasses import dataclass, field
try:
from functools import lru_cache
except ImportError:
from backports.functools_lru_cache import lru_cache
from ..data.vocab import Vocab
from .utils import InitTrackerMeta, fn_args_to_dict
from collections import OrderedDict
__all__ = [
'PretrainedTokenizer', 'BPETokenizer', 'tokenize_chinese_chars',
'is_chinese_char', 'normalize_chars', 'tokenize_special_chars'
]
class BatchEncoding(UserDict):
def __init__(self, data=None):
super().__init__(data)
def __getitem__(self, item):
if isinstance(item, str):
return self.data[item]
else:
raise KeyError(
"Indexing with integers is not available when using tokenizer.__call__()"
" with return_dict=True. Please set return_dict to False to use integer indexing."
)
def keys(self):
return self.data.keys()
def values(self):
return self.data.values()
def items(self):
return self.data.items()
def convert_to_unicode(text):
"""
Converts `text` to Unicode (if it's not already), assuming utf-8 input.
Args:
text (str|bytes): Text to be converted to unicode.
Returns:
str: converted text.
"""
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
def whitespace_tokenize(text):
"""
    Runs basic whitespace cleaning and splitting on a piece of text.
Args:
        text (str): Text to be tokenized.
Returns:
list(str): Token list.
"""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
def _is_whitespace(char):
"""
Checks whether `chars` is a whitespace character.
"""
    # \t, \n, and \r are technically control characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
def is_chinese_char(cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
(cp >= 0x3400 and cp <= 0x4DBF) or #
(cp >= 0x20000 and cp <= 0x2A6DF) or #
(cp >= 0x2A700 and cp <= 0x2B73F) or #
(cp >= 0x2B740 and cp <= 0x2B81F) or #
(cp >= 0x2B820 and cp <= 0x2CEAF) or
(cp >= 0xF900 and cp <= 0xFAFF) or #
(cp >= 0x2F800 and cp <= 0x2FA1F)): #
return True
return False
def _is_nonnormalized_char(char):
"""Check whther `chars` is a non-normalized character."""
cp = ord(char)
if ((0xFF00 <= cp <= 0xFFEF) or # Halfwidth and Fullwidth Forms
(0xFE50 <= cp <= 0xFE6B) or # Small Form Variants
(0x3358 <= cp <= 0x33FF) or # CJK Compatibility
(0x249C <= cp <= 0x24E9)): # Enclosed Alphanumerics: Ⓛ ⒰
return True
return False
def _is_nonnormalized_numeric(char):
"""Check whether `chars` is a non-normalized numeric character."""
cp = ord(char)
if ((0x2460 <= cp <= 0x249B) or #
(0x24EA <= cp <= 0x24FF) or #
(0x2776 <= cp <= 0x2793) or # Enclosed Alphanumerics
(0x2160 <= cp <= 0x217F)): # Number Forms
return True
return False
def normalize_chars(text):
"""
    Normalize the text for multilingual and Chinese models. Unicode range:
https://www.ling.upenn.edu/courses/Spring_2003/ling538/UnicodeRanges.html
"""
output = []
for char in text:
if _is_nonnormalized_char(char):
for c in unicodedata.normalize("NFKC", char):
output.append(c)
elif _is_nonnormalized_numeric(char):
output.append(" ")
for c in str(int(unicodedata.numeric(char))):
output.append(c)
output.append(" ")
elif ord(char) == 0xF979: # https://www.zhihu.com/question/20697984
output.append("凉")
else:
output.append(char)
return "".join(output)
def _is_symbol(char):
"""Check whether CP is the codepoint of a Symbol character."""
cp = ord(char)
if unicodedata.category(char).startswith('S') or (
cp in
[0x00ad, 0x00b2, 0x00ba, 0x3007, 0x00b5, 0x00d8, 0x014b, 0x01b1]):
return True
return False
def tokenize_special_chars(text):
"""Adds whitespace around any special character."""
output = []
for char in text:
cp = ord(char)
if ((0x3040 <= cp <= 0x30FF) or # Japanese
(0x0370 <= cp <= 0x04FF) or # Greek/Coptic & Cyrillic
(0x0250 <= cp <= 0x02AF) or # IPA
_is_symbol(char)):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
@dataclass(frozen=True, eq=True)
class AddedToken:
"""
    AddedToken represents a token to be added to a Tokenizer. An AddedToken can have special options defining the
way it should behave.
"""
content: str = field(default_factory=str)
single_word: bool = False
lstrip: bool = False
rstrip: bool = False
normalized: bool = True
def __getstate__(self):
return self.__dict__
class Trie:
"""
    Trie in Python. Creates a Trie out of a list of words. The trie is used to split on `added_tokens` in one pass.
    Loose reference: https://en.wikipedia.org/wiki/Trie
"""
def __init__(self):
self.data = {}
def add(self, word: str):
"""
Passes over every char (utf-8 char) on word and recursively adds it to the internal `data` trie representation.
The special key `""` is used to represent termination.
This function is idempotent, adding twice the same word will leave the trie unchanged
Example::
>>> trie = Trie()
>>> trie.add("Hello 友達")
>>> trie.data
{"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}}
>>> trie.add("Hello")
>>> trie.data
{"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}}
"""
if not word:
return
ref = self.data
for char in word:
ref[char] = char in ref and ref[char] or {}
ref = ref[char]
ref[""] = 1
def split(self, text: str) -> List[str]:
"""
        Will look for the words added to the trie within `text`. Output is the original string split along the
boundaries of the words found.
This trie will match the longest possible word first !
Example::
>>> trie = Trie()
>>> trie.split("[CLS] This is a extra_id_100")
["[CLS] This is a extra_id_100"]
>>> trie.add("[CLS]")
>>> trie.add("extra_id_1")
>>> trie.add("extra_id_100")
>>> trie.split("[CLS] This is a extra_id_100")
["[CLS]", " This is a ", "extra_id_100"]
"""
# indexes are counted left of the chars index.
# "hello", index 0, is left of h, index 1 is between h and e.
# index 5 is right of the "o".
# States are going to capture every possible start (indexes as above)
# as keys, and have as values, a pointer to the position in the trie
# where we're at. This is a partial match for now.
# This enables to keep track of multiple matches while we're iterating
# the string
# If the trie contains, "blowing", and "lower" and we encounter the
# string "blower", we need to split into ["b", "lower"].
# This is where we need to keep track of multiple possible starts.
states = OrderedDict()
# This will contain every indices where we need
# to cut.
# We force to cut at offset 0 and len(text) (added later)
offsets = [0]
# This is used by the lookahead which needs to skip over
# some text where the full match exceeded the place in the initial
# for loop
skip = None
# Main loop, Giving this algorithm O(n) complexity
for current, current_char in enumerate(text):
if skip and current < skip:
# Prevents the lookahead for matching twice
# like extra_id_100 and id_100
continue
# This will track every state
# that stop matching, we need to stop tracking them.
# If we look at "lowball", we're going to match "l" (add it to states), "o", "w", then
# fail on "b", we need to remove 0 from the valid states.
to_remove = set()
# Whenever we found a match, we need to drop everything
# this is a greedy algorithm, it will match on the first found token
reset = False
# In this case, we already have partial matches (But unfinished)
for start, trie_pointer in states.items():
if "" in trie_pointer:
# This is a final match, we need to reset and
# store the results in `offsets`.
# Lookahead to match longest first
# Important in case of extra_id_1 vs extra_id_100
# Here we are also actively looking for other earlier partial
# matches
# "[CLS]", "L", we need to match CLS even if L is special
for lookstart, looktrie_pointer in states.items():
if lookstart > start:
# This partial match is later, we can stop looking
break
elif lookstart < start:
# This partial match is earlier, the trie pointer
# was already updated, so index is + 1
lookahead_index = current + 1
end = current + 1
else:
# Here lookstart == start and
# looktrie_pointer == trie_pointer
# It wasn't updated yet so indices are current ones
lookahead_index = current
end = current
next_char = text[
lookahead_index] if lookahead_index < len(
text) else None
while next_char in looktrie_pointer:
looktrie_pointer = looktrie_pointer[next_char]
lookahead_index += 1
if "" in looktrie_pointer:
start = lookstart
end = lookahead_index
skip = lookahead_index
if lookahead_index == len(text):
# End of string
break
next_char = text[lookahead_index]
# End lookahead
# Storing and resetting
offsets.append(start)
offsets.append(end)
reset = True
break
elif current_char in trie_pointer:
# The current character being looked at has a match within the trie
# update the pointer (it will be stored back into states later).
trie_pointer = trie_pointer[current_char]
# Storing back the new pointer into the states.
# Partial matches got longer by one.
states[start] = trie_pointer
else:
# The new character has not match in the trie, we need
# to stop keeping track of this partial match.
# We can't do it directly within the loop because of how
# python iteration works
to_remove.add(start)
# Either clearing the full start (we found a real match)
# Or clearing only the partial matches that didn't work.
if reset:
states = {}
else:
for start in to_remove:
del states[start]
# If this character is a starting character within the trie
# start keeping track of this partial match.
if current_char in self.data:
states[current] = self.data[current_char]
# We have a cut at the end with states.
for start, trie_pointer in states.items():
if "" in trie_pointer:
# This is a final match, we need to reset and
# store the results in `offsets`.
end = len(text)
offsets.append(start)
offsets.append(end)
# Longest cut is always the one with lower start so the first
# item so we need to break.
break
return self.cut_text(text, offsets)
def cut_text(self, text, offsets):
# We have all the offsets now, we just need to do the actual splitting.
# We need to eventually add the first part of the string and the eventual
# last part.
offsets.append(len(text))
tokens = []
start = 0
for end in offsets:
if start > end:
logger.error(
"There was a bug in Trie algorithm in tokenization. Attempting to recover. Please report it anyway."
)
continue
elif start == end:
# This might happen if there's a match at index 0
# we're also preventing zero-width cuts in case of two
# consecutive matches
continue
tokens.append(text[start:end])
start = end
return tokens
def tokenize_chinese_chars(text):
"""Adds whitespace around any CJK character."""
output = []
buff = ""
for char in text:
cp = ord(char)
if is_chinese_char(cp):
if buff != "":
output.append(buff)
buff = ""
output.append(char)
else:
buff += char
if buff != "":
output.append(buff)
return output
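# Example (illustrative): tokenize_chinese_chars("周杰伦abc") returns
# ["周", "杰", "伦", "abc"]; each CJK character becomes its own item while the
# consecutive non-CJK characters stay buffered together as one item.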
@six.add_metaclass(InitTrackerMeta)
class PretrainedTokenizer(object):
"""
The base class for all pretrained tokenizers. It mainly provides common methods
for loading (construction and loading) and saving pretrained tokenizers. Loading
and saving also rely on the following class attributes which should be overridden
by derived classes accordingly:
- **tokenizer_config_file** (str): Represents the file name of tokenizer
configuration for configuration saving and loading in local file system.
The value is `tokenizer_config.json`.
- **resource_files_names** (dict): Represents resources to specific file
names mapping for resource saving and loading in local file system. The
keys of dict representing resource items should be argument names in
tokenizer's `__init__` method, and the values are file names for saving
and loading corresponding resources. The mostly used resources here are
vocabulary file and sentence-piece model file.
- **pretrained_init_configuration** (dict): Provides the tokenizer configurations
of built-in pretrained tokenizers (contrasts to tokenizers in local file
system). It has pretrained tokenizer names as keys (the same as pretrained
model names, such as `bert-base-uncased`), and the values are dict preserving
corresponding configuration for tokenizer initialization.
- **pretrained_resource_files_map** (dict): Provides resource URLs of built-in
pretrained tokenizers (contrasts to tokenizers in local file system). It
has the same keys as `resource_files_names`, and the values are also `dict`
mapping specific pretrained tokenizer names (such as `bert-base-uncased`)
to corresponding resource URLs.
Moreover, methods common to tokenizers for tokenization, token/id conversion
and encoding as model inputs are also provided here.
Besides, metaclass `InitTrackerMeta` is used to create `PretrainedTokenizer`,
by which subclasses can track arguments for initialization automatically
and expose special tokens initialization used as attributes.
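Example:
A minimal illustrative sketch of a subclass (the attribute values shown are
placeholders, not a real tokenizer definition):
.. code-block::
class MyTokenizer(PretrainedTokenizer):
resource_files_names = {"vocab_file": "vocab.txt"}
pretrained_resource_files_map = {"vocab_file": {}}
pretrained_init_configuration = {}
def __init__(self, vocab_file, unk_token="[UNK]"):
self.vocab = self.load_vocabulary(vocab_file, unk_token=unk_token)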
"""
tokenizer_config_file = "tokenizer_config.json"
pretrained_init_configuration = {}
resource_files_names = {} # keys are arguments of __init__
pretrained_resource_files_map = {}
padding_side = 'right'
pad_token_type_id = 0
special_tokens_map_extended = {}
_additional_special_tokens = []
def _wrap_init(self, original_init, *args, **kwargs):
"""
It would be hooked after `__init__` to add special tokens (arguments of
`__init__` whose name ends with `_token`) as attributes of the tokenizer
instance.
"""
# expose tokens as attributes
self.padding_side = kwargs.pop("padding_side", self.padding_side)
assert self.padding_side in [
"right", "left"
], "Padding side must be either left or right"
init_dict = fn_args_to_dict(original_init, *args, **kwargs)
self.added_tokens_encoder = {}
self.added_tokens_decoder = {}
# TODO(guosheng): Use OrderedDict, otherwise `all_special_tokens` returns
# a list without order.
self.tokens_trie = Trie()
self.special_tokens_map = {}
for identifier, value in init_dict.items():
if identifier.endswith('_token'):
self.special_tokens_map[identifier] = value
if identifier == "additional_special_tokens":
assert isinstance(value, (
list, tuple)), f"Value {value} is not a list or tuple"
self._additional_special_tokens += value
assert all(
isinstance(t, (str, AddedToken)) for t in
value), "One of the tokens is not a string or an AddedToken"
self.special_tokens_map[
identifier] = self._additional_special_tokens
self.add_tokens(self.all_special_tokens, special_tokens=True)
additional_special_tokens = []
for token in self.all_special_tokens:
if isinstance(token, AddedToken):
token = token.content
if token not in self.special_tokens_map.values():
additional_special_tokens.append(token)
self.special_tokens_map[
"additional_special_tokens"] = additional_special_tokens
def _build_special_tokens_map_extended(self, **kwargs):
for identifier, token in kwargs.items():
if identifier.endswith('_token') and isinstance(token, AddedToken):
self.special_tokens_map_extended[identifier] = token
def __call__(self,
text,
text_pair=None,
max_seq_len: Optional[int]=None,
stride=0,
is_split_into_words=False,
pad_to_max_seq_len=False,
truncation_strategy="longest_first",
return_position_ids=False,
return_token_type_ids=True,
return_attention_mask=False,
return_length=False,
return_overflowing_tokens=False,
return_special_tokens_mask=False,
return_dict=True,
return_offsets_mapping=False):
"""
Performs tokenization and uses the tokenized tokens to prepare model
inputs. It supports sequence or sequence pair as input, and batch input
is allowed. `self.encode()` or `self.batch_encode()` would be called
separately for single or batch input depending on input format and
`is_split_into_words` argument.
Args:
text (str, List[str] or List[List[str]]):
The sequence or batch of sequences to be processed. One sequence
is a string or a list of strings depending on whether it has been
pretokenized. If each sequence is provided as a list of strings
(pretokenized), you must set `is_split_into_words` as `True` to
disambiguate with a batch of sequences.
text_pair (str, List[str] or List[List[str]], optional):
Same as `text` argument, while it represents for the latter
sequence of the sequence pair.
max_seq_len (int, optional):
If set to a number, will limit the total sequence returned so
that it has a maximum length. If there are overflowing tokens,
those overflowing tokens will be added to the returned dictionary
when `return_overflowing_tokens` is `True`. Defaults to `None`.
stride (int, optional):
Only available for batch input of sequence pair and mainly for
question answering usage. When used for QA, `text` represents questions
and `text_pair` represents contexts. If `stride` is set to a
positive number, the context will be split into multiple spans
where `stride` defines the number of (tokenized) tokens to skip
from the start of one span to get the next span, thus will produce
a bigger batch than inputs to include all spans. Moreover, 'overflow_to_sample'
and 'offset_mapping' preserving the original example and position
information will be added to the returned dictionary. Defaults to 0.
pad_to_max_seq_len (bool, optional):
If set to `True`, the returned sequences would be padded up to
`max_seq_len` specified length according to padding side
(`self.padding_side`) and padding token id. Defaults to `False`.
truncation_strategy (str, optional):
String selected in the following options:
- 'longest_first' (default) Iteratively reduce the inputs sequence
until the input is under `max_seq_len` starting from the longest
one at each token (when there is a pair of input sequences).
- 'only_first': Only truncate the first sequence.
- 'only_second': Only truncate the second sequence.
- 'do_not_truncate': Do not truncate (raise an error if the input
sequence is longer than `max_seq_len`).
Defaults to 'longest_first'.
return_position_ids (bool, optional):
Whether to include tokens position ids in the returned dictionary.
Defaults to `False`.
return_token_type_ids (bool, optional):
Whether to include token type ids in the returned dictionary.
Defaults to `True`.
return_attention_mask (bool, optional):
Whether to include the attention mask in the returned dictionary.
Defaults to `False`.
return_length (bool, optional):
Whether to include the length of each encoded inputs in the
returned dictionary. Defaults to `False`.
return_overflowing_tokens (bool, optional):
Whether to include overflowing token information in the returned
dictionary. Defaults to `False`.
return_special_tokens_mask (bool, optional):
Whether to include special tokens mask information in the returned
dictionary. Defaults to `False`.
return_dict (bool, optional):
Decide the format for returned encoded batch inputs. Only works when
input is a batch of data.
::
- If True, encoded inputs would be a dictionary like:
{'input_ids': [[1, 4444, 4385, 1545, 6712],[1, 4444, 4385]],
'token_type_ids': [[0, 0, 0, 0, 0], [0, 0, 0]]}
- If False, encoded inputs would be a list like:
[{'input_ids': [1, 4444, 4385, 1545, 6712],
'token_type_ids': [0, 0, 0, 0, 0]},
{'input_ids': [1, 4444, 4385], 'token_type_ids': [0, 0, 0]}]
Defaults to `True`.
return_offsets_mapping (bool, optional):
Whether to include the list of pair preserving the index of start
and end char in original input for each token in the returned
dictionary. Would be automatically set to `True` when `stride` > 0.
Defaults to `False`.
Returns:
dict or list[dict] (for batch input):
The dict has the following optional items:
- **input_ids** (list[int] or list[list[int]]): List of token ids to be fed to a model.
- **position_ids** (list[int] or list[list[int]], optional): List of token position ids to be
fed to a model. Included when `return_position_ids` is `True`
- **token_type_ids** (list[int] or list[list[int]], optional): List of token type ids to be
fed to a model. Included when `return_token_type_ids` is `True`.
- **attention_mask** (list[int] or list[list[int]], optional): List of integers valued 0 or 1,
where 0 specifies paddings and should not be attended to by the
model. Included when `return_attention_mask` is `True`.
- **seq_len** (int or list[int], optional): The input_ids length. Included when `return_length`
is `True`.
- **overflowing_tokens** (list[int] or list[list[int]], optional): List of overflowing tokens.
Included when `max_seq_len` is specified and `return_overflowing_tokens`
is True.
- **num_truncated_tokens** (int or list[int], optional): The number of overflowing tokens.
Included when `max_seq_len` is specified and `return_overflowing_tokens`
is True.
- **special_tokens_mask** (list[int] or list[list[int]], optional): List of integers valued 0 or 1,
with 0 specifying special added tokens and 1 specifying sequence tokens.
Included when `return_special_tokens_mask` is `True`.
- **offset_mapping** (list[int], optional): list of pair preserving the
index of start and end char in original input for each token.
For a special token, the index pair is `(0, 0)`. Included when
`return_overflowing_tokens` is True or `stride` > 0.
- **overflow_to_sample** (int or list[int], optional): Index of example from which this
feature is generated. Included when `stride` works.
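Example:
An illustrative sketch (the returned ids are placeholders; real values
depend on the loaded vocabulary):
.. code-block::
from paddlenlp.transformers import BertTokenizer
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
# single sequence -> dict
single = tokenizer("Welcome to use PaddlePaddle")
print(single.keys())  # e.g. dict_keys(['input_ids', 'token_type_ids'])
# batch of sequences -> with return_dict=True (default) the result
# behaves like a dict of lists
batch = tokenizer(["Welcome to use PaddlePaddle", "Hello world"])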
"""
# Input type checking for clearer error
assert isinstance(text, str) or (
isinstance(text, (list, tuple)) and (len(text) == 0 or (
isinstance(text[0], str) or
(isinstance(text[0], (list, tuple)) and
(len(text[0]) == 0 or isinstance(text[0][0], str)))))
), ("text input must of type `str` (single example), `List[str]` (batch or single pretokenized example) "
"or `List[List[str]]` (batch of pretokenized examples).")
assert (text_pair is None or isinstance(text_pair, str) or (
isinstance(text_pair, (list, tuple)) and (len(text_pair) == 0 or (
isinstance(text_pair[0], str) or
(isinstance(text_pair[0], (list, tuple)) and
(len(text_pair[0]) == 0 or isinstance(text_pair[0][0], str)))))
)), (
"text_pair input must of type `str` (single example), `List[str]` (batch or single pretokenized example) "
"or `List[List[str]]` (batch of pretokenized examples).")
is_batched = bool(
(not is_split_into_words and isinstance(text, (list, tuple))) or
(is_split_into_words and isinstance(text, (list, tuple)) and
text and isinstance(text[0], (list, tuple))))
if is_batched:
batch_text_or_text_pairs = list(zip(
text, text_pair)) if text_pair is not None else text
return self.batch_encode(
batch_text_or_text_pairs=batch_text_or_text_pairs,
max_seq_len=max_seq_len,
stride=stride,
is_split_into_words=is_split_into_words,
pad_to_max_seq_len=pad_to_max_seq_len,
truncation_strategy=truncation_strategy,
return_position_ids=return_position_ids,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_length=return_length,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_dict=return_dict,
return_offsets_mapping=return_offsets_mapping)
else:
return self.encode(
text=text,
text_pair=text_pair,
max_seq_len=max_seq_len,
pad_to_max_seq_len=pad_to_max_seq_len,
truncation_strategy=truncation_strategy,
return_position_ids=return_position_ids,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_length=return_length,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping)
@property
def all_special_tokens(self):
"""
list: All the special tokens ('<unk>', '<cls>'...) corresponding to
special token arguments in `__init__` (arguments ending with '_token').
"""
all_toks = []
set_attr = self.special_tokens_map
for attr_value in set_attr.values():
all_toks = all_toks + (list(attr_value) if isinstance(attr_value, (
list, tuple)) else [attr_value])
all_toks = list(OrderedDict.fromkeys(all_toks))
return all_toks
@property
def all_special_tokens_extended(self):
"""
list: All the special tokens ('<unk>', '<cls>'...) corresponding to
special token arguments in `__init__` (arguments ending with '_token').
"""
all_toks = []
set_attr = self.special_tokens_map_extended
for attr_value in set_attr.values():
all_toks = all_toks + (list(attr_value) if isinstance(attr_value, (
list, tuple)) else [attr_value])
all_toks = list(set(all_toks))
return all_toks
@property
def all_special_ids(self):
"""
list: All the token ids corresponding to all the special tokens.
"""
all_toks = self.all_special_tokens
all_ids = self.convert_tokens_to_ids(all_toks)
return all_ids
def __len__(self):
"""
Size of the full vocabulary with the added tokens.
"""
return self.vocab_size + len(self.added_tokens_encoder)
def _add_tokens(self, new_tokens, special_tokens=True):
if special_tokens:
add_special_tokens = []
add_special_tokens_extended = []
for token in new_tokens:
if isinstance(token, AddedToken):
if token.content not in add_special_tokens:
self.tokens_trie.add(token.content)
add_special_tokens_extended.append(token)
add_special_tokens.append(token.content)
if token.content != self.unk_token and self.convert_tokens_to_ids(
token.content) == self.convert_tokens_to_ids(
self.unk_token):
self.added_tokens_encoder[token.content] = len(self)
self.added_tokens_decoder[len(self) -
1] = token.content
else:
if token not in add_special_tokens:
self.tokens_trie.add(token)
add_special_tokens.append(token)
if token != self.unk_token and self.convert_tokens_to_ids(
token) == self.convert_tokens_to_ids(
self.unk_token):
self.added_tokens_encoder[token] = len(self)
self.added_tokens_decoder[len(self) - 1] = token
self.special_tokens_map_extended[
"additional_special_tokens"] = add_special_tokens_extended
else:
for token in new_tokens:
if not isinstance(token, str):
raise TypeError(
f"Token {token} is not a string but a {type(token)}.")
if hasattr(self, "do_lower_case") and self.do_lower_case:
token = token.lower()
if token not in self.added_tokens_encoder and token != self.unk_token and self.convert_tokens_to_ids(
token) == self.convert_tokens_to_ids(self.unk_token):
self.added_tokens_encoder[token] = len(self)
self.added_tokens_decoder[len(self) - 1] = token
return len(self.added_tokens_encoder)
def add_tokens(self, new_tokens, special_tokens=True):
if not new_tokens:
return 0
if not isinstance(new_tokens, (list, tuple)):
new_tokens = [new_tokens]
return self._add_tokens(new_tokens, special_tokens=special_tokens)
def prepare_for_tokenization(self, text, **kwargs):
return text
def tokenize(self, text, **kwargs):
all_special_tokens_extended = dict(
(t.content, t) for t in self.all_special_tokens_extended
if isinstance(t, AddedToken))
no_split_token = set(self.all_special_tokens)
text = self.prepare_for_tokenization(text, **kwargs)
tokens = self.tokens_trie.split(text)
for i, token in enumerate(tokens):
if token in no_split_token:
tok_extended = all_special_tokens_extended.get(token, None)
left = tokens[i - 1] if i > 0 else None
right = tokens[i + 1] if i < len(tokens) - 1 else None
if isinstance(tok_extended, AddedToken):
if tok_extended.rstrip and right:
# A bit counter-intuitive but we strip the left of the string
# since tok_extended.rstrip means the special token is eating all white spaces on its right
tokens[i + 1] = right.lstrip()
# Strip white spaces on the left
if tok_extended.lstrip and left:
tokens[i - 1] = left.rstrip() # Opposite here
else:
# We strip left and right by default
if right:
tokens[i + 1] = right.lstrip()
if left:
tokens[i - 1] = left.rstrip()
tokenized_text = []
for token in tokens:
if not token:
continue
if token in no_split_token:
tokenized_text.append(token)
else:
tokenized_text.extend(self._tokenize(token, **kwargs))
return tokenized_text
def convert_tokens_to_ids(self, tokens):
if tokens is None:
return None
if isinstance(tokens, str):
if tokens in self.added_tokens_encoder:
return self.added_tokens_encoder[tokens]
else:
return self._convert_token_to_id(tokens)
ids = []
for token in tokens:
if token in self.added_tokens_encoder:
ids.append(self.added_tokens_encoder[token])
else:
ids.append(self._convert_token_to_id(token))
return ids
def _convert_token_to_id(self, token):
return self.vocab.to_indices(token)
def convert_tokens_to_string(self, tokens):
"""
Converts a sequence of tokens (list of string) to a single string by
using ``' '.join(tokens)`` .
Args:
tokens (list[str]): A sequence of tokens.
Returns:
str: Converted string.
"""
return " ".join(tokens)
def convert_ids_to_tokens(self, ids, skip_special_tokens=False):
if isinstance(ids, int):
if ids in self.added_tokens_decoder:
return self.added_tokens_decoder[ids]
else:
return self._convert_id_to_token(ids)
tokens = []
for index in ids:
if skip_special_tokens and index in self.all_special_ids:
continue
if index in self.added_tokens_decoder:
tokens.append(self.added_tokens_decoder[index])
else:
tokens.append(self._convert_id_to_token(index))
return tokens
def _convert_id_to_token(self, index):
return self.vocab.to_tokens(index)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *args, **kwargs):
"""
Creates an instance of `PretrainedTokenizer`. Related resources are loaded
by specifying name of a built-in pretrained model, or a community-contributed
pretrained model, or a local file directory path.
Args:
pretrained_model_name_or_path (str): Name of pretrained model or dir path
to load from. The string can be:
- Name of built-in pretrained model
- Name of a community-contributed pretrained model.
- Local directory path which contains tokenizer related resources
and tokenizer config file ("tokenizer_config.json").
*args (tuple): position arguments for model `__init__`. If provided,
use these as position argument values for tokenizer initialization.
**kwargs (dict): keyword arguments for model `__init__`. If provided,
use these to update pre-defined keyword argument values for tokenizer
initialization.
Returns:
PretrainedTokenizer: An instance of `PretrainedTokenizer`.
Example:
.. code-block::
from paddlenlp.transformers import BertTokenizer
# Name of built-in pretrained model
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
# Name of community-contributed pretrained model
tokenizer = BertTokenizer.from_pretrained('yingyibiao/bert-base-uncased-sst-2-finetuned')
# Load from local directory path
tokenizer = BertTokenizer.from_pretrained('./my_bert/')
"""
pretrained_models = list(cls.pretrained_init_configuration.keys())
vocab_files = {}
init_configuration = {}
# From built-in pretrained models
if pretrained_model_name_or_path in pretrained_models:
for file_id, map_list in cls.pretrained_resource_files_map.items():
vocab_files[file_id] = map_list[pretrained_model_name_or_path]
init_configuration = copy.deepcopy(
cls.pretrained_init_configuration[
pretrained_model_name_or_path])
# From local dir path
elif os.path.isdir(pretrained_model_name_or_path):
for file_id, file_name in cls.resource_files_names.items():
full_file_name = os.path.join(pretrained_model_name_or_path,
file_name)
if os.path.isfile(full_file_name):
vocab_files[file_id] = full_file_name
vocab_files["tokenizer_config_file"] = os.path.join(
pretrained_model_name_or_path, cls.tokenizer_config_file)
else:
# Assuming from community-contributed pretrained models
for file_id, file_name in cls.resource_files_names.items():
full_file_name = os.path.join(COMMUNITY_MODEL_PREFIX,
pretrained_model_name_or_path,
file_name)
vocab_files[file_id] = full_file_name
vocab_files["tokenizer_config_file"] = os.path.join(
COMMUNITY_MODEL_PREFIX, pretrained_model_name_or_path,
cls.tokenizer_config_file)
default_root = os.path.join(MODEL_HOME, pretrained_model_name_or_path)
resolved_vocab_files = {}
for file_id, file_path in vocab_files.items():
if file_path is None or os.path.isfile(file_path):
resolved_vocab_files[file_id] = file_path
continue
path = os.path.join(default_root, file_path.split('/')[-1])
if os.path.exists(path):
logger.info("Already cached %s" % path)
resolved_vocab_files[file_id] = path
else:
logger.info("Downloading %s and saved to %s" %
(file_path, default_root))
try:
resolved_vocab_files[file_id] = get_path_from_url(
file_path, default_root)
except RuntimeError as err:
logger.error(err)
raise RuntimeError(
f"Can't load tokenizer for '{pretrained_model_name_or_path}'.\n"
f"Please make sure that '{pretrained_model_name_or_path}' is:\n"
"- a correct model-identifier of built-in pretrained models,\n"
"- or a correct model-identifier of community-contributed pretrained models,\n"
"- or the correct path to a directory containing relevant tokenizer files.\n"
)
# Prepare tokenizer initialization kwargs
# Did we save some inputs and kwargs to reload?
tokenizer_config_file = resolved_vocab_files.pop(
"tokenizer_config_file", None)
if tokenizer_config_file is not None:
with io.open(tokenizer_config_file, encoding="utf-8") as f:
init_kwargs = json.load(f)
else:
init_kwargs = init_configuration
# position args are stored in kwargs, maybe better not include
init_args = init_kwargs.pop("init_args", ())
init_kwargs.pop("init_class", None)
# Update with newly provided args and kwargs
init_args = init_args if not args else args
init_kwargs.update(kwargs)
# Merge resolved_vocab_files arguments in init_kwargs if not including.
# Maybe need more ways to load resources.
for args_name, file_path in resolved_vocab_files.items():
# when `pretrained_model_name_or_path` is a pretrained model name,
# use pretrained_init_configuration as `init_kwargs` to init which
# does not include the vocab file in it, thus add vocab file into
# args.
if args_name not in init_kwargs:
init_kwargs[args_name] = file_path
# when `pretrained_model_name_or_path` is a pretrained model dir,
# use tokenizer_config_file.json as `init_kwargs` to init which
# does include a vocab file path in it. However, if the vocab file
# path included in json does not exist, such as was deleted, to make
# it still work, use the vocab file under this dir.
elif not os.path.isfile(init_kwargs[args_name]) and os.path.isfile(
file_path):
init_kwargs[args_name] = file_path
# TODO(guosheng): avoid reduplication of position args and key word args
tokenizer = cls(*init_args, **init_kwargs)
return tokenizer
def save_pretrained(self, save_directory):
"""
Save tokenizer configuration and related resources to files under
`save_directory`. The tokenizer configuration would be saved into the file
indicated by `tokenizer_config_file` (that is, `tokenizer_config.json`),
and resources would be saved into the files indicated by `resource_files_names`
by using `self.save_resources(save_directory)`.
The `save_directory` can be used in `from_pretrained` as argument value
of `pretrained_model_name_or_path` to re-load the tokenizer.
Args:
save_directory (str): Directory to save files into.
Example:
.. code-block::
from paddlenlp.transformers import BertTokenizer
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
tokenizer.save_pretrained('trained_model')
# reload from save_directory
tokenizer = BertTokenizer.from_pretrained('trained_model')
"""
assert not os.path.isfile(
save_directory
), "Saving directory ({}) should be a directory, not a file".format(
save_directory)
os.makedirs(save_directory, exist_ok=True)
tokenizer_config_file = os.path.join(save_directory,
self.tokenizer_config_file)
# init_config is set in metaclass created `__init__`,
tokenizer_config = self.init_config
with io.open(tokenizer_config_file, "w", encoding="utf-8") as f:
f.write(json.dumps(tokenizer_config, ensure_ascii=False))
self.save_resources(save_directory)
def save_resources(self, save_directory):
"""
Save tokenizer related resources to the files indicated by
`resource_files_names` under `save_directory` by copying directly. Override it if necessary.
Args:
save_directory (str): Directory to save files into.
"""
for name, file_name in self.resource_files_names.items():
src_path = self.init_config[name]
dst_path = os.path.join(save_directory, file_name)
if os.path.abspath(src_path) != os.path.abspath(dst_path):
copyfile(src_path, dst_path)
@staticmethod
def load_vocabulary(filepath,
unk_token=None,
pad_token=None,
bos_token=None,
eos_token=None,
**kwargs):
"""
Instantiate an instance of `Vocab` from a file reserving all tokens
by using `Vocab.from_dict`. The file contains a token per line, and the
line number would be the index of corresponding token.
Args:
filepath (str): path of file to construct vocabulary.
unk_token (str): special token for unknown token. If no need, it also
could be `None`. Defaults to `None`.
pad_token (str): special token for padding token. If no need, it also
could be `None`. Defaults to `None`.
bos_token (str): special token for bos token. If no need, it also
could be `None`. Defaults to `None`.
eos_token (str): special token for eos token. If no need, it also
could be `None`. Defaults to `None`.
**kwargs (dict): keyword arguments for `Vocab.from_dict`.
Returns:
Vocab: An instance of `Vocab`.
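Example:
An illustrative sketch (`vocab.txt` is a hypothetical local file with one
token per line):
.. code-block::
# vocab.txt contains, e.g.:
# [PAD]
# [UNK]
# the
vocab = PretrainedTokenizer.load_vocabulary('vocab.txt',
unk_token='[UNK]',
pad_token='[PAD]')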
"""
token_to_idx = {}
with io.open(filepath, 'r', encoding='utf-8') as f:
for index, line in enumerate(f):
token = line.rstrip('\n')
token_to_idx[token] = int(index)
vocab = Vocab.from_dict(
token_to_idx,
unk_token=unk_token,
pad_token=pad_token,
bos_token=bos_token,
eos_token=eos_token,
**kwargs)
return vocab
@staticmethod
def save_vocabulary(filepath, vocab):
"""
Save all tokens to a vocabulary file. The file contains a token per line,
and the line number would be the index of corresponding token.
Args:
filepath (str): File path to be saved to.
vocab (Vocab|dict): The `Vocab` or `dict` instance to be saved.
"""
if isinstance(vocab, Vocab):
tokens = vocab.idx_to_token
else:
tokens = sorted(vocab.keys(), key=lambda token: vocab[token])
with io.open(filepath, 'w', encoding='utf-8') as f:
for token in tokens:
f.write(token + '\n')
def __getattr__(self, name):
if name.endswith('_token'):
return self.special_tokens_map[name]
elif name.endswith('_token_id'):
return self._convert_token_to_id(self.special_tokens_map[name[:-3]])
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self).__name__, name))
def truncate_sequences(self,
ids,
pair_ids=None,
num_tokens_to_remove=0,
truncation_strategy='longest_first',
stride=0):
"""
Truncates a sequence pair in place to the maximum length.
Args:
ids: list of tokenized input ids. Can be obtained from a string by chaining the
`tokenize` and `convert_tokens_to_ids` methods.
pair_ids: Optional second list of input ids. Can be obtained from a string by chaining the
`tokenize` and `convert_tokens_to_ids` methods.
num_tokens_to_remove (:obj:`int`, `optional`, defaults to ``0``):
number of tokens to remove using the truncation strategy
truncation_strategy: string selected in the following options:
- 'longest_first' (default) Iteratively reduce the inputs sequence until the input is under max_seq_len
starting from the longest one at each token (when there is a pair of input sequences).
Overflowing tokens only contain overflow from the first sequence.
- 'only_first': Only truncate the first sequence. Raise an error if the first sequence is shorter than or equal to num_tokens_to_remove.
- 'only_second': Only truncate the second sequence
- 'do_not_truncate': Does not truncate (raise an error if the input sequence is longer than max_seq_len)
stride (:obj:`int`, `optional`, defaults to ``0``):
If set to a number along with max_seq_len, the overflowing tokens returned will contain some tokens
from the main sequence returned. The value of this argument defines the number of additional tokens.
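Example:
An illustrative sketch with made-up ids (`tokenizer` stands for any
`PretrainedTokenizer` instance):
.. code-block::
ids = [1, 2, 3, 4, 5]
pair_ids = [6, 7]
# remove 2 tokens with the default 'longest_first' strategy:
# tokens are dropped from the end of the longer sequence first
ids, pair_ids, overflowing = tokenizer.truncate_sequences(
ids, pair_ids=pair_ids, num_tokens_to_remove=2)
# ids -> [1, 2, 3], pair_ids -> [6, 7], overflowing -> [4, 5]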
"""
if num_tokens_to_remove <= 0:
return ids, pair_ids, []
if truncation_strategy == 'longest_first':
overflowing_tokens = []
for _ in range(num_tokens_to_remove):
if pair_ids is None or len(ids) > len(pair_ids):
overflowing_tokens = [ids[-1]] + overflowing_tokens
ids = ids[:-1]
else:
pair_ids = pair_ids[:-1]
window_len = min(len(ids), stride)
if window_len > 0:
overflowing_tokens = ids[-window_len:] + overflowing_tokens
elif truncation_strategy == 'only_first':
assert len(ids) > num_tokens_to_remove
window_len = min(len(ids), stride + num_tokens_to_remove)
overflowing_tokens = ids[-window_len:]
ids = ids[:-num_tokens_to_remove]
elif truncation_strategy == 'only_second':
assert pair_ids is not None and len(pair_ids) > num_tokens_to_remove
window_len = min(len(pair_ids), stride + num_tokens_to_remove)
overflowing_tokens = pair_ids[-window_len:]
pair_ids = pair_ids[:-num_tokens_to_remove]
elif truncation_strategy == 'do_not_truncate':
raise ValueError(
"Input sequence are too long for max_length. Please select a truncation strategy."
)
else:
raise ValueError(
"Truncation_strategy should be selected in ['longest_first', 'only_first', 'only_second', 'do_not_truncate']"
)
return (ids, pair_ids, overflowing_tokens)
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens.
Should be overridden in a subclass if the model has a special way of building those.
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
List[int]: List of input_id with the appropriate special tokens.
"""
if token_ids_1 is None:
return token_ids_0
return token_ids_0 + token_ids_1
def build_offset_mapping_with_special_tokens(self,
offset_mapping_0,
offset_mapping_1=None):
"""
Build offset map from a pair of offset map by concatenating and adding offsets of special tokens.
Should be overridden in a subclass if the model has a special way of building those.
Args:
offset_mapping_0 (List[tuple]):
List of char offsets to which the special tokens will be added.
offset_mapping_1 (List[tuple], optional):
Optional second list of char offsets for offset mapping pairs.
Returns:
List[tuple]: List of char offsets with the appropriate offsets of special tokens.
"""
if offset_mapping_1 is None:
return offset_mapping_0
return offset_mapping_0 + offset_mapping_1
def get_special_tokens_mask(self,
token_ids_0,
token_ids_1=None,
already_has_special_tokens=False):
"""
Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer ``encode`` methods.
Args:
token_ids_0 (List[int]): List of ids of the first sequence.
token_ids_1 (List[int], optional): List of ids of the second sequence.
already_has_special_tokens (bool, optional): Whether or not the token list is already
formatted with special tokens for the model. Defaults to `False`.
Returns:
results (List[int]): The list of integers in the range [0, 1]:
1 for a special token, 0 for a sequence token.
"""
return [0] * ((len(token_ids_1)
if token_ids_1 else 0) + len(token_ids_0))
def create_token_type_ids_from_sequences(self,
token_ids_0,
token_ids_1=None):
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task.
Should be overridden in a subclass if the model has a special way of building those.
If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
Args:
token_ids_0 (List[int]):
List of IDs.
token_ids_1 (List[int], optional):
Optional second list of IDs for sequence pairs.
Returns:
List[int]: List of token_type_id according to the given sequence(s).
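Example:
An illustrative sketch for this base implementation (subclasses that add
special tokens will differ; `tokenizer` is a `PretrainedTokenizer` instance):
.. code-block::
tokenizer.create_token_type_ids_from_sequences([11, 12], [21, 22, 23])
# -> [0, 0, 1, 1, 1]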
"""
if token_ids_1 is None:
return len(token_ids_0) * [0]
return [0] * len(token_ids_0) + [1] * len(token_ids_1)
def num_special_tokens_to_add(self, pair=False):
"""
Returns the number of added tokens when encoding a sequence with special tokens.
Args:
pair (bool, optional):
Whether the number of added tokens should be computed in the case of a sequence pair or a single
sequence. Defaults to `False`.
Returns:
int: Number of special tokens added to sequences.
"""
token_ids_0 = []
token_ids_1 = []
return len(
self.build_inputs_with_special_tokens(token_ids_0, token_ids_1
if pair else None))
def encode(self,
text,
text_pair=None,
max_seq_len=512,
pad_to_max_seq_len=False,
truncation_strategy="longest_first",
return_position_ids=False,
return_token_type_ids=True,
return_attention_mask=False,
return_length=False,
return_overflowing_tokens=False,
return_special_tokens_mask=False,
return_offsets_mapping=False):
"""
Performs tokenization and uses the tokenized tokens to prepare model
inputs. It supports sequence or sequence pair as input, and batch input
is not allowed.
Args:
text (str, List[str] or List[int]):
The sequence to be processed. One sequence is a string, a list
of strings, or a list of integers depending on whether it has
been pretokenized and converted to ids.
text_pair (str, List[str] or List[List[str]]):
Same as `text` argument, while it represents for the latter
sequence of the sequence pair.
max_seq_len (int, optional):
If set to a number, will limit the total sequence returned so
that it has a maximum length. If there are overflowing tokens,
those overflowing tokens will be added to the returned dictionary
when `return_overflowing_tokens` is `True`. Defaults to `512`.
pad_to_max_seq_len (bool, optional):
If set to `True`, the returned sequences would be padded up to
`max_seq_len` specified length according to padding side
(`self.padding_side`) and padding token id. Defaults to `False`.
truncation_strategy (str, optional):
String selected in the following options:
- 'longest_first' (default) Iteratively reduce the inputs sequence
until the input is under `max_seq_len` starting from the longest
one at each token (when there is a pair of input sequences).
- 'only_first': Only truncate the first sequence.
- 'only_second': Only truncate the second sequence.
- 'do_not_truncate': Do not truncate (raise an error if the input
sequence is longer than `max_seq_len`).
Defaults to 'longest_first'.
return_position_ids (bool, optional):
Whether to include tokens position ids in the returned dictionary.
Defaults to `False`.
return_token_type_ids (bool, optional):
Whether to include token type ids in the returned dictionary.
Defaults to `True`.
return_attention_mask (bool, optional):
Whether to include the attention mask in the returned dictionary.
Defaults to `False`.
return_length (bool, optional):
Whether to include the length of each encoded inputs in the
returned dictionary. Defaults to `False`.
return_overflowing_tokens (bool, optional):
Whether to include overflowing token information in the returned
dictionary. Defaults to `False`.
return_special_tokens_mask (bool, optional):
Whether to include special tokens mask information in the returned
dictionary. Defaults to `False`.
return_offsets_mapping (bool, optional):
Whether to include the list of pair preserving the index of start
and end char in original input for each token in the returned
dictionary. Defaults to `False`.
Returns:
dict:
The dict has the following optional items:
- **input_ids** (list[int]): List of token ids to be fed to a model.
- **position_ids** (list[int], optional): List of token position ids to be
fed to a model. Included when `return_position_ids` is `True`
- **token_type_ids** (list[int], optional): List of token type ids to be
fed to a model. Included when `return_token_type_ids` is `True`.
- **attention_mask** (list[int], optional): List of integers valued 0 or 1,
where 0 specifies paddings and should not be attended to by the
model. Included when `return_attention_mask` is `True`.
- **seq_len** (int, optional): The input_ids length. Included when `return_length`
is `True`.
- **overflowing_tokens** (list[int], optional): List of overflowing tokens.
Included when `max_seq_len` is specified and `return_overflowing_tokens`
is True.
- **num_truncated_tokens** (int, optional): The number of overflowing tokens.
Included when `max_seq_len` is specified and `return_overflowing_tokens`
is True.
- **special_tokens_mask** (list[int], optional): List of integers valued 0 or 1,
with 0 specifying special added tokens and 1 specifying sequence tokens.
Included when `return_special_tokens_mask` is `True`.
- **offset_mapping** (list[int], optional): list of pair preserving the
index of start and end char in original input for each token.
For a special token, the index pair is `(0, 0)`. Included when
`return_overflowing_tokens` is True.
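Example:
An illustrative sketch (returned ids are placeholders; real values depend
on the vocabulary):
.. code-block::
from paddlenlp.transformers import BertTokenizer
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
encoded = tokenizer.encode("What is PaddleNLP?",
text_pair="A NLP library.",
max_seq_len=16,
return_length=True)
# encoded contains 'input_ids', 'token_type_ids' and 'seq_len'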
"""
def get_input_ids(text):
if isinstance(text, str):
tokens = self.tokenize(text)
return self.convert_tokens_to_ids(tokens)
elif isinstance(text,
(list, tuple)) and len(text) > 0 and isinstance(
text[0], str):
return self.convert_tokens_to_ids(text)
elif isinstance(text,
(list, tuple)) and len(text) > 0 and isinstance(
text[0], int):
return text
else:
raise ValueError(
"Input is not valid. Should be a string, a list/tuple of strings or a list/tuple of integers."
)
ids = get_input_ids(text)
pair_ids = get_input_ids(text_pair) if text_pair is not None else None
pair = bool(pair_ids is not None)
len_ids = len(ids)
len_pair_ids = len(pair_ids) if pair else 0
encoded_inputs = {}
# Truncation: Handle max sequence length
total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(
pair=pair))
if max_seq_len and total_len > max_seq_len:
ids, pair_ids, overflowing_tokens = self.truncate_sequences(
ids,
pair_ids=pair_ids,
num_tokens_to_remove=total_len - max_seq_len,
truncation_strategy=truncation_strategy, )
if return_overflowing_tokens:
encoded_inputs["overflowing_tokens"] = overflowing_tokens
encoded_inputs["num_truncated_tokens"] = total_len - max_seq_len
# Add special tokens
sequence = self.build_inputs_with_special_tokens(ids, pair_ids)
token_type_ids = self.create_token_type_ids_from_sequences(ids,
pair_ids)
# Build output dictionary
encoded_inputs["input_ids"] = sequence
if return_token_type_ids:
encoded_inputs["token_type_ids"] = token_type_ids
if return_special_tokens_mask:
encoded_inputs[
"special_tokens_mask"] = self.get_special_tokens_mask(ids,
pair_ids)
if return_offsets_mapping:
token_offset_mapping = self.get_offset_mapping(text)
token_pair_offset_mapping = self.get_offset_mapping(text_pair)
if max_seq_len and total_len > max_seq_len:
token_offset_mapping, token_pair_offset_mapping, _ = self.truncate_sequences(
token_offset_mapping,
pair_ids=token_pair_offset_mapping,
num_tokens_to_remove=total_len - max_seq_len,
truncation_strategy=truncation_strategy, )
offset_mapping = self.build_offset_mapping_with_special_tokens(
token_offset_mapping, token_pair_offset_mapping)
encoded_inputs['offset_mapping'] = offset_mapping
if return_length:
encoded_inputs["seq_len"] = len(encoded_inputs["input_ids"])
# Check lengths
assert max_seq_len is None or len(encoded_inputs[
"input_ids"]) <= max_seq_len
# Padding
needs_to_be_padded = pad_to_max_seq_len and \
max_seq_len and len(encoded_inputs["input_ids"]) < max_seq_len
if needs_to_be_padded:
difference = max_seq_len - len(encoded_inputs["input_ids"])
if self.padding_side == 'right':
if return_attention_mask:
encoded_inputs["attention_mask"] = [1] * len(encoded_inputs[
"input_ids"]) + [0] * difference
if return_token_type_ids:
encoded_inputs["token_type_ids"] = (
encoded_inputs["token_type_ids"] +
[self.pad_token_type_id] * difference)
if return_special_tokens_mask:
encoded_inputs["special_tokens_mask"] = encoded_inputs[
"special_tokens_mask"] + [1] * difference
encoded_inputs["input_ids"] = encoded_inputs[
"input_ids"] + [self.pad_token_id] * difference
if return_offsets_mapping:
encoded_inputs["offset_mapping"] = encoded_inputs[
"offset_mapping"] + [(0, 0)] * difference
elif self.padding_side == 'left':
if return_attention_mask:
encoded_inputs["attention_mask"] = [0] * difference + [
1
] * len(encoded_inputs["input_ids"])
if return_token_type_ids:
encoded_inputs["token_type_ids"] = (
[self.pad_token_type_id] * difference +
encoded_inputs["token_type_ids"])
if return_special_tokens_mask:
encoded_inputs["special_tokens_mask"] = [
1
] * difference + encoded_inputs["special_tokens_mask"]
encoded_inputs["input_ids"] = [
self.pad_token_id
] * difference + encoded_inputs["input_ids"]
if return_offsets_mapping:
encoded_inputs["offset_mapping"] = [
(0, 0)
] * difference + encoded_inputs["offset_mapping"]
else:
if return_attention_mask:
encoded_inputs["attention_mask"] = [1] * len(encoded_inputs[
"input_ids"])
if return_position_ids:
encoded_inputs["position_ids"] = list(
range(len(encoded_inputs["input_ids"])))
return encoded_inputs
def batch_encode(self,
batch_text_or_text_pairs,
max_seq_len=512,
pad_to_max_seq_len=False,
stride=0,
is_split_into_words=False,
truncation_strategy="longest_first",
return_position_ids=False,
return_token_type_ids=True,
return_attention_mask=False,
return_length=False,
return_overflowing_tokens=False,
return_special_tokens_mask=False,
return_dict=True,
return_offsets_mapping=False):
"""
Performs tokenization and uses the tokenized tokens to prepare model
inputs. It supports batch inputs of sequence or sequence pair.
Args:
batch_text_or_text_pairs (list):
The element of list can be sequence or sequence pair, and the
sequence is a string or a list of strings depending on whether
it has been pretokenized. If each sequence is provided as a list
of strings (pretokenized), you must set `is_split_into_words` as
`True` to disambiguate with a sequence pair.
max_seq_len (int, optional):
If set to a number, will limit the total sequence returned so
that it has a maximum length. If there are overflowing tokens,
those overflowing tokens will be added to the returned dictionary
when `return_overflowing_tokens` is `True`. Defaults to `512`.
stride (int, optional):
Only available for batch input of sequence pair and mainly for
question answering usage. When used for QA, `text` represents questions
and `text_pair` represents contexts. If `stride` is set to a
positive number, the context will be split into multiple spans
where `stride` defines the number of (tokenized) tokens to skip
from the start of one span to get the next span, thus will produce
a bigger batch than inputs to include all spans. Moreover, 'overflow_to_sample'
and 'offset_mapping' preserving the original example and position
information will be added to the returned dictionary. Defaults to 0.
pad_to_max_seq_len (bool, optional):
If set to `True`, the returned sequences would be padded up to
`max_seq_len` specified length according to padding side
(`self.padding_side`) and padding token id. Defaults to `False`.
truncation_strategy (str, optional):
String selected in the following options:
- 'longest_first' (default) Iteratively reduce the inputs sequence
until the input is under `max_seq_len` starting from the longest
one at each token (when there is a pair of input sequences).
- 'only_first': Only truncate the first sequence.
- 'only_second': Only truncate the second sequence.
- 'do_not_truncate': Do not truncate (raise an error if the input
sequence is longer than `max_seq_len`).
Defaults to 'longest_first'.
return_position_ids (bool, optional):
Whether to include tokens position ids in the returned dictionary.
Defaults to `False`.
return_token_type_ids (bool, optional):
Whether to include token type ids in the returned dictionary.
Defaults to `True`.
return_attention_mask (bool, optional):
Whether to include the attention mask in the returned dictionary.
Defaults to `False`.
return_length (bool, optional):
Whether to include the length of each encoded inputs in the
returned dictionary. Defaults to `False`.
return_overflowing_tokens (bool, optional):
Whether to include overflowing token information in the returned
dictionary. Defaults to `False`.
return_special_tokens_mask (bool, optional):
Whether to include special tokens mask information in the returned
dictionary. Defaults to `False`.
return_dict (bool, optional):
Decide the format for returned encoded batch inputs. Only works when
input is a batch of data.
::
- If True, encoded inputs would be a dictionary like:
{'input_ids': [[1, 4444, 4385, 1545, 6712],[1, 4444, 4385]],
'token_type_ids': [[0, 0, 0, 0, 0], [0, 0, 0]]}
- If False, encoded inputs would be a list like:
[{'input_ids': [1, 4444, 4385, 1545, 6712],
'token_type_ids': [0, 0, 0, 0, 0]},
{'input_ids': [1, 4444, 4385], 'token_type_ids': [0, 0, 0]}]
Defaults to `True`.
return_offsets_mapping (bool, optional):
Whether to include the list of pair preserving the index of start
and end char in original input for each token in the returned
dictionary. Would be automatically set to `True` when `stride` > 0.
Defaults to `False`.
Returns:
list[dict]:
The dict has the following optional items:
- **input_ids** (list[int]): List of token ids to be fed to a model.
- **position_ids** (list[int], optional): List of token position ids to be
fed to a model. Included when `return_position_ids` is `True`
- **token_type_ids** (list[int], optional): List of token type ids to be
fed to a model. Included when `return_token_type_ids` is `True`.
- **attention_mask** (list[int], optional): List of integers valued 0 or 1,
where 0 specifies paddings and should not be attended to by the
model. Included when `return_attention_mask` is `True`.
- **seq_len** (int, optional): The input_ids length. Included when `return_length`
is `True`.
- **overflowing_tokens** (list[int], optional): List of overflowing tokens.
Included when `max_seq_len` is specified and `return_overflowing_tokens`
is True.
- **num_truncated_tokens** (int, optional): The number of overflowing tokens.
Included when `max_seq_len` is specified and `return_overflowing_tokens`
is True.
- **special_tokens_mask** (list[int], optional): List of integers valued 0 or 1,
with 0 specifying special added tokens and 1 specifying sequence tokens.
Included when `return_special_tokens_mask` is `True`.
- **offset_mapping** (list[int], optional): list of pair preserving the
index of start and end char in original input for each token.
For a special token, the index pair is `(0, 0)`. Included when
`return_overflowing_tokens` is True or `stride` > 0.
- **overflow_to_sample** (int, optional): Index of example from which this
feature is generated. Included when `stride` works.
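Example:
An illustrative sketch (ids are placeholders; real values depend on the
vocabulary):
.. code-block::
from paddlenlp.transformers import BertTokenizer
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
# batch of single sequences
out = tokenizer.batch_encode(["Hello world", "How are you?"])
# batch of sequence pairs
out = tokenizer.batch_encode([("question 1", "context 1"),
("question 2", "context 2")])
# with return_dict=True (default) `out` behaves like a dict of lists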
"""
def get_input_ids(text):
if isinstance(text, str):
tokens = self.tokenize(text)
return self.convert_tokens_to_ids(tokens)
elif isinstance(text,
(list, tuple)) and len(text) > 0 and isinstance(
text[0], str):
return self.convert_tokens_to_ids(text)
elif isinstance(text,
(list, tuple)) and len(text) > 0 and isinstance(
text[0], int):
return text
else:
raise ValueError(
"Input is not valid. Should be a string, a list/tuple of strings or a list/tuple of integers."
)
batch_outputs = {}
batch_encode_inputs = []
for example_id, tokens_or_pair_tokens in enumerate(
batch_text_or_text_pairs):
if not isinstance(tokens_or_pair_tokens, (list, tuple)):
text, text_pair = tokens_or_pair_tokens, None
elif is_split_into_words and not isinstance(
tokens_or_pair_tokens[0], (list, tuple)):
text, text_pair = tokens_or_pair_tokens, None
else:
text, text_pair = tokens_or_pair_tokens
first_ids = get_input_ids(text)
second_ids = get_input_ids(
text_pair) if text_pair is not None else None
if stride > 0 and second_ids is not None:
max_len_for_pair = max_seq_len - len(
first_ids) - self.num_special_tokens_to_add(pair=True)
token_offset_mapping = self.get_offset_mapping(text)
token_pair_offset_mapping = self.get_offset_mapping(text_pair)
offset = 0
while offset < len(second_ids):
encoded_inputs = {}
length = len(second_ids) - offset
if length > max_len_for_pair:
length = max_len_for_pair
ids = first_ids
pair_ids = second_ids[offset:offset + length]
mapping = token_offset_mapping
pair_mapping = token_pair_offset_mapping[offset:offset +
length]
offset_mapping = self.build_offset_mapping_with_special_tokens(
mapping, pair_mapping)
encoded_inputs['offset_mapping'] = offset_mapping
sequence = self.build_inputs_with_special_tokens(ids,
pair_ids)
token_type_ids = self.create_token_type_ids_from_sequences(
ids, pair_ids)
# Build output dictionary
encoded_inputs["input_ids"] = sequence
if return_token_type_ids:
encoded_inputs["token_type_ids"] = token_type_ids
if return_special_tokens_mask:
encoded_inputs[
"special_tokens_mask"] = self.get_special_tokens_mask(
ids, pair_ids)
if return_length:
encoded_inputs["seq_len"] = len(encoded_inputs[
"input_ids"])
# Check lengths
assert max_seq_len is None or len(encoded_inputs[
"input_ids"]) <= max_seq_len
# Padding
needs_to_be_padded = pad_to_max_seq_len and \
max_seq_len and len(encoded_inputs["input_ids"]) < max_seq_len
if needs_to_be_padded:
difference = max_seq_len - len(encoded_inputs[
"input_ids"])
if self.padding_side == 'right':
if return_attention_mask:
encoded_inputs["attention_mask"] = [1] * len(
encoded_inputs[
"input_ids"]) + [0] * difference
if return_token_type_ids:
# 0 for padding token mask
encoded_inputs["token_type_ids"] = (
encoded_inputs["token_type_ids"] +
[self.pad_token_type_id] * difference)
if return_special_tokens_mask:
encoded_inputs[
"special_tokens_mask"] = encoded_inputs[
"special_tokens_mask"] + [1
] * difference
encoded_inputs["input_ids"] = encoded_inputs[
"input_ids"] + [self.pad_token_id] * difference
encoded_inputs['offset_mapping'] = encoded_inputs[
'offset_mapping'] + [(0, 0)] * difference
elif self.padding_side == 'left':
if return_attention_mask:
encoded_inputs["attention_mask"] = [
0
] * difference + [1] * len(encoded_inputs[
"input_ids"])
if return_token_type_ids:
# 0 for padding token mask
encoded_inputs["token_type_ids"] = (
[self.pad_token_type_id] * difference +
encoded_inputs["token_type_ids"])
if return_special_tokens_mask:
encoded_inputs["special_tokens_mask"] = [
1
] * difference + encoded_inputs[
"special_tokens_mask"]
encoded_inputs["input_ids"] = [
self.pad_token_id
] * difference + encoded_inputs["input_ids"]
encoded_inputs['offset_mapping'] = [
(0, 0)
] * difference + encoded_inputs['offset_mapping']
else:
if return_attention_mask:
encoded_inputs["attention_mask"] = [1] * len(
encoded_inputs["input_ids"])
if return_position_ids:
encoded_inputs["position_ids"] = list(
range(len(encoded_inputs["input_ids"])))
encoded_inputs['overflow_to_sample'] = example_id
if return_dict:
for key, value in encoded_inputs.items():
if key not in batch_outputs:
batch_outputs[key] = []
batch_outputs[key].append(value)
else:
batch_encode_inputs.append(encoded_inputs)
if offset + length == len(second_ids):
break
offset += min(length, stride)
else:
encoded_inputs = self.encode(
text,
text_pair,
max_seq_len=max_seq_len,
pad_to_max_seq_len=pad_to_max_seq_len,
truncation_strategy=truncation_strategy,
return_position_ids=return_position_ids,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_length=return_length,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping)
if return_dict:
for key, value in encoded_inputs.items():
if key not in batch_outputs:
batch_outputs[key] = []
batch_outputs[key].append(value)
else:
batch_encode_inputs.append(encoded_inputs)
return BatchEncoding(
batch_outputs) if return_dict else batch_encode_inputs
def get_offset_mapping(self, text):
"""
Returns the map of tokens and the start and end index of their start and end character.
Modified from https://github.com/bojone/bert4keras/blob/master/bert4keras/tokenizers.py#L372
Args:
text (str):
Input text.
Returns:
list: The offset map of input text.
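Example:
An illustrative sketch (exact spans depend on the wordpiece vocabulary):
.. code-block::
tokenizer.get_offset_mapping("Hello world")
# -> e.g. [(0, 5), (6, 11)] if "hello" and "world" are single tokens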
"""
split_tokens = []
for token in self.basic_tokenizer.tokenize(text):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token
if sub_token != self.unk_token else token)
normalized_text, char_mapping = '', []
for i, ch in enumerate(text):
if self.basic_tokenizer.do_lower_case:
ch = ch.lower()
ch = unicodedata.normalize('NFD', ch)
ch = ''.join([c for c in ch if unicodedata.category(c) != 'Mn'])
ch = ''.join([
c for c in ch
if not (ord(c) == 0 or ord(c) == 0xfffd or _is_control(c))
])
normalized_text += ch
char_mapping.extend([i] * len(ch))
text, token_mapping, offset = normalized_text, [], 0
for token in split_tokens:
if token[:2] == '##':
token = token[2:]
start = text[offset:].index(token) + offset
end = start + len(token)
token_mapping.append(
(char_mapping[start], char_mapping[end - 1] + 1))
offset = end
return token_mapping
class BPETokenizer(PretrainedTokenizer):
"""
The base class for all bpe tokenizers. It mainly provides common tokenize
methods for bpe type tokenizer.
Args:
vocab_file (str):
file path of the vocabulary.
encoder_json_path (str, optional):
file path of the id to vocab.
vocab_bpe_path (str, optional):
file path of word merge text.
unk_token (str, optional):
The special token for unknown words.
Defaults to "[UNK]".
sep_token (str, optional):
The special token for separator token.
Defaults to "[SEP]".
pad_token (str, optional):
The special token for padding.
Defaults to "[PAD]".
cls_token (str, optional):
The special token for cls.
Defaults to "[CLS]".
mask_token (str, optional):
The special token for mask.
Defaults to "[MASK]".
"""
class Encoder(object):
def __init__(self,
encoder,
bpe_merges,
errors='replace',
special_tokens=["[SEP]", "[p]", "[q]", "[/q]"]):
self.encoder = encoder
self.decoder = {v: k for k, v in self.encoder.items()}
self.errors = errors # how to handle errors in decoding
self.byte_encoder = self._bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
self.cache = {}
self.re = try_import("regex")
self.special_tokens = special_tokens
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
self.pat = self.re.compile(
r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+"""
)
@lru_cache()
def _bytes_to_unicode(self):
"""
Returns a dict mapping utf-8 bytes to unicode strings.
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
This is a significant percentage of your normal, say, 32K bpe vocab.
To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
And avoids mapping to whitespace/control characters the bpe code barfs on.
"""
bs = (list(range(ord("!"), ord("~") + 1)) +
list(range(ord("¡"), ord("¬") + 1)) +
list(range(ord("®"), ord("ÿ") + 1)))
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8 + n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
def _get_pairs(self, word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token)
pairs = self._get_pairs(word)
if not pairs:
return token
while True:
bigram = min(
pairs,
key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
except ValueError:  # `first` not found in the rest of the word
new_word.extend(word[i:])
break
if word[i] == first and i < len(word) - 1 and word[
i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = self._get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
def tokenize(self, text):
tokens = text.split(' ')
sub_tokens = []
for token_i, token in enumerate(tokens):
if self.is_special_token(token):
if token_i == 0:
sub_tokens.extend([token])
else:
sub_tokens.extend([" " + token])
else:
if token_i == 0:
sub_tokens.extend(self.re.findall(self.pat, token))
else:
sub_tokens.extend(
self.re.findall(self.pat, " " + token))
return sub_tokens
def tokenize_old(self, text):
return self.re.findall(self.pat, text)
def is_special_token(self, tok):
if isinstance(tok, int):
return False
res = False
for t in self.special_tokens:
# if tok.find(t) != -1:
if tok.strip() == t:
res = True
break
return res
def tokenize_bpe(self, token):
if self.is_special_token(token):
return [token.strip()] # remove space for convert_to_ids
else:
token = ''.join(self.byte_encoder[b]
for b in token.encode('utf-8'))
return [
self.encoder[bpe_token]
for bpe_token in self.bpe(token).split(' ')
]
def encode(self, text):
bpe_tokens = []
for token in self.tokenize(text):
bpe_tokens.extend(self.tokenize_bpe(token))
return bpe_tokens
def decode(self, tokens):
pre_token_i = 0
texts = []
for token_i, token in enumerate(tokens):
if self.is_special_token(token):
# process tokens preceding token_i
if token_i - pre_token_i > 0:
text = ''.join([
self.decoder[int(tok)]
for tok in tokens[pre_token_i:token_i]
])
text = bytearray(
[self.byte_decoder[c] for c in text]).decode(
'utf-8', errors=self.errors)
texts.append(text)
# texts.append(token)
if token_i == 0:
texts.append(
token
) # in the beginning, there is no space before special tokens
else:
texts.extend(
[" ", token]
) # in middle sentence, there must be a space before special tokens
pre_token_i = token_i + 1
if pre_token_i < len(tokens):
text = ''.join(
[self.decoder[int(tok)] for tok in tokens[pre_token_i:]])
text = bytearray([self.byte_decoder[c] for c in text]).decode(
'utf-8', errors=self.errors)
texts.append(text)
return ''.join(texts)
def __init__(self,
vocab_file,
encoder_json_path="./configs/encoder.json",
vocab_bpe_path="./configs/vocab.bpe",
unk_token="[UNK]",
sep_token="[SEP]",
pad_token="[PAD]",
cls_token="[CLS]",
mask_token="[MASK]"):
self.vocab = self.load_vocabulary(
vocab_file,
unk_token=unk_token,
sep_token=sep_token,
cls_token=cls_token,
mask_token=mask_token)
self.encoder_json_path = encoder_json_path
self.vocab_bpe_path = vocab_bpe_path
self.encoder = self._get_encoder(encoder_json_path, vocab_bpe_path)
self.nltk = try_import('nltk')
def _tokenize(self, text, is_sentencepiece=True):
text = convert_to_unicode(text)
text = " ".join(text.split()) # remove duplicate whitespace
if is_sentencepiece:
sents = self.nltk.tokenize.sent_tokenize(text)
bpe_ids = sum([self.encoder.encode(sent) for sent in sents], [])
else:
bpe_ids = self.encoder.encode(text)
tokens = [str(bpe_id) for bpe_id in bpe_ids]
return tokens
def _get_encoder(self, encoder_json_path, vocab_bpe_path):
with open(encoder_json_path, 'r') as f:
encoder = json.load(f)
with open(vocab_bpe_path, 'r') as f:
bpe_data = f.read()
bpe_merges = [
tuple(merge_str.split()) for merge_str in bpe_data.split('\n')[1:-1]
]
return self.Encoder(
encoder=encoder,
bpe_merges=bpe_merges, )
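    # Note: the Encoder constructor is defined outside this fragment; a standard
    # GPT-2-style implementation is assumed to build
    #   self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
    # so that merge rules listed earlier in vocab.bpe win inside bpe().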
|
the-stack_106_24204 | """
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
import os, sys
sys.path.append(os.path.dirname(__file__))
from Editor_TestClass import BaseClass
class Editor_ComponentPropertyCommands_visibility(BaseClass):
# Description:
# ComponentPropertyCommands test case visibility
@staticmethod
def test():
import azlmbr.bus as bus
import azlmbr.editor as editor
import azlmbr.legacy.general
import azlmbr.entity as entity
check_result = BaseClass.check_result
# Find entity
searchFilter = entity.SearchFilter()
searchFilter.names = ["Shader Ball"]
entityId = entity.SearchBus(bus.Broadcast, 'SearchEntities', searchFilter)[0]
check_result(entityId, "entityId was found")
        # Find the Mesh component
typeIdsList = editor.EditorComponentAPIBus(bus.Broadcast, 'FindComponentTypeIdsByEntityType', ['Mesh'], entity.EntityType().Game)
getComponentOutcome = editor.EditorComponentAPIBus(bus.Broadcast, 'GetComponentOfType', entityId, typeIdsList[0])
check_result(getComponentOutcome.IsSuccess(), "Found component")
componentId = getComponentOutcome.GetValue()
# Get the PTE from the Mesh Component
pteObj = editor.EditorComponentAPIBus(bus.Broadcast, 'BuildComponentPropertyTreeEditor', componentId)
        check_result(pteObj.IsSuccess(), "Created a PropertyTreeEditor for the Mesh componentId")
pte = pteObj.GetValue()
paths = pte.build_paths_list_with_types()
# test for visibility (default all nodes are exposed)
check_result(pte.get_value('Controller|Configuration|Model Asset').IsSuccess(), "Found property hidden node in path")
# enable visibility enforcement
pte.set_visible_enforcement(True)
paths = pte.build_paths_list_with_types()
check_result(pte.get_value('Controller|Configuration|Model Asset').IsSuccess() is not True, "Property Controller|Configuration| is now a hidden path")
# test for visibility (missing some property paths parts now)
check_result(pte.get_value('Model Asset').IsSuccess(), "Property path enforcement of visibility")
if __name__ == "__main__":
tester = Editor_ComponentPropertyCommands_visibility()
tester.test_case(tester.test, level="TestDependenciesLevel")
|
the-stack_106_24205 | from functools import partial
from typing import Any, List, Optional
import torch
from torch import nn
from torch.nn import functional as F
BATCH_NORM_MOMENTUM = 0.005
ENABLE_BIAS = True
activation_fn = nn.ELU()
class ASPPConv(nn.Sequential):
def __init__(self, in_channels: int, out_channels: int, dilation: int) -> None:
modules = [
nn.Conv2d(in_channels, out_channels, 3, padding=dilation, dilation=dilation, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(),
]
super().__init__(*modules)
class ASPPPooling(nn.Sequential):
def __init__(self, in_channels: int, out_channels: int) -> None:
super().__init__(
nn.AdaptiveAvgPool2d(1),
nn.Conv2d(in_channels, out_channels, 1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(),
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
size = x.shape[-2:]
for mod in self:
x = mod(x)
return F.interpolate(x, size=size, mode="bilinear", align_corners=False)
class ASPP(nn.Module):
def __init__(self, in_channels: int, atrous_rates: List[int], out_channels: int = 256) -> None:
super().__init__()
modules = []
modules.append(
nn.Sequential(nn.Conv2d(in_channels, out_channels, 1, bias=False), nn.BatchNorm2d(out_channels), nn.ReLU())
)
rates = tuple(atrous_rates)
for rate in rates:
modules.append(ASPPConv(in_channels, out_channels, rate))
modules.append(ASPPPooling(in_channels, out_channels))
self.convs = nn.ModuleList(modules)
self.project = nn.Sequential(
nn.Conv2d(len(self.convs) * out_channels, out_channels, 1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(),
nn.Dropout(0.5),
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
_res = []
for conv in self.convs:
_res.append(conv(x))
res = torch.cat(_res, dim=1)
return self.project(res)
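# Shape sketch (illustrative values, not part of the original module):
#   aspp = ASPP(in_channels=2048, atrous_rates=[12, 24, 36], out_channels=256)
#   y = aspp(torch.randn(1, 2048, 33, 33))   # -> torch.Size([1, 256, 33, 33])
# Every branch preserves the spatial size (padding equals dilation for the 3x3
# convs, and the pooled branch is upsampled back), so only the channel count changes.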
class AtrousBlock(nn.Module):
def __init__(self, input_filters, filters, dilation, apply_initial_bn=True):
super(AtrousBlock, self).__init__()
self.initial_bn = nn.BatchNorm2d(input_filters, BATCH_NORM_MOMENTUM)
self.apply_initial_bn = apply_initial_bn
self.conv1 = nn.Conv2d(input_filters, filters*2, 1, 1, 0, bias=False)
self.norm1 = nn.BatchNorm2d(filters*2, BATCH_NORM_MOMENTUM)
self.atrous_conv = nn.Conv2d(filters*2, filters, 3, 1, dilation, dilation, bias=False)
self.norm2 = nn.BatchNorm2d(filters, BATCH_NORM_MOMENTUM)
def forward(self, input):
if self.apply_initial_bn:
input = self.initial_bn(input)
input = self.conv1(input.relu())
input = self.norm1(input)
input = self.atrous_conv(input.relu())
input = self.norm2(input)
return input
class ASSPBlock(nn.Module):
def __init__(self, input_filters=256, cat_filters=448, atrous_filters=128):
super(ASSPBlock, self).__init__()
self.atrous_conv_r3 = AtrousBlock(input_filters, atrous_filters, 3, apply_initial_bn=False)
self.atrous_conv_r6 = AtrousBlock(cat_filters + atrous_filters, atrous_filters, 6)
self.atrous_conv_r12 = AtrousBlock(cat_filters + atrous_filters*2, atrous_filters, 12)
self.atrous_conv_r18 = AtrousBlock(cat_filters + atrous_filters*3, atrous_filters, 18)
self.atrous_conv_r24 = AtrousBlock(cat_filters + atrous_filters*4, atrous_filters, 24)
self.conv = nn.Conv2d(5 * atrous_filters + cat_filters, atrous_filters, 3, 1, 1, bias=ENABLE_BIAS)
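    # Note: forward() below expects a 2-tuple -- the feature map to run the atrous
    # pyramid on plus a same-resolution "cat" tensor (e.g. skip features), which is
    # concatenated into the input of every dilated branch after the first.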
def forward(self, input):
input, cat = input
layer1_out = self.atrous_conv_r3(input)
concat1 = torch.cat((cat, layer1_out), 1)
layer2_out = self.atrous_conv_r6(concat1)
concat2 = torch.cat((concat1, layer2_out), 1)
layer3_out = self.atrous_conv_r12(concat2)
concat3 = torch.cat((concat2, layer3_out), 1)
layer4_out = self.atrous_conv_r18(concat3)
concat4 = torch.cat((concat3, layer4_out), 1)
layer5_out = self.atrous_conv_r24(concat4)
concat5 = torch.cat((concat4, layer5_out), 1)
features = activation_fn(self.conv(concat5))
        return features
|
the-stack_106_24206 | from scoring_matrices import BLOSUM62
def global_alignment(seq1, seq2, scoring_matrix, indel_penalty):
m = len(seq1)
n = len(seq2)
s = [[0 for i in range(n+1)] for j in range(m+1)]
backtrack_matrix = [[0 for i in range(n+1)] for j in range(m+1)]
for i in range(1, m+1):
s[i][0] = -i*indel_penalty
for j in range(1, n+1):
s[0][j] = -j*indel_penalty
for i in range(1, m+1):
for j in range(1, n+1):
score = [s[i-1][j] - indel_penalty, s[i][j-1] - indel_penalty, s[i-1][j-1] + scoring_matrix[seq1[i-1], seq2[j-1]]]
s[i][j] = max(score)
backtrack_matrix[i][j] = score.index(s[i][j])
aligned_seq1 = seq1
aligned_seq2 = seq2
max_score = s[m][n]
i, j = m, n
    while i * j != 0:
if backtrack_matrix[i][j] == 0:
aligned_seq2 = indel_inserted(aligned_seq2, j)
i = i-1
elif backtrack_matrix[i][j] == 1:
aligned_seq1 = indel_inserted(aligned_seq1, i)
j = j-1
else:
i = i-1
j = j-1
for indel in range(i):
aligned_seq2 = indel_inserted(aligned_seq2, 0)
for indel in range(j):
aligned_seq1 = indel_inserted(aligned_seq1, 0)
return max_score, aligned_seq1, aligned_seq2
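# Recurrence used above (Needleman-Wunsch with a linear gap penalty):
#   s[i][j] = max(s[i-1][j] - indel_penalty,                    # gap in seq2
#                 s[i][j-1] - indel_penalty,                    # gap in seq1
#                 s[i-1][j-1] + score(seq1[i-1], seq2[j-1]))    # match/mismatch
# backtrack_matrix stores which branch won, so the aligned strings can be rebuilt.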
def indel_inserted(seq, i):
return seq[:i] + '-' + seq[i:]
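# Example: indel_inserted("ACGT", 2) -> "AC-GT".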
def main():
with open('datasets/rosalind_ba5e.txt') as input_data:
seq1, seq2 = input_data.read().strip().split('\n')
indel_penalty = 5
scoring_matrix = BLOSUM62()
max_score, aligned_seq1, aligned_seq2 = global_alignment(seq1, seq2, scoring_matrix, indel_penalty)
print(str(max_score))
print(aligned_seq1)
print(aligned_seq2)
with open('solutions/rosalind_ba5e', 'w') as output_data:
output_data.write(str(max_score)+'\n')
output_data.write(aligned_seq1+'\n')
output_data.write(aligned_seq2)
if __name__ == '__main__':
    main()
|
the-stack_106_24207 | from datetime import datetime
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.base import _registry as ea_registry
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_interval_dtype,
is_object_dtype,
)
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
IntervalDtype,
PeriodDtype,
)
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
Interval,
IntervalIndex,
MultiIndex,
NaT,
Period,
PeriodIndex,
Series,
Timestamp,
cut,
date_range,
notna,
period_range,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.tseries.offsets import BDay
class TestDataFrameSetItem:
def test_setitem_str_subclass(self):
# GH#37366
class mystring(str):
pass
data = ["2020-10-22 01:21:00+00:00"]
index = DatetimeIndex(data)
df = DataFrame({"a": [1]}, index=index)
df["b"] = 2
df[mystring("c")] = 3
expected = DataFrame({"a": [1], "b": [2], mystring("c"): [3]}, index=index)
tm.assert_equal(df, expected)
@pytest.mark.parametrize("dtype", ["int32", "int64", "float32", "float64"])
def test_setitem_dtype(self, dtype, float_frame):
arr = np.random.randn(len(float_frame))
float_frame[dtype] = np.array(arr, dtype=dtype)
assert float_frame[dtype].dtype.name == dtype
def test_setitem_list_not_dataframe(self, float_frame):
data = np.random.randn(len(float_frame), 2)
float_frame[["A", "B"]] = data
tm.assert_almost_equal(float_frame[["A", "B"]].values, data)
    def test_setitem_error_msgs(self):
# GH 7432
df = DataFrame(
{"bar": [1, 2, 3], "baz": ["d", "e", "f"]},
index=Index(["a", "b", "c"], name="foo"),
)
ser = Series(
["g", "h", "i", "j"],
index=Index(["a", "b", "c", "a"], name="foo"),
name="fiz",
)
msg = "cannot reindex on an axis with duplicate labels"
with pytest.raises(ValueError, match=msg):
with tm.assert_produces_warning(FutureWarning, match="non-unique"):
df["newcol"] = ser
# GH 4107, more descriptive error message
df = DataFrame(np.random.randint(0, 2, (4, 4)), columns=["a", "b", "c", "d"])
msg = "incompatible index of inserted column with frame index"
with pytest.raises(TypeError, match=msg):
df["gr"] = df.groupby(["b", "c"]).count()
def test_setitem_benchmark(self):
# from the vb_suite/frame_methods/frame_insert_columns
N = 10
K = 5
df = DataFrame(index=range(N))
new_col = np.random.randn(N)
for i in range(K):
df[i] = new_col
expected = DataFrame(np.repeat(new_col, K).reshape(N, K), index=range(N))
tm.assert_frame_equal(df, expected)
def test_setitem_different_dtype(self):
df = DataFrame(
np.random.randn(5, 3), index=np.arange(5), columns=["c", "b", "a"]
)
df.insert(0, "foo", df["a"])
df.insert(2, "bar", df["c"])
# diff dtype
# new item
df["x"] = df["a"].astype("float32")
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 5 + [np.dtype("float32")],
index=["foo", "c", "bar", "b", "a", "x"],
)
tm.assert_series_equal(result, expected)
# replacing current (in different block)
df["a"] = df["a"].astype("float32")
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 4 + [np.dtype("float32")] * 2,
index=["foo", "c", "bar", "b", "a", "x"],
)
tm.assert_series_equal(result, expected)
df["y"] = df["a"].astype("int32")
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 4 + [np.dtype("float32")] * 2 + [np.dtype("int32")],
index=["foo", "c", "bar", "b", "a", "x", "y"],
)
tm.assert_series_equal(result, expected)
def test_setitem_empty_columns(self):
# GH 13522
df = DataFrame(index=["A", "B", "C"])
df["X"] = df.index
df["X"] = ["x", "y", "z"]
exp = DataFrame(data={"X": ["x", "y", "z"]}, index=["A", "B", "C"])
tm.assert_frame_equal(df, exp)
def test_setitem_dt64_index_empty_columns(self):
rng = date_range("1/1/2000 00:00:00", "1/1/2000 1:59:50", freq="10s")
df = DataFrame(index=np.arange(len(rng)))
df["A"] = rng
assert df["A"].dtype == np.dtype("M8[ns]")
def test_setitem_timestamp_empty_columns(self):
# GH#19843
df = DataFrame(index=range(3))
df["now"] = Timestamp("20130101", tz="UTC")
expected = DataFrame(
[[Timestamp("20130101", tz="UTC")]] * 3, index=[0, 1, 2], columns=["now"]
)
tm.assert_frame_equal(df, expected)
def test_setitem_wrong_length_categorical_dtype_raises(self):
# GH#29523
cat = Categorical.from_codes([0, 1, 1, 0, 1, 2], ["a", "b", "c"])
df = DataFrame(range(10), columns=["bar"])
msg = (
rf"Length of values \({len(cat)}\) "
rf"does not match length of index \({len(df)}\)"
)
with pytest.raises(ValueError, match=msg):
df["foo"] = cat
def test_setitem_with_sparse_value(self):
# GH#8131
df = DataFrame({"c_1": ["a", "b", "c"], "n_1": [1.0, 2.0, 3.0]})
sp_array = SparseArray([0, 0, 1])
df["new_column"] = sp_array
expected = Series(sp_array, name="new_column")
tm.assert_series_equal(df["new_column"], expected)
def test_setitem_with_unaligned_sparse_value(self):
df = DataFrame({"c_1": ["a", "b", "c"], "n_1": [1.0, 2.0, 3.0]})
sp_series = Series(SparseArray([0, 0, 1]), index=[2, 1, 0])
df["new_column"] = sp_series
expected = Series(SparseArray([1, 0, 0]), name="new_column")
tm.assert_series_equal(df["new_column"], expected)
def test_setitem_period_preserves_dtype(self):
# GH: 26861
data = [Period("2003-12", "D")]
result = DataFrame([])
result["a"] = data
expected = DataFrame({"a": data})
tm.assert_frame_equal(result, expected)
def test_setitem_dict_preserves_dtypes(self):
# https://github.com/pandas-dev/pandas/issues/34573
expected = DataFrame(
{
"a": Series([0, 1, 2], dtype="int64"),
"b": Series([1, 2, 3], dtype=float),
"c": Series([1, 2, 3], dtype=float),
}
)
df = DataFrame(
{
"a": Series([], dtype="int64"),
"b": Series([], dtype=float),
"c": Series([], dtype=float),
}
)
for idx, b in enumerate([1, 2, 3]):
df.loc[df.shape[0]] = {"a": int(idx), "b": float(b), "c": float(b)}
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
"obj,dtype",
[
(Period("2020-01"), PeriodDtype("M")),
(
Interval(left=0, right=5, inclusive="right"),
IntervalDtype("int64", "right"),
),
(
Timestamp("2011-01-01", tz="US/Eastern"),
DatetimeTZDtype(tz="US/Eastern"),
),
],
)
def test_setitem_extension_types(self, obj, dtype):
# GH: 34832
expected = DataFrame({"idx": [1, 2, 3], "obj": Series([obj] * 3, dtype=dtype)})
df = DataFrame({"idx": [1, 2, 3]})
df["obj"] = obj
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
"ea_name",
[
dtype.name
for dtype in ea_registry.dtypes
# property would require instantiation
if not isinstance(dtype.name, property)
]
# mypy doesn't allow adding lists of different types
# https://github.com/python/mypy/issues/5492
+ ["datetime64[ns, UTC]", "period[D]"], # type: ignore[list-item]
)
def test_setitem_with_ea_name(self, ea_name):
# GH 38386
result = DataFrame([0])
result[ea_name] = [1]
expected = DataFrame({0: [0], ea_name: [1]})
tm.assert_frame_equal(result, expected)
def test_setitem_dt64_ndarray_with_NaT_and_diff_time_units(self):
# GH#7492
data_ns = np.array([1, "nat"], dtype="datetime64[ns]")
result = Series(data_ns).to_frame()
result["new"] = data_ns
expected = DataFrame({0: [1, None], "new": [1, None]}, dtype="datetime64[ns]")
tm.assert_frame_equal(result, expected)
# OutOfBoundsDatetime error shouldn't occur
data_s = np.array([1, "nat"], dtype="datetime64[s]")
result["new"] = data_s
expected = DataFrame({0: [1, None], "new": [1e9, None]}, dtype="datetime64[ns]")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("unit", ["h", "m", "s", "ms", "D", "M", "Y"])
def test_frame_setitem_datetime64_col_other_units(self, unit):
# Check that non-nano dt64 values get cast to dt64 on setitem
# into a not-yet-existing column
n = 100
dtype = np.dtype(f"M8[{unit}]")
vals = np.arange(n, dtype=np.int64).view(dtype)
ex_vals = vals.astype("datetime64[ns]")
df = DataFrame({"ints": np.arange(n)}, index=np.arange(n))
df[unit] = vals
assert df[unit].dtype == np.dtype("M8[ns]")
assert (df[unit].values == ex_vals).all()
@pytest.mark.parametrize("unit", ["h", "m", "s", "ms", "D", "M", "Y"])
def test_frame_setitem_existing_datetime64_col_other_units(self, unit):
# Check that non-nano dt64 values get cast to dt64 on setitem
# into an already-existing dt64 column
n = 100
dtype = np.dtype(f"M8[{unit}]")
vals = np.arange(n, dtype=np.int64).view(dtype)
ex_vals = vals.astype("datetime64[ns]")
df = DataFrame({"ints": np.arange(n)}, index=np.arange(n))
df["dates"] = np.arange(n, dtype=np.int64).view("M8[ns]")
# We overwrite existing dt64 column with new, non-nano dt64 vals
df["dates"] = vals
assert (df["dates"].values == ex_vals).all()
def test_setitem_dt64tz(self, timezone_frame):
df = timezone_frame
idx = df["B"].rename("foo")
# setitem
df["C"] = idx
tm.assert_series_equal(df["C"], Series(idx, name="C"))
df["D"] = "foo"
df["D"] = idx
tm.assert_series_equal(df["D"], Series(idx, name="D"))
del df["D"]
# assert that A & C are not sharing the same base (e.g. they
# are copies)
v1 = df._mgr.arrays[1]
v2 = df._mgr.arrays[2]
tm.assert_extension_array_equal(v1, v2)
v1base = v1._data.base
v2base = v2._data.base
assert v1base is None or (id(v1base) != id(v2base))
# with nan
df2 = df.copy()
df2.iloc[1, 1] = NaT
df2.iloc[1, 2] = NaT
result = df2["B"]
tm.assert_series_equal(notna(result), Series([True, False, True], name="B"))
tm.assert_series_equal(df2.dtypes, df.dtypes)
def test_setitem_periodindex(self):
rng = period_range("1/1/2000", periods=5, name="index")
df = DataFrame(np.random.randn(5, 3), index=rng)
df["Index"] = rng
rs = Index(df["Index"])
tm.assert_index_equal(rs, rng, check_names=False)
assert rs.name == "Index"
assert rng.name == "index"
rs = df.reset_index().set_index("index")
assert isinstance(rs.index, PeriodIndex)
tm.assert_index_equal(rs.index, rng)
def test_setitem_complete_column_with_array(self):
# GH#37954
df = DataFrame({"a": ["one", "two", "three"], "b": [1, 2, 3]})
arr = np.array([[1, 1], [3, 1], [5, 1]])
df[["c", "d"]] = arr
expected = DataFrame(
{
"a": ["one", "two", "three"],
"b": [1, 2, 3],
"c": [1, 3, 5],
"d": [1, 1, 1],
}
)
expected["c"] = expected["c"].astype(arr.dtype)
expected["d"] = expected["d"].astype(arr.dtype)
assert expected["c"].dtype == arr.dtype
assert expected["d"].dtype == arr.dtype
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("dtype", ["f8", "i8", "u8"])
def test_setitem_bool_with_numeric_index(self, dtype):
# GH#36319
cols = Index([1, 2, 3], dtype=dtype)
df = DataFrame(np.random.randn(3, 3), columns=cols)
df[False] = ["a", "b", "c"]
expected_cols = Index([1, 2, 3, False], dtype=object)
if dtype == "f8":
expected_cols = Index([1.0, 2.0, 3.0, False], dtype=object)
tm.assert_index_equal(df.columns, expected_cols)
@pytest.mark.parametrize("indexer", ["B", ["B"]])
def test_setitem_frame_length_0_str_key(self, indexer):
# GH#38831
df = DataFrame(columns=["A", "B"])
other = DataFrame({"B": [1, 2]})
df[indexer] = other
expected = DataFrame({"A": [np.nan] * 2, "B": [1, 2]})
expected["A"] = expected["A"].astype("object")
tm.assert_frame_equal(df, expected)
def test_setitem_frame_duplicate_columns(self, using_array_manager):
# GH#15695
warn = FutureWarning if using_array_manager else None
msg = "will attempt to set the values inplace"
cols = ["A", "B", "C"] * 2
df = DataFrame(index=range(3), columns=cols)
df.loc[0, "A"] = (0, 3)
with tm.assert_produces_warning(warn, match=msg):
df.loc[:, "B"] = (1, 4)
df["C"] = (2, 5)
expected = DataFrame(
[
[0, 1, 2, 3, 4, 5],
[np.nan, 1, 2, np.nan, 4, 5],
[np.nan, 1, 2, np.nan, 4, 5],
],
dtype="object",
)
if using_array_manager:
# setitem replaces column so changes dtype
expected.columns = cols
expected["C"] = expected["C"].astype("int64")
# TODO(ArrayManager) .loc still overwrites
expected["B"] = expected["B"].astype("int64")
else:
# set these with unique columns to be extra-unambiguous
expected[2] = expected[2].astype(np.int64)
expected[5] = expected[5].astype(np.int64)
expected.columns = cols
tm.assert_frame_equal(df, expected)
def test_setitem_frame_duplicate_columns_size_mismatch(self):
# GH#39510
cols = ["A", "B", "C"] * 2
df = DataFrame(index=range(3), columns=cols)
with pytest.raises(ValueError, match="Columns must be same length as key"):
df[["A"]] = (0, 3, 5)
df2 = df.iloc[:, :3] # unique columns
with pytest.raises(ValueError, match="Columns must be same length as key"):
df2[["A"]] = (0, 3, 5)
@pytest.mark.parametrize("cols", [["a", "b", "c"], ["a", "a", "a"]])
def test_setitem_df_wrong_column_number(self, cols):
# GH#38604
df = DataFrame([[1, 2, 3]], columns=cols)
rhs = DataFrame([[10, 11]], columns=["d", "e"])
msg = "Columns must be same length as key"
with pytest.raises(ValueError, match=msg):
df["a"] = rhs
def test_setitem_listlike_indexer_duplicate_columns(self):
# GH#38604
df = DataFrame([[1, 2, 3]], columns=["a", "b", "b"])
rhs = DataFrame([[10, 11, 12]], columns=["a", "b", "b"])
df[["a", "b"]] = rhs
expected = DataFrame([[10, 11, 12]], columns=["a", "b", "b"])
tm.assert_frame_equal(df, expected)
df[["c", "b"]] = rhs
expected = DataFrame([[10, 11, 12, 10]], columns=["a", "b", "b", "c"])
tm.assert_frame_equal(df, expected)
def test_setitem_listlike_indexer_duplicate_columns_not_equal_length(self):
# GH#39403
df = DataFrame([[1, 2, 3]], columns=["a", "b", "b"])
rhs = DataFrame([[10, 11]], columns=["a", "b"])
msg = "Columns must be same length as key"
with pytest.raises(ValueError, match=msg):
df[["a", "b"]] = rhs
def test_setitem_intervals(self):
df = DataFrame({"A": range(10)})
ser = cut(df["A"], 5)
assert isinstance(ser.cat.categories, IntervalIndex)
# B & D end up as Categoricals
# the remainder are converted to in-line objects
# containing an IntervalIndex.values
df["B"] = ser
df["C"] = np.array(ser)
df["D"] = ser.values
df["E"] = np.array(ser.values)
df["F"] = ser.astype(object)
assert is_categorical_dtype(df["B"].dtype)
assert is_interval_dtype(df["B"].cat.categories)
assert is_categorical_dtype(df["D"].dtype)
assert is_interval_dtype(df["D"].cat.categories)
# These go through the Series constructor and so get inferred back
# to IntervalDtype
assert is_interval_dtype(df["C"])
assert is_interval_dtype(df["E"])
# But the Series constructor doesn't do inference on Series objects,
# so setting df["F"] doesn't get cast back to IntervalDtype
assert is_object_dtype(df["F"])
# they compare equal as Index
# when converted to numpy objects
c = lambda x: Index(np.array(x))
tm.assert_index_equal(c(df.B), c(df.B))
tm.assert_index_equal(c(df.B), c(df.C), check_names=False)
tm.assert_index_equal(c(df.B), c(df.D), check_names=False)
tm.assert_index_equal(c(df.C), c(df.D), check_names=False)
# B & D are the same Series
tm.assert_series_equal(df["B"], df["B"])
tm.assert_series_equal(df["B"], df["D"], check_names=False)
# C & E are the same Series
tm.assert_series_equal(df["C"], df["C"])
tm.assert_series_equal(df["C"], df["E"], check_names=False)
def test_setitem_categorical(self):
# GH#35369
df = DataFrame({"h": Series(list("mn")).astype("category")})
df.h = df.h.cat.reorder_categories(["n", "m"])
expected = DataFrame(
{"h": Categorical(["m", "n"]).reorder_categories(["n", "m"])}
)
tm.assert_frame_equal(df, expected)
def test_setitem_with_empty_listlike(self):
# GH#17101
index = Index([], name="idx")
result = DataFrame(columns=["A"], index=index)
result["A"] = []
expected = DataFrame(columns=["A"], index=index)
tm.assert_index_equal(result.index, expected.index)
@pytest.mark.parametrize(
"cols, values, expected",
[
(["C", "D", "D", "a"], [1, 2, 3, 4], 4), # with duplicates
(["D", "C", "D", "a"], [1, 2, 3, 4], 4), # mixed order
(["C", "B", "B", "a"], [1, 2, 3, 4], 4), # other duplicate cols
(["C", "B", "a"], [1, 2, 3], 3), # no duplicates
(["B", "C", "a"], [3, 2, 1], 1), # alphabetical order
(["C", "a", "B"], [3, 2, 1], 2), # in the middle
],
)
def test_setitem_same_column(self, cols, values, expected):
# GH#23239
df = DataFrame([values], columns=cols)
df["a"] = df["a"]
result = df["a"].values[0]
assert result == expected
def test_setitem_multi_index(self):
# GH#7655, test that assigning to a sub-frame of a frame
# with multi-index columns aligns both rows and columns
it = ["jim", "joe", "jolie"], ["first", "last"], ["left", "center", "right"]
cols = MultiIndex.from_product(it)
index = date_range("20141006", periods=20)
vals = np.random.randint(1, 1000, (len(index), len(cols)))
df = DataFrame(vals, columns=cols, index=index)
i, j = df.index.values.copy(), it[-1][:]
np.random.shuffle(i)
df["jim"] = df["jolie"].loc[i, ::-1]
tm.assert_frame_equal(df["jim"], df["jolie"])
np.random.shuffle(j)
df[("joe", "first")] = df[("jolie", "last")].loc[i, j]
tm.assert_frame_equal(df[("joe", "first")], df[("jolie", "last")])
np.random.shuffle(j)
df[("joe", "last")] = df[("jolie", "first")].loc[i, j]
tm.assert_frame_equal(df[("joe", "last")], df[("jolie", "first")])
@pytest.mark.parametrize(
"columns,box,expected",
[
(
["A", "B", "C", "D"],
7,
DataFrame(
[[7, 7, 7, 7], [7, 7, 7, 7], [7, 7, 7, 7]],
columns=["A", "B", "C", "D"],
),
),
(
["C", "D"],
[7, 8],
DataFrame(
[[1, 2, 7, 8], [3, 4, 7, 8], [5, 6, 7, 8]],
columns=["A", "B", "C", "D"],
),
),
(
["A", "B", "C"],
np.array([7, 8, 9], dtype=np.int64),
DataFrame([[7, 8, 9], [7, 8, 9], [7, 8, 9]], columns=["A", "B", "C"]),
),
(
["B", "C", "D"],
[[7, 8, 9], [10, 11, 12], [13, 14, 15]],
DataFrame(
[[1, 7, 8, 9], [3, 10, 11, 12], [5, 13, 14, 15]],
columns=["A", "B", "C", "D"],
),
),
(
["C", "A", "D"],
np.array([[7, 8, 9], [10, 11, 12], [13, 14, 15]], dtype=np.int64),
DataFrame(
[[8, 2, 7, 9], [11, 4, 10, 12], [14, 6, 13, 15]],
columns=["A", "B", "C", "D"],
),
),
(
["A", "C"],
DataFrame([[7, 8], [9, 10], [11, 12]], columns=["A", "C"]),
DataFrame(
[[7, 2, 8], [9, 4, 10], [11, 6, 12]], columns=["A", "B", "C"]
),
),
],
)
def test_setitem_list_missing_columns(self, columns, box, expected):
# GH#29334
df = DataFrame([[1, 2], [3, 4], [5, 6]], columns=["A", "B"])
df[columns] = box
tm.assert_frame_equal(df, expected)
def test_setitem_list_of_tuples(self, float_frame):
tuples = list(zip(float_frame["A"], float_frame["B"]))
float_frame["tuples"] = tuples
result = float_frame["tuples"]
expected = Series(tuples, index=float_frame.index, name="tuples")
tm.assert_series_equal(result, expected)
def test_setitem_iloc_generator(self):
# GH#39614
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
indexer = (x for x in [1, 2])
df.iloc[indexer] = 1
expected = DataFrame({"a": [1, 1, 1], "b": [4, 1, 1]})
tm.assert_frame_equal(df, expected)
def test_setitem_iloc_two_dimensional_generator(self):
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
indexer = (x for x in [1, 2])
df.iloc[indexer, 1] = 1
expected = DataFrame({"a": [1, 2, 3], "b": [4, 1, 1]})
tm.assert_frame_equal(df, expected)
def test_setitem_dtypes_bytes_type_to_object(self):
# GH 20734
index = Series(name="id", dtype="S24")
df = DataFrame(index=index)
df["a"] = Series(name="a", index=index, dtype=np.uint32)
df["b"] = Series(name="b", index=index, dtype="S64")
df["c"] = Series(name="c", index=index, dtype="S64")
df["d"] = Series(name="d", index=index, dtype=np.uint8)
result = df.dtypes
expected = Series([np.uint32, object, object, np.uint8], index=list("abcd"))
tm.assert_series_equal(result, expected)
def test_boolean_mask_nullable_int64(self):
# GH 28928
result = DataFrame({"a": [3, 4], "b": [5, 6]}).astype(
{"a": "int64", "b": "Int64"}
)
mask = Series(False, index=result.index)
result.loc[mask, "a"] = result["a"]
result.loc[mask, "b"] = result["b"]
expected = DataFrame({"a": [3, 4], "b": [5, 6]}).astype(
{"a": "int64", "b": "Int64"}
)
tm.assert_frame_equal(result, expected)
# TODO(ArrayManager) set column with 2d column array, see #44788
@td.skip_array_manager_not_yet_implemented
def test_setitem_npmatrix_2d(self):
# GH#42376
# for use-case df["x"] = sparse.random(10, 10).mean(axis=1)
expected = DataFrame(
{"np-array": np.ones(10), "np-matrix": np.ones(10)}, index=np.arange(10)
)
a = np.ones((10, 1))
df = DataFrame(index=np.arange(10))
df["np-array"] = a
# Instantiation of `np.matrix` gives PendingDeprecationWarning
with tm.assert_produces_warning(PendingDeprecationWarning):
df["np-matrix"] = np.matrix(a)
tm.assert_frame_equal(df, expected)
class TestSetitemTZAwareValues:
@pytest.fixture
def idx(self):
naive = DatetimeIndex(["2013-1-1 13:00", "2013-1-2 14:00"], name="B")
idx = naive.tz_localize("US/Pacific")
return idx
@pytest.fixture
def expected(self, idx):
expected = Series(np.array(idx.tolist(), dtype="object"), name="B")
assert expected.dtype == idx.dtype
return expected
def test_setitem_dt64series(self, idx, expected):
# convert to utc
df = DataFrame(np.random.randn(2, 1), columns=["A"])
df["B"] = idx
with tm.assert_produces_warning(FutureWarning) as m:
df["B"] = idx.to_series(keep_tz=False, index=[0, 1])
msg = "do 'idx.tz_convert(None)' before calling"
assert msg in str(m[0].message)
result = df["B"]
comp = Series(idx.tz_convert("UTC").tz_localize(None), name="B")
tm.assert_series_equal(result, comp)
def test_setitem_datetimeindex(self, idx, expected):
# setting a DataFrame column with a tzaware DTI retains the dtype
df = DataFrame(np.random.randn(2, 1), columns=["A"])
# assign to frame
df["B"] = idx
result = df["B"]
tm.assert_series_equal(result, expected)
def test_setitem_object_array_of_tzaware_datetimes(self, idx, expected):
# setting a DataFrame column with a tzaware DTI retains the dtype
df = DataFrame(np.random.randn(2, 1), columns=["A"])
# object array of datetimes with a tz
df["B"] = idx.to_pydatetime()
result = df["B"]
tm.assert_series_equal(result, expected)
class TestDataFrameSetItemWithExpansion:
def test_setitem_listlike_views(self):
# GH#38148
df = DataFrame({"a": [1, 2, 3], "b": [4, 4, 6]})
# get one column as a view of df
ser = df["a"]
# add columns with list-like indexer
df[["c", "d"]] = np.array([[0.1, 0.2], [0.3, 0.4], [0.4, 0.5]])
# edit in place the first column to check view semantics
df.iloc[0, 0] = 100
expected = Series([100, 2, 3], name="a")
tm.assert_series_equal(ser, expected)
def test_setitem_string_column_numpy_dtype_raising(self):
# GH#39010
df = DataFrame([[1, 2], [3, 4]])
df["0 - Name"] = [5, 6]
expected = DataFrame([[1, 2, 5], [3, 4, 6]], columns=[0, 1, "0 - Name"])
tm.assert_frame_equal(df, expected)
def test_setitem_empty_df_duplicate_columns(self):
# GH#38521
df = DataFrame(columns=["a", "b", "b"], dtype="float64")
msg = "will attempt to set the values inplace instead"
with tm.assert_produces_warning(FutureWarning, match=msg):
df.loc[:, "a"] = list(range(2))
expected = DataFrame(
[[0, np.nan, np.nan], [1, np.nan, np.nan]], columns=["a", "b", "b"]
)
tm.assert_frame_equal(df, expected)
def test_setitem_with_expansion_categorical_dtype(self):
# assignment
df = DataFrame(
{"value": np.array(np.random.randint(0, 10000, 100), dtype="int32")}
)
labels = Categorical([f"{i} - {i + 499}" for i in range(0, 10000, 500)])
df = df.sort_values(by=["value"], ascending=True)
ser = cut(df.value, range(0, 10500, 500), right=False, labels=labels)
cat = ser.values
# setting with a Categorical
df["D"] = cat
str(df)
result = df.dtypes
expected = Series(
[np.dtype("int32"), CategoricalDtype(categories=labels, ordered=False)],
index=["value", "D"],
)
tm.assert_series_equal(result, expected)
# setting with a Series
df["E"] = ser
str(df)
result = df.dtypes
expected = Series(
[
np.dtype("int32"),
CategoricalDtype(categories=labels, ordered=False),
CategoricalDtype(categories=labels, ordered=False),
],
index=["value", "D", "E"],
)
tm.assert_series_equal(result, expected)
result1 = df["D"]
result2 = df["E"]
tm.assert_categorical_equal(result1._mgr.array, cat)
# sorting
ser.name = "E"
tm.assert_series_equal(result2.sort_index(), ser.sort_index())
def test_setitem_scalars_no_index(self):
# GH#16823 / GH#17894
df = DataFrame()
df["foo"] = 1
expected = DataFrame(columns=["foo"]).astype(np.int64)
tm.assert_frame_equal(df, expected)
def test_setitem_newcol_tuple_key(self, float_frame):
assert (
"A",
"B",
) not in float_frame.columns
float_frame["A", "B"] = float_frame["A"]
assert ("A", "B") in float_frame.columns
result = float_frame["A", "B"]
expected = float_frame["A"]
tm.assert_series_equal(result, expected, check_names=False)
def test_frame_setitem_newcol_timestamp(self):
# GH#2155
columns = date_range(start="1/1/2012", end="2/1/2012", freq=BDay())
data = DataFrame(columns=columns, index=range(10))
t = datetime(2012, 11, 1)
ts = Timestamp(t)
data[ts] = np.nan # works, mostly a smoke-test
assert np.isnan(data[ts]).all()
class TestDataFrameSetItemSlicing:
def test_setitem_slice_position(self):
# GH#31469
df = DataFrame(np.zeros((100, 1)))
df[-4:] = 1
arr = np.zeros((100, 1))
arr[-4:] = 1
expected = DataFrame(arr)
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("indexer", [tm.setitem, tm.iloc])
@pytest.mark.parametrize("box", [Series, np.array, list, pd.array])
@pytest.mark.parametrize("n", [1, 2, 3])
def test_setitem_slice_indexer_broadcasting_rhs(self, n, box, indexer):
# GH#40440
df = DataFrame([[1, 3, 5]] + [[2, 4, 6]] * n, columns=["a", "b", "c"])
indexer(df)[1:] = box([10, 11, 12])
expected = DataFrame([[1, 3, 5]] + [[10, 11, 12]] * n, columns=["a", "b", "c"])
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("box", [Series, np.array, list, pd.array])
@pytest.mark.parametrize("n", [1, 2, 3])
def test_setitem_list_indexer_broadcasting_rhs(self, n, box):
# GH#40440
df = DataFrame([[1, 3, 5]] + [[2, 4, 6]] * n, columns=["a", "b", "c"])
df.iloc[list(range(1, n + 1))] = box([10, 11, 12])
expected = DataFrame([[1, 3, 5]] + [[10, 11, 12]] * n, columns=["a", "b", "c"])
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("indexer", [tm.setitem, tm.iloc])
@pytest.mark.parametrize("box", [Series, np.array, list, pd.array])
@pytest.mark.parametrize("n", [1, 2, 3])
def test_setitem_slice_broadcasting_rhs_mixed_dtypes(self, n, box, indexer):
# GH#40440
df = DataFrame(
[[1, 3, 5], ["x", "y", "z"]] + [[2, 4, 6]] * n, columns=["a", "b", "c"]
)
indexer(df)[1:] = box([10, 11, 12])
expected = DataFrame(
[[1, 3, 5]] + [[10, 11, 12]] * (n + 1),
columns=["a", "b", "c"],
dtype="object",
)
tm.assert_frame_equal(df, expected)
class TestDataFrameSetItemCallable:
def test_setitem_callable(self):
# GH#12533
df = DataFrame({"A": [1, 2, 3, 4], "B": [5, 6, 7, 8]})
df[lambda x: "A"] = [11, 12, 13, 14]
exp = DataFrame({"A": [11, 12, 13, 14], "B": [5, 6, 7, 8]})
tm.assert_frame_equal(df, exp)
def test_setitem_other_callable(self):
# GH#13299
def inc(x):
return x + 1
df = DataFrame([[-1, 1], [1, -1]])
df[df > 0] = inc
expected = DataFrame([[-1, inc], [inc, -1]])
tm.assert_frame_equal(df, expected)
class TestDataFrameSetItemBooleanMask:
@td.skip_array_manager_invalid_test # TODO(ArrayManager) rewrite not using .values
@pytest.mark.parametrize(
"mask_type",
[lambda df: df > np.abs(df) / 2, lambda df: (df > np.abs(df) / 2).values],
ids=["dataframe", "array"],
)
def test_setitem_boolean_mask(self, mask_type, float_frame):
# Test for issue #18582
df = float_frame.copy()
mask = mask_type(df)
# index with boolean mask
result = df.copy()
result[mask] = np.nan
expected = df.copy()
expected.values[np.array(mask)] = np.nan
tm.assert_frame_equal(result, expected)
@pytest.mark.xfail(reason="Currently empty indexers are treated as all False")
@pytest.mark.parametrize("box", [list, np.array, Series])
def test_setitem_loc_empty_indexer_raises_with_non_empty_value(self, box):
# GH#37672
df = DataFrame({"a": ["a"], "b": [1], "c": [1]})
if box == Series:
indexer = box([], dtype="object")
else:
indexer = box([])
msg = "Must have equal len keys and value when setting with an iterable"
with pytest.raises(ValueError, match=msg):
df.loc[indexer, ["b"]] = [1]
@pytest.mark.parametrize("box", [list, np.array, Series])
def test_setitem_loc_only_false_indexer_dtype_changed(self, box):
# GH#37550
# Dtype is only changed when value to set is a Series and indexer is
# empty/bool all False
df = DataFrame({"a": ["a"], "b": [1], "c": [1]})
indexer = box([False])
df.loc[indexer, ["b"]] = 10 - df["c"]
expected = DataFrame({"a": ["a"], "b": [1], "c": [1]})
tm.assert_frame_equal(df, expected)
df.loc[indexer, ["b"]] = 9
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("indexer", [tm.setitem, tm.loc])
def test_setitem_boolean_mask_aligning(self, indexer):
# GH#39931
df = DataFrame({"a": [1, 4, 2, 3], "b": [5, 6, 7, 8]})
expected = df.copy()
mask = df["a"] >= 3
indexer(df)[mask] = indexer(df)[mask].sort_values("a")
tm.assert_frame_equal(df, expected)
def test_setitem_mask_categorical(self):
# assign multiple rows (mixed values) (-> array) -> exp_multi_row
# changed multiple rows
cats2 = Categorical(["a", "a", "b", "b", "a", "a", "a"], categories=["a", "b"])
idx2 = Index(["h", "i", "j", "k", "l", "m", "n"])
values2 = [1, 1, 2, 2, 1, 1, 1]
exp_multi_row = DataFrame({"cats": cats2, "values": values2}, index=idx2)
catsf = Categorical(
["a", "a", "c", "c", "a", "a", "a"], categories=["a", "b", "c"]
)
idxf = Index(["h", "i", "j", "k", "l", "m", "n"])
valuesf = [1, 1, 3, 3, 1, 1, 1]
df = DataFrame({"cats": catsf, "values": valuesf}, index=idxf)
exp_fancy = exp_multi_row.copy()
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# issue #37643 inplace kwarg deprecated
return_value = exp_fancy["cats"].cat.set_categories(
["a", "b", "c"], inplace=True
)
assert return_value is None
mask = df["cats"] == "c"
df[mask] = ["b", 2]
# category c is kept in .categories
tm.assert_frame_equal(df, exp_fancy)
@pytest.mark.parametrize("dtype", ["float", "int64"])
@pytest.mark.parametrize("kwargs", [{}, {"index": [1]}, {"columns": ["A"]}])
def test_setitem_empty_frame_with_boolean(self, dtype, kwargs):
# see GH#10126
kwargs["dtype"] = dtype
df = DataFrame(**kwargs)
df2 = df.copy()
df[df > df2] = 47
tm.assert_frame_equal(df, df2)
def test_setitem_boolean_indexing(self):
idx = list(range(3))
cols = ["A", "B", "C"]
df1 = DataFrame(
index=idx,
columns=cols,
data=np.array(
[[0.0, 0.5, 1.0], [1.5, 2.0, 2.5], [3.0, 3.5, 4.0]], dtype=float
),
)
df2 = DataFrame(index=idx, columns=cols, data=np.ones((len(idx), len(cols))))
expected = DataFrame(
index=idx,
columns=cols,
data=np.array([[0.0, 0.5, 1.0], [1.5, 2.0, -1], [-1, -1, -1]], dtype=float),
)
df1[df1 > 2.0 * df2] = -1
tm.assert_frame_equal(df1, expected)
with pytest.raises(ValueError, match="Item wrong length"):
df1[df1.index[:-1] > 2] = -1
def test_loc_setitem_all_false_boolean_two_blocks(self):
# GH#40885
df = DataFrame({"a": [1, 2], "b": [3, 4], "c": "a"})
expected = df.copy()
indexer = Series([False, False], name="c")
df.loc[indexer, ["b"]] = DataFrame({"b": [5, 6]}, index=[0, 1])
tm.assert_frame_equal(df, expected)
class TestDataFrameSetitemCopyViewSemantics:
def test_setitem_always_copy(self, float_frame):
assert "E" not in float_frame.columns
s = float_frame["A"].copy()
float_frame["E"] = s
float_frame["E"][5:10] = np.nan
assert notna(s[5:10]).all()
@pytest.mark.parametrize("consolidate", [True, False])
def test_setitem_partial_column_inplace(self, consolidate, using_array_manager):
# This setting should be in-place, regardless of whether frame is
# single-block or multi-block
# GH#304 this used to be incorrectly not-inplace, in which case
# we needed to ensure _item_cache was cleared.
df = DataFrame(
{"x": [1.1, 2.1, 3.1, 4.1], "y": [5.1, 6.1, 7.1, 8.1]}, index=[0, 1, 2, 3]
)
df.insert(2, "z", np.nan)
if not using_array_manager:
if consolidate:
df._consolidate_inplace()
assert len(df._mgr.blocks) == 1
else:
assert len(df._mgr.blocks) == 2
zvals = df["z"]._values
df.loc[2:, "z"] = 42
expected = Series([np.nan, np.nan, 42, 42], index=df.index, name="z")
tm.assert_series_equal(df["z"], expected)
# check setting occurred in-place
tm.assert_numpy_array_equal(zvals, expected.values)
assert np.shares_memory(zvals, df["z"]._values)
if not consolidate:
assert df["z"]._values is zvals
def test_setitem_duplicate_columns_not_inplace(self):
# GH#39510
cols = ["A", "B"] * 2
df = DataFrame(0.0, index=[0], columns=cols)
df_copy = df.copy()
df_view = df[:]
df["B"] = (2, 5)
expected = DataFrame([[0.0, 2, 0.0, 5]], columns=cols)
tm.assert_frame_equal(df_view, df_copy)
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
"value", [1, np.array([[1], [1]], dtype="int64"), [[1], [1]]]
)
def test_setitem_same_dtype_not_inplace(self, value, using_array_manager):
# GH#39510
cols = ["A", "B"]
df = DataFrame(0, index=[0, 1], columns=cols)
df_copy = df.copy()
df_view = df[:]
df[["B"]] = value
expected = DataFrame([[0, 1], [0, 1]], columns=cols)
tm.assert_frame_equal(df, expected)
tm.assert_frame_equal(df_view, df_copy)
@pytest.mark.parametrize("value", [1.0, np.array([[1.0], [1.0]]), [[1.0], [1.0]]])
def test_setitem_listlike_key_scalar_value_not_inplace(self, value):
# GH#39510
cols = ["A", "B"]
df = DataFrame(0, index=[0, 1], columns=cols)
df_copy = df.copy()
df_view = df[:]
df[["B"]] = value
expected = DataFrame([[0, 1.0], [0, 1.0]], columns=cols)
tm.assert_frame_equal(df_view, df_copy)
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
"indexer",
[
"a",
["a"],
pytest.param(
[True, False],
marks=pytest.mark.xfail(
reason="Boolean indexer incorrectly setting inplace",
strict=False, # passing on some builds, no obvious pattern
),
),
],
)
@pytest.mark.parametrize(
"value, set_value",
[
(1, 5),
(1.0, 5.0),
(Timestamp("2020-12-31"), Timestamp("2021-12-31")),
("a", "b"),
],
)
def test_setitem_not_operating_inplace(self, value, set_value, indexer):
# GH#43406
df = DataFrame({"a": value}, index=[0, 1])
expected = df.copy()
view = df[:]
df[indexer] = set_value
tm.assert_frame_equal(view, expected)
|
the-stack_106_24210 | import codecs
import datetime as dt
import re
import time
from configparser import ConfigParser
from bs4 import BeautifulSoup as bs
from peewee import fn
from progress.bar import IncrementalBar
from selenium.common.exceptions import (ElementClickInterceptedException,
StaleElementReferenceException,
TimeoutException, WebDriverException)
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.support.wait import WebDriverWait
from src import models
from src.auth import Day
class Payments:
def __init__(self):
self.cfg = ConfigParser()
self.cfg.read_file(codecs.open("config.ini", "r", "utf-8"))
self.payments = self.get_unmatched()
self.registers = self.get_unmatched(reg=True)
def get_progress_bar(self, name):
lenght = len(self.payments)
if name != "single":
lenght = len(self.registers)
progress_bar = IncrementalBar(self.cfg["title"][name], max=lenght)
return progress_bar
def get_unmatched(self, reg=False):
date = dt.datetime.strptime(self.cfg["dir"]["day"], "%d.%m.%Y").date()
if not reg:
return (
models.SinglePayment.select()
.where(
models.SinglePayment.date == date,
models.SinglePayment.is_taken == 0,
)
.objects()
)
registers = (
models.RegisterPayment.select(
models.RegisterPayment.num,
fn.SUM(models.RegisterPayment.amount),
)
.where(
models.RegisterPayment.date == date,
)
.group_by(models.RegisterPayment.num)
.objects()
)
return (
models.SinglePayment.select()
.where(
models.SinglePayment.date == date,
models.SinglePayment.num.in_([pay.num for pay in registers]),
models.SinglePayment.amount.in_([pay.amount for pay in registers]),
)
.objects()
)
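    # Matching note: for registers, payments on the configured day are grouped by
    # payment-order number with their amounts summed, and a single payment is then
    # selected when both its number and its total appear in that aggregate (the two
    # membership checks are applied independently of each other).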
class Receipting(Day):
class ElemHasClass:
def __init__(self, locator, css_class):
self.locator = locator
self.css_class = css_class
def __call__(self, driver):
element = driver.find_element(*self.locator)
if self.css_class in element.get_attribute("class"):
return element
return False
class RequiredNumberElems:
def __init__(self, locator, number):
self.locator = locator
self.number = number
def __call__(self, driver):
elems = (
bs(driver.page_source, "lxml")
.find("tbody", wicketpath="chargeTable_body")
.findChildren("tr")
)
if len(elems) == self.number:
return driver.find_element(*self.locator)
return False
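    # The two callables above are custom Selenium wait conditions: WebDriverWait
    # polls them until they return a truthy element, letting the flow block on a
    # CSS class appearing or on the charge table reaching an expected row count.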
def __init__(self, payment):
super().__init__()
self.cfg = ConfigParser()
self.cfg.read_file(codecs.open("config.ini", "r", "utf-8"))
self.payment = payment
self.driver.implicitly_wait(20)
self.uin_elem = None
self.charge_uin = None
def open_payment(self):
self.driver.find_element_by_xpath(
f'//*[@wicketpath="table_body_rows_{self.payment.elem_id}_cells_2"]/div/div/a/img'
).click()
self.uin_elem = WebDriverWait(self.driver, 10).until(
ec.visibility_of_element_located((By.NAME, "uin"))
)
self.charge_uin = WebDriverWait(self.driver, 10).until(
ec.element_to_be_clickable((By.NAME, "chargeUin"))
)
def check_matched(self):
charge_tab = (
bs(self.driver.page_source, "lxml")
.find("tbody", wicketpath="chargeTable_body")
.findChildren("tr")
)
if len(charge_tab) > 0:
return True
return False
def update_payment(self):
self.payment.payer = (
self.driver.find_element_by_name("payerName").get_attribute("value").upper()
)
self.payment.payer_code = self.driver.find_element_by_name(
"payerInn"
).get_attribute("value")
self.payment.amount = (
self.driver.find_element_by_name("paymentSum")
.get_attribute("value")
.replace("\xa0", "")
.replace(",", ".")
)
payment_uin = re.search(
self.cfg["pattern"]["uin"], self.uin_elem.get_attribute("value")
)
if payment_uin and self.payment.fine:
self.payment.fine.uin = payment_uin.group(0)
self.payment.fine.save()
elif payment_uin and not self.payment.fine:
self.payment.fine = models.Fine.create(uin=payment_uin.group(0))
def fill(self):
if self.payment.fine.uin:
self.uin_elem.clear()
self.uin_elem.send_keys(self.payment.fine.uin)
WebDriverWait(self.driver, 20).until(
ec.invisibility_of_element_located(
(By.XPATH, '//*[@class="blockUI blockOverlay"]')
)
)
self.charge_uin.click()
elif self.payment.fine.number:
self.driver.find_element_by_name("decisionNum").send_keys(
self.payment.fine.number
)
self.driver.find_element_by_name("search").click()
try:
WebDriverWait(self.driver, 30).until(
ec.visibility_of_element_located(
(By.XPATH, '//*[@wicketpath="decisionTable_body"]/tr/td[2]/div')
)
)
except TimeoutException:
return True
return False
def update_fine(self, reg=None):
payment = reg
if reg is None:
payment = self.payment
table = bs(self.driver.page_source, "lxml").find(
"table", wicketpath="decisionTable"
)
titles = self.cfg["title"]["payment"].split(";")
titles_id = {}
for i, tag in enumerate(table.thead.tr.findChildren("th")):
if tag.text in titles:
titles_id[tag.text] = i
if payment.fine is not None:
for num, tag in enumerate(table.tbody.tr.findChildren("td")):
if titles_id[titles[0]] == num:
payment.fine.number = tag.text.strip()
elif titles_id[titles[1]] == num:
payment.fine.debtor = tag.text.strip().upper()
elif titles_id[titles[2]] == num:
payment.fine.debtor_code = tag.text.strip()
payment.fine.save()
def add_founded_fine(self, count=1, reg=None):
WebDriverWait(self.driver, 20).until(
ec.invisibility_of_element_located(
(By.XPATH, '//*[@class="blockUI blockOverlay"]')
)
)
WebDriverWait(self.driver, 10).until(
ec.visibility_of_element_located(
(By.XPATH, '//*[@wicketpath="decisionTable_body"]/tr/td[2]/div')
)
).click()
WebDriverWait(self.driver, 10).until(
self.ElemHasClass(
(By.XPATH, '//*[@wicketpath="decisionTable_body"]/tr'),
" row-selected",
)
)
amount = self.payment.amount
if reg is not None:
amount = reg.amount
self.driver.find_element_by_name("amountDistributed").send_keys(
(str(amount)).replace(".", ",")
)
WebDriverWait(self.driver, 20).until(
ec.invisibility_of_element_located(
(By.XPATH, '//*[@class="blockUI blockOverlay"]')
)
)
self.driver.find_element_by_name("add").click()
try:
WebDriverWait(self.driver, 1).until(
ec.visibility_of_element_located(
(
By.XPATH,
'//*[@wicketpath="messageWindow_content_buttonPanel_buttonRepeater_0_button"]'
)
)
).click()
except TimeoutException:
pass
if count < 30:
WebDriverWait(self.driver, 60).until(
self.RequiredNumberElems(
(By.XPATH, '//*[@wicketpath="chargeTable_body"]'),
count,
)
)
else:
time.sleep(15)
def save_payment(self):
self.driver.find_element_by_name("save").click()
try:
WebDriverWait(self.driver, 1).until(
ec.presence_of_element_located(
(By.NAME, "table:body:rows:1:cells:1:cell:columnEditorDiv:comp")
)
)
except TimeoutException:
pass
def exit_receipting(self):
self.payment.is_taken = True
self.payment.save()
self.driver.quit()
return self.payment.is_taken
class RegisterReceipting(Receipting):
def __init__(self, payment):
super().__init__(payment)
self.reg_payments = self.get_reg_payments()
def get_reg_payments(self):
return (
models.RegisterPayment.select()
.where(
models.RegisterPayment.date == self.payment.date,
models.RegisterPayment.num == self.payment.num,
)
.order_by(models.RegisterPayment.payorder_id)
.objects()
)
def fill(self, reg):
self.uin_elem.clear()
self.driver.find_element_by_name("decisionNum").clear()
if not reg.fine:
if self.charge_uin.is_selected():
WebDriverWait(self.driver, 20).until(
ec.invisibility_of_element_located(
(By.XPATH, '//*[@class="blockUI blockOverlay"]')
)
)
self.charge_uin.click()
self.driver.find_element_by_name("decisionNum").send_keys("09-61")
elif reg.fine.uin:
if not self.charge_uin.is_selected():
WebDriverWait(self.driver, 20).until(
ec.invisibility_of_element_located(
(By.XPATH, '//*[@class="blockUI blockOverlay"]')
)
)
self.charge_uin.click()
self.uin_elem.send_keys(reg.fine.uin)
elif reg.fine.number:
if self.charge_uin.is_selected():
WebDriverWait(self.driver, 20).until(
ec.invisibility_of_element_located(
(By.XPATH, '//*[@class="blockUI blockOverlay"]')
)
)
self.charge_uin.click()
self.driver.find_element_by_name("decisionNum").send_keys(reg.fine.number)
self.driver.find_element_by_name("search").click()
WebDriverWait(self.driver, 120).until(
ec.visibility_of_element_located(
(By.XPATH, '//*[@wicketpath="decisionTable_body"]/tr/td[2]/div')
)
)
return False
def save_payment(self):
time_sleep = len(self.reg_payments) * 0.5
self.driver.find_element_by_name("save").click()
try:
WebDriverWait(self.driver, time_sleep).until(
ec.presence_of_element_located(
(By.NAME, "table:body:rows:1:cells:1:cell:columnEditorDiv:comp")
)
)
except TimeoutException:
pass
def exit_receipting(self):
self.payment.save()
self.driver.quit()
def receipting(payment):
manager = Receipting(payment)
try:
manager.open_day()
manager.open_payment()
manager.update_payment()
if manager.check_matched():
return manager.exit_receipting()
if not manager.payment.fine:
return manager.exit_receipting()
if manager.fill():
return manager.exit_receipting()
manager.update_fine()
manager.add_founded_fine()
manager.save_payment()
except (
ElementClickInterceptedException,
TimeoutException,
StaleElementReferenceException,
WebDriverException,
):
manager.quit()
return payment.is_taken
return manager.exit_receipting()
def reg_receipting(payment):
manager = RegisterReceipting(payment)
try:
manager.open_day()
manager.open_payment()
manager.update_payment()
if manager.check_matched():
manager.exit_receipting()
return None
for i, reg in enumerate(manager.reg_payments, start=1):
if manager.fill(reg):
manager.exit_receipting()
return None
manager.update_fine(reg)
manager.add_founded_fine(i, reg)
manager.save_payment()
except (
ElementClickInterceptedException,
TimeoutException,
StaleElementReferenceException,
WebDriverException,
):
manager.quit()
manager.exit_receipting()
return None
|
the-stack_106_24212 | # Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
"""Cloud functions for build scheduling."""
from collections import namedtuple
import logging
import os
import re
import yaml
from github import Github
from google.api_core import exceptions
from google.cloud import ndb
from google.cloud import scheduler_v1
import build_and_run_coverage
import build_project
from datastore_entities import GithubCreds
from datastore_entities import Project
VALID_PROJECT_NAME = re.compile(r'^[a-zA-Z0-9_-]+$')
DEFAULT_BUILDS_PER_DAY = 1
MAX_BUILDS_PER_DAY = 4
COVERAGE_SCHEDULE = '0 6 * * *'
INTROSPECTOR_SCHEDULE = '0 0 * * *'
FUZZING_BUILD_TOPIC = 'request-build'
COVERAGE_BUILD_TOPIC = 'request-coverage-build'
INTROSPECTOR_BUILD_TOPIC = 'request-introspector-build'
ProjectMetadata = namedtuple(
'ProjectMetadata', 'schedule project_yaml_contents dockerfile_contents')
logging.basicConfig(level=logging.INFO)
class ProjectYamlError(Exception):
"""Error in project.yaml format."""
def create_scheduler(cloud_scheduler_client, project_name, schedule, tag,
topic):
"""Creates schedulers for new projects."""
project_id = os.environ.get('GCP_PROJECT')
location_id = os.environ.get('FUNCTION_REGION')
parent = cloud_scheduler_client.location_path(project_id, location_id)
job = {
'name': parent + '/jobs/' + project_name + '-scheduler-' + tag,
'pubsub_target': {
'topic_name': 'projects/' + project_id + '/topics/' + topic,
'data': project_name.encode()
},
'schedule': schedule
}
try:
existing_job = cloud_scheduler_client.get_job(job['name'])
except exceptions.NotFound:
existing_job = None
if existing_job:
if existing_job.schedule != schedule:
update_mask = {'paths': ['schedule']}
cloud_scheduler_client.update_job(job, update_mask)
else:
cloud_scheduler_client.create_job(parent, job)
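# Scheduler jobs are keyed as
#   projects/<project>/locations/<region>/jobs/<oss-fuzz-project>-scheduler-<tag>
# so create_scheduler is effectively create-or-update: an existing job is patched
# only when its cron schedule changed, and a missing job is created from scratch.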
def delete_scheduler(cloud_scheduler_client, project_name, tag):
"""Deletes schedulers for projects that were removed."""
project_id = os.environ.get('GCP_PROJECT')
location_id = os.environ.get('FUNCTION_REGION')
name = cloud_scheduler_client.job_path(project_id, location_id,
project_name + '-scheduler-' + tag)
cloud_scheduler_client.delete_job(name)
def delete_project(cloud_scheduler_client, project):
"""Delete the given project."""
logging.info('Deleting project %s', project.name)
for tag in (build_project.FUZZING_BUILD_TYPE,
build_and_run_coverage.COVERAGE_BUILD_TYPE,
build_and_run_coverage.INTROSPECTOR_BUILD_TYPE):
try:
delete_scheduler(cloud_scheduler_client, project.name, tag)
except exceptions.NotFound:
# Already deleted.
continue
except exceptions.GoogleAPICallError as error:
logging.error('Scheduler deletion for %s failed with %s', project.name,
error)
return
project.key.delete()
# pylint: disable=too-many-branches
def sync_projects(cloud_scheduler_client, projects):
"""Sync projects with cloud datastore."""
for project in Project.query():
if project.name not in projects:
delete_project(cloud_scheduler_client, project)
existing_projects = {project.name for project in Project.query()}
for project_name in projects:
try:
create_scheduler(cloud_scheduler_client, project_name,
projects[project_name].schedule,
build_project.FUZZING_BUILD_TYPE, FUZZING_BUILD_TOPIC)
create_scheduler(cloud_scheduler_client, project_name, COVERAGE_SCHEDULE,
build_and_run_coverage.COVERAGE_BUILD_TYPE,
COVERAGE_BUILD_TOPIC)
create_scheduler(cloud_scheduler_client, project_name,
INTROSPECTOR_SCHEDULE,
build_and_run_coverage.INTROSPECTOR_BUILD_TYPE,
INTROSPECTOR_BUILD_TOPIC)
except exceptions.GoogleAPICallError as error:
logging.error('Scheduler creation for %s failed with %s', project_name,
error)
continue
if project_name in existing_projects:
continue
project_metadata = projects[project_name]
Project(name=project_name,
schedule=project_metadata.schedule,
project_yaml_contents=project_metadata.project_yaml_contents,
dockerfile_contents=project_metadata.dockerfile_contents).put()
for project in Project.query():
if project.name not in projects:
continue
logging.info('Setting up project %s', project.name)
project_metadata = projects[project.name]
project_changed = False
if project.schedule != project_metadata.schedule:
try:
logging.info('Schedule changed.')
project.schedule = project_metadata.schedule
project_changed = True
except exceptions.GoogleAPICallError as error:
logging.error('Updating scheduler for %s failed with %s', project.name,
error)
if project.project_yaml_contents != project_metadata.project_yaml_contents:
project.project_yaml_contents = project_metadata.project_yaml_contents
project_changed = True
if project.dockerfile_contents != project_metadata.dockerfile_contents:
project.dockerfile_contents = project_metadata.dockerfile_contents
project_changed = True
if project_changed:
project.put()
def _has_docker_file(project_contents):
"""Checks if project has a Dockerfile."""
return any(
content_file.name == 'Dockerfile' for content_file in project_contents)
def get_project_metadata(project_contents):
"""Checks for schedule parameter in yaml file else uses DEFAULT_SCHEDULE."""
for content_file in project_contents:
if content_file.name == 'project.yaml':
project_yaml_contents = content_file.decoded_content.decode('utf-8')
if content_file.name == 'Dockerfile':
dockerfile_contents = content_file.decoded_content.decode('utf-8')
project_yaml = yaml.safe_load(project_yaml_contents)
builds_per_day = project_yaml.get('builds_per_day', DEFAULT_BUILDS_PER_DAY)
if not isinstance(builds_per_day, int) or builds_per_day not in range(
1, MAX_BUILDS_PER_DAY + 1):
raise ProjectYamlError('Parameter is not an integer in range [1-4]')
# Starting at 6:00 am, next build schedules are added at 'interval' slots
# Example for interval 2, hours = [6, 18] and schedule = '0 6,18 * * *'
interval = 24 // builds_per_day
hours = []
for hour in range(6, 30, interval):
hours.append(hour % 24)
schedule = '0 ' + ','.join(str(hour) for hour in hours) + ' * * *'
return ProjectMetadata(schedule, project_yaml_contents, dockerfile_contents)
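# Worked example (illustrative only, not part of the original module): the cron
# expressions the loop above produces for each allowed builds_per_day value,
# assuming MAX_BUILDS_PER_DAY == 4 and the 6:00 am starting hour used above.
#   builds_per_day = 1 -> interval = 24 -> hours = [6]            -> '0 6 * * *'
#   builds_per_day = 2 -> interval = 12 -> hours = [6, 18]        -> '0 6,18 * * *'
#   builds_per_day = 3 -> interval = 8  -> hours = [6, 14, 22]    -> '0 6,14,22 * * *'
#   builds_per_day = 4 -> interval = 6  -> hours = [6, 12, 18, 0] -> '0 6,12,18,0 * * *'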
def get_projects(repo):
"""Get project list from git repository."""
projects = {}
contents = repo.get_contents('projects')
for content_file in contents:
if content_file.type != 'dir' or not VALID_PROJECT_NAME.match(
content_file.name):
continue
project_contents = repo.get_contents(content_file.path)
if not _has_docker_file(project_contents):
continue
try:
projects[content_file.name] = get_project_metadata(project_contents)
except ProjectYamlError as error:
logging.error(
'Incorrect format for project.yaml file of %s with error %s',
content_file.name, error)
return projects
def get_github_creds():
"""Retrieves GitHub client credentials."""
git_creds = GithubCreds.query().get()
if git_creds is None:
raise RuntimeError('Git credentials not available.')
return git_creds
def sync(event, context):
"""Sync projects with cloud datastore."""
del event, context # Unused.
with ndb.Client().context():
git_creds = get_github_creds()
github_client = Github(git_creds.client_id, git_creds.client_secret)
repo = github_client.get_repo('google/oss-fuzz')
projects = get_projects(repo)
cloud_scheduler_client = scheduler_v1.CloudSchedulerClient()
sync_projects(cloud_scheduler_client, projects)
|
the-stack_106_24214 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
"""
Finding Best Hyper Parameter for Classifier
=======================================================
In this example, we try to find the best hyper parameters of Classifier
(`method` and `regularization_weight`) by calculating accuracy for
possible hyper parameter values.
Datasets are randomly generated by using scikit-learn data generator.
"""
import sklearn.datasets
import sklearn.metrics
from jubakit.classifier import Classifier, Dataset, Config
# Generate a dummy dataset using scikit-learn.
(X, y) = sklearn.datasets.make_classification(
n_samples=512,
n_features=20,
n_informative=2,
n_redundant=2,
n_repeated=0,
n_classes=2,
n_clusters_per_class=2,
weights=None,
flip_y=0.01,
class_sep=1.0,
hypercube=True,
shift=0.0,
scale=1.0,
shuffle=True,
random_state=0, # fixed seed
)
# Convert arrays into jubakit Dataset.
dataset = Dataset.from_array(X, y)
# Try finding the best classifier parameter.
param2metrics = {}
for method in ['AROW', 'NHERD', 'CW']:
for rw in [0.0001, 0.001, 0.01, 0.1, 1.0, 10.0]:
print('Running ({0} / regularization_weight = {1})...'.format(method, rw))
# Create a config data structure.
jubatus_config = Config(method=method, parameter={'regularization_weight': rw})
# It is equivalent to:
#jubatus_config = Config.default()
#jubatus_config['method'] = method
#jubatus_config['parameter']['regularization_weight'] = rw
# Launch Jubatus server using the specified configuration.
classifier = Classifier.run(jubatus_config)
# Train with the dataset.
for _ in classifier.train(dataset):
pass
# Classify with the same dataset.
y_true = []
y_pred = []
for (idx, label, result) in classifier.classify(dataset):
y_true.append(label)
y_pred.append(result[0][0])
classifier.stop()
# Store the metrics for current configuration.
param2metrics['{0} ({1})'.format(method, rw)] = sklearn.metrics.accuracy_score(y_true, y_pred)
# Show results for each hyper parameter.
best_C = sorted(param2metrics.keys(), key=lambda x: param2metrics[x], reverse=True)[0]
print('--------------------')
print('Configuration\tAccuracy')
for C in sorted(param2metrics.keys()):
print('{0}\t{1}\t{2}'.format(C, param2metrics[C], '*' if C == best_C else ''))
|
the-stack_106_24216 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import super
import unittest
from parameterized import parameterized
from pprint import pprint
from beem import Steem
from beem.discussions import (
Query, Discussions_by_trending, Comment_discussions_by_payout,
Post_discussions_by_payout, Discussions_by_created, Discussions_by_active,
Discussions_by_cashout, Discussions_by_votes,
Discussions_by_children, Discussions_by_hot, Discussions_by_feed, Discussions_by_blog,
Discussions_by_comments, Discussions_by_promoted, Discussions
)
from datetime import datetime
from beem.instance import set_shared_steem_instance
from beem.nodelist import NodeList
wif = "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3"
class Testcases(unittest.TestCase):
@classmethod
def setUpClass(cls):
nodelist = NodeList()
nodelist.update_nodes(steem_instance=Steem(node=nodelist.get_nodes(normal=True, appbase=True), num_retries=10))
cls.bts = Steem(
node=nodelist.get_nodes(appbase=False),
use_condenser=True,
nobroadcast=True,
keys={"active": wif},
num_retries=10
)
cls.appbase = Steem(
node=nodelist.get_nodes(normal=False, appbase=True),
nobroadcast=True,
keys={"active": wif},
num_retries=10
)
# from getpass import getpass
# self.bts.wallet.unlock(getpass())
set_shared_steem_instance(cls.bts)
cls.bts.set_default_account("test")
@parameterized.expand([
("non_appbase"),
("appbase"),
])
def test_trending(self, node_param):
if node_param == "non_appbase":
bts = self.bts
else:
bts = self.appbase
query = Query()
query["limit"] = 10
query["tag"] = "steemit"
d = Discussions_by_trending(query, steem_instance=bts)
self.assertEqual(len(d), 10)
@parameterized.expand([
("non_appbase"),
("appbase"),
])
def test_comment_payout(self, node_param):
if node_param == "non_appbase":
bts = self.bts
else:
bts = self.appbase
query = Query()
query["limit"] = 10
query["tag"] = "steemit"
d = Comment_discussions_by_payout(query, steem_instance=bts)
self.assertEqual(len(d), 10)
@parameterized.expand([
("non_appbase"),
("appbase"),
])
def test_post_payout(self, node_param):
if node_param == "non_appbase":
bts = self.bts
else:
bts = self.appbase
query = Query()
query["limit"] = 10
query["tag"] = "steemit"
d = Post_discussions_by_payout(query, steem_instance=bts)
self.assertEqual(len(d), 10)
@parameterized.expand([
("non_appbase"),
("appbase"),
])
def test_created(self, node_param):
if node_param == "non_appbase":
bts = self.bts
else:
bts = self.appbase
query = Query()
query["limit"] = 10
query["tag"] = "steemit"
d = Discussions_by_created(query, steem_instance=bts)
self.assertEqual(len(d), 10)
@parameterized.expand([
("non_appbase"),
("appbase"),
])
def test_active(self, node_param):
if node_param == "non_appbase":
bts = self.bts
else:
bts = self.appbase
query = Query()
query["limit"] = 10
query["tag"] = "steemit"
d = Discussions_by_active(query, steem_instance=bts)
self.assertEqual(len(d), 10)
def test_cashout(self):
bts = self.appbase
query = Query(limit=10)
Discussions_by_cashout(query, steem_instance=bts)
# self.assertEqual(len(d), 10)
@parameterized.expand([
("non_appbase"),
("appbase"),
])
def test_votes(self, node_param):
if node_param == "non_appbase":
bts = self.bts
else:
bts = self.appbase
query = Query()
query["limit"] = 10
query["tag"] = "steemit"
d = Discussions_by_votes(query, steem_instance=bts)
self.assertEqual(len(d), 10)
@parameterized.expand([
("non_appbase"),
("appbase"),
])
def test_children(self, node_param):
if node_param == "non_appbase":
bts = self.bts
else:
bts = self.appbase
query = Query()
query["limit"] = 10
query["tag"] = "steemit"
d = Discussions_by_children(query, steem_instance=bts)
self.assertEqual(len(d), 10)
@parameterized.expand([
("non_appbase"),
("appbase"),
])
def test_feed(self, node_param):
if node_param == "non_appbase":
bts = self.bts
else:
bts = self.appbase
query = Query()
query["limit"] = 10
query["tag"] = "gtg"
d = Discussions_by_feed(query, steem_instance=bts)
self.assertEqual(len(d), 10)
@parameterized.expand([
("non_appbase"),
("appbase"),
])
def test_blog(self, node_param):
if node_param == "non_appbase":
bts = self.bts
else:
bts = self.appbase
query = Query()
query["limit"] = 10
query["tag"] = "gtg"
d = Discussions_by_blog(query, steem_instance=bts)
self.assertEqual(len(d), 10)
@parameterized.expand([
("non_appbase"),
("appbase"),
])
def test_comments(self, node_param):
if node_param == "non_appbase":
bts = self.bts
else:
bts = self.appbase
query = Query()
query["limit"] = 10
query["filter_tags"] = ["gtg"]
query["start_author"] = "gtg"
d = Discussions_by_comments(query, steem_instance=bts)
self.assertEqual(len(d), 10)
@parameterized.expand([
("non_appbase"),
("appbase"),
])
def test_promoted(self, node_param):
if node_param == "non_appbase":
bts = self.bts
else:
bts = self.appbase
query = Query()
query["limit"] = 10
query["tag"] = "steemit"
d = Discussions_by_promoted(query, steem_instance=bts)
discussions = Discussions(steem_instance=bts)
d2 = []
for dd in discussions.get_discussions("promoted", query, limit=10):
d2.append(dd)
self.assertEqual(len(d), 10)
self.assertEqual(len(d2), 10)
|
the-stack_106_24219 | import oyaml as yaml
import sys
import configargparse
parser = configargparse.ArgumentParser(auto_env_var_prefix="INIT_")
parser.add_argument("--config-path", help="path to the configuration.yml file", default="/opt/opencga/conf/configuration.yml")
parser.add_argument("--client-config-path", help="path to the client-configuration.yml file", default="/opt/opencga/conf/client-configuration.yml")
parser.add_argument("--storage-config-path", help="path to the storage-configuration.yml file", default="/opt/opencga/conf/storage-configuration.yml")
parser.add_argument("--database-prefix", required=False)
parser.add_argument("--search-hosts", required=True)
parser.add_argument("--clinical-hosts", required=True)
parser.add_argument("--cellbase-rest-url", required=False, help="Cellbase rest server hosting the cellbase service")
parser.add_argument("--catalog-database-hosts", required=True)
parser.add_argument("--catalog-database-user", required=True)
parser.add_argument("--catalog-database-password", required=True)
parser.add_argument("--catalog-database-ssl", required=False, default=True)
parser.add_argument("--catalog-search-hosts", required=True)
parser.add_argument("--catalog-search-user", required=False)
parser.add_argument("--catalog-search-password", required=False)
parser.add_argument("--rest-host", required=True)
parser.add_argument("--grpc-host", required=True)
parser.add_argument("--analysis-execution-mode", required=False)
parser.add_argument("--batch-account-name", required=False)
parser.add_argument("--batch-account-key", required=False)
parser.add_argument("--batch-endpoint", required=False)
parser.add_argument("--batch-pool-id", required=False)
parser.add_argument("--k8s-master-node", required=False)
parser.add_argument("--k8s-namespace", required=False, default="default")
parser.add_argument("--k8s-volumes-pvc-conf", required=False)
parser.add_argument("--k8s-volumes-pvc-sessions", required=False)
parser.add_argument("--k8s-volumes-pvc-variants", required=False)
parser.add_argument("--k8s-volumes-pvc-analysisconf", required=False)
parser.add_argument("--max-concurrent-jobs", required=False)
parser.add_argument("--variant-default-engine", required=False, default="hadoop")
parser.add_argument("--variant-options", required=False, action="append", nargs='*')
parser.add_argument("--hadoop-ssh-dns", required=False)
parser.add_argument("--hadoop-ssh-user", required=False)
parser.add_argument("--hadoop-ssh-pass", required=False)
parser.add_argument("--hadoop-ssh-key", required=False)
parser.add_argument("--hadoop-ssh-remote-opencga-home", required=False)
parser.add_argument("--health-check-interval", required=False)
parser.add_argument("--save", help="save update to source configuration files (default: false)", default=False, action='store_true')
args = parser.parse_args()
# TODO: Add check for a job config.
##############################################################################################################
# Load storage configuration yaml
##############################################################################################################
with open(args.storage_config_path) as f:
storage_config = yaml.safe_load(f)
# Inject search hosts
search_hosts = args.search_hosts.replace('\"','').replace('[','').replace(']','').split(",")
for i, search_host in enumerate(search_hosts):
if i == 0:
# If we are overriding the default hosts,
# clear them only on the first iteration
storage_config["search"]["hosts"].clear()
storage_config["search"]["hosts"].insert(i, search_host.strip())
# Inject clinical hosts
clinical_hosts = args.clinical_hosts.replace('\"','').replace('[','').replace(']','').split(",")
for i, clinical_host in enumerate(clinical_hosts):
if i == 0:
# If we are overriding the default hosts,
# clear them only on the first iteration
storage_config["clinical"]["hosts"].clear()
storage_config["clinical"]["hosts"].insert(i, clinical_host.strip())
# Inject cellbase rest host, if set
if args.cellbase_rest_url is not None and args.cellbase_rest_url != "":
cellbase_rest_url = args.cellbase_rest_url.replace('\"', '').replace('[','').replace(']','')
storage_config["cellbase"]["host"] = cellbase_rest_url
# set default engine
storage_config["variant"]["defaultEngine"] = args.variant_default_engine
storage_config["variant"]["options"]["annotator"] = "cellbase"
if args.variant_options is not None:
# print("variant options: ", args.variant_options)
for options in args.variant_options:
for option in options:
# print("option: ", option)
kv = option.split("=")
storage_config["variant"]["options"][kv[0]] = kv[1]
# Inject Hadoop ssh configuration
for _, storage_engine in enumerate(storage_config["variant"]["engines"]):
if storage_engine["id"] == "hadoop":
storage_engine["options"]["storage.hadoop.mr.executor"] = "ssh"
storage_engine["options"]["storage.hadoop.mr.executor.ssh.host"] = args.hadoop_ssh_dns
storage_engine["options"]["storage.hadoop.mr.executor.ssh.user"] = args.hadoop_ssh_user
storage_engine["options"]["storage.hadoop.mr.executor.ssh.password"] = args.hadoop_ssh_pass
if args.hadoop_ssh_key is not None and args.hadoop_ssh_key != "":
storage_engine["options"]["storage.hadoop.mr.executor.ssh.key"] = args.hadoop_ssh_key
else:
storage_engine["options"]["storage.hadoop.mr.executor.ssh.key"] = ""
storage_engine["options"]["storage.hadoop.mr.executor.ssh.remoteOpenCgaHome"] = args.hadoop_ssh_remote_opencga_home
##############################################################################################################
# Load configuration yaml
##############################################################################################################
with open(args.config_path) as f:
config = yaml.safe_load(f)
if args.database_prefix:
config["databasePrefix"] = args.database_prefix
# Inject catalog database
catalog_hosts = args.catalog_database_hosts.replace('\"','').replace('[','').replace(']','').split(",")
for i, catalog_host in enumerate(catalog_hosts):
if i == 0:
# If we are overriding the default hosts,
# clear them only on the first iteration
config["catalog"]["database"]["hosts"].clear()
config["catalog"]["database"]["hosts"].insert(i, catalog_host.strip())
config["catalog"]["database"]["user"] = args.catalog_database_user
config["catalog"]["database"]["password"] = args.catalog_database_password
config["catalog"]["database"]["options"]["sslEnabled"] = args.catalog_database_ssl
config["catalog"]["database"]["options"]["sslInvalidCertificatesAllowed"] = True
config["catalog"]["database"]["options"]["authenticationDatabase"] = "admin"
# Inject search database
catalog_search_hosts = args.catalog_search_hosts.replace('\"','').replace('[','').replace(']','').split(",")
for i, catalog_search_host in enumerate(catalog_search_hosts):
if i == 0:
# If we are overriding the default hosts,
# clear them only on the first iteration
config["catalog"]["searchEngine"]["hosts"].clear()
config["catalog"]["searchEngine"]["hosts"].insert(i, catalog_search_host.strip())
if args.catalog_search_user is not None:
config["catalog"]["searchEngine"]["user"] = args.catalog_search_user
config["catalog"]["searchEngine"]["password"] = args.catalog_search_password
# Inject execution settings
config["analysis"]["scratchDir"] = "/tmp/opencga_scratch"
if args.max_concurrent_jobs is not None:
config["analysis"]["execution"]["maxConcurrentJobs"]["variant-index"] = int(args.max_concurrent_jobs)
if args.analysis_execution_mode is not None:
config["analysis"]["execution"]["id"] = args.analysis_execution_mode
if args.analysis_execution_mode == "AZURE":
config["analysis"]["execution"]["options"] = {}
config["analysis"]["execution"]["options"]["azure.batchAccount"] = args.batch_account_name
config["analysis"]["execution"]["options"]["azure.batchKey"] = args.batch_account_key
config["analysis"]["execution"]["options"]["azure.batchUri"] = args.batch_endpoint
config["analysis"]["execution"]["options"]["azure.batchPoolId"] = args.batch_pool_id
elif args.analysis_execution_mode == "k8s":
config["analysis"]["execution"]["options"]["k8s.masterUrl"] = args.k8s_master_node
config["analysis"]["execution"]["options"]["k8s.namespace"] = args.k8s_namespace
config["analysis"]["execution"]["options"]["k8s.volumes"][0]["name"] = "conf"
config["analysis"]["execution"]["options"]["k8s.volumes"][0]["persistentVolumeClaim"]["claimName"] = args.k8s_volumes_pvc_conf
config["analysis"]["execution"]["options"]["k8s.volumes"][1]["name"] = "sessions"
config["analysis"]["execution"]["options"]["k8s.volumes"][1]["persistentVolumeClaim"]["claimName"] = args.k8s_volumes_pvc_sessions
config["analysis"]["execution"]["options"]["k8s.volumes"][2]["name"] = "variants"
config["analysis"]["execution"]["options"]["k8s.volumes"][2]["persistentVolumeClaim"]["claimName"] = args.k8s_volumes_pvc_variants
config["analysis"]["execution"]["options"]["k8s.volumes"][3]["name"] = "analysisconf"
config["analysis"]["execution"]["options"]["k8s.volumes"][3]["persistentVolumeClaim"]["claimName"] = args.k8s_volumes_pvc_analysisconf
# Inject healthCheck interval
if args.health_check_interval is not None:
config["healthCheck"]["interval"] = args.health_check_interval
##############################################################################################################
# Load client configuration yaml
##############################################################################################################
with open(args.client_config_path) as f:
client_config = yaml.safe_load(f)
# Inject grpc and rest host
client_config["rest"]["host"] = args.rest_host.replace('"','')
client_config["grpc"]["host"] = args.grpc_host.replace('"','')
# Running with --save will update the configuration files inplace.
# Without --save will simply dump the update YAML to stdout so that
# the caller can handle it.
# Note: The dump will use the safe representation so there is likely
# to be format diffs between the original input and the output as well
# as value changes.
if args.save == False:
yaml.dump(storage_config, sys.stdout, default_flow_style=False, allow_unicode=True)
print("---") # Add yaml delimiter
yaml.dump(config, sys.stdout, default_flow_style=False, allow_unicode=True)
print("---") # Add yaml delimiter
yaml.dump(client_config, sys.stdout, default_flow_style=False, allow_unicode=True)
else:
with open(args.storage_config_path, "w") as f:
yaml.dump(storage_config, f, default_flow_style=False)
with open(args.config_path, "w") as f:
yaml.dump(config, f, default_flow_style=False)
with open(args.client_config_path, "w") as f:
yaml.dump(client_config, f, default_flow_style=False)
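# Example invocation (sketch only; the script name, host names and paths below are
# hypothetical placeholders, and every optional flag is omitted):
#   python init-config.py \
#       --config-path /opt/opencga/conf/configuration.yml \
#       --search-hosts 'solr:8983' --clinical-hosts 'solr:8983' \
#       --catalog-database-hosts 'mongo:27017' \
#       --catalog-database-user opencga --catalog-database-password secret \
#       --catalog-search-hosts 'solr:8983' \
#       --rest-host 'http://opencga:9090' --grpc-host 'opencga:9091' \
#       --save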
|
the-stack_106_24220 | from __future__ import division
from __future__ import print_function
import codecs
import sys
import tensorflow.compat.v1 as tf
from DataLoader import FilePaths
class DecoderType:
BestPath = 0
WordBeamSearch = 1
BeamSearch = 2
class Model:
# Model Constants
batchSize = 10 # 50
imgSize = (800, 64)
    maxTextLen = 100  # maximum text length that can be recognized
def __init__(self, charList, decoderType=DecoderType.BestPath, mustRestore=False):
tf.disable_v2_behavior()
self.charList = charList
self.decoderType = decoderType
self.mustRestore = mustRestore
self.snapID = 0
# input image batch
self.inputImgs = tf.compat.v1.placeholder(tf.float32, shape=(None, Model.imgSize[0], Model.imgSize[1]))
# setup CNN, RNN and CTC
self.setupCNN()
self.setupRNN()
self.setupCTC()
# setup optimizer to train NN
self.batchesTrained = 0
self.learningRate = tf.compat.v1.placeholder(tf.float32, shape=[])
self.optimizer = tf.compat.v1.train.RMSPropOptimizer(self.learningRate).minimize(self.loss)
# Initialize TensorFlow
(self.sess, self.saver) = self.setupTF()
self.training_loss_summary = tf.compat.v1.summary.scalar('loss', self.loss)
self.writer = tf.compat.v1.summary.FileWriter(
'./logs', self.sess.graph) # Tensorboard: Create writer
self.merge = tf.compat.v1.summary.merge([self.training_loss_summary]) # Tensorboard: Merge
def setupCNN(self):
""" Create CNN layers and return output of these layers """
cnnIn4d = tf.expand_dims(input=self.inputImgs, axis=3)
# First Layer: Conv (5x5) + Pool (2x2) - Output size: 400 x 32 x 64
with tf.compat.v1.name_scope('Conv_Pool_1'):
kernel = tf.Variable(
tf.random.truncated_normal([5, 5, 1, 64], stddev=0.1))
conv = tf.nn.conv2d(
input=cnnIn4d, filters=kernel, padding='SAME', strides=(1, 1, 1, 1))
learelu = tf.nn.leaky_relu(conv, alpha=0.01)
pool = tf.nn.max_pool2d(input=learelu, ksize=(1, 2, 2, 1), strides=(1, 2, 2, 1), padding='VALID')
# Second Layer: Conv (5x5) + Pool (1x2) - Output size: 400 x 16 x 128
with tf.compat.v1.name_scope('Conv_Pool_2'):
kernel = tf.Variable(tf.random.truncated_normal(
[5, 5, 64, 128], stddev=0.1))
conv = tf.nn.conv2d(
input=pool, filters=kernel, padding='SAME', strides=(1, 1, 1, 1))
learelu = tf.nn.leaky_relu(conv, alpha=0.01)
pool = tf.nn.max_pool2d(input=learelu, ksize=(1, 1, 2, 1), strides=(1, 1, 2, 1), padding='VALID')
# Third Layer: Conv (3x3) + Pool (2x2) + Simple Batch Norm - Output size: 200 x 8 x 128
with tf.compat.v1.name_scope('Conv_Pool_BN_3'):
kernel = tf.Variable(tf.random.truncated_normal(
[3, 3, 128, 128], stddev=0.1))
conv = tf.nn.conv2d(
input=pool, filters=kernel, padding='SAME', strides=(1, 1, 1, 1))
mean, variance = tf.nn.moments(x=conv, axes=[0])
batch_norm = tf.nn.batch_normalization(
conv, mean, variance, offset=None, scale=None, variance_epsilon=0.001)
learelu = tf.nn.leaky_relu(batch_norm, alpha=0.01)
pool = tf.nn.max_pool2d(input=learelu, ksize=(1, 2, 2, 1), strides=(1, 2, 2, 1), padding='VALID')
# Fourth Layer: Conv (3x3) - Output size: 200 x 8 x 256
with tf.compat.v1.name_scope('Conv_4'):
kernel = tf.Variable(tf.random.truncated_normal(
[3, 3, 128, 256], stddev=0.1))
conv = tf.nn.conv2d(
input=pool, filters=kernel, padding='SAME', strides=(1, 1, 1, 1))
learelu = tf.nn.leaky_relu(conv, alpha=0.01)
# Fifth Layer: Conv (3x3) + Pool(2x2) - Output size: 100 x 4 x 256
with tf.compat.v1.name_scope('Conv_Pool_5'):
kernel = tf.Variable(tf.random.truncated_normal(
[3, 3, 256, 256], stddev=0.1))
conv = tf.nn.conv2d(
input=learelu, filters=kernel, padding='SAME', strides=(1, 1, 1, 1))
learelu = tf.nn.leaky_relu(conv, alpha=0.01)
pool = tf.nn.max_pool2d(input=learelu, ksize=(1, 2, 2, 1), strides=(1, 2, 2, 1), padding='VALID')
# Sixth Layer: Conv (3x3) + Pool(1x2) + Simple Batch Norm - Output size: 100 x 2 x 512
with tf.compat.v1.name_scope('Conv_Pool_BN_6'):
kernel = tf.Variable(tf.random.truncated_normal(
[3, 3, 256, 512], stddev=0.1))
conv = tf.nn.conv2d(
input=pool, filters=kernel, padding='SAME', strides=(1, 1, 1, 1))
mean, variance = tf.nn.moments(x=conv, axes=[0])
batch_norm = tf.nn.batch_normalization(
conv, mean, variance, offset=None, scale=None, variance_epsilon=0.001)
learelu = tf.nn.leaky_relu(batch_norm, alpha=0.01)
pool = tf.nn.max_pool2d(input=learelu, ksize=(1, 1, 2, 1), strides=(1, 1, 2, 1), padding='VALID')
# Seventh Layer: Conv (3x3) + Pool (1x2) - Output size: 100 x 1 x 512
with tf.compat.v1.name_scope('Conv_Pool_7'):
kernel = tf.Variable(tf.random.truncated_normal(
[3, 3, 512, 512], stddev=0.1))
conv = tf.nn.conv2d(
input=pool, filters=kernel, padding='SAME', strides=(1, 1, 1, 1))
learelu = tf.nn.leaky_relu(conv, alpha=0.01)
pool = tf.nn.max_pool2d(input=learelu, ksize=(1, 1, 2, 1), strides=(1, 1, 2, 1), padding='VALID')
self.cnnOut4d = pool
def setupRNN(self):
""" Create RNN layers and return output of these layers """
# Collapse layer to remove dimension 100 x 1 x 512 --> 100 x 512 on axis=2
rnnIn3d = tf.squeeze(self.cnnOut4d, axis=[2])
# 2 layers of LSTM cell used to build RNN
numHidden = 512
cells = [tf.compat.v1.nn.rnn_cell.LSTMCell(
num_units=numHidden, state_is_tuple=True, name='basic_lstm_cell') for _ in range(2)]
stacked = tf.compat.v1.nn.rnn_cell.MultiRNNCell(cells, state_is_tuple=True)
# Bi-directional RNN
# BxTxF -> BxTx2H
((forward, backward), _) = tf.compat.v1.nn.bidirectional_dynamic_rnn(
cell_fw=stacked, cell_bw=stacked, inputs=rnnIn3d, dtype=rnnIn3d.dtype)
# BxTxH + BxTxH -> BxTx2H -> BxTx1X2H
concat = tf.expand_dims(tf.concat([forward, backward], 2), 2)
# Project output to chars (including blank): BxTx1x2H -> BxTx1xC -> BxTxC
kernel = tf.Variable(tf.random.truncated_normal(
[1, 1, numHidden * 2, len(self.charList) + 1], stddev=0.1))
self.rnnOut3d = tf.squeeze(tf.nn.atrous_conv2d(value=concat, filters=kernel, rate=1, padding='SAME'), axis=[2])
def setupCTC(self):
""" Create CTC loss and decoder and return them """
# BxTxC -> TxBxC
self.ctcIn3dTBC = tf.transpose(a=self.rnnOut3d, perm=[1, 0, 2])
# Ground truth text as sparse tensor
with tf.compat.v1.name_scope('CTC_Loss'):
self.gtTexts = tf.SparseTensor(tf.compat.v1.placeholder(tf.int64, shape=[
None, 2]), tf.compat.v1.placeholder(tf.int32, [None]), tf.compat.v1.placeholder(tf.int64, [2]))
# Calculate loss for batch
self.seqLen = tf.compat.v1.placeholder(tf.int32, [None])
self.loss = tf.reduce_mean(input_tensor=tf.compat.v1.nn.ctc_loss(labels=self.gtTexts, inputs=self.ctcIn3dTBC, sequence_length=self.seqLen,
ctc_merge_repeated=True, ignore_longer_outputs_than_inputs=True))
with tf.compat.v1.name_scope('CTC_Decoder'):
# Decoder: Best path decoding or Word beam search decoding
if self.decoderType == DecoderType.BestPath:
self.decoder = tf.nn.ctc_greedy_decoder(
inputs=self.ctcIn3dTBC, sequence_length=self.seqLen)
elif self.decoderType == DecoderType.BeamSearch:
self.decoder = tf.compat.v1.nn.ctc_beam_search_decoder(inputs=self.ctcIn3dTBC, sequence_length=self.seqLen, beam_width=50, merge_repeated=True)
elif self.decoderType == DecoderType.WordBeamSearch:
# Import compiled word beam search operation (see https://github.com/githubharald/CTCWordBeamSearch)
word_beam_search_module = tf.load_op_library(
'./TFWordBeamSearch.so')
# Prepare: dictionary, characters in dataset, characters forming words
                chars = str().join(self.charList)  # all characters the model can emit
                wordChars = codecs.open(
                    FilePaths.fnWordCharList, 'r').read()
                # FilePaths.fnCorpus is assumed to point at the word beam search corpus file
                corpus = codecs.open(FilePaths.fnCorpus, 'r').read()
# # Decoder using the "NGramsForecastAndSample": restrict number of (possible) next words to at most 20 words: O(W) mode of word beam search
# decoder = word_beam_search_module.word_beam_search(tf.nn.softmax(ctcIn3dTBC, dim=2), 25, 'NGramsForecastAndSample', 0.0, corpus.encode('utf8'), chars.encode('utf8'), wordChars.encode('utf8'))
# Decoder using the "Words": only use dictionary, no scoring: O(1) mode of word beam search
self.decoder = word_beam_search_module.word_beam_search(tf.nn.softmax(
self.ctcIn3dTBC, axis=2), 25, 'Words', 0.0, corpus.encode('utf8'), chars.encode('utf8'), wordChars.encode('utf8'))
# Return a CTC operation to compute the loss and CTC operation to decode the RNN output
return self.loss, self.decoder
def setupTF(self):
""" Initialize TensorFlow """
print('Python: ' + sys.version)
print('Tensorflow: ' + tf.__version__)
sess = tf.compat.v1.Session() # Tensorflow session
saver = tf.compat.v1.train.Saver(max_to_keep=3) # Saver saves model to file
modelDir = '../model/'
latestSnapshot = tf.train.latest_checkpoint(modelDir) # Is there a saved model?
# If model must be restored (for inference), there must be a snapshot
if self.mustRestore and not latestSnapshot:
raise Exception('No saved model found in: ' + modelDir)
# Load saved model if available
if latestSnapshot:
print('Init with stored values from ' + latestSnapshot)
saver.restore(sess, latestSnapshot)
else:
print('Init with new values')
sess.run(tf.compat.v1.global_variables_initializer())
return (sess, saver)
def toSpare(self, texts):
""" Convert ground truth texts into sparse tensor for ctc_loss """
indices = []
values = []
shape = [len(texts), 0] # Last entry must be max(labelList[i])
# Go over all texts
        for (batchElement, text) in enumerate(texts):
            # Convert text to a list of labels (i.e. class-ids)
            labelStr = [self.charList.index(c) for c in text]
# Sparse tensor must have size of max. label-string
if len(labelStr) > shape[1]:
shape[1] = len(labelStr)
# Put each label into sparse tensor
for (i, label) in enumerate(labelStr):
indices.append([batchElement, i])
values.append(label)
return (indices, values, shape)
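    # Worked example (illustrative, not part of the original class): with
    # self.charList == ['a', 'b', 'c'] and texts == ['ab', 'c'], toSpare returns
    #   indices = [[0, 0], [0, 1], [1, 0]]
    #   values  = [0, 1, 2]
    #   shape   = [2, 2]
    # i.e. one row per batch element, one column per character position, with the
    # second shape entry padded to the longest label string.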
def decoderOutputToText(self, ctcOutput):
""" Extract texts from output of CTC decoder """
# Contains string of labels for each batch element
encodedLabelStrs = [[] for i in range(Model.batchSize)]
# Word beam search: label strings terminated by blank
if self.decoderType == DecoderType.WordBeamSearch:
blank = len(self.charList)
for b in range(Model.batchSize):
for label in ctcOutput[b]:
if label == blank:
break
encodedLabelStrs[b].append(label)
# TF decoders: label strings are contained in sparse tensor
else:
# Ctc returns tuple, first element is SparseTensor
decoded = ctcOutput[0][0]
# Go over all indices and save mapping: batch -> values
idxDict = {b : [] for b in range(Model.batchSize)}
for (idx, idx2d) in enumerate(decoded.indices):
label = decoded.values[idx]
batchElement = idx2d[0] # index according to [b,t]
encodedLabelStrs[batchElement].append(label)
# Map labels to chars for all batch elements
return [str().join([self.charList[c] for c in labelStr]) for labelStr in encodedLabelStrs]
def trainBatch(self, batch, batchNum):
""" Feed a batch into the NN to train it """
sparse = self.toSpare(batch.gtTexts)
rate = 0.001 # if you use the pretrained model to continue train
#rate = 0.01 if self.batchesTrained < 10 else (
# 0.001 if self.batchesTrained < 2750 else 0.001) # variable learning_rate is used from trained from scratch
evalList = [self.merge, self.optimizer, self.loss]
feedDict = {self.inputImgs: batch.imgs, self.gtTexts: sparse, self.seqLen: [Model.maxTextLen] * Model.batchSize, self.learningRate: rate}
(loss_summary, _, lossVal) = self.sess.run(evalList, feedDict)
# Tensorboard: Add loss_summary to writer
self.writer.add_summary(loss_summary, batchNum)
self.batchesTrained += 1
return lossVal
def return_rnn_out(self, batch, write_on_csv=False):
"""Only return rnn_out prediction value without decoded"""
numBatchElements = len(batch.imgs)
decoded, rnnOutput = self.sess.run([self.decoder, self.ctcIn3dTBC],
{self.inputImgs: batch.imgs, self.seqLen: [Model.maxTextLen] * numBatchElements})
decoded = rnnOutput
print(decoded.shape)
if write_on_csv:
s = rnnOutput.shape
b = 0
csv = ''
for t in range(s[0]):
for c in range(s[2]):
csv += str(rnnOutput[t, b, c]) + ';'
csv += '\n'
open('mat_0.csv', 'w').write(csv)
return decoded[:,0,:].reshape(100,80)
def inferBatch(self, batch):
""" Feed a batch into the NN to recognize texts """
numBatchElements = len(batch.imgs)
feedDict = {self.inputImgs: batch.imgs, self.seqLen: [Model.maxTextLen] * numBatchElements}
evalRes = self.sess.run([self.decoder, self.ctcIn3dTBC], feedDict)
decoded = evalRes[0]
# # Dump RNN output to .csv file
# decoded, rnnOutput = self.sess.run([self.decoder, self.rnnOutput], {
# self.inputImgs: batch.imgs, self.seqLen: [Model.maxTextLen] * Model.batchSize})
# s = rnnOutput.shape
# b = 0
# csv = ''
# for t in range(s[0]):
# for c in range(s[2]):
# csv += str(rnnOutput[t, b, c]) + ';'
# csv += '\n'
# open('mat_0.csv', 'w').write(csv)
texts = self.decoderOutputToText(decoded)
return texts
def save(self):
""" Save model to file """
self.snapID += 1
self.saver.save(self.sess, '../model/snapshot',
global_step=self.snapID)
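# Hypothetical usage sketch (not part of the original module): how this class is
# typically wired up with the charList and Batch objects coming from DataLoader.
# The character-list path and Batch attributes below are assumptions.
#
#   charList = open('../model/charList.txt').read()
#   model = Model(charList, decoderType=DecoderType.BestPath, mustRestore=True)
#   recognized = model.inferBatch(batch)   # batch.imgs shaped (N, 800, 64)
#   print(recognized[0])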
|
the-stack_106_24223 | from functools import partial
import json
import pytest
import requests
import responses
import tamr_client as tc
from tests.tamr_client import fake
@fake.json
def test_get_auth_cookie():
auth = fake.username_password_auth()
s = fake.session()
instance = fake.instance()
assert s.auth is None
assert s._stored_auth == auth
assert len(s.cookies.keys()) == 0
tc.backup.get_all(session=s, instance=instance)
assert s.auth is None
assert s._stored_auth == auth
assert s.cookies.get("authToken") == "auth_token_string_value"
@fake.json
def test_refresh_auth_cookie():
auth = fake.username_password_auth()
s = fake.session()
instance = fake.instance()
assert s.auth is None
assert s._stored_auth == auth
assert len(s.cookies.keys()) == 0
tc.backup.get_all(session=s, instance=instance)
assert s.auth is None
assert s._stored_auth == auth
assert s.cookies.get("authToken") == "auth_token_string_value"
@fake.json
def test_bad_credentials():
auth = fake.username_password_auth()
s = fake.session()
instance = fake.instance()
assert s.auth is None
assert s._stored_auth == auth
assert len(s.cookies.keys()) == 0
s.cookies.set("authToken", "expired_auth_token")
with pytest.raises(requests.exceptions.HTTPError):
tc.backup.get_all(session=s, instance=instance)
@fake.json
def test_missing_api():
auth = fake.username_password_auth()
s = fake.session()
instance = fake.instance()
assert s.auth is None
assert s._stored_auth == auth
assert len(s.cookies.keys()) == 0
tc.backup.get_all(session=s, instance=instance)
assert s.auth == auth
@responses.activate # TODO: Can this request header checking be done with fake.json?
def test_request_headers():
def create_callback(request, snoop, status, response_body):
snoop["headers"] = request.headers
return status, {}, json.dumps(response_body)
auth_endpoint = "http://localhost/api/versioned/v1/instance:login"
endpoint = "http://localhost/api/versioned/v1/backups"
snoop_dict_1: tc._types.JsonDict = {}
responses.add_callback(
responses.GET,
endpoint,
partial(
create_callback,
snoop=snoop_dict_1,
status=401,
response_body="Credentials are required to access this resource.",
),
)
snoop_dict_2: tc._types.JsonDict = {}
responses.add_callback(
responses.POST,
auth_endpoint,
partial(
create_callback, snoop=snoop_dict_2, status=200, response_body=auth_json,
),
)
snoop_dict_3: tc._types.JsonDict = {}
responses.add_callback(
responses.GET,
endpoint,
partial(create_callback, snoop=snoop_dict_3, status=200, response_body={},),
)
s = fake.session()
instance = fake.instance()
tc.backup.get_all(session=s, instance=instance)
# No credentials in any call
assert "Authorization" not in snoop_dict_1["headers"]
assert "Authorization" not in snoop_dict_2["headers"]
assert "Authorization" not in snoop_dict_3["headers"]
# No cookie in first call
assert "Cookie" not in snoop_dict_1["headers"]
# Valid cookie passed in second call
assert snoop_dict_3["headers"]["Cookie"] == f'authToken={auth_json["token"]}'
auth_json = {"token": "auth_token_string_value", "username": "user"}
|
the-stack_106_24225 | import os
parent_directory_names = ["input_images", "csv_data_and_classes", "labelled_xml_data", "tfrecords", "model_training", "trained_model"]
subdirectory_names = ["train", "test"]
for directory_name in parent_directory_names:
cwd = os.getcwd()
directory_to_be_created = os.path.join(cwd, directory_name)
# Create parent if it is not present
if not os.path.exists(directory_to_be_created):
print("Creating directory {}".format(directory_to_be_created))
os.mkdir(directory_to_be_created)
print("Created successfully!")
else:
print("Directory {} already exists".format(directory_to_be_created))
# Create subdirectories
if directory_name == "input_images" or directory_name == "labelled_xml_data":
for subdirectory_name in subdirectory_names:
subdirectory_to_be_created = os.path.join(directory_to_be_created, subdirectory_name)
if not os.path.exists(subdirectory_to_be_created):
print("Creating subdirectory {}".format(subdirectory_to_be_created))
os.mkdir(subdirectory_to_be_created)
print("Created successfully!")
else:
print("Subdirectory {} already exists".format(subdirectory_to_be_created))
|
the-stack_106_24226 | # -*- coding:utf-8 -*-
'''
Define the widget modules for TorCMS.
'''
import tornado.escape
import tornado.web
from torcms.model.reply_model import MReply
from torcms.model.rating_model import MRating
from torcms.core.libs.deprecation import deprecated
class BaiduShare(tornado.web.UIModule):
'''
widget for baidu share.
'''
def render(self, *args, **kwargs):
return self.render_string('modules/widget/baidu_share.html')
class ReplyPanel(tornado.web.UIModule):
'''
the reply panel.
'''
def render(self, *args, **kwargs):
uid = args[0]
userinfo = args[1]
return self.render_string(
'modules/widget/reply_panel.html',
uid=uid,
replys=MReply.query_by_post(uid),
userinfo=userinfo,
linkify=tornado.escape.linkify
)
class UserinfoWidget(tornado.web.UIModule, tornado.web.RequestHandler):
'''
userinfo widget.
'''
def render(self, *args, **kwargs):
        is_logged = bool(kwargs.get('userinfo'))
return self.render_string(
'modules/widget/loginfo.html',
userinfo=kwargs['userinfo'],
is_logged=is_logged)
class WidgetEditor(tornado.web.UIModule):
'''
editor widget.
'''
def render(self, *args, **kwargs):
router = args[0]
uid = args[1]
userinfo = args[2]
if 'catid' in kwargs:
catid = kwargs['catid']
else:
catid = ''
kwd = {
'router': router,
'uid': uid,
'catid':catid
}
return self.render_string(
'modules/widget/widget_editor.html',
kwd=kwd,
userinfo=userinfo)
class WidgetSearch(tornado.web.UIModule):
'''
search widget. Simple searching. searching for all.
'''
def render(self, *args, **kwargs):
# tag_enum = MCategory.query_pcat()
return self.render_string('modules/widget/widget_search.html')
class StarRating(tornado.web.UIModule):
'''
For rating of posts.
'''
def render(self, *args, **kwargs):
postinfo = args[0]
userinfo = args[1]
rating = False
if userinfo:
rating = MRating.get_rating(postinfo.uid, userinfo.uid)
        if not rating:
            rating = postinfo.rating
return self.render_string(
'modules/widget/star_rating.html',
postinfo=postinfo,
userinfo=userinfo,
rating=rating,
)
class NavigatePanel(tornado.web.UIModule):
'''
render navigate panel.
'''
@deprecated(details='Should not used any more.')
def render(self, *args, **kwargs):
userinfo = args[0]
return self.render_string(
'modules/widget/navigate_panel.html',
userinfo=userinfo,
)
class FooterPanel(tornado.web.UIModule):
'''
render footer panel.
'''
@deprecated(details='Should not used any more.')
def render(self, *args, **kwargs):
userinfo = args[0]
return self.render_string(
'modules/widget/footer_panel.html',
userinfo=userinfo,
)
class UseF2E(tornado.web.UIModule):
'''
using f2e lib.
'''
def render(self, *args, **kwargs):
f2ename = args[0]
return self.render_string(
'modules/usef2e/{0}.html'.format(f2ename),
)
class BaiduSearch(tornado.web.UIModule):
'''
widget for baidu search.
'''
def render(self, *args, **kwargs):
baidu_script = ''
return self.render_string('modules/info/baidu_script.html',
baidu_script=baidu_script)
|
the-stack_106_24227 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class PoolAddOptions(Model):
"""Additional parameters for add operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
"""
_attribute_map = {
'timeout': {'key': '', 'type': 'int'},
'client_request_id': {'key': '', 'type': 'str'},
'return_client_request_id': {'key': '', 'type': 'bool'},
'ocp_date': {'key': '', 'type': 'rfc-1123'},
}
def __init__(self, **kwargs):
super(PoolAddOptions, self).__init__(**kwargs)
self.timeout = kwargs.get('timeout', 30)
self.client_request_id = kwargs.get('client_request_id', None)
self.return_client_request_id = kwargs.get('return_client_request_id', False)
self.ocp_date = kwargs.get('ocp_date', None)
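# Hypothetical usage sketch (not part of the generated file): the options object is
# normally passed alongside a pool add call, e.g.
#   options = PoolAddOptions(timeout=60,
#                            client_request_id='9C4D50EE-2D56-4CD3-8152-34347DC9F2B0')
# and is serialized by the client into request query parameters and headers.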
|
the-stack_106_24228 | import warnings
import os.path
import numpy as np
import pandas as pd
import ntpath
import openpyxl
import xlrd
try:
from openpyxl.utils.cell import coordinate_from_string
except:
from openpyxl.utils import coordinate_from_string
from .helpers import compare_pandas_versions, check_valid_xls
from .collect import Collect
# d6tcollect.init(__name__)
#******************************************************************
# read_excel_advanced
#******************************************************************
def read_excel_advanced(fname, remove_blank_cols=True, remove_blank_rows=True, collapse_header=True,
header_xls_range=None, header_xls_start=None, header_xls_end=None,
is_preview=False, nrows_preview=3, **kwds):
"""
Read Excel files to pandas dataframe with advanced options like set header ranges and remove blank columns and rows
Args:
fname (str): Excel file path
remove_blank_cols (bool): remove blank columns
remove_blank_rows (bool): remove blank rows
collapse_header (bool): to convert multiline header to a single line string
header_xls_range (string): range of headers in excel, eg: A4:B16
header_xls_start (string): Starting cell of excel for header range, eg: A4
header_xls_end (string): End cell of excel for header range, eg: B16
is_preview (bool): Read only first `nrows_preview` lines
nrows_preview (integer): Initial number of rows to be used for preview columns (default: 3)
kwds (mixed): parameters for `pandas.read_excel()` to pass through
Returns:
df (dataframe): pandas dataframe
Note:
You can pass in any `pandas.read_excel()` parameters in particular `sheet_name`
"""
header = []
if header_xls_range:
if not (header_xls_start and header_xls_end):
header_xls_range = header_xls_range.split(':')
header_xls_start, header_xls_end = header_xls_range
else:
raise ValueError('Parameter conflict. Can only pass header_xls_range or header_xls_start with header_xls_end')
if header_xls_start and header_xls_end:
if 'skiprows' in kwds or 'usecols' in kwds:
raise ValueError('Parameter conflict. Cannot pass skiprows or usecols with header_xls')
scol, srow = coordinate_from_string(header_xls_start)
ecol, erow = coordinate_from_string(header_xls_end)
# header, skiprows, usecols
header = list(range(erow - srow + 1))
usecols = scol + ":" + ecol
skiprows = srow - 1
if compare_pandas_versions(pd.__version__, "0.20.3") > 0:
df = pd.read_excel(fname, header=header, skiprows=skiprows, usecols=usecols, **kwds)
else:
df = pd.read_excel(fname, header=header, skiprows=skiprows, parse_cols=usecols, **kwds)
else:
df = pd.read_excel(fname, **kwds)
# remove blank cols and rows
if remove_blank_cols:
df = df.dropna(axis='columns', how='all')
if remove_blank_rows:
df = df.dropna(axis='rows', how='all')
# todo: add df.reset_index() once no actual data in index
# clean up header
if collapse_header:
if len(header) > 1:
df.columns = [' '.join([s for s in col if not 'Unnamed' in s]).strip().replace("\n", ' ')
for col in df.columns.values]
df = df.reset_index()
else:
df.rename(columns=lambda x: x.strip().replace("\n", ' '), inplace=True)
# preview
if is_preview:
df = df.head(nrows_preview)
return df
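# Illustrative usage sketch (not part of the original module); the file name and
# header range below are hypothetical:
#   df = read_excel_advanced('report.xlsx', sheet_name='data',
#                            header_xls_range='A4:D5')
# The two header rows A4:D5 are collapsed into single-line column names and blank
# rows/columns are dropped before the dataframe is returned.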
#******************************************************************
# XLSSniffer
#******************************************************************
class XLSSniffer(object, metaclass=Collect):
"""
Extracts available sheets from MULTIPLE Excel files and runs diagnostics
Args:
fname_list (list): file paths, eg ['dir/a.csv','dir/b.csv']
logger (object): logger object with send_log(), optional
"""
def __init__(self, fname_list, logger=None):
if not fname_list:
raise ValueError("Filename list should not be empty")
self.fname_list = fname_list
self.logger = logger
check_valid_xls(self.fname_list)
self.sniff()
def sniff(self):
"""
Executes sniffer
Returns:
boolean: True if everything ok. Results are accessible in ``.df_xls_sheets``
"""
xls_sheets = {}
for fname in self.fname_list:
if self.logger:
self.logger.send_log('sniffing sheets in '+ntpath.basename(fname),'ok')
xls_fname = {}
xls_fname['file_name'] = ntpath.basename(fname)
if fname[-5:]=='.xlsx':
fh = openpyxl.load_workbook(fname,read_only=True)
xls_fname['sheets_names'] = fh.sheetnames
fh.close()
# todo: need to close file?
elif fname[-4:]=='.xls':
fh = xlrd.open_workbook(fname, on_demand=True)
xls_fname['sheets_names'] = fh.sheet_names()
fh.release_resources()
else:
raise IOError('Only .xls or .xlsx files can be combined')
xls_fname['sheets_count'] = len(xls_fname['sheets_names'])
xls_fname['sheets_idx'] = np.arange(xls_fname['sheets_count']).tolist()
xls_sheets[fname] = xls_fname
self.xls_sheets = xls_sheets
df_xls_sheets = pd.DataFrame(xls_sheets).T
df_xls_sheets.index.names = ['file_path']
self.dict_xls_sheets = xls_sheets
self.df_xls_sheets = df_xls_sheets
return True
def all_contain_sheetname(self,sheet_name):
"""
Check if all files contain a certain sheet
Args:
sheet_name (string): sheetname to check
Returns:
boolean: If true
"""
return np.all([sheet_name in self.dict_xls_sheets[fname]['sheets_names'] for fname in self.fname_list])
def all_have_idx(self,sheet_idx):
"""
Check if all files contain a certain index
Args:
sheet_idx (string): index to check
Returns:
boolean: If true
"""
return np.all([sheet_idx<=(d['sheets_count']-1) for k,d in self.dict_xls_sheets.items()])
def all_same_count(self):
"""
Check if all files contain the same number of sheets
Args:
sheet_idx (string): index to check
Returns:
boolean: If true
"""
first_elem = next(iter(self.dict_xls_sheets.values()))
return np.all([first_elem['sheets_count']==d['sheets_count'] for k,d in self.dict_xls_sheets.items()])
def all_same_names(self):
first_elem = next(iter(self.dict_xls_sheets.values()))
return np.all([first_elem['sheets_names']==d['sheets_names'] for k,d in self.dict_xls_sheets.items()])
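# Illustrative usage sketch (file names are hypothetical):
#   sniffer = XLSSniffer(['data/jan.xlsx', 'data/feb.xlsx'])
#   sniffer.df_xls_sheets                     # one row per file with sheet names/counts
#   sniffer.all_contain_sheetname('Sheet1')   # True if every file has that sheet
#   sniffer.all_same_count()                  # True if every file has the same sheet count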
#******************************************************************
# convertor
#******************************************************************
class XLStoBase(object, metaclass=Collect):
def __init__(self, if_exists='skip', output_dir=None, logger=None):
"""
Base class for converting Excel files
Args:
if_exists (str): Possible values: skip and replace, default: skip, optional
output_dir (str): If present, file is saved in given directory, optional
logger (object): logger object with send_log('msg','status'), optional
"""
if if_exists not in ['skip', 'replace']:
raise ValueError("Possible value of 'if_exists' are 'skip' and 'replace'")
self.logger = logger
self.if_exists = if_exists
self.output_dir = output_dir
if self.output_dir:
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
def _get_output_filename(self, fname):
if self.output_dir:
basename = os.path.basename(fname)
fname_out = os.path.join(self.output_dir, basename)
else:
fname_out = fname
is_skip = (self.if_exists == 'skip' and os.path.isfile(fname_out))
return fname_out, is_skip
def convert_single(self, fname, sheet_name, **kwds):
"""
Converts single file
Args:
fname: path to file
sheet_name (str): optional sheet_name to override global `cfg_xls_sheets_sel`
Same as `d6tstack.utils.read_excel_advanced()`
Returns:
list: output file names
"""
if self.logger:
msg = 'converting file: '+ntpath.basename(fname)+' | sheet: '
if hasattr(self, 'cfg_xls_sheets_sel'):
msg += str(self.cfg_xls_sheets_sel[fname])
self.logger.send_log(msg,'ok')
fname_out = fname + '-' + str(sheet_name) + '.csv'
fname_out, is_skip = self._get_output_filename(fname_out)
if not is_skip:
df = read_excel_advanced(fname, sheet_name=sheet_name, **kwds)
df.to_csv(fname_out, index=False)
else:
warnings.warn('File %s exists, skipping' %fname)
return fname_out
class XLStoCSVMultiFile(XLStoBase, metaclass=Collect):
"""
Converts xls|xlsx files to csv files. Selects a SINGLE SHEET from each file. To extract MULTIPLE SHEETS from a file use XLStoCSVMultiSheet
Args:
fname_list (list): file paths, eg ['dir/a.csv','dir/b.csv']
cfg_xls_sheets_sel_mode (string): mode to select tabs
* ``name``: select by name, provide name for each file, can customize by file
* ``name_global``: select by name, one name for all files
* ``idx``: select by index, provide index for each file, can customize by file
* ``idx_global``: select by index, one index for all files
cfg_xls_sheets_sel (dict): values to select tabs `{'filename':'value'}`
output_dir (str): If present, file is saved in given directory, optional
if_exists (str): Possible values: skip and replace, default: skip, optional
logger (object): logger object with send_log('msg','status'), optional
"""
def __init__(self, fname_list, cfg_xls_sheets_sel_mode='idx_global', cfg_xls_sheets_sel=0,
output_dir=None, if_exists='skip', logger=None):
super().__init__(if_exists, output_dir, logger)
if not fname_list:
raise ValueError("Filename list should not be empty")
self.set_files(fname_list)
self.set_select_mode(cfg_xls_sheets_sel_mode, cfg_xls_sheets_sel)
def set_files(self, fname_list):
"""
Update input files. You will also need to update sheet selection with ``.set_select_mode()``.
Args:
fname_list (list): see class description for details
"""
self.fname_list = fname_list
self.xlsSniffer = XLSSniffer(fname_list)
def set_select_mode(self, cfg_xls_sheets_sel_mode, cfg_xls_sheets_sel):
"""
Update sheet selection values
Args:
cfg_xls_sheets_sel_mode (string): see class description for details
cfg_xls_sheets_sel (list): see class description for details
"""
assert cfg_xls_sheets_sel_mode in ['name','idx','name_global','idx_global']
sheets = self.xlsSniffer.dict_xls_sheets
if cfg_xls_sheets_sel_mode=='name_global':
cfg_xls_sheets_sel_mode = 'name'
cfg_xls_sheets_sel = dict(zip(self.fname_list,[cfg_xls_sheets_sel]*len(self.fname_list)))
elif cfg_xls_sheets_sel_mode=='idx_global':
cfg_xls_sheets_sel_mode = 'idx'
cfg_xls_sheets_sel = dict(zip(self.fname_list,[cfg_xls_sheets_sel]*len(self.fname_list)))
if not set(cfg_xls_sheets_sel.keys())==set(sheets.keys()):
raise ValueError('Need to select a sheet from every file')
# check given selection actually present in files
if cfg_xls_sheets_sel_mode=='name':
if not np.all([cfg_xls_sheets_sel[fname] in sheets[fname]['sheets_names'] for fname in self.fname_list]):
raise ValueError('Invalid sheet name selected in one of the files')
# todo show which file is mismatched
elif cfg_xls_sheets_sel_mode=='idx':
if not np.all([cfg_xls_sheets_sel[fname] <= sheets[fname]['sheets_count'] for fname in self.fname_list]):
raise ValueError('Invalid index selected in one of the files')
# todo show which file is mismatched
else:
raise ValueError('Invalid xls_sheets_mode')
self.cfg_xls_sheets_sel_mode = cfg_xls_sheets_sel_mode
self.cfg_xls_sheets_sel = cfg_xls_sheets_sel
def convert_all(self, **kwds):
"""
Converts all files
Args:
Any parameters for `d6tstack.utils.read_excel_advanced()`
Returns:
list: output file names
"""
fnames_converted = []
for fname in self.fname_list:
fname_out = self.convert_single(fname, self.cfg_xls_sheets_sel[fname], **kwds)
fnames_converted.append(fname_out)
return fnames_converted
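# Illustrative usage sketch (file names are hypothetical):
#   conv = XLStoCSVMultiFile(['data/jan.xlsx', 'data/feb.xlsx'],
#                            cfg_xls_sheets_sel_mode='name_global',
#                            cfg_xls_sheets_sel='Sheet1',
#                            output_dir='out_csv')
#   csv_files = conv.convert_all()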
class XLStoCSVMultiSheet(XLStoBase, metaclass=Collect):
"""
Converts ALL SHEETS from a SINGLE xls|xlsx files to separate csv files
Args:
fname (string): file path
sheet_names (list): list of int or str. If not given, will convert all sheets in the file
output_dir (str): If present, file is saved in given directory, optional
if_exists (str): Possible values: skip and replace, default: skip, optional
logger (object): logger object with send_log('msg','status'), optional
"""
def __init__(self, fname, sheet_names=None, output_dir=None, if_exists='skip', logger=None):
super().__init__(if_exists, output_dir, logger)
self.fname = fname
if sheet_names:
if not isinstance(sheet_names, (list,str)):
raise ValueError('sheet_names needs to be a list')
self.sheet_names = sheet_names
else:
self.xlsSniffer = XLSSniffer([fname, ])
self.sheet_names = self.xlsSniffer.xls_sheets[self.fname]['sheets_names']
def convert_single(self, sheet_name, **kwds):
"""
Converts all files
Args:
sheet_name (str): Excel sheet
Any parameters for `d6tstack.utils.read_excel_advanced()`
Returns:
str: output file name
"""
return super().convert_single(self.fname, sheet_name, **kwds)
def convert_all(self, **kwds):
"""
Converts all files
Args:
Any parameters for `d6tstack.utils.read_excel_advanced()`
Returns:
list: output file names
"""
fnames_converted = []
for iSheet in self.sheet_names:
fname_out = self.convert_single(iSheet, **kwds)
fnames_converted.append(fname_out)
return fnames_converted |
the-stack_106_24232 | # User Configuration variable settings for pitimolo
# Purpose - Motion Detection Security Cam
# Created - 20-Jul-2015 pi-timolo ver 2.94 compatible or greater
# Done by - Claude Pageau
configTitle = "pi-timolo default config motion"
configName = "pi-timolo-default-config"
# These settings should both be False if this script is run as a background /etc/init.d daemon
verbose = True              # Sends detailed logging info to console. Set to False if running script as a daemon
logDataToFile = True        # Logs diagnostic data to a disk file for review. default=False
debug = False               # Puts script in debug mode; returns pixel average data for tuning
# print a test image
imageTestPrint = False # default=False Set to True to print one image and exit (useful for aligning camera)
# Image Settings
imageNamePrefix = 'cam1-' # Prefix for all image file names. Eg front-
imageWidth = 1024 # Full Size Image Width in px default=1024
imageHeight = 768 # Full Size Image Height in px default=768
imageVFlip = False # Flip image Vertically default=False
imageHFlip = False # Flip image Horizontally default=False
imageRotation = 0 # Rotate image. Valid values: 0, 90, 180 & 270
imagePreview = False # Preview image on connected RPI Monitor default=False
noNightShots = False # Don't Take images at Night default=False
noDayShots = False # Don't Take images during day time default=False
# Low Light Night Settings
nightMaxShut = 5.5 # default=5.5 sec Highest cam shut exposure time.
# IMPORTANT 6 sec works sometimes but occasionally locks RPI and HARD reboot required to clear
nightMinShut = .001 # default=.002 sec Lowest camera shut exposure time for transition from day to night (or visa versa)
nightMaxISO = 800 # default=800 Max cam ISO night setting
nightMinISO = 100 # lowest ISO camera setting for transition from day to night (or visa versa)
nightSleepSec = 10 # default=10 Sec - Time period to allow camera to calculate low light AWB
twilightThreshold = 40 # default=40 Light level to trigger day/night transition at twilight
# Date/Time Settings for Displaying info Directly on Images
showDateOnImage = True # Set to False for No display of date/time on image default= True
showTextFontSize = 18 # Size of image Font in pixel height
showTextBottom = True # Location of image Text True=Bottom False=Top
showTextWhite = True # Colour of image Text True=White False=Black
showTextWhiteNight = True # Change night text to white. Might help if night needs white instead of black during day or visa versa
# Motion Detect Settings
motionOn = True # True = motion capture is turned on. False= No motion detection
motionPrefix = "mo-" # Prefix Motion Detection images
motionDir = "motion" # Storage Folder for Motion Detect Images
threshold = 35 # How much a pixel has to change to be counted default=35 (1-200)
sensitivity = 100 # Number of changed pixels to trigger motion default=100
motionAverage = 2 # Number of images to average for motion verification: 1=last image only or 100=Med 300=High Average Etc.
useVideoPort = True # Use the video port to capture motion images - faster than the image port. Default=False
motionVideoOn = False # If set to True then video clip is taken rather than image
motionVideoTimer = 10 # Number of seconds of video clip to take if Motion Detected default=10
motionQuickTLOn = False # if set to True then take a quick time lapse sequence rather than a single image (overrides motionVideoOn)
motionQuickTLTimer = 10 # Duration in seconds of quick time lapse sequence after initial motion detected default=10
motionQuickTLInterval = 0 # Time between each Quick time lapse image 0 is fast as possible
motionForce = 60 * 60 # Force single motion image if no Motion Detected in specified seconds. default=60*60
motionNumOn = True # True=On (filenames by sequenced Number) otherwise date/time used for filenames
motionNumStart = 1000 # Start motion number sequence
motionNumMax = 500 # Max number of motion images desired. 0=Continuous default=0
motionNumRecycle = True # After numberMax reached restart at numberStart instead of exiting default=True
motionMaxDots = 100 # Number of motion dots before starting new line
createLockFile = False # default=False if True then sync.sh will call gdrive to sync files to your web google drive if .sync file exists
# Lock File is used to indicate motion images are added so sync.sh can sync in background via sudo crontab -e
# Time Lapse Settings
timelapseOn = True # Turns timelapse True=On False=Off
timelapseTimer = 60 # Seconds between timelapse images default=5*60
timelapseDir = "timelapse" # Storage Folder for Time Lapse Images
timelapsePrefix = "tl-" # Prefix timelapse images with this prefix
timelapseExit = 0 * 60 # Will Quit program after specified seconds 0=Continuous default=0
timelapseNumOn = False # True=On (filenames Sequenced by Number) otherwise date/time used for filename
timelapseNumStart = 1000 # Start of timelapse number sequence
timelapseNumMax = 2000 # Max number of timelapse images desired. 0=Continuous default=2000
timelapseNumRecycle = True # After numberMax reached restart at numberStart instead of exiting default=True
# ---------------------------------------------- End of User Variables -----------------------------------------------------
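# Illustration only (not read by pi-timolo itself): a minimal sketch of how the
# threshold/sensitivity pair above is typically applied by the main script.
# The function name and the numpy-based pixel diff are assumptions for clarity.
#
#   import numpy as np
#
#   def motion_detected(prev_gray, curr_gray):
#       # Count pixels whose brightness changed by more than `threshold`;
#       # report motion once more than `sensitivity` pixels have changed.
#       diff = np.abs(curr_gray.astype(int) - prev_gray.astype(int))
#       return int(np.count_nonzero(diff > threshold)) > sensitivity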
|
the-stack_106_24237 | import logging
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
from django.test import Client, TestCase
from django.urls import reverse
from cms.contexts.tests import ContextUnitTest
from cms.pages.models import PageHeading
from cms.pages.tests import PageUnitTest
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class PageHeadingAPIUnitTest(TestCase):
def setUp(self):
pass
def test_page_heading(self):
"""
Page Heading API
"""
req = Client()
user2 = ContextUnitTest.create_user(username='staff',
is_staff=True)
ebu = ContextUnitTest.create_editorialboard_user()
user = ebu.user
webpath = ebu.webpath
site = webpath.site
page = PageUnitTest.create_page()
page.webpath = webpath
page.save()
# page headings list
url = reverse('unicms_api:editorial-board-site-webpath-page-headings',
kwargs={'site_id': site.pk,
'webpath_id': webpath.pk,
'page_id': page.pk})
# accessible to staff users only
res = req.get(url)
assert res.status_code == 403
# site is not managed by user2
req.force_login(user2)
res = req.get(url)
assert res.status_code == 403
# user is staff
user.is_staff = True
user.is_superuser = True
user.save()
req.force_login(user)
res = req.get(url)
assert isinstance(res.json(), dict)
# POST
data = {'page': page.pk,
'title': 'heading name',
'description': 'heading descr',
'is_active': 1
}
# user hasn't permission
req.force_login(user2)
res = req.post(url, data=data, follow=1,
content_type='application/json')
assert res.status_code == 403
# user has permission
req.force_login(user)
res = req.post(url, data=data, follow=1,
content_type='application/json')
page_heading = PageHeading.objects.filter(page=page).last()
assert page_heading
# GET LOGS
url = reverse('unicms_api:editorial-board-site-webpath-page-heading-logs',
kwargs={'site_id': site.pk,
'webpath_id': webpath.pk,
'page_id': page.pk,
'pk': page_heading.pk})
res = req.get(url, content_type='application/json',)
assert isinstance(res.json(), dict)
# redis lock set
ct = ContentType.objects.get_for_model(page_heading)
data = {'content_type_id': ct.pk,
'object_id': page_heading.pk}
res = req.post(url, data,
content_type='application/json', follow=1)
assert isinstance(res.json(), dict)
# GET, patch, put, delete
url = reverse('unicms_api:editorial-board-site-webpath-page-heading',
kwargs={'site_id': site.pk,
'webpath_id': webpath.pk,
'page_id': page.pk,
'pk': page_heading.pk})
# GET
res = req.get(url, content_type='application/json',)
assert isinstance(res.json(), dict)
# PATCH
data = {'title': 'patched'}
# user hasn't permission
req.force_login(user2)
res = req.patch(url, data,
content_type='application/json',
follow=1)
assert res.status_code == 403
# user has permission on page
page.created_by = user2
page.save()
ebu3 = ContextUnitTest.create_editorialboard_user(user=user2,
webpath=webpath,
permission=3)
user2.refresh_from_db()
req.force_login(user2)
res = req.patch(url, data,
content_type='application/json',
follow=1)
page_heading.refresh_from_db()
assert page_heading.title == 'patched'
# PUT
page.created_by = None
page.save()
data = {'page': page.pk,
'title': 'putted',
'description': 'new descr',
'is_active': 1
}
# user hasn't permission
req.force_login(user2)
res = req.put(url, data, follow=1,
content_type='application/json')
assert res.status_code == 403
# user has permission
req.force_login(user)
res = req.put(url, data, follow=1,
content_type='application/json')
page_heading.refresh_from_db()
assert page_heading.title == 'putted'
# DELETE
# user hasn't permission
req.force_login(user2)
res = req.delete(url)
assert res.status_code == 403
# user has permission
req.force_login(user)
res = req.delete(url)
try:
page_heading.refresh_from_db()
except ObjectDoesNotExist:
assert True
# form
url = reverse('unicms_api:editorial-board-site-webpath-page-heading-form',
kwargs={'site_id': site.pk,
'webpath_id': webpath.pk,
'page_id': page.pk})
res = req.get(url)
assert isinstance(res.json(), list)
|
the-stack_106_24240 | # Regularly scheduled update: check which files need updating and process them
# TODO: migrate to GitHub API v4, which uses GraphQL. Example query below -- use
# https://developer.github.com/v4/explorer/ to try it out (need to use the
# actual node IDs returned):
#
# {
# repository(owner: "vim", name: "vim") {
# refs(refPrefix: "refs/tags/", last: 1) {
# nodes {
# name
# }
# }
# object(expression: "master:runtime/doc") {
# ... on Tree {
# entries {
# type
# name
# oid
# object {
# id
# }
# }
# }
# }
# }
# nodes(ids: ["xyz", "abc"]) {
# ... on Blob {
# text
# }
# }
# }
import base64
import hashlib
import json
import logging
import os
import re
from http import HTTPStatus
import flask
import flask.views
import gevent
import gevent.pool
import gevent.ssl
import geventhttpclient
import geventhttpclient.client
import geventhttpclient.response
import werkzeug.exceptions
import google.cloud.ndb
import google.cloud.tasks
from . import secret
from .dbmodel import GlobalInfo, ProcessedFileHead, ProcessedFilePart, \
RawFileContent, RawFileInfo, ndb_client
from . import vimh2h
# Once we have consumed about ten minutes of CPU time, Google will throw us a
# DeadlineExceededError and our script terminates. Therefore, we must be
# careful with the order of operations, to ensure that after this has happened,
# the next scheduled run of the script can pick up where the previous one was
# interrupted. Although in practice, it takes about 30 seconds, so it's
# unlikely to be an issue.
# Number of concurrent (in the gevent sense) workers. Avoid setting this too
# high, else there is risk of running out of memory on our puny worker node.
CONCURRENCY = 5
TAGS_NAME = 'tags'
FAQ_NAME = 'vim_faq.txt'
HELP_NAME = 'help.txt'
DOC_ITEM_RE = re.compile(r'(?:[-\w]+\.txt|tags)$')
COMMIT_MSG_RE = re.compile(r'[Pp]atch\s+(\d[^:\n]+)')
GITHUB_API_URL_BASE = 'https://api.github.com'
FAQ_BASE_URL = 'https://raw.githubusercontent.com/chrisbra/vim_faq/master/doc/'
PFD_MAX_PART_LEN = 995000
# Request header name
HTTP_HDR_IF_NONE_MATCH = 'If-None-Match'
# Response header name
HTTP_HDR_ETAG = 'ETag'
class UpdateHandler(flask.views.MethodView):
def post(self):
# We get an HTTP POST request if the request came programmatically via
# Cloud Tasks.
self._run(flask.request.data)
return flask.Response()
def get(self):
# We get an HTTP GET request if the request was generated by the
# user, by entering the URL in their browser.
self._run(flask.request.query_string)
return "Success."
def _run(self, request_data):
req = flask.request
# https://cloud.google.com/tasks/docs/creating-appengine-handlers#reading_app_engine_task_request_headers
if 'X-AppEngine-QueueName' not in req.headers and \
os.environ.get('VIMHELP_ENV') != 'dev' and \
secret.UPDATE_PASSWORD not in request_data:
raise werkzeug.exceptions.Forbidden()
force = b'force' in request_data
logging.info("Starting %supdate", 'forced ' if force else '')
self._http_client_pool = geventhttpclient.client.HTTPClientPool(
ssl_context_factory=gevent.ssl.create_default_context,
concurrency=CONCURRENCY)
try:
self._greenlet_pool = gevent.pool.Pool(size=CONCURRENCY)
with ndb_client.context():
self._g_changed = False
self._update_g(wipe=force)
self._do_update(no_rfi=force)
if self._g_changed:
self._g.put()
logging.info("Finished update, updated global info")
else:
logging.info("Finished update, global info unchanged")
self._greenlet_pool.join()
finally:
self._http_client_pool.close()
def _do_update(self, no_rfi):
# Kick off retrieval of all RawFileInfo entities from the Datastore
if no_rfi:
all_rfi_greenlet = None
else:
all_rfi_greenlet = self._spawn_ndb(lambda:
RawFileInfo.query().fetch())
# Kick off check for new vim version
refresh_vim_version_greenlet = self._spawn(self._refresh_vim_version)
# Kick off retrieval of 'runtime/doc' dir listing in github
docdir_greenlet = self._spawn(self._vim_github_request,
'/repos/vim/vim/contents/runtime/doc',
self._g.docdir_etag)
# Put all RawFileInfo entites into a map
if all_rfi_greenlet:
rfi_map = {r.key.string_id(): r for r in all_rfi_greenlet.get()}
else:
rfi_map = {}
fetcher_greenlets = set()
fetcher_greenlets_by_name = {}
def fetcher_greenlets_add(name, value):
fetcher_greenlets.add(value)
fetcher_greenlets_by_name[name] = value
def queue_urlfetch(name, url, git_sha=None):
rfi = rfi_map.get(name)
etag = rfi.etag if rfi is not None else None
logging.info("Queueing URL fetch for '%s' (etag: %s)", name, etag)
processor_greenlet = self._spawn(ProcessorHTTP.create, name,
git_sha,
client_pool=self._http_client_pool,
url=url, etag=etag)
fetcher_greenlets_add(name, processor_greenlet)
# Kick off FAQ download
queue_urlfetch(FAQ_NAME, FAQ_BASE_URL + FAQ_NAME)
# Iterating over 'runtime/doc' dir listing, kick off download for all
# modified items
docdir = docdir_greenlet.get()
if docdir.status_code == HTTPStatus.NOT_MODIFIED:
logging.info("doc dir not modified")
elif docdir.status_code == HTTPStatus.OK:
self._g.docdir_etag = docdir.header(HTTP_HDR_ETAG).encode()
self._g_changed = True
logging.info("doc dir modified, new etag is %s",
docdir.header(HTTP_HDR_ETAG))
for item in json.loads(docdir.body):
name = item['name']
if item['type'] == 'file' and DOC_ITEM_RE.match(name):
assert name not in fetcher_greenlets_by_name
git_sha = item['sha'].encode()
rfi = rfi_map.get(name)
if rfi is not None and rfi.git_sha == git_sha:
logging.debug("Found unchanged '%s'", name)
continue
elif rfi is None:
logging.info("Found new '%s'", name)
else:
logging.info("Found changed '%s'", name)
queue_urlfetch(name, item['download_url'], git_sha)
# Check if we have a new vim version
is_new_vim_version = refresh_vim_version_greenlet.get()
# If there is no new vim version, and if the only file we're
# downloading is the FAQ, and if the FAQ was not modified, then there
# is nothing to do for us, so bail out now
if not is_new_vim_version and len(fetcher_greenlets) == 1:
faq_uf = fetcher_greenlets_by_name[FAQ_NAME].get()
if faq_uf.status_code() == HTTPStatus.NOT_MODIFIED:
return
def get_content(name):
processor_greenlet = fetcher_greenlets_by_name.get(name)
# Do we already have retrieval queued?
if processor_greenlet is not None:
# If so, wait for that and return the content.
logging.info("Getting '%s'", name)
return processor_greenlet.get().raw_content()
else:
logging.info("Getting '%s' from Datastore", name)
# If we don't have retrieval queued, that means we must already
# have the latest version in the Datastore, so get the content
# from there.
return RawFileContent.get_by_id(name).data
# Make sure we are retrieving tags, either from HTTP or from Datastore
tags_greenlet = self._spawn_ndb(get_content, TAGS_NAME)
# Make sure we are retrieving FAQ, either from HTTP or from Datastore
faq_greenlet = self._spawn_ndb(get_content, FAQ_NAME)
# If we found a new vim version and we're not already downloading
# help.txt, kick off its retrieval from the Datastore instead
# (since we're displaying the current vim version in the rendered
# help.txt.html)
if is_new_vim_version and HELP_NAME not in fetcher_greenlets_by_name:
fetcher_greenlets_add(HELP_NAME, self._spawn_ndb(ProcessorDB.create,
HELP_NAME))
tags_body = tags_greenlet.get()
# Construct the vimhelp-to-html converter, providing it the tags file,
# and adding on the FAQ for extra tags
h2h = vimh2h.VimH2H(tags_body.decode(),
version=self._g.vim_version.decode())
logging.info("Adding FAQ tags")
h2h.add_tags(FAQ_NAME, faq_greenlet.get().decode())
# Wait for urlfetches and Datastore accesses to return; kick off the
# processing as they do so
logging.info("Waiting for fetchers")
processor_greenlets = []
for greenlet in gevent.iwait(fetcher_greenlets):
try:
processor = greenlet.get()
except UrlfetchError as e:
logging.error(e)
# If we could not fetch the URL, continue with the others, but
# set 'self._g_changed' to False so we do not save the
# 'GlobalInfo' object at the end, so that we will retry at the
# next run
self._g_changed = False
else: # no exception was raised
processor_greenlets.append(self._spawn_ndb(processor.process,
h2h))
logging.info("Waiting for processors")
gevent.joinall(processor_greenlets)
logging.info("All done")
def _refresh_vim_version(self):
# Check if the Vim version has changed; we display it on our front
# page, so we must keep it updated even if nothing else has changed
# TODO: find a better way... this doesn't find the current vim version
# if the latest commit did not bump the version (only a problem if we
# don't already have the vim version in the datastore)
#
# should probably use the Events API:
# https://developer.github.com/v3/activity/events/types/#pushevent
# this is also (somewhat) compatible with webhook payloads
# or perhaps better to use
# https://developer.github.com/v3/repos/commits/ since that does not
# include events we're not interested in
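# Hedged sketch of the commits-based alternative suggested above (not wired
# in; the endpoint is the standard GitHub v3 "list commits" call, but treat
# the exact response handling as an assumption):
#
#     resp = self._vim_github_request(
#         '/repos/vim/vim/commits?sha=master&per_page=5', None)
#     for commit in json.loads(resp.body):
#         m = COMMIT_MSG_RE.match(commit['commit']['message'])
#         if m:
#             latest_patch = m.group(1)  # e.g. "8.2.2345"
#             break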
is_new_vim_version = False
# Kick off retrieval of data about latest commit on master branch,
# which we will use to figure out if there is a new vim version
master = self._vim_github_request('/repos/vim/vim/branches/master',
self._g.master_etag)
if master.status_code == HTTPStatus.OK:
message = json.loads(master.body)['commit']['commit']['message']
m = COMMIT_MSG_RE.match(message)
if m:
new_vim_version = m.group(1)
new_vim_version_b = new_vim_version.encode()
if new_vim_version_b != self._g.vim_version:
logging.info("Found new vim version '%s' (was: '%s')",
new_vim_version, self._g.vim_version)
is_new_vim_version = True
self._g.vim_version = new_vim_version_b
self._g_changed = True
else:
logging.warn("master branch has moved forward, but vim "
"version from commit message is unchanged: "
"'%s' -> version '%s'", message,
self._g.vim_version)
else:
logging.warn("master branch has moved forward, but no new vim "
"version found in commit msg ('%s'), so keeping "
"old one ('%s')", message, self._g.vim_version)
self._g.master_etag = master.header(HTTP_HDR_ETAG).encode()
self._g_changed = True
elif self._g.master_etag and \
master.status_code == HTTPStatus.NOT_MODIFIED:
logging.info("master branch is unchanged, so no new vim version")
else:
logging.warn("Failed to get master branch: HTTP status %d",
master.status_code)
return is_new_vim_version
def _update_g(self, wipe):
g = GlobalInfo.get_by_id('global')
if wipe:
logging.info("Deleting global info and raw files from Datastore")
greenlets = [
self._spawn_ndb(wipe_db, RawFileContent),
self._spawn_ndb(wipe_db, RawFileInfo)
]
if g:
greenlets.append(self._spawn_ndb(g.key.delete))
# Make sure we preserve at least the vim version; necessary in
# case the latest commit on master doesn't specify it
g = GlobalInfo(id='global', vim_version=g.vim_version)
gevent.joinall(greenlets)
if not g:
g = GlobalInfo(id='global')
logging.info("Global info: %s",
", ".join("{} = {}".format(n, getattr(g, n)) for n in
g._properties.keys()))
self._g = g
def _vim_github_request(self, document, etag):
headers = {
'Accept': 'application/vnd.github.v3+json',
'Authorization': 'token ' + secret.GITHUB_ACCESS_TOKEN,
}
return urlfetch(self._http_client_pool, GITHUB_API_URL_BASE + document,
etag, headers=headers)
def _spawn(self, f, *args, **kwargs):
return self._greenlet_pool.apply_async(f, args, kwargs)
def _spawn_ndb(self, f, *args, **kwargs):
def g():
with ndb_client.context():
return f(*args, **kwargs)
return self._greenlet_pool.apply_async(g)
class ProcessorHTTP:
def __init__(self, name, git_sha, result):
self._name = name
self._git_sha = git_sha
self._result = result
self._raw_content = None
def status_code(self):
return self._result.status_code
def name(self):
return self._name
def raw_content(self):
if self._raw_content is None:
r = self._result
if r.status_code == HTTPStatus.OK:
self._raw_content = r.body
logging.info("Got '%s' from HTTP (%d bytes)",
self._name, len(self._raw_content))
elif r.status_code == HTTPStatus.NOT_MODIFIED:
rfc = RawFileContent.get_by_id(self._name)
self._raw_content = rfc.data
logging.info("Got '%s' from Datastore (%d bytes)",
self._name, len(self._raw_content))
return self._raw_content
def process(self, h2h):
r = self._result
if r.status_code == HTTPStatus.OK:
encoding = do_process(self._name, self.raw_content(), h2h)
do_save_rawfile(self._name, self._git_sha, self.raw_content(),
encoding.encode(), r.header(HTTP_HDR_ETAG))
@staticmethod
def create(name, git_sha, **urlfetch_args):
result = urlfetch(**urlfetch_args)
return ProcessorHTTP(name, git_sha, result)
class ProcessorDB:
def __init__(self, name, rfc):
logging.info("'%s': got %d bytes from Datastore", name, len(rfc.data))
self._name = name
self._rfc = rfc
def name(self):
return self._name
def raw_content(self):
return self._rfc.data
def process(self, h2h):
do_process(self._name, self._rfc.data, h2h,
encoding=self._rfc.encoding.decode())
@staticmethod
def create(name):
rfc = RawFileContent.get_by_id(name)
return ProcessorDB(name, rfc)
@google.cloud.ndb.transactional(xg=True)
def save_transactional(entities):
google.cloud.ndb.put_multi(entities)
def wipe_db(model):
all_keys = model.query().fetch(keys_only=True)
google.cloud.ndb.delete_multi(all_keys)
def sha1(content):
digest = hashlib.sha1()
digest.update(content)
return digest.digest()
def do_process(name, content, h2h, encoding=None):
logging.info("Translating '%s' to HTML", name)
phead, pparts, encoding = to_html(name, content, encoding, h2h)
logging.info("Saving HTML translation of '%s' (encoded as %s) to "
"Datastore", name, encoding)
save_transactional([phead] + pparts)
return encoding
def need_save_rawfilecontent(name):
return name in (HELP_NAME, FAQ_NAME, TAGS_NAME)
def do_save_rawfile(name, git_sha, content, encoding, etag):
rfi = RawFileInfo(id=name, git_sha=git_sha, etag=etag.encode())
if need_save_rawfilecontent(name):
logging.info("Saving raw file '%s' (info and content) to Datastore",
name)
rfc = RawFileContent(id=name, data=content, encoding=encoding)
save_transactional([rfi, rfc])
else:
logging.info("Saving raw file '%s' (info only) to Datastore", name)
rfi.put()
def to_html(name, content, encoding, h2h):
content_str = None
if encoding is None:
try:
encoding = 'UTF-8'
content_str = content.decode(encoding)
except UnicodeError:
encoding = 'ISO-8859-1'
if content_str is None:
content_str = content.decode(encoding)
html = h2h.to_html(name, content_str, encoding).encode()
etag = base64.b64encode(sha1(html))
datalen = len(html)
phead = ProcessedFileHead(id=name, encoding=encoding.encode(), etag=etag)
pparts = []
if datalen > PFD_MAX_PART_LEN:
phead.numparts = 0
for i in range(0, datalen, PFD_MAX_PART_LEN):
part = html[i:(i+PFD_MAX_PART_LEN)]
if i == 0:
phead.data0 = part
else:
partname = name + ':' + str(phead.numparts)
pparts.append(ProcessedFilePart(id=partname, data=part,
etag=etag))
phead.numparts += 1
else:
phead.numparts = 1
phead.data0 = html
return phead, pparts, encoding
def urlfetch(client_pool, url, etag, headers=None):
if headers is None:
headers = {}
if etag is not None:
headers[HTTP_HDR_IF_NONE_MATCH] = etag.decode()
logging.info("Fetching %s with headers %s", url, headers)
url = geventhttpclient.URL(url)
try:
result = client_pool.get_client(url).get(url.request_uri, headers)
except Exception as e:
logging.error(e)
raise UrlfetchError(e, url)
logging.info("Fetched %s -> HTTP %s", url, result.status_code)
return UrlfetchResponse(result)
class UrlfetchResponse:
def __init__(self, ghc_resp):
self.body = bytes(ghc_resp.read())
ghc_resp.release()
self._resp = ghc_resp
@property
def status_code(self):
return self._resp.status_code
def header(self, name):
return self._resp.get(name)
class UrlfetchError(RuntimeError):
def __init__(self, e, url):
self._e = e
self._url = url
def __str__(self):
return f"Failed to fetch {self._url}: {self._e}"
def handle_enqueue_update():
req = flask.request
is_cron = req.headers.get('X-AppEngine-Cron') == 'true'
# https://cloud.google.com/appengine/docs/standard/python3/scheduling-jobs-with-cron-yaml?hl=en_GB#validating_cron_requests
if not is_cron and os.environ.get('VIMHELP_ENV') != 'dev' and \
secret.UPDATE_PASSWORD not in req.query_string:
raise werkzeug.exceptions.Forbidden()
logging.info("Enqueueing update")
client = google.cloud.tasks.CloudTasksClient()
queue_name = client.queue_path(os.environ['GOOGLE_CLOUD_PROJECT'],
"us-central1",
"update2")
task = {
'app_engine_http_request': {
'http_method': 'POST',
'relative_uri': '/update',
'body': req.query_string
}
}
response = client.create_task(parent=queue_name, task=task)
logging.info('Task %s enqueued, ETA %s', response.name,
response.schedule_time)
if is_cron:
return flask.Response()
else:
return "Successfully enqueued update task."
|
the-stack_106_24242 | from openpharmacophore._private_tools.exceptions import FetchError, OpenPharmacophoreValueError
import pandas as pd
from tqdm.auto import tqdm
from io import StringIO
import json
import requests
import time
from typing import List, Dict, Tuple
base_url = "https://pubchem.ncbi.nlm.nih.gov/rest/pug"
def _get_data(url: str, attempts: int = 5) -> bytes:
""" Downloads data from a given url.
Parameters
----------
url : str
url to fetch data from
attempts : int
number of times to try to download the data in case of failure
Returns
----------
bytes
The content of the response body.
"""
if attempts <= 0:
raise ValueError("Number of attempts must be greater than 0")
while (attempts > 0):
res = requests.get(url)
if res.status_code == requests.codes.ok:
break
else:
attempts -= 1
time.sleep(2)
if res.status_code != requests.codes.ok:
raise FetchError("Failed to get data from {}".format(url))
return res.content
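# Example (illustrative; CID 2244 is aspirin and the property endpoint mirrors
# get_compound_smiles() below):
#
#   raw = _get_data(base_url + "/compound/cid/2244/property/CanonicalSMILES/TXT")
#   smiles = raw.decode("utf-8").strip()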
def get_assay_compounds_id(assay_id: int, attempts: int = 10) -> List[int]:
""" Get compounds id for tested compounds in an assay
Parameters
----------
assay_id : int
The id of the bioassay.
attempts : int, optional
Number of times to try to download the data in case of failure
(default=10).
Returns
-------
list
List containing the compounds ids.
"""
assay_url = base_url + "/bioassay/AID/{}/cids/JSON".format(assay_id)
data = _get_data(assay_url, attempts)
ids_dict = json.loads(data)
return ids_dict["InformationList"]["Information"][0]["CID"]
def get_assay_description(assay_id: int, summary: bool = True, attempts: int = 10) -> Dict[str, str]:
""" Get the description of an assay in JSON format.
Parameters
----------
assay_id : int
The id of the bioassay.
summary : bool
If true returns a summary of the description of the assay (default=True).
attempts : int
number of times to try to download the data in case of failure
(default=10).
Returns
--------
dict
A dictionary containing the assay description.
"""
assay_url = base_url + "/assay/aid/{}".format(assay_id)
if summary:
description_url = assay_url + "/summary/JSON"
else:
description_url = assay_url + "/description/JSON"
data = _get_data(description_url, attempts)
return json.loads(data)
def get_assay_results(assay_id, form="dataframe", attempts=10):
""" Get results of an assay.
Parameters
----------
assay_id : int
The id of the bioassay.
form : {"dataframe", "dict"}
The form of the returned object. Can be a dataframe or dict.
attempts : int, optional
number of times to try to download the data in case of failure
(default=10).
Returns
----------
pandas.DataFrame or dict
DataFrame or dictionary with the assay results.
"""
if form == "dataframe":
format = "CSV"
elif form == "dict":
format = "JSON"
else:
raise ValueError("{} is not a valid form".format(form))
assay_url = base_url + "/assay/aid/{}/{}".format(assay_id, format)
data = _get_data(assay_url, attempts)
if format == "CSV":
csv_string = StringIO(data.decode("utf-8"))
return pd.read_csv(csv_string)
elif format == "JSON":
return json.loads(data)
def get_assay_target_info(assay_id: int, attempts: int = 10) -> Dict[str, str]:
""" Get target information of an assay.
Parameters
----------
assay_id : int
The id of the bioassay.
attempts : int, optional
number of times to try to download the data in case of failure
(default=10).
Returns
--------
dict
A dictionary containing information of the assay target.
"""
target_url = base_url + "/assay/aid/{}/targets/ProteinGI,ProteinName,GeneID,GeneSymbol/JSON".format(assay_id)
data = _get_data(target_url, attempts)
return json.loads(data)
def get_assay_bioactivity_data(assay_id):
""" Get bioactivity data and the compounds in an assay.
Parameters
----------
assay_id : int
The id of the bioassay.
Returns
----------
compounds : list of 2-tuples
The first element is the compound PubChem id.
The second element is the smiles of the compound.
bioactivity : np.array of bits
An array where each element corresponds to the index of the compounds list.
An entry is either one if the compound is active or zero if the compound is inactive.
"""
assay_results = get_assay_results(assay_id=assay_id, form="dataframe")
# Keep only cid and activity columns
df = assay_results[["PUBCHEM_CID", "PUBCHEM_ACTIVITY_OUTCOME"]]
df = df.dropna()
# Add activity column of 1 and 0
df["activity"] = df["PUBCHEM_ACTIVITY_OUTCOME"].apply(lambda x: 0 if x == "Inactive" else 1)
# Drop original activity column
df = df.drop("PUBCHEM_ACTIVITY_OUTCOME", axis=1)
df = df.astype("int32")
molecules_ids = df["PUBCHEM_CID"].tolist()
bioactivity = df["activity"].to_numpy()
molecules = []
print("Fetching molecules smiles...")
for mol_id in tqdm(molecules_ids):
smiles = get_compound_smiles(mol_id)
molecules.append((mol_id, smiles))
return molecules, bioactivity
def get_assay_actives_and_inactives(assay_id):
""" Get smiles for compounds in an assay split into active and inactive.
Parameters
----------
assay_id : int
The id of the bioassay.
Returns
----------
actives : 2-tuple
The first element is a list of the active compounds PubChem ids, and
the second element is a list of smiles for the active compounds.
inactives : 2-tuple
The first element is a list of the inactive compounds PubChem ids, and
the second element is a list of smiles for the inactive compounds.
"""
assay_results = get_assay_results(assay_id=assay_id, form="dataframe")
# Keep only cid and activity columns
df = assay_results[["PUBCHEM_CID", "PUBCHEM_ACTIVITY_OUTCOME"]]
df = df.dropna()
# Split into active/inactive
actives = df.loc[df["PUBCHEM_ACTIVITY_OUTCOME"] == "Active"]
inactives = df.loc[df["PUBCHEM_ACTIVITY_OUTCOME"] == "Inactive"]
# Drop activity column
actives = actives.drop("PUBCHEM_ACTIVITY_OUTCOME", axis=1)
inactives = inactives.drop("PUBCHEM_ACTIVITY_OUTCOME", axis=1)
# Cast to int
actives = actives.astype("int32")
inactives = inactives.astype("int32")
actives_list = actives["PUBCHEM_CID"].tolist()
inactives_list = inactives["PUBCHEM_CID"].tolist()
actives_smiles = []
inactives_smiles = []
print("Fetching active compound smiles...")
for compound in tqdm(actives_list):
smiles = get_compound_smiles(compound)
actives_smiles.append(smiles)
print("Fetching inactive compound smiles...")
for compound in tqdm(inactives_list):
smiles = get_compound_smiles(compound)
inactives_smiles.append(smiles)
return (actives_list, actives_smiles), (inactives_list, inactives_smiles)
def get_compound_assay_summary(compound_id, form="dataframe", attempts=10):
""" Get summary of biological test results for a given compound.
Parameters
----------
compound_id : int
The PubChem id of the compound.
form : {"dataframe", "dict"}
The form of the returned object. Can be a dataframe or dict.
attempts : int, optional
Number of times to try to download the data in case of failure
(default=10).
Returns
----------
pandas.DataFrame or dict
A DataFrame or a dictionary with the assay results for the passed compound.
"""
if form == "dataframe":
format = "CSV"
elif form == "dict":
format = "JSON"
else:
raise OpenPharmacophoreValueError("{} is not a valid form".format(form))
compound_url = base_url + "/compound/cid/{}/assaysummary/{}".format(compound_id, format)
data = _get_data(compound_url, attempts)
if format == "CSV":
csv_string = StringIO(data.decode("utf-8"))
return pd.read_csv(csv_string)
elif format == "JSON":
return json.loads(data)
def get_compound_id(name: str, attempts: int = 10) -> int:
""" Get pubchem compound id for a given compound name.
Parameters
----------
name : str
Name of the compound.
attempts : int, optional
Number of times to try to download the data in case of failure
(default=10).
Returns
----------
int
The PubChem compound id (CID) for the given name.
"""
compound_url = base_url + "/compound/name/{}/cids/JSON".format(name)
data = _get_data(compound_url, attempts)
json_data = json.loads(data)
return json_data["IdentifierList"]["CID"][0]
def get_compound_description(compound_identifier, attempts=10):
""" Get description for a given compound.
Parameters
----------
compound_identifier : str or int
The name as str or the PubChem id as int of the compound.
attempts : int, optional
Number of times to try to download the data in case of failure
(default=10).
Returns
--------
dict
A dictionary containing the compound description.
"""
# If a string is passed assume its compound name
if isinstance(compound_identifier, str):
compound_url = base_url + "/compound/name/{}/description/JSON".format(compound_identifier)
# Else use compound id
else:
compound_url = base_url + "/compound/cid/{}/description/JSON".format(compound_identifier)
data = _get_data(compound_url, attempts)
return json.loads(data)
def get_compound_smiles(compound_id: int, attempts: int = 10) -> str:
""" Get smiles for a given compound.
Parameters
----------
compound_id: int
The PubChem id of the compound.
attempts : int, optional
Number of times to try to download the data in case of failure
(default=10).
Returns
-------
smiles : str
The smiles for the passed compound.
"""
smiles_url = base_url + "/compound/cid/{}/property/CanonicalSMILES/TXT".format(compound_id)
data = _get_data(smiles_url, attempts)
smiles = data.decode("utf-8").rstrip()
return smiles
def get_target_assays(identifier: str, identifier_type: str, attempts: int = 10) -> pd.DataFrame:
""" Get assay ids and name for a given target
Parameters
----------
identifier : str
Identifier of the target. Can be GI, Gene ID, or gene symbol.
identifer_type : {"genesymbol", "geneid", "gi"}
The type of the identifier can be genesymbol, geneid or gi.
Returns
--------
pandas.DataFrame
DataFrame with the assay ids and names for the given target.
"""
identifier_type = identifier_type.lower()
valid_identifiers = ["genesymbol", "geneid", "gi"]
if identifier_type not in valid_identifiers:
raise OpenPharmacophoreValueError("{} is not a valid identifier type")
target_url = base_url + "/assay/target/{}/{}/description/JSON".format(identifier_type, identifier)
data = _get_data(target_url, attempts)
assays_dict = json.loads(data)
ids = []
names = []
# Retrieve only id and name
for i in range(len(assays_dict["PC_AssayContainer"])):
assay = assays_dict['PC_AssayContainer'][i]['assay']['descr']
id = int(assay["aid"]["id"])
name = assay["name"]
ids.append(id)
names.append(name)
assays = {
"id": ids,
"name": names,
}
return pd.DataFrame.from_dict(assays)
def similarity_search(compound, threshold=None, max_records=None, attempts=5):
""" Perform a 2D similarity search for a given compound.
Parameters
----------
compound : str or int
Can be a smiles str or the PubChem id of the compound.
threshold : int, optional
Minimum Tanimoto score for a hit.
max_records : int, optional
Maximum number of hits.
attempts : int, optional
Number of times to try to download the data in case of failure
(default=10).
Returns
--------
list
A List with the id's of the hits.
"""
# If compound is passed as str assume is smiles
if isinstance(compound, str):
url = base_url + "/compound/similarity/smiles/{}/JSON".format(compound)
# Else use compound id
else:
url = base_url + "/compound/similarity/cid/{}/JSON".format(compound)
if threshold and max_records:
url += "?Threshold={}&MaxRecords={}".format(threshold, max_records)
elif threshold:
url += "?Threshold={}".format(threshold)
elif max_records:
url += "?MaxRecords={}".format(max_records)
data = _get_data(url, attempts)
# Data returns a listkey that can be used to retrieve the results from another url
content = json.loads(data)
listkey = content["Waiting"]["ListKey"]
results_url = base_url + "/compound/listkey/{}/cids/JSON".format(listkey)
# Wait a little as similarity searches take more time to complete
time.sleep(5)
data = _get_data(results_url, attempts)
data_dict = json.loads(data)
return data_dict["IdentifierList"]["CID"]
|
the-stack_106_24243 | # coding: utf-8
"""
Accounting Extension
These APIs allow you to interact with HubSpot's Accounting Extension. It allows you to: * Specify the URLs that HubSpot will use when making webhook requests to your external accounting system. * Respond to webhook calls made to your external accounting system by HubSpot # noqa: E501
The version of the OpenAPI document: v3
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from hubspot.crm.extensions.accounting.configuration import Configuration
class InvoiceUpdateRequest(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'external_invoice_number': 'str',
'currency_code': 'str',
'due_date': 'date',
'external_recipient_id': 'str',
'received_by_recipient_date': 'int',
'is_voided': 'bool',
'received_by_customer_date': 'str',
'invoice_number': 'str'
}
attribute_map = {
'external_invoice_number': 'externalInvoiceNumber',
'currency_code': 'currencyCode',
'due_date': 'dueDate',
'external_recipient_id': 'externalRecipientId',
'received_by_recipient_date': 'receivedByRecipientDate',
'is_voided': 'isVoided',
'received_by_customer_date': 'receivedByCustomerDate',
'invoice_number': 'invoiceNumber'
}
def __init__(self, external_invoice_number=None, currency_code=None, due_date=None, external_recipient_id=None, received_by_recipient_date=None, is_voided=None, received_by_customer_date=None, invoice_number=None, local_vars_configuration=None): # noqa: E501
"""InvoiceUpdateRequest - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._external_invoice_number = None
self._currency_code = None
self._due_date = None
self._external_recipient_id = None
self._received_by_recipient_date = None
self._is_voided = None
self._received_by_customer_date = None
self._invoice_number = None
self.discriminator = None
if external_invoice_number is not None:
self.external_invoice_number = external_invoice_number
if currency_code is not None:
self.currency_code = currency_code
if due_date is not None:
self.due_date = due_date
if external_recipient_id is not None:
self.external_recipient_id = external_recipient_id
if received_by_recipient_date is not None:
self.received_by_recipient_date = received_by_recipient_date
if is_voided is not None:
self.is_voided = is_voided
if received_by_customer_date is not None:
self.received_by_customer_date = received_by_customer_date
if invoice_number is not None:
self.invoice_number = invoice_number
@property
def external_invoice_number(self):
"""Gets the external_invoice_number of this InvoiceUpdateRequest. # noqa: E501
:return: The external_invoice_number of this InvoiceUpdateRequest. # noqa: E501
:rtype: str
"""
return self._external_invoice_number
@external_invoice_number.setter
def external_invoice_number(self, external_invoice_number):
"""Sets the external_invoice_number of this InvoiceUpdateRequest.
:param external_invoice_number: The external_invoice_number of this InvoiceUpdateRequest. # noqa: E501
:type: str
"""
self._external_invoice_number = external_invoice_number
@property
def currency_code(self):
"""Gets the currency_code of this InvoiceUpdateRequest. # noqa: E501
The ISO 4217 currency code that represents the currency used in the invoice to bill the recipient # noqa: E501
:return: The currency_code of this InvoiceUpdateRequest. # noqa: E501
:rtype: str
"""
return self._currency_code
@currency_code.setter
def currency_code(self, currency_code):
"""Sets the currency_code of this InvoiceUpdateRequest.
The ISO 4217 currency code that represents the currency used in the invoice to bill the recipient # noqa: E501
:param currency_code: The currency_code of this InvoiceUpdateRequest. # noqa: E501
:type: str
"""
self._currency_code = currency_code
@property
def due_date(self):
"""Gets the due_date of this InvoiceUpdateRequest. # noqa: E501
The ISO-8601 due date of the invoice. # noqa: E501
:return: The due_date of this InvoiceUpdateRequest. # noqa: E501
:rtype: date
"""
return self._due_date
@due_date.setter
def due_date(self, due_date):
"""Sets the due_date of this InvoiceUpdateRequest.
The ISO-8601 due date of the invoice. # noqa: E501
:param due_date: The due_date of this InvoiceUpdateRequest. # noqa: E501
:type: date
"""
self._due_date = due_date
@property
def external_recipient_id(self):
"""Gets the external_recipient_id of this InvoiceUpdateRequest. # noqa: E501
The ID of the invoice recipient. This is the recipient ID from the external accounting system. # noqa: E501
:return: The external_recipient_id of this InvoiceUpdateRequest. # noqa: E501
:rtype: str
"""
return self._external_recipient_id
@external_recipient_id.setter
def external_recipient_id(self, external_recipient_id):
"""Sets the external_recipient_id of this InvoiceUpdateRequest.
The ID of the invoice recipient. This is the recipient ID from the external accounting system. # noqa: E501
:param external_recipient_id: The external_recipient_id of this InvoiceUpdateRequest. # noqa: E501
:type: str
"""
self._external_recipient_id = external_recipient_id
@property
def received_by_recipient_date(self):
"""Gets the received_by_recipient_date of this InvoiceUpdateRequest. # noqa: E501
:return: The received_by_recipient_date of this InvoiceUpdateRequest. # noqa: E501
:rtype: int
"""
return self._received_by_recipient_date
@received_by_recipient_date.setter
def received_by_recipient_date(self, received_by_recipient_date):
"""Sets the received_by_recipient_date of this InvoiceUpdateRequest.
:param received_by_recipient_date: The received_by_recipient_date of this InvoiceUpdateRequest. # noqa: E501
:type: int
"""
self._received_by_recipient_date = received_by_recipient_date
@property
def is_voided(self):
"""Gets the is_voided of this InvoiceUpdateRequest. # noqa: E501
States if the invoice is voided or not. # noqa: E501
:return: The is_voided of this InvoiceUpdateRequest. # noqa: E501
:rtype: bool
"""
return self._is_voided
@is_voided.setter
def is_voided(self, is_voided):
"""Sets the is_voided of this InvoiceUpdateRequest.
States if the invoice is voided or not. # noqa: E501
:param is_voided: The is_voided of this InvoiceUpdateRequest. # noqa: E501
:type: bool
"""
self._is_voided = is_voided
@property
def received_by_customer_date(self):
"""Gets the received_by_customer_date of this InvoiceUpdateRequest. # noqa: E501
The ISO-8601 datetime of when the customer received the invoice. # noqa: E501
:return: The received_by_customer_date of this InvoiceUpdateRequest. # noqa: E501
:rtype: str
"""
return self._received_by_customer_date
@received_by_customer_date.setter
def received_by_customer_date(self, received_by_customer_date):
"""Sets the received_by_customer_date of this InvoiceUpdateRequest.
The ISO-8601 datetime of when the customer received the invoice. # noqa: E501
:param received_by_customer_date: The received_by_customer_date of this InvoiceUpdateRequest. # noqa: E501
:type: str
"""
self._received_by_customer_date = received_by_customer_date
@property
def invoice_number(self):
"""Gets the invoice_number of this InvoiceUpdateRequest. # noqa: E501
The number / name of the invoice. # noqa: E501
:return: The invoice_number of this InvoiceUpdateRequest. # noqa: E501
:rtype: str
"""
return self._invoice_number
@invoice_number.setter
def invoice_number(self, invoice_number):
"""Sets the invoice_number of this InvoiceUpdateRequest.
The number / name of the invoice. # noqa: E501
:param invoice_number: The invoice_number of this InvoiceUpdateRequest. # noqa: E501
:type: str
"""
self._invoice_number = invoice_number
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, InvoiceUpdateRequest):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, InvoiceUpdateRequest):
return True
return self.to_dict() != other.to_dict()
|
the-stack_106_24245 | # -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making
蓝鲸智云PaaS平台社区版 (BlueKing PaaSCommunity Edition) available.
Copyright (C) 2017-2018 THL A29 Limited,
a Tencent company. All rights reserved.
Licensed under the MIT License (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from django.contrib import admin
from module_biz.models import ChatBindBusiness
class ChatBindBusinessAdmin(admin.ModelAdmin):
list_display = ("chat_group_id", "chat_bot_type", "biz_id", "biz_name")
list_filter = ("chat_group_id", "chat_bot_type", "biz_id", "biz_name")
search_fields = ("chat_group_id", "chat_bot_type", "biz_id", "biz_name")
admin.site.register(ChatBindBusiness, ChatBindBusinessAdmin)
|
the-stack_106_24246 | import random
import time
import math
from copy import deepcopy
import gamePlay
from getAllPossibleMoves import getAllPossibleMoves
ALL_PLAYERS = {}
WIN_PRIZE = 1
LOSS_PRIZE = 0
TURNS_COUNT = 0
C = math.sqrt(2)
def nextMove(board, color, sim_time, max_turns, max_sim, percent_wrong,
moves_remaining, verbose):
global ALL_PLAYERS
reset(moves_remaining)
if color in ALL_PLAYERS:
return ALL_PLAYERS[color].next_move(board, moves_remaining)
else:
ALL_PLAYERS[color] = MonteCarlo(color, sim_time, max_turns, max_sim,
percent_wrong, verbose)
return ALL_PLAYERS[color].next_move(board, moves_remaining)
def reset(moves_remaining):
global ALL_PLAYERS
global TURNS_COUNT
if moves_remaining > TURNS_COUNT:
ALL_PLAYERS = {}
TURNS_COUNT = moves_remaining
class MonteCarlo:
def __init__(self, color, sim_time, max_turns, max_sim, percent_wrong,
verbose):
self.color = color
self.op_color = gamePlay.getOpponentColor(self.color)
self.sim_time = sim_time
self.max_turns = max_turns
self.max_sim = max_sim
self.percent_wrong = percent_wrong
self.state_node = {}
self.verbose = verbose
def reset(self):
self.state_node = {}
def next_move(self, board, moves_remaining):
moves = getAllPossibleMoves(board, self.color)
chosen_move = None
if random.randint(1, 100) <= self.percent_wrong:
chosen_move = random.choice(moves)
else:
chosen_move = self.monte_carlo_search(board, len(moves),
moves_remaining)
if self.verbose:
print("Chosen Move: {} to {}".format(chosen_move[0],
chosen_move[1]))
return chosen_move
def monte_carlo_search(self, board, len_moves, moves_remaining):
results = {}
root = self.get_root(board, len_moves, moves_remaining)
sim_count = 0
now = time.time()
while (time.time() - now) < self.sim_time and (root.moves_unfinished
> 0 and sim_count < self.max_sim):
picked_node = self.tree_policy(root)
result = self.simulate(picked_node)
self.back_prop(picked_node, result)
sim_count += 1
if self.verbose:
self.print_monte_carlo_results(root, results, sim_count)
return self.best_action(root)
def get_root(self, board, len_moves, moves_remaining):
root = None
board_string = makeBoardString(board)
if board_string in self.state_node:
root = self.state_node[board_string]
else:
amnt_children = len_moves
if amnt_children == 0 and self.isAWinner(board) is False:
amnt_children = 1
root = Node(board, None, amnt_children, moves_remaining, self.color)
root.parent = None
return root
def print_monte_carlo_results(self, root, results, sim_count):
for child in root.children:
wins, plays = child.get_wins_plays()
position = convertMoveToString(child.move)
results[position] = (wins, plays)
for position in sorted(results, key=lambda x: results[x][1]):
print('Monte Carlo {}: {}: ({}/{})'.format(self.color,
position,
results[position][0],
results[position][1]))
print('Monte Carlo {}: {} simulations performed.'.format(self.color,
sim_count))
def best_action(self, node):
most_plays = -float('inf')
best_wins = -float('inf')
best_actions = []
for child in node.children:
wins, plays = child.get_wins_plays()
wins += 1
plays += 2
if wins > best_wins:
most_plays = plays
best_actions = [child.move]
best_wins = wins
elif wins == best_wins:
if plays > most_plays:
most_plays = plays
best_actions = [child.move]
elif plays == most_plays:
best_actions.append(child.move)
return random.choice(best_actions)
def back_prop(self, node, delta):
while node.parent is not None:
node.plays += 1
node.wins += delta
node = node.parent
node.plays += 1
node.wins += delta
def tree_policy(self, root):
cur_node = root
while root.moves_unfinished > 0:
legal_moves = getAllPossibleMoves(cur_node.board, self.color)
if not legal_moves:
break
elif len(cur_node.children) < len(legal_moves):
unexpanded = [
move for move in legal_moves
if move not in cur_node.moves_expanded
]
assert len(unexpanded) > 0
move = random.choice(unexpanded)
future_state = deepcopy(cur_node.board)
gamePlay.doMove(future_state, move)
child = Node(future_state, move, len(legal_moves),
cur_node.turn - 1,
gamePlay.getOpponentColor(cur_node.color))
cur_node.add_child(child)
board_string = makeBoardString(future_state)
self.state_node[board_string] = child
return child
else:
# Every possible next state has been expanded, so pick one
cur_node = self.best_child(cur_node)
return cur_node
def best_child(self, node):
enemy_turn = (node.color != self.color)
values = {}
for child in node.children:
wins, plays = child.get_wins_plays()
if enemy_turn:
# the enemy will play against us, not for us
wins = plays - wins
_, parent_plays = node.get_wins_plays()
assert parent_plays > 0
values[child] = (wins / plays) \
+ C * math.sqrt(2 * math.log(parent_plays) / plays)
best_choice = max(values, key=values.get)
return best_choice
def simulate(self, picked_node):
board_copy = deepcopy(picked_node.board)
turns = picked_node.turn
cur_color = picked_node.color
op_color = gamePlay.getOpponentColor(cur_color)
while turns > 0:
moves = getAllPossibleMoves(board_copy, cur_color)
if not moves:
winner = self.isAWinner(board_copy)
if winner == self.color:
return WIN_PRIZE
elif winner == self.op_color:
return LOSS_PRIZE
else:
if cur_color == self.color:
return LOSS_PRIZE
else:
return WIN_PRIZE
else:
random_move = random.choice(moves)
gamePlay.doMove(board_copy, random_move)
cur_color, op_color = op_color, cur_color
turns -= 1
return LOSS_PRIZE
def isAWinner(self, board):
my_pieces = 0
opponent_pieces = 0
for row in board:
for item in row:
if item.upper() == self.color.upper():
my_pieces += 1
elif item.upper() == self.op_color.upper():
opponent_pieces += 1
elif item != ' ':
pass
if my_pieces == 0:
return self.op_color
elif opponent_pieces == 0:
return self.color
else:
return False
def convertMoveToString(move):
move_string = ""
for item in move:
move_string += "{}".format(item).zfill(2)
return move_string
def makeBoardString(board):
board_string = ''
for row in board:
for item in row:
board_string += item
return board_string
def numberOfRemainingPieces(board, color):
piece_count = 0
for row in board:
for item in row:
if item.upper() == color.upper():
piece_count += 1
return piece_count
class Node:
def __init__(self, board, move, amount_children, turn, color):
self.board = board
self.plays = 0
self.wins = 0
self.children = []
self.parent = None
self.moves_expanded = list()
self.moves_unfinished = amount_children
self.move = move
self.turn = turn
self.color = color
def propagate_completion(self):
if self.parent is None:
return
if self.moves_unfinished > 0:
self.moves_unfinished -= 1
self.parent.propagate_completion()
def add_child(self, node):
self.children.append(node)
self.moves_expanded.append(node.move)
node.parent = self
def has_children(self):
return len(self.children) > 0
def get_wins_plays(self):
return self.wins, self.plays
def __hash__(self):
return hash(makeBoardString(self.board))
def __eq__(self, other):
if not isinstance(other, Node):
return False
return self.board == other.board |
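# Illustrative call (not executed here): `board` is the 2-D list-of-characters
# board that gamePlay/getAllPossibleMoves operate on, and `color` is this
# player's piece letter; the numeric arguments below are example tuning values.
#
#   move = nextMove(board, color, sim_time=1.0, max_turns=40, max_sim=500,
#                   percent_wrong=0, moves_remaining=80, verbose=True)
#   gamePlay.doMove(board, move)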
the-stack_106_24247 | from core.advbase import *
from slot.a import *
galex_conf = {
'x1.dmg': 82 / 100.0,
'x1.startup': 8 / 60.0,
'x1.recovery': 34 / 60.0,
'x1.hit': 1,
'x2.dmg': 88 / 100.0,
'x2.startup': 0,
'x2.recovery': 28 / 60.0,
'x2.hit': 1,
'x3.dmg': 104 / 100.0,
'x3.startup': 0,
'x3.recovery': 19 / 60.0,
'x3.hit': 1,
'x4.dmg': 110 / 100.0,
'x4.startup': 0,
'x4.recovery': 33 / 60.0,
'x4.hit': 1,
'x5.dmg': 165 / 100.0,
'x5.startup': 0,
'x5.recovery': 49 / 60.0,
'x5.hit': 1,
}
def module():
return Gala_Alex
class Skill_Reservoir(Skill):
def __init__(self, name=None, conf1=None, conf2=None):
super().__init__(name, conf1)
self.conf_tpl = (conf1, conf2)
self.ac_tpl = (
S('s1', conf1),
S('s2', conf2)
)
self.chain_timer = Timer(self.chain_off)
self.chain_status = 0
def chain_on(self, skill, timeout=3):
self.chain_status = skill
self.chain_timer.on(timeout)
def chain_off(self, t=None):
self.chain_status = 0
def charge(self, sp):
self.charged = min(self.sp*3, self.charged + sp)
if self.charged >= self.sp*3:
self.skill_charged()
@property
def count(self):
return self.charged // self.sp
def __call__(self, call=0):
self.conf = self.conf_tpl[call]
self.ac = self.ac_tpl[call]
casted = self.cast()
if casted and self.count == 0 and self.chain_timer.online:
self.chain_timer.off()
self.chain_status = 0
return casted
class Gala_Alex(Adv):
comment = 'no bk bonus in sim; s2 c4fs [s1 c4fs]*5 & use s1/s2 only when charge>=2'
a3 = ('k_poison', 0.30)
conf = galex_conf.copy()
conf['slots.a'] = The_Shining_Overlord()+The_Fires_of_Hate()
conf['acl'] = """
`dragon.act("c3 s end")
`s3, not self.s3_buff
if fsc
`s2, not self.afflics.poison.get()
`s2, self.sr.chain_status=1 and self.s1_debuff.buff_end_timer.timing-now()<5
`s1, not self.s1_debuff.get() or self.sr.count > 1
end
`fs, x=4
"""
coab = ['Blade','Wand','Delphi']
conf['afflict_res.poison'] = 0
def d_coabs(self):
if self.duration <= 120:
self.coab = ['Blade','Wand','Heinwald']
def prerun(self):
self.s1_debuff = Debuff('s1', 0.05, 15)
self.a1_k = Modifier('a1', 'killer', 'passive', 0.30)
self.a1_k.get = self.a1_get
self.a1_k.on()
self.sr = Skill_Reservoir('s1', self.conf.s1, self.conf.s2)
# cursed
self.s1.cast = lambda: self.sr(0)
self.s2.cast = lambda: self.sr(1)
def charge_p(self, name, percent):
percent = percent / 100 if percent > 1 else percent
self.sr.charge(self.sp_convert(percent, self.conf.sr.sp))
self.s3.charge(self.sp_convert(percent, self.conf.s3.sp))
log('sp', name, f'{percent*100:.0f}%', f'{self.sr.charged}/{self.sr.sp}, {self.s3.charged}/{self.s3.sp}')
self.think_pin('prep')
def charge(self, name, sp):
# sp should be integer
sp = self.sp_convert(self.sp_mod(name), sp)
self.sr.charge(sp)
self.s3.charge(sp)
self.think_pin('sp')
log('sp', name, sp, f'{self.sr.charged}/{self.sr.sp} ({self.sr.count}), {self.s3.charged}/{self.s3.sp}')
def a1_get(self):
return (self.mod('def') != 1) * 0.30
def s1_proc(self, e):
if self.sr.chain_status == 1:
k = 1 + (self.mod('def') != 1) * 0.1
self.dmg_make('s1', 2.02*3*k)
self.dmg_make('s1', 4.85*k)
self.hits += 4
else:
self.dmg_make('s1', 2.02)
self.s1_debuff.on()
self.dmg_make('s1', 2.02*2)
self.hits += 3
# elif self.sr.chain_status == 2:
# k = 1 + (self.mod('def') != 1) * 0.1
# self.dmg_make('s1', 2.02*3)
# self.dmg_make('s1', 4.85*k)
# self.hits += 4
# break only
# 352.5 * 3 + 987
self.sr.chain_on(1)
def s2_proc(self, e):
if self.sr.chain_status == 2:
with Modifier('s2_killer', 'poison_killer', 'hit', 0.1):
self.dmg_make('s2', 5.53)
self.dmg_make('s2', 4.42)
self.hits += 2
else:
self.dmg_make('s2', 5.53)
self.afflics.poison('s2', 120, 0.582)
self.hits += 1
# elif self.sr.chain_status == 2:
# self.dmg_make('s2', 5.53)
# with Modifier('s2_killer', 'poison_killer', 'hit', 0.1):
# self.dmg_make('s2', 4.42)
# break only
# 972 * 2
self.sr.chain_on(2)
if __name__ == '__main__':
from core.simulate import test_with_argv
test_with_argv(None, *sys.argv) |
the-stack_106_24250 | #! /usr/bin/env python
# coding=utf-8
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import logging
import numpy as np
from typing import Dict
import torch
from ludwig.constants import *
from ludwig.decoders.generic_decoders import Regressor
from ludwig.encoders.binary_encoders import ENCODER_REGISTRY
from ludwig.features.base_feature import InputFeature, OutputFeature
from ludwig.modules.loss_modules import BWCEWLoss
from ludwig.modules.metric_modules import Accuracy, BWCEWLMetric, ROCAUCMetric
from ludwig.utils.eval_utils import ConfusionMatrix, average_precision_score,\
precision_recall_curve, roc_auc_score, roc_curve
from ludwig.utils.misc_utils import set_default_value, set_default_values
from ludwig.utils import strings_utils
from ludwig.utils import output_feature_utils
logger = logging.getLogger(__name__)
class BinaryFeatureMixin:
type = BINARY
preprocessing_defaults = {
"missing_value_strategy": FILL_WITH_CONST,
"fill_value": 0,
}
fill_value_schema = {
"anyOf": [
{"type": "integer", "minimum": 0, "maximum": 1},
{"type": "string", "enum": strings_utils.all_bool_strs()},
]
}
preprocessing_schema = {
"missing_value_strategy": {
"type": "string",
"enum": MISSING_VALUE_STRATEGY_OPTIONS,
},
"fill_value": fill_value_schema,
"computed_fill_value": fill_value_schema,
"fallback_true_label": {'type': 'string'},
}
@staticmethod
def cast_column(column, backend):
# todo maybe move code from add_feature_data here
# + figure out what NaN is in a bool column
return column
@staticmethod
def get_feature_meta(column, preprocessing_parameters, backend):
if column.dtype != object:
return {}
distinct_values = backend.df_engine.compute(column.drop_duplicates())
if len(distinct_values) > 2:
raise ValueError(
f"Binary feature column {column.name} expects 2 distinct values, "
f"found: {distinct_values.values.tolist()}"
)
if 'fallback_true_label' in preprocessing_parameters:
fallback_true_label = preprocessing_parameters['fallback_true_label']
else:
fallback_true_label = sorted(distinct_values)[0]
logger.warning(
f"In case binary feature {column.name} doesn't have conventional boolean values, "
f"we will interpret {fallback_true_label} as 1 and the other values as 0. "
f"If this is incorrect, please use the category feature type or "
f"manually specify the true value with `preprocessing.fallback_true_label`.")
str2bool = {v: strings_utils.str2bool(
v, fallback_true_label) for v in distinct_values}
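        # sort by the mapped boolean so that bool2str[0] is the label for False and bool2str[1] the label for True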
bool2str = [
k for k, v in sorted(str2bool.items(), key=lambda item: item[1])
]
return {
"str2bool": str2bool,
"bool2str": bool2str,
"fallback_true_label": fallback_true_label
}
@staticmethod
def add_feature_data(
feature,
input_df,
proc_df,
metadata,
preprocessing_parameters,
backend,
skip_save_processed_input,
):
column = input_df[feature[COLUMN]]
if column.dtype == object:
metadata = metadata[feature[NAME]]
if "str2bool" in metadata:
column = column.map(lambda x: metadata["str2bool"][x])
else:
# No predefined mapping from string to bool, so compute it directly
column = column.map(strings_utils.str2bool)
proc_df[feature[PROC_COLUMN]] = column.astype(np.bool_).values
return proc_df
class BinaryInputFeature(BinaryFeatureMixin, InputFeature):
encoder = "passthrough"
norm = None
dropout = False
def __init__(self, feature, encoder_obj=None):
super().__init__(feature)
self.overwrite_defaults(feature)
if encoder_obj:
self.encoder_obj = encoder_obj
else:
self.encoder_obj = self.initialize_encoder(feature)
def forward(self, inputs):
assert isinstance(inputs, torch.Tensor)
assert inputs.dtype in [torch.bool, torch.int64, torch.float32]
assert len(inputs.shape) == 1 or (
len(inputs.shape) == 2 and inputs.shape[1] == 1)
if len(inputs.shape) == 1:
inputs = inputs[:, None]
encoder_outputs = self.encoder_obj(inputs)
return {'encoder_output': encoder_outputs}
@property
def input_dtype(self):
return torch.bool
@property
def input_shape(self) -> torch.Size:
return torch.Size([1])
@property
def output_shape(self) -> torch.Size:
return self.encoder_obj.output_shape
@staticmethod
def update_config_with_metadata(
input_feature, feature_metadata, *args, **kwargs
):
pass
@staticmethod
def populate_defaults(input_feature):
set_default_value(input_feature, TIED, None)
def create_sample_input(self):
return torch.Tensor([True, False])
encoder_registry = ENCODER_REGISTRY
class BinaryOutputFeature(BinaryFeatureMixin, OutputFeature):
decoder = "regressor"
loss = {TYPE: SOFTMAX_CROSS_ENTROPY}
metric_functions = {LOSS: None, ACCURACY: None}
default_validation_metric = ACCURACY
threshold = 0.5
def __init__(self, feature):
super().__init__(feature)
self.overwrite_defaults(feature)
self.decoder_obj = self.initialize_decoder(feature)
self._setup_loss()
self._setup_metrics()
def logits(self, inputs, **kwargs):
hidden = inputs[HIDDEN]
return self.decoder_obj(hidden)
def predictions(self, inputs: Dict[str, torch.Tensor], feature_name: str, **kwargs):
logits = output_feature_utils.get_output_feature_tensor(
inputs, feature_name, LOGITS)
probabilities = torch.sigmoid(logits)
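        # apply the decision threshold (0.5 by default) to turn probabilities into hard predictions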
predictions = probabilities >= self.threshold
return {
PROBABILITIES: probabilities,
PREDICTIONS: predictions,
LOGITS: logits,
}
def _setup_loss(self):
self.train_loss_function = BWCEWLoss(
positive_class_weight=self.loss["positive_class_weight"],
robust_lambda=self.loss["robust_lambda"],
confidence_penalty=self.loss["confidence_penalty"],
)
self.eval_loss_function = self.train_loss_function
def _setup_metrics(self):
self.metric_functions = {} # needed to shadow class variable
self.metric_functions[LOSS] = BWCEWLMetric(
positive_class_weight=self.loss["positive_class_weight"],
robust_lambda=self.loss["robust_lambda"],
confidence_penalty=self.loss["confidence_penalty"],
)
self.metric_functions[ACCURACY] = Accuracy()
self.metric_functions[ROC_AUC] = ROCAUCMetric()
def get_prediction_set(self):
return {PREDICTIONS, PROBABILITIES, LOGITS}
@classmethod
def get_output_dtype(cls):
return torch.bool
@property
def output_shape(self) -> torch.Size:
return torch.Size([1])
@property
def input_shape(self) -> torch.Size:
return torch.Size([1])
@staticmethod
def update_config_with_metadata(
input_feature, feature_metadata, *args, **kwargs
):
pass
@staticmethod
def calculate_overall_stats(predictions, targets, train_set_metadata):
overall_stats = {}
confusion_matrix = ConfusionMatrix(
targets, predictions[PREDICTIONS], labels=["False", "True"]
)
overall_stats["confusion_matrix"] = confusion_matrix.cm.tolist()
overall_stats["overall_stats"] = confusion_matrix.stats()
overall_stats["per_class_stats"] = confusion_matrix.per_class_stats()
fpr, tpr, thresholds = roc_curve(targets, predictions[PROBABILITIES])
overall_stats["roc_curve"] = {
"false_positive_rate": fpr.tolist(),
"true_positive_rate": tpr.tolist(),
}
overall_stats["roc_auc_macro"] = roc_auc_score(
targets, predictions[PROBABILITIES], average="macro"
)
overall_stats["roc_auc_micro"] = roc_auc_score(
targets, predictions[PROBABILITIES], average="micro"
)
ps, rs, thresholds = precision_recall_curve(
targets, predictions[PROBABILITIES]
)
overall_stats["precision_recall_curve"] = {
"precisions": ps.tolist(),
"recalls": rs.tolist(),
}
overall_stats["average_precision_macro"] = average_precision_score(
targets, predictions[PROBABILITIES], average="macro"
)
overall_stats["average_precision_micro"] = average_precision_score(
targets, predictions[PROBABILITIES], average="micro"
)
overall_stats["average_precision_samples"] = average_precision_score(
targets, predictions[PROBABILITIES], average="samples"
)
return overall_stats
def postprocess_predictions(
self,
result,
metadata,
output_directory,
backend,
):
class_names = ["False", "True"]
if "bool2str" in metadata:
class_names = metadata["bool2str"]
predictions_col = f"{self.feature_name}_{PREDICTIONS}"
if predictions_col in result:
if "bool2str" in metadata:
result[predictions_col] = backend.df_engine.map_objects(
result[predictions_col],
lambda pred: metadata["bool2str"][pred],
)
probabilities_col = f"{self.feature_name}_{PROBABILITIES}"
if probabilities_col in result:
false_col = f"{probabilities_col}_{class_names[0]}"
result[false_col] = backend.df_engine.map_objects(
result[probabilities_col], lambda prob: 1 - prob
)
true_col = f"{probabilities_col}_{class_names[1]}"
result[true_col] = result[probabilities_col]
prob_col = f"{self.feature_name}_{PROBABILITY}"
result[prob_col] = result[[false_col, true_col]].max(axis=1)
result[probabilities_col] = backend.df_engine.map_objects(
result[probabilities_col], lambda prob: [1 - prob, prob]
)
return result
@staticmethod
def populate_defaults(output_feature):
# If Loss is not defined, set an empty dictionary
set_default_value(output_feature, LOSS, {})
set_default_values(
output_feature[LOSS],
{
"robust_lambda": 0,
"confidence_penalty": 0,
"positive_class_weight": None,
"weight": 1,
},
)
set_default_value(output_feature[LOSS], "robust_lambda", 0)
set_default_value(output_feature[LOSS], "confidence_penalty", 0)
set_default_value(output_feature[LOSS], "positive_class_weight", None)
set_default_value(output_feature[LOSS], "weight", 1)
set_default_values(
output_feature,
{
"threshold": 0.5,
"dependencies": [],
"reduce_input": SUM,
"reduce_dependencies": SUM,
},
)
decoder_registry = {
"regressor": Regressor,
"null": Regressor,
"none": Regressor,
"None": Regressor,
None: Regressor,
}
|
the-stack_106_24252 | # -*- coding: utf-8 -*-
# Update by : https://github.com/tenyue/ServerStatus
# Supported Python versions: 2.6 to 3.5
# Supported operating systems: Linux, OSX, FreeBSD, OpenBSD and NetBSD, both 32-bit and 64-bit architectures
SERVER = "127.0.0.1"
PORT = 35601
USER = "USER"
PASSWORD = "USER_PASSWORD"
INTERVAL = 1  # update interval
import socket
import time
import string
import math
import re
import os
import json
import subprocess
import collections
import platform
def get_uptime():
f = open('/proc/uptime', 'r')
uptime = f.readline()
f.close()
uptime = uptime.split('.', 2)
time = int(uptime[0])
return int(time)
def get_memory():
re_parser = re.compile(r'^(?P<key>\S*):\s*(?P<value>\d*)\s*kB')
result = dict()
for line in open('/proc/meminfo'):
match = re_parser.match(line)
if not match:
            continue
key, value = match.groups(['key', 'value'])
result[key] = int(value)
MemTotal = float(result['MemTotal'])
MemFree = float(result['MemFree'])
Cached = float(result['Cached'])
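    # count the page cache as reclaimable: used = total - (cached + free); note that 'Buffers' is not subtracted here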
MemUsed = MemTotal - (Cached + MemFree)
SwapTotal = float(result['SwapTotal'])
SwapFree = float(result['SwapFree'])
return int(MemTotal), int(MemUsed), int(SwapTotal), int(SwapFree)
def get_hdd():
p = subprocess.check_output(['df', '-Tlm', '--total', '-t', 'ext4', '-t', 'ext3', '-t', 'ext2', '-t', 'reiserfs', '-t', 'jfs', '-t', 'ntfs', '-t', 'fat32', '-t', 'btrfs', '-t', 'fuseblk', '-t', 'zfs', '-t', 'simfs', '-t', 'xfs']).decode("Utf-8")
total = p.splitlines()[-1]
used = total.split()[3]
size = total.split()[2]
return int(size), int(used)
def get_load():
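    # despite the name, this reports the number of unique established TCP client IPs (via the netstat pipelines below), not the CPU load average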
system = platform.linux_distribution()
if system[0][:6] == "CentOS":
if system[1][0] == "6":
tmp_load = os.popen("netstat -anp |grep ESTABLISHED |grep tcp |grep '::ffff:' |awk '{print $5}' |awk -F ':' '{print $4}' |sort -u |grep -E -o '([0-9]{1,3}[\.]){3}[0-9]{1,3}' |wc -l").read()
else:
tmp_load = os.popen("netstat -anp |grep ESTABLISHED |grep tcp6 |awk '{print $5}' |awk -F ':' '{print $1}' |sort -u |grep -E -o '([0-9]{1,3}[\.]){3}[0-9]{1,3}' |wc -l").read()
else:
tmp_load = os.popen("netstat -anp |grep ESTABLISHED |grep tcp6 |awk '{print $5}' |awk -F ':' '{print $1}' |sort -u |grep -E -o '([0-9]{1,3}[\.]){3}[0-9]{1,3}' |wc -l").read()
return float(tmp_load)
#return os.getloadavg()[0]
def get_time():
    stat_file = open("/proc/stat", "r")
time_list = stat_file.readline().split(' ')[2:6]
stat_file.close()
for i in range(len(time_list)) :
time_list[i] = int(time_list[i])
return time_list
def delta_time():
x = get_time()
time.sleep(INTERVAL)
y = get_time()
for i in range(len(x)):
y[i]-=x[i]
return y
def get_cpu():
t = delta_time()
st = sum(t)
if st == 0:
st = 1
result = 100-(t[len(t)-1]*100.00/st)
return round(result)
class Traffic:
def __init__(self):
self.rx = collections.deque(maxlen=10)
self.tx = collections.deque(maxlen=10)
def get(self):
f = open('/proc/net/dev', 'r')
net_dev = f.readlines()
f.close()
avgrx = 0; avgtx = 0
for dev in net_dev[2:]:
dev = dev.split(':')
if dev[0].strip() == "lo" or dev[0].find("tun") > -1:
continue
dev = dev[1].split()
avgrx += int(dev[0])
avgtx += int(dev[8])
self.rx.append(avgrx)
self.tx.append(avgtx)
avgrx = 0; avgtx = 0
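        # smooth the rx/tx byte rate by averaging the deltas across the sliding window (the deques keep up to 10 samples)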
l = len(self.rx)
for x in range(l - 1):
avgrx += self.rx[x+1] - self.rx[x]
avgtx += self.tx[x+1] - self.tx[x]
avgrx = int(avgrx / l / INTERVAL)
avgtx = int(avgtx / l / INTERVAL)
return avgrx, avgtx
def liuliang():
NET_IN = 0
NET_OUT = 0
with open('/proc/net/dev') as f:
for line in f.readlines():
netinfo = re.findall('([^\s]+):[\s]{0,}(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)', line)
if netinfo:
if netinfo[0][0] == 'lo' or 'tun' in netinfo[0][0] or netinfo[0][1]=='0' or netinfo[0][9]=='0':
continue
else:
NET_IN += int(netinfo[0][1])
NET_OUT += int(netinfo[0][9])
return NET_IN, NET_OUT
def get_network(ip_version):
if(ip_version == 4):
HOST = "ipv4.google.com"
elif(ip_version == 6):
HOST = "ipv6.google.com"
try:
s = socket.create_connection((HOST, 80), 2)
return True
except:
pass
return False
if __name__ == '__main__':
socket.setdefaulttimeout(30)
while 1:
try:
print("Connecting...")
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((SERVER, PORT))
            data = s.recv(1024).decode()
if data.find("Authentication required") > -1:
                s.send((USER + ':' + PASSWORD + '\n').encode())
                data = s.recv(1024).decode()
if data.find("Authentication successful") < 0:
print(data)
raise socket.error
else:
print(data)
raise socket.error
print(data)
# data = s.recv(1024)
print(data)
timer = 0
check_ip = 0
if data.find("IPv4") > -1:
check_ip = 6
elif data.find("IPv6") > -1:
check_ip = 4
else:
print(data)
raise socket.error
traffic = Traffic()
traffic.get()
while 1:
CPU = get_cpu()
NetRx, NetTx = traffic.get()
NET_IN, NET_OUT = liuliang()
Uptime = get_uptime()
Load = get_load()
MemoryTotal, MemoryUsed, SwapTotal, SwapFree = get_memory()
HDDTotal, HDDUsed = get_hdd()
array = {}
if not timer:
array['online' + str(check_ip)] = get_network(check_ip)
timer = 10
else:
timer -= 1*INTERVAL
array['uptime'] = Uptime
array['load'] = Load
array['memory_total'] = MemoryTotal
array['memory_used'] = MemoryUsed
array['swap_total'] = SwapTotal
array['swap_used'] = SwapTotal - SwapFree
array['hdd_total'] = HDDTotal
array['hdd_used'] = HDDUsed
array['cpu'] = CPU
array['network_rx'] = NetRx
array['network_tx'] = NetTx
array['network_in'] = NET_IN
array['network_out'] = NET_OUT
s.send("update " + json.dumps(array) + "\n")
except KeyboardInterrupt:
raise
except socket.error:
print("Disconnected...")
# keep on trying after a disconnect
s.close()
time.sleep(3)
except Exception as e:
print("Caught Exception:", e)
s.close()
time.sleep(3)
|
the-stack_106_24254 | from settings import LOAD_TALIB
if LOAD_TALIB:
import talib
from apps.TA import HORIZONS
from apps.TA.storages.abstract.indicator import IndicatorStorage
from apps.TA.storages.abstract.indicator_subscriber import IndicatorSubscriber
from apps.TA.storages.data.price import PriceStorage
from settings import logger
class MomStorage(IndicatorStorage):
def produce_signal(self):
pass
class MomSubscriber(IndicatorSubscriber):
classes_subscribing_to = [
PriceStorage
]
def handle(self, channel, data, *args, **kwargs):
self.index = self.key_suffix
        if str(self.index) != "close_price":
logger.debug(f'index {self.index} is not close_price ...ignoring...')
return
new_mom_storage = MomStorage(ticker=self.ticker,
exchange=self.exchange,
timestamp=self.timestamp)
for horizon in HORIZONS:
periods = horizon * 10
close_value_np_array = new_mom_storage.get_denoted_price_array("close_price", periods)
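            # clamp the MOM lookback so it never exceeds the amount of price history actually available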
timeperiod = min([len(close_value_np_array), periods])
mom_value = talib.MOM(close_value_np_array, timeperiod=timeperiod)[-1]
# logger.debug(f'savingMom value {mom_value} for {self.ticker} on {periods} periods')
new_mom_storage.periods = periods
new_mom_storage.value = float(mom_value)
new_mom_storage.save()
|
the-stack_106_24255 | import os
import random
import re
import subprocess
import sys
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime
from threading import Thread
from typing import NoReturn, Tuple
import requests
from bs4 import BeautifulSoup
from googlehomepush import GoogleHome
from googlehomepush.http_server import serve_file
from joke.jokes import chucknorris, geek, icanhazdad, icndb
from newsapi import NewsApiClient, newsapi_exception
from playsound import playsound
from pychromecast.error import ChromecastConnectionError
from randfacts import get_fact
from executors.communicator import read_gmail
from executors.date_time import current_date, current_time
from executors.internet import vpn_checker
from executors.logger import logger
from executors.robinhood import robinhood
from executors.todo_list import todo
from executors.weather import weather
from modules.audio import listener, speaker
from modules.audio.listener import listen
from modules.conditions import keywords
from modules.database import database
from modules.dictionary import dictionary
from modules.models import models
from modules.utils import shared, support
env = models.env
fileio = models.FileIO()
indicators = models.Indicators()
db = database.Database(database=fileio.base_db)
def repeat() -> NoReturn:
"""Repeats whatever is heard."""
speaker.speak(text="Please tell me what to repeat.", run=True)
if keyword := listener.listen(timeout=3, phrase_limit=10):
if 'exit' in keyword or 'quit' in keyword or 'Xzibit' in keyword:
pass
else:
speaker.speak(text=f"I heard {keyword}")
def apps(phrase: str) -> None:
"""Launches the requested application and if Jarvis is unable to find the app, asks for the app name from the user.
Args:
phrase: Takes the phrase spoken as an argument.
"""
keyword = phrase.split()[-1] if phrase else None
ignore = ['app', 'application']
if not keyword or keyword in ignore:
if shared.called_by_offline:
speaker.speak(text=f'I need an app name to open {env.title}!')
return
speaker.speak(text=f"Which app shall I open {env.title}?", run=True)
if keyword := listener.listen(timeout=3, phrase_limit=4):
if 'exit' in keyword or 'quit' in keyword or 'Xzibit' in keyword:
return
else:
speaker.speak(text="I didn't quite get that. Try again.")
return
if not env.macos:
status = os.system(f'start {keyword}')
if status == 0:
speaker.speak(text=f'I have opened {keyword}')
else:
speaker.speak(text=f"I did not find the app {keyword}. Try again.")
return
all_apps = subprocess.check_output("ls /Applications/", shell=True)
apps_ = all_apps.decode('utf-8').split('\n')
app_check = False
for app in apps_:
if re.search(keyword, app, flags=re.IGNORECASE) is not None:
keyword = app
app_check = True
break
if not app_check:
speaker.speak(text=f"I did not find the app {keyword}. Try again.")
Thread(target=support.unrecognized_dumper, args=[{'APPLICATIONS': keyword}]).start()
return
app_status = os.system(f"open /Applications/'{keyword}' > /dev/null 2>&1")
keyword = keyword.replace('.app', '')
if app_status == 256:
speaker.speak(text=f"I'm sorry {env.title}! I wasn't able to launch {keyword}. "
"You might need to check its permissions.")
else:
speaker.speak(text=f"I have opened {keyword}")
def music(phrase: str = None) -> NoReturn:
"""Scans music directory in the user profile for ``.mp3`` files and plays using default player.
Args:
phrase: Takes the phrase spoken as an argument.
"""
sys.stdout.write("\rScanning music files...")
get_all_files = (os.path.join(root, f) for root, _, files in os.walk(os.path.join(env.home, "Music")) for f in
files)
if music_files := [file for file in get_all_files if os.path.splitext(file)[1] == '.mp3']:
chosen = random.choice(music_files)
if phrase and 'speaker' in phrase:
google_home(device=phrase, file=chosen)
else:
if env.macos:
subprocess.call(["open", chosen])
else:
os.system(f'start wmplayer "{chosen}"')
support.flush_screen()
speaker.speak(text=f"Enjoy your music {env.title}!")
else:
speaker.speak(text=f'No music files were found {env.title}!')
def google_home(device: str = None, file: str = None) -> None:
"""Uses ``socket lib`` to extract ip address and scan ip range for Google home devices.
Notes:
- Can also play music on multiple devices at once.
See Also:
- https://github.com/deblockt/google-home-push/pull/7
- | When music is played and immediately stopped/tasked the Google home device, it is most likely to except
| ``BrokenPipeError``
- This usually happens when a socket is written after it is fully closed.
- This error occurs when one end of the connection tries sending data while the other has closed the connection.
- This can simply be ignored or handled adding the code below in socket module (NOT PREFERRED).
.. code-block:: python
except IOError as error:
import errno
if error.errno != errno.EPIPE:
sys.stdout.write(error)
Args:
device: Name of the Google home device on which the music has to be played.
file: Scanned audio file to be played.
"""
if not (network_id := vpn_checker()):
return
if not shared.called_by_offline:
speaker.speak(text=f'Scanning your IP range for Google Home devices {env.title}!', run=True)
sys.stdout.write('\rScanning your IP range for Google Home devices..')
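    # keep the first three octets so host IDs can be appended to scan the local /24 range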
network_id = '.'.join(network_id.split('.')[0:3])
def ip_scan(host_id: int) -> Tuple[str, str]:
"""Scans the IP range using the received args as host id in an IP address.
Args:
host_id: Host ID passed in a multithreaded fashion to scan for Google home devices in IP range.
Returns:
Tuple(str, str):
Device name, and it's IP address.
"""
try:
device_info = GoogleHome(host=f"{network_id}.{host_id}").cc
device_info = str(device_info)
device_name = device_info.split("'")[3]
device_ip = device_info.split("'")[1]
# port = sample.split("'")[2].split()[1].replace(',', '')
return device_name, device_ip
except ChromecastConnectionError:
pass
devices = []
with ThreadPoolExecutor(max_workers=100) as executor:
        for info in executor.map(ip_scan, range(1, 101)):  # scans host IDs 1 to 100 (eg: 192.168.1.1 to 192.168.1.100)
devices.append(info) # this includes all the NoneType values returned by unassigned host IDs
devices = dict([i for i in devices if i]) # removes None values and converts list to dictionary of name and ip pair
if not device or not file:
support.flush_screen()
speaker.speak(text=f"You have {len(devices)} devices in your IP range {env.title}! "
f"{support.comma_separator(list(devices.keys()))}. You can choose one and ask me to play "
f"some music on any of these.")
return
else:
chosen = [value for key, value in devices.items() if key.lower() in device.lower()]
if not chosen:
speaker.speak(text=f"I don't see any matching devices {env.title}!. Let me help you. "
f"You have {len(devices)} devices in your IP range {env.title}! "
f"{support.comma_separator(list(devices.keys()))}.")
return
for target in chosen:
file_url = serve_file(file, "audio/mp3") # serves the file on local host and generates the play url
support.flush_screen()
support.block_print()
GoogleHome(host=target).play(file_url, "audio/mp3")
support.release_print()
if len(chosen) > 1:
speaker.speak(text=f"That's interesting, you've asked me to play on {len(chosen)} devices at a time. "
f"I hope you'll enjoy this {env.title}.", run=True)
else:
speaker.speak(text=f"Enjoy your music {env.title}!", run=True)
def jokes() -> NoReturn:
"""Uses jokes lib to say chucknorris jokes."""
speaker.speak(text=random.choice([geek, icanhazdad, chucknorris, icndb])())
def flip_a_coin() -> NoReturn:
"""Says ``heads`` or ``tails`` from a random choice."""
playsound(sound=indicators.coin, block=True) if not shared.called_by_offline else None
speaker.speak(
text=f"""{random.choice(['You got', 'It landed on', "It's"])} {random.choice(['heads', 'tails'])} {env.title}"""
)
def facts() -> NoReturn:
"""Tells a random fact."""
speaker.speak(text=get_fact(filter_enabled=False))
def meaning(phrase: str) -> None:
"""Gets meaning for a word skimmed from the user statement.
Args:
phrase: Takes the phrase spoken as an argument.
"""
keyword = phrase.split()[-1] if phrase else None
if not keyword or keyword == 'word':
speaker.speak(text="Please tell a keyword.", run=True)
response = listener.listen(timeout=3, phrase_limit=3)
if not response or any(word in response.lower() for word in keywords.exit_):
return
meaning(phrase=response)
else:
if definition := dictionary.meaning(term=keyword):
n = 0
vowel = ['A', 'E', 'I', 'O', 'U']
for key, value in definition.items():
insert = 'an' if key[0] in vowel else 'a'
repeated = 'also' if n != 0 else ''
n += 1
mean = ', '.join(value[0:2])
speaker.speak(text=f'{keyword} is {repeated} {insert} {key}, which means {mean}.')
if shared.called_by_offline:
return
speaker.speak(text=f'Do you wanna know how {keyword} is spelled?', run=True)
response = listener.listen(timeout=3, phrase_limit=3)
            if response and any(word in response.lower() for word in keywords.ok):
for letter in list(keyword.lower()):
speaker.speak(text=letter)
speaker.speak(run=True)
else:
speaker.speak(text=f"I'm sorry {env.title}! I was unable to get meaning for the word: {keyword}")
def notes() -> None:
"""Listens to the user and saves it as a text file."""
    converted = listener.listen(timeout=5, phrase_limit=10)
    if not converted or 'exit' in converted or 'quit' in converted or 'Xzibit' in converted:
        return
with open(fileio.notes, 'a') as writer:
writer.write(f"{datetime.now().strftime('%A, %B %d, %Y')}\n{datetime.now().strftime('%I:%M %p')}\n"
f"{converted}\n")
def news(news_source: str = 'fox') -> None:
"""Says news around the user's location.
Args:
news_source: Source from where the news has to be fetched. Defaults to ``fox``.
"""
if not env.news_api:
logger.warning("News apikey not found.")
support.no_env_vars()
return
sys.stdout.write(f'\rGetting news from {news_source} news.')
news_client = NewsApiClient(api_key=env.news_api)
try:
all_articles = news_client.get_top_headlines(sources=f'{news_source}-news')
except newsapi_exception.NewsAPIException:
speaker.speak(text=f"I wasn't able to get the news {env.title}! "
"I think the News API broke, you may try after sometime.")
return
speaker.speak(text="News around you!")
speaker.speak(text=' '.join([article['title'] for article in all_articles['articles']]))
if shared.called_by_offline:
return
if shared.called['report'] or shared.called['time_travel']:
speaker.speak(run=True)
def report() -> NoReturn:
"""Initiates a list of functions, that I tend to check first thing in the morning."""
sys.stdout.write("\rStarting today's report")
shared.called['report'] = True
current_date()
current_time()
weather()
todo()
read_gmail()
robinhood()
news()
shared.called['report'] = False
def time_travel() -> None:
"""Triggered only from ``initiator()`` to give a quick update on the user's daily routine."""
part_day = support.part_of_day()
speaker.speak(text=f"Good {part_day} {env.name}!")
if part_day == 'Night':
if event := support.celebrate():
speaker.speak(text=f'Happy {event}!')
return
current_date()
current_time()
weather()
speaker.speak(run=True)
with db.connection:
cursor = db.connection.cursor()
meeting_status = cursor.execute("SELECT info, date FROM ics").fetchone()
if meeting_status and meeting_status[0].startswith('You') and \
meeting_status[1] == datetime.now().strftime('%Y_%m_%d'):
speaker.speak(text=meeting_status[0])
with db.connection:
cursor = db.connection.cursor()
event_status = cursor.execute(f"SELECT info FROM {env.event_app}").fetchone()
if event_status and event_status[0].startswith('You'):
speaker.speak(text=event_status[0])
todo()
read_gmail()
speaker.speak(text='Would you like to hear the latest news?', run=True)
phrase = listen(timeout=3, phrase_limit=3)
    if phrase and any(word in phrase.lower() for word in keywords.ok):
news()
def sprint_name() -> NoReturn:
"""Generates a random sprint name."""
response = requests.get(url="https://sprint-name-gen.herokuapp.com/")
if not response.ok:
speaker.speak(text="I wasn't able to get a sprint name sir! Why not name it, Jarvis failed?")
return
soup = BeautifulSoup(response.content, 'html.parser')
name = soup.find('span', {'class': 'sprint-name'}).text
speaker.speak(text=name)
|
the-stack_106_24256 | import copy
import logging
from typing import Dict, List, Optional, Union
from ray.tune.error import TuneError
from ray.tune.experiment import Experiment, convert_to_experiment_list
from ray.tune.config_parser import make_parser, create_trial_from_spec
from ray.tune.suggest.search import SearchAlgorithm
from ray.tune.suggest.suggestion import Searcher
from ray.tune.suggest.util import set_search_properties_backwards_compatible
from ray.tune.suggest.variant_generator import format_vars, resolve_nested_dict
from ray.tune.trial import Trial
from ray.tune.utils.util import (flatten_dict, merge_dicts, atomic_save,
load_newest_checkpoint)
logger = logging.getLogger(__name__)
def _warn_on_repeater(searcher, total_samples):
from ray.tune.suggest.repeater import _warn_num_samples
_warn_num_samples(searcher, total_samples)
class SearchGenerator(SearchAlgorithm):
"""Generates trials to be passed to the TrialRunner.
Uses the provided ``searcher`` object to generate trials. This class
transparently handles repeating trials with score aggregation
without embedding logic into the Searcher.
Args:
searcher: Search object that subclasses the Searcher base class. This
is then used for generating new hyperparameter samples.
"""
CKPT_FILE_TMPL = "search_gen_state-{}.json"
def __init__(self, searcher: Searcher):
assert issubclass(
type(searcher),
Searcher), ("Searcher should be subclassing Searcher.")
self.searcher = searcher
self._parser = make_parser()
self._experiment = None
self._counter = 0 # Keeps track of number of trials created.
self._total_samples = 0 # int: total samples to evaluate.
self._finished = False
@property
def metric(self):
return self.searcher.metric
def set_search_properties(self, metric: Optional[str], mode: Optional[str],
config: Dict, **spec) -> bool:
return set_search_properties_backwards_compatible(
self.searcher.set_search_properties, metric, mode, config, **spec)
@property
def total_samples(self):
return self._total_samples
def add_configurations(
self,
experiments: Union[Experiment, List[Experiment], Dict[str, Dict]]):
"""Registers experiment specifications.
Arguments:
experiments (Experiment | list | dict): Experiments to run.
"""
assert not self._experiment
logger.debug("added configurations")
experiment_list = convert_to_experiment_list(experiments)
assert len(experiment_list) == 1, (
"SearchAlgorithms can only support 1 experiment at a time.")
self._experiment = experiment_list[0]
experiment_spec = self._experiment.spec
self._total_samples = self._experiment.spec.get("num_samples", 1)
_warn_on_repeater(self.searcher, self._total_samples)
if "run" not in experiment_spec:
raise TuneError("Must specify `run` in {}".format(experiment_spec))
def next_trial(self):
"""Provides one Trial object to be queued into the TrialRunner.
Returns:
Trial: Returns a single trial.
"""
if not self.is_finished():
return self.create_trial_if_possible(self._experiment.spec,
self._experiment.dir_name)
return None
def create_trial_if_possible(self, experiment_spec: Dict,
output_path: str) -> Optional[Trial]:
logger.debug("creating trial")
trial_id = Trial.generate_id()
suggested_config = self.searcher.suggest(trial_id)
if suggested_config == Searcher.FINISHED:
self._finished = True
logger.debug("Searcher has finished.")
return
if suggested_config is None:
return
spec = copy.deepcopy(experiment_spec)
spec["config"] = merge_dicts(spec["config"],
copy.deepcopy(suggested_config))
# Create a new trial_id if duplicate trial is created
flattened_config = resolve_nested_dict(spec["config"])
self._counter += 1
tag = "{0}_{1}".format(
str(self._counter), format_vars(flattened_config))
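        # e.g. a tag such as '3_lr=0.001,batch_size=64' (illustrative values) used to label the trial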
trial = create_trial_from_spec(
spec,
output_path,
self._parser,
evaluated_params=flatten_dict(suggested_config),
experiment_tag=tag,
trial_id=trial_id)
return trial
def on_trial_result(self, trial_id: str, result: Dict):
"""Notifies the underlying searcher."""
self.searcher.on_trial_result(trial_id, result)
def on_trial_complete(self,
trial_id: str,
result: Optional[Dict] = None,
error: bool = False):
self.searcher.on_trial_complete(
trial_id=trial_id, result=result, error=error)
def is_finished(self) -> bool:
return self._counter >= self._total_samples or self._finished
def get_state(self) -> Dict:
return {
"counter": self._counter,
"total_samples": self._total_samples,
"finished": self._finished,
"experiment": self._experiment
}
def set_state(self, state: Dict):
self._counter = state["counter"]
self._total_samples = state["total_samples"]
self._finished = state["finished"]
self._experiment = state["experiment"]
def has_checkpoint(self, dirpath: str):
return bool(
load_newest_checkpoint(dirpath, self.CKPT_FILE_TMPL.format("*")))
def save_to_dir(self, dirpath: str, session_str: str):
"""Saves self + searcher to dir.
Separates the "searcher" from its wrappers (concurrency, repeating).
This allows the user to easily restore a given searcher.
The save operation is atomic (write/swap).
Args:
dirpath (str): Filepath to experiment dir.
session_str (str): Unique identifier of the current run
session.
"""
searcher = self.searcher
search_alg_state = self.get_state()
while hasattr(searcher, "searcher"):
searcher_name = type(searcher).__name__
if searcher_name in search_alg_state:
logger.warning(
"There was a duplicate when saving {}. "
"Restore may not work properly.".format(searcher_name))
else:
search_alg_state["name:" +
searcher_name] = searcher.get_state()
searcher = searcher.searcher
base_searcher = searcher
# We save the base searcher separately for users to easily
# separate the searcher.
base_searcher.save_to_dir(dirpath, session_str)
atomic_save(
state=search_alg_state,
checkpoint_dir=dirpath,
file_name=self.CKPT_FILE_TMPL.format(session_str),
tmp_file_name=".tmp_search_generator_ckpt")
def restore_from_dir(self, dirpath: str):
"""Restores self + searcher + search wrappers from dirpath."""
searcher = self.searcher
search_alg_state = load_newest_checkpoint(
dirpath, self.CKPT_FILE_TMPL.format("*"))
if not search_alg_state:
raise RuntimeError(
"Unable to find checkpoint in {}.".format(dirpath))
while hasattr(searcher, "searcher"):
searcher_name = "name:" + type(searcher).__name__
if searcher_name not in search_alg_state:
names = [
key.split("name:")[1] for key in search_alg_state
if key.startswith("name:")
]
logger.warning("{} was not found in the experiment checkpoint "
"state when restoring. Found {}.".format(
searcher_name, names))
else:
searcher.set_state(search_alg_state.pop(searcher_name))
searcher = searcher.searcher
base_searcher = searcher
logger.debug(f"searching base {base_searcher}")
base_searcher.restore_from_dir(dirpath)
self.set_state(search_alg_state)
|