filename | text |
---|---|
the-stack_106_26901 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import copy
import importlib
import traceback
import ssl as ssl_lib
import six
import mongoengine
from mongoengine.queryset import visitor
from pymongo import uri_parser
from pymongo.errors import OperationFailure
from pymongo.errors import ConnectionFailure
from st2common import log as logging
from st2common.util import isotime
from st2common.util.misc import get_field_name_from_mongoengine_error
from st2common.models.db import stormbase
from st2common.models.utils.profiling import log_query_and_profile_data_for_queryset
from st2common.exceptions import db as db_exc
LOG = logging.getLogger(__name__)
MODEL_MODULE_NAMES = [
'st2common.models.db.auth',
'st2common.models.db.action',
'st2common.models.db.actionalias',
'st2common.models.db.keyvalue',
'st2common.models.db.execution',
'st2common.models.db.executionstate',
'st2common.models.db.execution_queue',
'st2common.models.db.liveaction',
'st2common.models.db.notification',
'st2common.models.db.pack',
'st2common.models.db.policy',
'st2common.models.db.rbac',
'st2common.models.db.rule',
'st2common.models.db.rule_enforcement',
'st2common.models.db.runner',
'st2common.models.db.sensor',
'st2common.models.db.trace',
'st2common.models.db.trigger',
'st2common.models.db.webhook',
'st2common.models.db.workflow'
]
# A list of model names for which we don't perform extra index cleanup
INDEX_CLEANUP_MODEL_NAMES_BLACKLIST = [
'PermissionGrantDB'
]
# Reference to DB model classes used for db_ensure_indexes
# NOTE: This variable is populated lazily inside get_model_classes()
MODEL_CLASSES = None
def get_model_classes():
"""
Retrieve a list of all the defined model classes.
:rtype: ``list``
"""
global MODEL_CLASSES
if MODEL_CLASSES:
return MODEL_CLASSES
result = []
for module_name in MODEL_MODULE_NAMES:
module = importlib.import_module(module_name)
model_classes = getattr(module, 'MODELS', [])
result.extend(model_classes)
MODEL_CLASSES = result
return MODEL_CLASSES
def _db_connect(db_name, db_host, db_port, username=None, password=None,
ssl=False, ssl_keyfile=None, ssl_certfile=None, ssl_cert_reqs=None,
ssl_ca_certs=None, authentication_mechanism=None, ssl_match_hostname=True):
if '://' in db_host:
# Hostname is provided as a URI string. Make sure we don't log the password in case one is
# included as part of the URI string.
uri_dict = uri_parser.parse_uri(db_host)
username_string = uri_dict.get('username', username) or username
if uri_dict.get('username', None) and username:
# Username argument has precedence over connection string username
username_string = username
hostnames = get_host_names_for_uri_dict(uri_dict=uri_dict)
if len(uri_dict['nodelist']) > 1:
host_string = '%s (replica set)' % (hostnames)
else:
host_string = hostnames
else:
host_string = '%s:%s' % (db_host, db_port)
username_string = username
LOG.info('Connecting to database "%s" @ "%s" as user "%s".' % (db_name, host_string,
str(username_string)))
ssl_kwargs = _get_ssl_kwargs(ssl=ssl, ssl_keyfile=ssl_keyfile, ssl_certfile=ssl_certfile,
ssl_cert_reqs=ssl_cert_reqs, ssl_ca_certs=ssl_ca_certs,
authentication_mechanism=authentication_mechanism,
ssl_match_hostname=ssl_match_hostname)
connection = mongoengine.connection.connect(db_name, host=db_host,
port=db_port, tz_aware=True,
username=username, password=password,
**ssl_kwargs)
# NOTE: Since pymongo 3.0, the connect() method is lazy and non-blocking (it always returns success)
# so we need to issue a command / query to check if connection has been
# successfully established.
# See http://api.mongodb.com/python/current/api/pymongo/mongo_client.html for details
try:
# The ismaster command is cheap and does not require auth
connection.admin.command('ismaster')
except ConnectionFailure as e:
LOG.error('Failed to connect to database "%s" @ "%s" as user "%s": %s' %
(db_name, host_string, str(username_string), str(e)))
raise e
LOG.info('Successfully connected to database "%s" @ "%s" as user "%s".' % (
db_name, host_string, str(username_string)))
return connection
def db_setup(db_name, db_host, db_port, username=None, password=None, ensure_indexes=True,
ssl=False, ssl_keyfile=None, ssl_certfile=None,
ssl_cert_reqs=None, ssl_ca_certs=None,
authentication_mechanism=None, ssl_match_hostname=True):
connection = _db_connect(db_name, db_host, db_port, username=username,
password=password, ssl=ssl, ssl_keyfile=ssl_keyfile,
ssl_certfile=ssl_certfile,
ssl_cert_reqs=ssl_cert_reqs, ssl_ca_certs=ssl_ca_certs,
authentication_mechanism=authentication_mechanism,
ssl_match_hostname=ssl_match_hostname)
# Create all the indexes upfront to prevent race-conditions caused by
# lazy index creation
if ensure_indexes:
db_ensure_indexes()
return connection
def db_ensure_indexes(model_classes=None):
"""
This function ensures that indexes for all the models have been created and the
extra indexes cleaned up.
Note #1: When calling this method, a database connection already needs to be
established.
Note #2: This method blocks until all the indexes have been created (indexes
are created in the foreground and not in the background).
:param model_classes: DB model classes to ensure indexes for. If not specified, indexes are
ensured for all the models.
:type model_classes: ``list``
"""
LOG.debug('Ensuring database indexes...')
if not model_classes:
model_classes = get_model_classes()
for model_class in model_classes:
class_name = model_class.__name__
# Note: We need to ensure / create new indexes before removing extra ones
try:
model_class.ensure_indexes()
except OperationFailure as e:
# Special case for "uid" index. MongoDB 3.4 has dropped "_types" index option so we
# need to re-create the index to make it work and avoid "index with different options
# already exists" error.
# Note: This condition would only be encountered when upgrading existing StackStorm
# installation from MongoDB 3.2 to 3.4.
msg = str(e)
if 'already exists with different options' in msg and 'uid_1' in msg:
drop_obsolete_types_indexes(model_class=model_class)
else:
raise e
except Exception as e:
tb_msg = traceback.format_exc()
msg = 'Failed to ensure indexes for model "%s": %s' % (class_name, str(e))
msg += '\n\n' + tb_msg
exc_cls = type(e)
raise exc_cls(msg)
if model_class.__name__ in INDEX_CLEANUP_MODEL_NAMES_BLACKLIST:
LOG.debug('Skipping index cleanup for blacklisted model "%s"...' % (class_name))
continue
removed_count = cleanup_extra_indexes(model_class=model_class)
if removed_count:
LOG.debug('Removed "%s" extra indexes for model "%s"' % (removed_count, class_name))
LOG.debug('Indexes are ensured for models: %s' %
', '.join(sorted((model_class.__name__ for model_class in model_classes))))
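# Illustrative usage (editorial note, not part of the original module): after
# db_setup() has established a connection, indexes can be re-ensured for just a
# subset of models, e.g. only the action models. This assumes the module exposes
# a MODELS list, which is the convention get_model_classes() above relies on.
#
#     from st2common.models.db import action as action_models
#     db_ensure_indexes(model_classes=action_models.MODELS)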
def cleanup_extra_indexes(model_class):
"""
Finds any extra indexes and removes those from mongodb.
"""
extra_indexes = model_class.compare_indexes().get('extra', None)
if not extra_indexes:
return 0
# mongoengine does not have the necessary method so we need to drop to
# pymongo interfaces via some private methods.
removed_count = 0
c = model_class._get_collection()
for extra_index in extra_indexes:
try:
c.drop_index(extra_index)
LOG.debug('Dropped index %s for model %s.', extra_index, model_class.__name__)
removed_count += 1
except OperationFailure:
LOG.warning('Attempt to cleanup index %s failed.', extra_index, exc_info=True)
return removed_count
def drop_obsolete_types_indexes(model_class):
"""
Special function for dropping obsolete "types" indexes for which support has
been removed in mongoengine and MongoDB 3.4.
For more info, see: http://docs.mongoengine.org/upgrade.html#inheritance
"""
class_name = model_class.__name__
LOG.debug('Dropping obsolete types index for model "%s"' % (class_name))
collection = model_class._get_collection()
collection.update({}, {'$unset': {'_types': 1}}, multi=True)
info = collection.index_information()
indexes_to_drop = [key for key, value in six.iteritems(info)
if '_types' in dict(value['key']) or 'types' in value]
LOG.debug('Will drop obsolete types indexes for model "%s": %s' % (class_name,
str(indexes_to_drop)))
for index in indexes_to_drop:
collection.drop_index(index)
LOG.debug('Recreating indexes for model "%s"' % (class_name))
model_class.ensure_indexes()
def db_teardown():
mongoengine.connection.disconnect()
def db_cleanup(db_name, db_host, db_port, username=None, password=None,
ssl=False, ssl_keyfile=None, ssl_certfile=None,
ssl_cert_reqs=None, ssl_ca_certs=None,
authentication_mechanism=None, ssl_match_hostname=True):
connection = _db_connect(db_name, db_host, db_port, username=username,
password=password, ssl=ssl, ssl_keyfile=ssl_keyfile,
ssl_certfile=ssl_certfile,
ssl_cert_reqs=ssl_cert_reqs, ssl_ca_certs=ssl_ca_certs,
authentication_mechanism=authentication_mechanism,
ssl_match_hostname=ssl_match_hostname)
LOG.info('Dropping database "%s" @ "%s:%s" as user "%s".',
db_name, db_host, db_port, str(username))
connection.drop_database(db_name)
return connection
def _get_ssl_kwargs(ssl=False, ssl_keyfile=None, ssl_certfile=None, ssl_cert_reqs=None,
ssl_ca_certs=None, authentication_mechanism=None, ssl_match_hostname=True):
ssl_kwargs = {
'ssl': ssl,
}
if ssl_keyfile:
ssl_kwargs['ssl'] = True
ssl_kwargs['ssl_keyfile'] = ssl_keyfile
if ssl_certfile:
ssl_kwargs['ssl'] = True
ssl_kwargs['ssl_certfile'] = ssl_certfile
if ssl_cert_reqs:
if ssl_cert_reqs == 'none':
ssl_cert_reqs = ssl_lib.CERT_NONE
elif ssl_cert_reqs == 'optional':
ssl_cert_reqs = ssl_lib.CERT_OPTIONAL
elif ssl_cert_reqs == 'required':
ssl_cert_reqs = ssl_lib.CERT_REQUIRED
ssl_kwargs['ssl_cert_reqs'] = ssl_cert_reqs
if ssl_ca_certs:
ssl_kwargs['ssl'] = True
ssl_kwargs['ssl_ca_certs'] = ssl_ca_certs
if authentication_mechanism:
ssl_kwargs['ssl'] = True
ssl_kwargs['authentication_mechanism'] = authentication_mechanism
if ssl_kwargs.get('ssl', False):
# pass in ssl_match_hostname only if ssl is True. The right default value
# for ssl_match_hostname in almost all cases is True.
ssl_kwargs['ssl_match_hostname'] = ssl_match_hostname
return ssl_kwargs
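# Illustrative example (editorial note, not part of the original module): calling
# _get_ssl_kwargs(ssl=True, ssl_ca_certs='/etc/ssl/mongodb-ca.pem') returns
# {'ssl': True, 'ssl_ca_certs': '/etc/ssl/mongodb-ca.pem', 'ssl_match_hostname': True};
# the CA file path is a made-up placeholder. Passing any of the ssl_* options
# implicitly flips 'ssl' to True, as the branches above show.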
class MongoDBAccess(object):
"""Database object access class that provides general functions for a model type."""
def __init__(self, model):
self.model = model
def get_by_name(self, value):
return self.get(name=value, raise_exception=True)
def get_by_id(self, value):
return self.get(id=value, raise_exception=True)
def get_by_uid(self, value):
return self.get(uid=value, raise_exception=True)
def get_by_ref(self, value):
return self.get(ref=value, raise_exception=True)
def get_by_pack(self, value):
return self.get(pack=value, raise_exception=True)
def get(self, *args, **kwargs):
exclude_fields = kwargs.pop('exclude_fields', None)
raise_exception = kwargs.pop('raise_exception', False)
only_fields = kwargs.pop('only_fields', None)
args = self._process_arg_filters(args)
instances = self.model.objects(*args, **kwargs)
if exclude_fields:
instances = instances.exclude(*exclude_fields)
if only_fields:
try:
instances = instances.only(*only_fields)
except (mongoengine.errors.LookUpError, AttributeError) as e:
msg = ('Invalid or unsupported include attribute specified: %s' % str(e))
raise ValueError(msg)
instance = instances[0] if instances else None
log_query_and_profile_data_for_queryset(queryset=instances)
if not instance and raise_exception:
msg = 'Unable to find the %s instance. %s' % (self.model.__name__, kwargs)
raise db_exc.StackStormDBObjectNotFoundError(msg)
return instance
def get_all(self, *args, **kwargs):
return self.query(*args, **kwargs)
def count(self, *args, **kwargs):
result = self.model.objects(*args, **kwargs).count()
log_query_and_profile_data_for_queryset(queryset=result)
return result
# TODO: PEP-3102 introduced keyword-only arguments, so once we support Python 3+, we can change
# this definition to have explicit keyword-only arguments:
#
# def query(self, *args, offset=0, limit=None, order_by=None, exclude_fields=None,
# **filters):
def query(self, *args, **filters):
# Python 2: Pop keyword parameters that aren't actually filters off of the kwargs
offset = filters.pop('offset', 0)
limit = filters.pop('limit', None)
order_by = filters.pop('order_by', None)
exclude_fields = filters.pop('exclude_fields', None)
only_fields = filters.pop('only_fields', None)
no_dereference = filters.pop('no_dereference', None)
order_by = order_by or []
exclude_fields = exclude_fields or []
eop = offset + int(limit) if limit else None
args = self._process_arg_filters(args)
# Process the filters
# Note: Both of these functions manipulate the "filters" variable, so the order in which
# they are called matters
filters, order_by = self._process_datetime_range_filters(filters=filters, order_by=order_by)
filters = self._process_null_filters(filters=filters)
result = self.model.objects(*args, **filters)
if exclude_fields:
try:
result = result.exclude(*exclude_fields)
except (mongoengine.errors.LookUpError, AttributeError) as e:
field = get_field_name_from_mongoengine_error(e)
msg = ('Invalid or unsupported exclude attribute specified: %s' % field)
raise ValueError(msg)
if only_fields:
try:
result = result.only(*only_fields)
except (mongoengine.errors.LookUpError, AttributeError) as e:
field = get_field_name_from_mongoengine_error(e)
msg = ('Invalid or unsupported include attribute specified: %s' % field)
raise ValueError(msg)
if no_dereference:
result = result.no_dereference()
result = result.order_by(*order_by)
result = result[offset:eop]
log_query_and_profile_data_for_queryset(queryset=result)
return result
def distinct(self, *args, **kwargs):
field = kwargs.pop('field')
result = self.model.objects(**kwargs).distinct(field)
log_query_and_profile_data_for_queryset(queryset=result)
return result
def aggregate(self, *args, **kwargs):
return self.model.objects(**kwargs)._collection.aggregate(*args, **kwargs)
def insert(self, instance):
instance = self.model.objects.insert(instance)
return self._undo_dict_field_escape(instance)
def add_or_update(self, instance, validate=True):
instance.save(validate=validate)
return self._undo_dict_field_escape(instance)
def update(self, instance, **kwargs):
return instance.update(**kwargs)
def delete(self, instance):
return instance.delete()
def delete_by_query(self, *args, **query):
"""
Delete objects by query and return number of deleted objects.
"""
qs = self.model.objects.filter(*args, **query)
count = qs.delete()
log_query_and_profile_data_for_queryset(queryset=qs)
return count
def _undo_dict_field_escape(self, instance):
for attr, field in six.iteritems(instance._fields):
if isinstance(field, stormbase.EscapedDictField):
value = getattr(instance, attr)
setattr(instance, attr, field.to_python(value))
return instance
def _process_arg_filters(self, args):
"""
Fix filter arguments in nested Q objects
"""
_args = tuple()
for arg in args:
# Unfortunately mongoengine doesn't expose any visitors other than Q, so we have to
# extract QCombination from the module itself
if isinstance(arg, visitor.Q):
# Note: Both of these functions manipulate the "filters" variable, so the order in which
# they are called matters
filters, _ = self._process_datetime_range_filters(filters=arg.query)
filters = self._process_null_filters(filters=filters)
# Create a new Q object with the same filters as the old one
_args += (visitor.Q(**filters),)
elif isinstance(arg, visitor.QCombination):
# Recurse if we need to
children = self._process_arg_filters(arg.children)
# Create a new QCombination object with the same operation and fixed filters
_args += (visitor.QCombination(arg.operation, children),)
else:
raise TypeError("Unknown argument type '%s' of argument '%s'"
% (type(arg), repr(arg)))
return _args
def _process_null_filters(self, filters):
result = copy.deepcopy(filters)
null_filters = {k: v for k, v in six.iteritems(filters)
if v is None or
(type(v) in [str, six.text_type] and str(v.lower()) == 'null')}
for key in null_filters.keys():
result['%s__exists' % (key)] = False
del result[key]
return result
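# Illustrative example (editorial note): _process_null_filters() rewrites
# {'end_timestamp': None} to {'end_timestamp__exists': False}; the string values
# 'null' / 'NULL' are treated the same way as an actual None.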
def _process_datetime_range_filters(self, filters, order_by=None):
ranges = {k: v for k, v in six.iteritems(filters)
if type(v) in [str, six.text_type] and '..' in v}
order_by_list = copy.deepcopy(order_by) if order_by else []
for k, v in six.iteritems(ranges):
values = v.split('..')
dt1 = isotime.parse(values[0])
dt2 = isotime.parse(values[1])
k__gte = '%s__gte' % k
k__lte = '%s__lte' % k
if dt1 < dt2:
query = {k__gte: dt1, k__lte: dt2}
sort_key, reverse_sort_key = k, '-' + k
else:
query = {k__gte: dt2, k__lte: dt1}
sort_key, reverse_sort_key = '-' + k, k
del filters[k]
filters.update(query)
if reverse_sort_key in order_by_list:
idx = order_by_list.index(reverse_sort_key)
order_by_list.pop(idx)
order_by_list.insert(idx, sort_key)
elif sort_key not in order_by_list:
order_by_list = [sort_key] + order_by_list
return filters, order_by_list
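# Illustrative example (editorial note): a range filter such as
# {'timestamp': '2020-01-01T00:00:00Z..2020-02-01T00:00:00Z'} is rewritten by
# _process_datetime_range_filters() into {'timestamp__gte': ..., 'timestamp__lte': ...};
# the original key is dropped and 'timestamp' (or '-timestamp' when the bounds are
# given newest-first) is pushed to the front of the order_by list.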
class ChangeRevisionMongoDBAccess(MongoDBAccess):
def insert(self, instance):
instance = self.model.objects.insert(instance)
return self._undo_dict_field_escape(instance)
def add_or_update(self, instance, validate=True):
return self.save(instance, validate=validate)
def update(self, instance, **kwargs):
for k, v in six.iteritems(kwargs):
setattr(instance, k, v)
return self.save(instance)
def save(self, instance, validate=True):
if not hasattr(instance, 'id') or not instance.id:
return self.insert(instance)
else:
try:
save_condition = {'id': instance.id, 'rev': instance.rev}
instance.rev = instance.rev + 1
instance.save(save_condition=save_condition, validate=validate)
except mongoengine.SaveConditionError:
raise db_exc.StackStormDBObjectWriteConflictError(instance)
return self._undo_dict_field_escape(instance)
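# Editorial note: save() above implements optimistic concurrency control. The
# save_condition pins the expected "rev" value, so a concurrent writer that bumped
# "rev" first causes mongoengine to raise SaveConditionError, which is surfaced as
# StackStormDBObjectWriteConflictError instead of silently overwriting the change.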
def get_host_names_for_uri_dict(uri_dict):
hosts = []
for host, port in uri_dict['nodelist']:
hosts.append('%s:%s' % (host, port))
hosts = ','.join(hosts)
return hosts
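# Editorial example: {'nodelist': [('mongo1', 27017), ('mongo2', 27017)]} becomes
# 'mongo1:27017,mongo2:27017' (the host names here are made up for illustration).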
|
the-stack_106_26903 | # coding: utf-8
from .. import fixtures, config
from ..assertions import eq_
from ..config import requirements
from sqlalchemy import Integer, Unicode, UnicodeText, select, TIMESTAMP
from sqlalchemy import Date, DateTime, Time, MetaData, String, \
Text, Numeric, Float, literal, Boolean, cast, null, JSON, and_, \
type_coerce, BigInteger
from ..schema import Table, Column
from ... import testing
import decimal
import datetime
from ...util import u
from ... import util
class _LiteralRoundTripFixture(object):
@testing.provide_metadata
def _literal_round_trip(self, type_, input_, output, filter_=None):
"""test literal rendering """
# for literal, we test the literal render in an INSERT
# into a typed column. we can then SELECT it back as its
# official type; ideally we'd be able to use CAST here
# but MySQL in particular can't CAST fully
t = Table('t', self.metadata, Column('x', type_))
t.create()
for value in input_:
ins = t.insert().values(x=literal(value)).compile(
dialect=testing.db.dialect,
compile_kwargs=dict(literal_binds=True)
)
testing.db.execute(ins)
for row in t.select().execute():
value = row[0]
if filter_ is not None:
value = filter_(value)
assert value in output
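# Editorial note: subclasses mix this fixture into fixtures.TestBase / fixtures.TablesTest
# and call self._literal_round_trip(<type>, [input values], [expected values]), as in
# TextTest.test_literal further below, so each type's literal (inline-rendered) SQL form
# is exercised in addition to the regular bound-parameter round trips.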
class _UnicodeFixture(_LiteralRoundTripFixture):
__requires__ = 'unicode_data',
data = u("Alors vous imaginez ma surprise, au lever du jour, "
"quand une drôle de petite voix m’a réveillé. Elle "
"disait: « S’il vous plaît… dessine-moi un mouton! »")
@classmethod
def define_tables(cls, metadata):
Table('unicode_table', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('unicode_data', cls.datatype),
)
def test_round_trip(self):
unicode_table = self.tables.unicode_table
config.db.execute(
unicode_table.insert(),
{
'unicode_data': self.data,
}
)
row = config.db.execute(
select([
unicode_table.c.unicode_data,
])
).first()
eq_(
row,
(self.data, )
)
assert isinstance(row[0], util.text_type)
def test_round_trip_executemany(self):
unicode_table = self.tables.unicode_table
config.db.execute(
unicode_table.insert(),
[
{
'unicode_data': self.data,
}
for i in range(3)
]
)
rows = config.db.execute(
select([
unicode_table.c.unicode_data,
])
).fetchall()
eq_(
rows,
[(self.data, ) for i in range(3)]
)
for row in rows:
assert isinstance(row[0], util.text_type)
def _test_empty_strings(self):
unicode_table = self.tables.unicode_table
config.db.execute(
unicode_table.insert(),
{"unicode_data": u('')}
)
row = config.db.execute(
select([unicode_table.c.unicode_data])
).first()
eq_(row, (u(''),))
def test_literal(self):
self._literal_round_trip(self.datatype, [self.data], [self.data])
class UnicodeVarcharTest(_UnicodeFixture, fixtures.TablesTest):
__requires__ = 'unicode_data',
__backend__ = True
datatype = Unicode(255)
@requirements.empty_strings_varchar
def test_empty_strings_varchar(self):
self._test_empty_strings()
class UnicodeTextTest(_UnicodeFixture, fixtures.TablesTest):
__requires__ = 'unicode_data', 'text_type'
__backend__ = True
datatype = UnicodeText()
@requirements.empty_strings_text
def test_empty_strings_text(self):
self._test_empty_strings()
class TextTest(_LiteralRoundTripFixture, fixtures.TablesTest):
__requires__ = 'text_type',
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table('text_table', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('text_data', Text),
)
def test_text_roundtrip(self):
text_table = self.tables.text_table
config.db.execute(
text_table.insert(),
{"text_data": 'some text'}
)
row = config.db.execute(
select([text_table.c.text_data])
).first()
eq_(row, ('some text',))
def test_text_empty_strings(self):
text_table = self.tables.text_table
config.db.execute(
text_table.insert(),
{"text_data": ''}
)
row = config.db.execute(
select([text_table.c.text_data])
).first()
eq_(row, ('',))
def test_literal(self):
self._literal_round_trip(Text, ["some text"], ["some text"])
def test_literal_quoting(self):
data = '''some 'text' hey "hi there" that's text'''
self._literal_round_trip(Text, [data], [data])
def test_literal_backslashes(self):
data = r'backslash one \ backslash two \\ end'
self._literal_round_trip(Text, [data], [data])
def test_literal_percentsigns(self):
data = r'percent % signs %% percent'
self._literal_round_trip(Text, [data], [data])
class StringTest(_LiteralRoundTripFixture, fixtures.TestBase):
__backend__ = True
@requirements.unbounded_varchar
def test_nolength_string(self):
metadata = MetaData()
foo = Table('foo', metadata,
Column('one', String)
)
foo.create(config.db)
foo.drop(config.db)
def test_literal(self):
self._literal_round_trip(String(40), ["some text"], ["some text"])
def test_literal_quoting(self):
data = '''some 'text' hey "hi there" that's text'''
self._literal_round_trip(String(40), [data], [data])
def test_literal_backslashes(self):
data = r'backslash one \ backslash two \\ end'
self._literal_round_trip(String(40), [data], [data])
class _DateFixture(_LiteralRoundTripFixture):
compare = None
@classmethod
def define_tables(cls, metadata):
Table('date_table', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('date_data', cls.datatype),
)
def test_round_trip(self):
date_table = self.tables.date_table
config.db.execute(
date_table.insert(),
{'date_data': self.data}
)
row = config.db.execute(
select([
date_table.c.date_data,
])
).first()
compare = self.compare or self.data
eq_(row,
(compare, ))
assert isinstance(row[0], type(compare))
def test_null(self):
date_table = self.tables.date_table
config.db.execute(
date_table.insert(),
{'date_data': None}
)
row = config.db.execute(
select([
date_table.c.date_data,
])
).first()
eq_(row, (None,))
@testing.requires.datetime_literals
def test_literal(self):
compare = self.compare or self.data
self._literal_round_trip(self.datatype, [self.data], [compare])
class DateTimeTest(_DateFixture, fixtures.TablesTest):
__requires__ = 'datetime',
__backend__ = True
datatype = DateTime
data = datetime.datetime(2012, 10, 15, 12, 57, 18)
class DateTimeMicrosecondsTest(_DateFixture, fixtures.TablesTest):
__requires__ = 'datetime_microseconds',
__backend__ = True
datatype = DateTime
data = datetime.datetime(2012, 10, 15, 12, 57, 18, 396)
class TimestampMicrosecondsTest(_DateFixture, fixtures.TablesTest):
__requires__ = 'timestamp_microseconds',
__backend__ = True
datatype = TIMESTAMP
data = datetime.datetime(2012, 10, 15, 12, 57, 18, 396)
class TimeTest(_DateFixture, fixtures.TablesTest):
__requires__ = 'time',
__backend__ = True
datatype = Time
data = datetime.time(12, 57, 18)
class TimeMicrosecondsTest(_DateFixture, fixtures.TablesTest):
__requires__ = 'time_microseconds',
__backend__ = True
datatype = Time
data = datetime.time(12, 57, 18, 396)
class DateTest(_DateFixture, fixtures.TablesTest):
__requires__ = 'date',
__backend__ = True
datatype = Date
data = datetime.date(2012, 10, 15)
class DateTimeCoercedToDateTimeTest(_DateFixture, fixtures.TablesTest):
__requires__ = 'date', 'date_coerces_from_datetime'
__backend__ = True
datatype = Date
data = datetime.datetime(2012, 10, 15, 12, 57, 18)
compare = datetime.date(2012, 10, 15)
class DateTimeHistoricTest(_DateFixture, fixtures.TablesTest):
__requires__ = 'datetime_historic',
__backend__ = True
datatype = DateTime
data = datetime.datetime(1850, 11, 10, 11, 52, 35)
class DateHistoricTest(_DateFixture, fixtures.TablesTest):
__requires__ = 'date_historic',
__backend__ = True
datatype = Date
data = datetime.date(1727, 4, 1)
class IntegerTest(_LiteralRoundTripFixture, fixtures.TestBase):
__backend__ = True
def test_literal(self):
self._literal_round_trip(Integer, [5], [5])
def test_huge_int(self):
self._round_trip(BigInteger, 1376537018368127)
@testing.provide_metadata
def _round_trip(self, datatype, data):
metadata = self.metadata
int_table = Table(
'integer_table', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('integer_data', datatype),
)
metadata.create_all(config.db)
config.db.execute(
int_table.insert(),
{'integer_data': data}
)
row = config.db.execute(
select([
int_table.c.integer_data,
])
).first()
eq_(row, (data, ))
if util.py3k:
assert isinstance(row[0], int)
else:
assert isinstance(row[0], (long, int))
class NumericTest(_LiteralRoundTripFixture, fixtures.TestBase):
__backend__ = True
@testing.emits_warning(r".*does \*not\* support Decimal objects natively")
@testing.provide_metadata
def _do_test(self, type_, input_, output,
filter_=None, check_scale=False):
metadata = self.metadata
t = Table('t', metadata, Column('x', type_))
t.create()
t.insert().execute([{'x': x} for x in input_])
result = {row[0] for row in t.select().execute()}
output = set(output)
if filter_:
result = set(filter_(x) for x in result)
output = set(filter_(x) for x in output)
eq_(result, output)
if check_scale:
eq_(
[str(x) for x in result],
[str(x) for x in output],
)
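# Editorial note: _do_test() above inserts every input value into a one-column table,
# reads the rows back as a set and compares it to `output`; when given, filter_
# (e.g. rounding) is applied to both sides, and check_scale additionally compares the
# string forms so the scale must survive the round trip as well.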
@testing.emits_warning(r".*does \*not\* support Decimal objects natively")
def test_render_literal_numeric(self):
self._literal_round_trip(
Numeric(precision=8, scale=4),
[15.7563, decimal.Decimal("15.7563")],
[decimal.Decimal("15.7563")],
)
@testing.emits_warning(r".*does \*not\* support Decimal objects natively")
def test_render_literal_numeric_asfloat(self):
self._literal_round_trip(
Numeric(precision=8, scale=4, asdecimal=False),
[15.7563, decimal.Decimal("15.7563")],
[15.7563],
)
def test_render_literal_float(self):
self._literal_round_trip(
Float(4),
[15.7563, decimal.Decimal("15.7563")],
[15.7563, ],
filter_=lambda n: n is not None and round(n, 5) or None
)
@testing.requires.precision_generic_float_type
def test_float_custom_scale(self):
self._do_test(
Float(None, decimal_return_scale=7, asdecimal=True),
[15.7563827, decimal.Decimal("15.7563827")],
[decimal.Decimal("15.7563827"), ],
check_scale=True
)
def test_numeric_as_decimal(self):
self._do_test(
Numeric(precision=8, scale=4),
[15.7563, decimal.Decimal("15.7563")],
[decimal.Decimal("15.7563")],
)
def test_numeric_as_float(self):
self._do_test(
Numeric(precision=8, scale=4, asdecimal=False),
[15.7563, decimal.Decimal("15.7563")],
[15.7563],
)
@testing.requires.fetch_null_from_numeric
def test_numeric_null_as_decimal(self):
self._do_test(
Numeric(precision=8, scale=4),
[None],
[None],
)
@testing.requires.fetch_null_from_numeric
def test_numeric_null_as_float(self):
self._do_test(
Numeric(precision=8, scale=4, asdecimal=False),
[None],
[None],
)
@testing.requires.floats_to_four_decimals
def test_float_as_decimal(self):
self._do_test(
Float(precision=8, asdecimal=True),
[15.7563, decimal.Decimal("15.7563"), None],
[decimal.Decimal("15.7563"), None],
)
def test_float_as_float(self):
self._do_test(
Float(precision=8),
[15.7563, decimal.Decimal("15.7563")],
[15.7563],
filter_=lambda n: n is not None and round(n, 5) or None
)
def test_float_coerce_round_trip(self):
expr = 15.7563
val = testing.db.scalar(
select([literal(expr)])
)
eq_(val, expr)
# TODO: this one still breaks on MySQL
# def test_decimal_coerce_round_trip(self):
# expr = decimal.Decimal("15.7563")
#
# val = testing.db.scalar(
# select([literal(expr)])
# )
# eq_(val, expr)
@testing.requires.precision_numerics_general
def test_precision_decimal(self):
numbers = set([
decimal.Decimal("54.234246451650"),
decimal.Decimal("0.004354"),
decimal.Decimal("900.0"),
])
self._do_test(
Numeric(precision=18, scale=12),
numbers,
numbers,
)
@testing.requires.precision_numerics_enotation_large
def test_enotation_decimal(self):
"""test exceedingly small decimals.
Decimal reports values with E notation when the adjusted exponent
is less than -6.
"""
numbers = set([
decimal.Decimal('1E-2'),
decimal.Decimal('1E-3'),
decimal.Decimal('1E-4'),
decimal.Decimal('1E-5'),
decimal.Decimal('1E-6'),
decimal.Decimal('1E-7'),
decimal.Decimal('1E-8'),
decimal.Decimal("0.01000005940696"),
decimal.Decimal("0.00000005940696"),
decimal.Decimal("0.00000000000696"),
decimal.Decimal("0.70000000000696"),
decimal.Decimal("696E-12"),
])
self._do_test(
Numeric(precision=18, scale=14),
numbers,
numbers
)
@testing.requires.precision_numerics_enotation_large
def test_enotation_decimal_large(self):
"""test exceedingly large decimals.
"""
numbers = set([
decimal.Decimal('4E+8'),
decimal.Decimal("5748E+15"),
decimal.Decimal('1.521E+15'),
decimal.Decimal('00000000000000.1E+12'),
])
self._do_test(
Numeric(precision=25, scale=2),
numbers,
numbers
)
@testing.requires.precision_numerics_many_significant_digits
def test_many_significant_digits(self):
numbers = set([
decimal.Decimal("31943874831932418390.01"),
decimal.Decimal("319438950232418390.273596"),
decimal.Decimal("87673.594069654243"),
])
self._do_test(
Numeric(precision=38, scale=12),
numbers,
numbers
)
@testing.requires.precision_numerics_retains_significant_digits
def test_numeric_no_decimal(self):
numbers = set([
decimal.Decimal("1.000")
])
self._do_test(
Numeric(precision=5, scale=3),
numbers,
numbers,
check_scale=True
)
class BooleanTest(_LiteralRoundTripFixture, fixtures.TablesTest):
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table('boolean_table', metadata,
Column('id', Integer, primary_key=True, autoincrement=False),
Column('value', Boolean),
Column('unconstrained_value', Boolean(create_constraint=False)),
)
def test_render_literal_bool(self):
self._literal_round_trip(
Boolean(),
[True, False],
[True, False]
)
def test_round_trip(self):
boolean_table = self.tables.boolean_table
config.db.execute(
boolean_table.insert(),
{
'id': 1,
'value': True,
'unconstrained_value': False
}
)
row = config.db.execute(
select([
boolean_table.c.value,
boolean_table.c.unconstrained_value
])
).first()
eq_(
row,
(True, False)
)
assert isinstance(row[0], bool)
def test_null(self):
boolean_table = self.tables.boolean_table
config.db.execute(
boolean_table.insert(),
{
'id': 1,
'value': None,
'unconstrained_value': None
}
)
row = config.db.execute(
select([
boolean_table.c.value,
boolean_table.c.unconstrained_value
])
).first()
eq_(
row,
(None, None)
)
def test_whereclause(self):
# testing "WHERE <column>" renders a compatible expression
boolean_table = self.tables.boolean_table
with config.db.connect() as conn:
conn.execute(
boolean_table.insert(),
[
{'id': 1, 'value': True, 'unconstrained_value': True},
{'id': 2, 'value': False, 'unconstrained_value': False}
]
)
eq_(
conn.scalar(
select([boolean_table.c.id]).where(boolean_table.c.value)
),
1
)
eq_(
conn.scalar(
select([boolean_table.c.id]).where(
boolean_table.c.unconstrained_value)
),
1
)
eq_(
conn.scalar(
select([boolean_table.c.id]).where(~boolean_table.c.value)
),
2
)
eq_(
conn.scalar(
select([boolean_table.c.id]).where(
~boolean_table.c.unconstrained_value)
),
2
)
class JSONTest(_LiteralRoundTripFixture, fixtures.TablesTest):
__requires__ = 'json_type',
__backend__ = True
datatype = JSON
data1 = {
"key1": "value1",
"key2": "value2"
}
data2 = {
"Key 'One'": "value1",
"key two": "value2",
"key three": "value ' three '"
}
data3 = {
"key1": [1, 2, 3],
"key2": ["one", "two", "three"],
"key3": [{"four": "five"}, {"six": "seven"}]
}
data4 = ["one", "two", "three"]
data5 = {
"nested": {
"elem1": [
{"a": "b", "c": "d"},
{"e": "f", "g": "h"}
],
"elem2": {
"elem3": {"elem4": "elem5"}
}
}
}
data6 = {
"a": 5,
"b": "some value",
"c": {"foo": "bar"}
}
@classmethod
def define_tables(cls, metadata):
Table('data_table', metadata,
Column('id', Integer, primary_key=True),
Column('name', String(30), nullable=False),
Column('data', cls.datatype),
Column('nulldata', cls.datatype(none_as_null=True))
)
def test_round_trip_data1(self):
self._test_round_trip(self.data1)
def _test_round_trip(self, data_element):
data_table = self.tables.data_table
config.db.execute(
data_table.insert(),
{'name': 'row1', 'data': data_element}
)
row = config.db.execute(
select([
data_table.c.data,
])
).first()
eq_(row, (data_element, ))
def test_round_trip_none_as_sql_null(self):
col = self.tables.data_table.c['nulldata']
with config.db.connect() as conn:
conn.execute(
self.tables.data_table.insert(),
{"name": "r1", "data": None}
)
eq_(
conn.scalar(
select([self.tables.data_table.c.name]).
where(col.is_(null()))
),
"r1"
)
eq_(
conn.scalar(
select([col])
),
None
)
def test_round_trip_json_null_as_json_null(self):
col = self.tables.data_table.c['data']
with config.db.connect() as conn:
conn.execute(
self.tables.data_table.insert(),
{"name": "r1", "data": JSON.NULL}
)
eq_(
conn.scalar(
select([self.tables.data_table.c.name]).
where(cast(col, String) == 'null')
),
"r1"
)
eq_(
conn.scalar(
select([col])
),
None
)
def test_round_trip_none_as_json_null(self):
col = self.tables.data_table.c['data']
with config.db.connect() as conn:
conn.execute(
self.tables.data_table.insert(),
{"name": "r1", "data": None}
)
eq_(
conn.scalar(
select([self.tables.data_table.c.name]).
where(cast(col, String) == 'null')
),
"r1"
)
eq_(
conn.scalar(
select([col])
),
None
)
def _criteria_fixture(self):
config.db.execute(
self.tables.data_table.insert(),
[{"name": "r1", "data": self.data1},
{"name": "r2", "data": self.data2},
{"name": "r3", "data": self.data3},
{"name": "r4", "data": self.data4},
{"name": "r5", "data": self.data5},
{"name": "r6", "data": self.data6}]
)
def _test_index_criteria(self, crit, expected, test_literal=True):
self._criteria_fixture()
with config.db.connect() as conn:
stmt = select([self.tables.data_table.c.name]).where(crit)
eq_(
conn.scalar(stmt),
expected
)
if test_literal:
literal_sql = str(stmt.compile(
config.db, compile_kwargs={"literal_binds": True}))
eq_(conn.scalar(literal_sql), expected)
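# Editorial note: _test_index_criteria() above loads rows r1..r6 via _criteria_fixture(),
# evaluates the WHERE criterion with bound parameters and, unless test_literal=False,
# compiles it again with literal_binds to check that the literal-SQL path selects the
# same row.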
def test_crit_spaces_in_key(self):
name = self.tables.data_table.c.name
col = self.tables.data_table.c['data']
# limit the rows here to avoid PG error
# "cannot extract field from a non-object", which is
# fixed in 9.4 but may exist in 9.3
self._test_index_criteria(
and_(
name.in_(["r1", "r2", "r3"]),
cast(col["key two"], String) == '"value2"'
),
"r2"
)
@config.requirements.json_array_indexes
def test_crit_simple_int(self):
name = self.tables.data_table.c.name
col = self.tables.data_table.c['data']
# limit the rows here to avoid PG error
# "cannot extract array element from a non-array", which is
# fixed in 9.4 but may exist in 9.3
self._test_index_criteria(
and_(name == 'r4', cast(col[1], String) == '"two"'),
"r4"
)
def test_crit_mixed_path(self):
col = self.tables.data_table.c['data']
self._test_index_criteria(
cast(col[("key3", 1, "six")], String) == '"seven"',
"r3"
)
def test_crit_string_path(self):
col = self.tables.data_table.c['data']
self._test_index_criteria(
cast(col[("nested", "elem2", "elem3", "elem4")], String)
== '"elem5"',
"r5"
)
def test_crit_against_string_basic(self):
name = self.tables.data_table.c.name
col = self.tables.data_table.c['data']
self._test_index_criteria(
and_(name == 'r6', cast(col["b"], String) == '"some value"'),
"r6"
)
def test_crit_against_string_coerce_type(self):
name = self.tables.data_table.c.name
col = self.tables.data_table.c['data']
self._test_index_criteria(
and_(name == 'r6',
cast(col["b"], String) == type_coerce("some value", JSON)),
"r6",
test_literal=False
)
def test_crit_against_int_basic(self):
name = self.tables.data_table.c.name
col = self.tables.data_table.c['data']
self._test_index_criteria(
and_(name == 'r6', cast(col["a"], String) == '5'),
"r6"
)
def test_crit_against_int_coerce_type(self):
name = self.tables.data_table.c.name
col = self.tables.data_table.c['data']
self._test_index_criteria(
and_(name == 'r6', cast(col["a"], String) == type_coerce(5, JSON)),
"r6",
test_literal=False
)
def test_unicode_round_trip(self):
with config.db.connect() as conn:
conn.execute(
self.tables.data_table.insert(),
{
"name": "r1",
"data": {
util.u('réveillé'): util.u('réveillé'),
"data": {"k1": util.u('drôle')}
}
}
)
eq_(
conn.scalar(select([self.tables.data_table.c.data])),
{
util.u('réveillé'): util.u('réveillé'),
"data": {"k1": util.u('drôle')}
},
)
def test_eval_none_flag_orm(self):
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Session
Base = declarative_base()
class Data(Base):
__table__ = self.tables.data_table
s = Session(testing.db)
d1 = Data(name='d1', data=None, nulldata=None)
s.add(d1)
s.commit()
s.bulk_insert_mappings(
Data, [{"name": "d2", "data": None, "nulldata": None}]
)
eq_(
s.query(
cast(self.tables.data_table.c.data, String(convert_unicode="force")),
cast(self.tables.data_table.c.nulldata, String)
).filter(self.tables.data_table.c.name == 'd1').first(),
("null", None)
)
eq_(
s.query(
cast(self.tables.data_table.c.data, String(convert_unicode="force")),
cast(self.tables.data_table.c.nulldata, String)
).filter(self.tables.data_table.c.name == 'd2').first(),
("null", None)
)
__all__ = ('UnicodeVarcharTest', 'UnicodeTextTest', 'JSONTest',
'DateTest', 'DateTimeTest', 'TextTest',
'NumericTest', 'IntegerTest',
'DateTimeHistoricTest', 'DateTimeCoercedToDateTimeTest',
'TimeMicrosecondsTest', 'TimestampMicrosecondsTest', 'TimeTest',
'DateTimeMicrosecondsTest',
'DateHistoricTest', 'StringTest', 'BooleanTest')
|
the-stack_106_26904 | import itertools
import json
from functools import total_ordering
from django.conf import settings
from django.forms import widgets
from django.forms.utils import flatatt
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils.formats import get_format
from django.utils.functional import cached_property
from django.utils.html import format_html
from django.utils.translation import ugettext_lazy as _
from taggit.forms import TagWidget
from wagtail.admin.datetimepicker import to_datetimepicker_format
from wagtail.core import hooks
from wagtail.core.models import Page
from wagtail.utils.widgets import WidgetWithScript
DEFAULT_DATE_FORMAT = '%Y-%m-%d'
DEFAULT_DATETIME_FORMAT = '%Y-%m-%d %H:%M'
class AdminAutoHeightTextInput(widgets.Textarea):
template_name = 'wagtailadmin/widgets/auto_height_text_input.html'
def __init__(self, attrs=None):
# Use more appropriate rows default, given autoheight will alter this anyway
default_attrs = {'rows': '1'}
if attrs:
default_attrs.update(attrs)
super().__init__(default_attrs)
class AdminDateInput(widgets.DateInput):
template_name = 'wagtailadmin/widgets/date_input.html'
def __init__(self, attrs=None, format=None):
default_attrs = {'autocomplete': 'off'}
fmt = format
if attrs:
default_attrs.update(attrs)
if fmt is None:
fmt = getattr(settings, 'WAGTAIL_DATE_FORMAT', DEFAULT_DATE_FORMAT)
self.js_format = to_datetimepicker_format(fmt)
super().__init__(attrs=default_attrs, format=fmt)
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
config = {
'dayOfWeekStart': get_format('FIRST_DAY_OF_WEEK'),
'format': self.js_format,
}
context['widget']['config_json'] = json.dumps(config)
return context
class AdminTimeInput(widgets.TimeInput):
template_name = 'wagtailadmin/widgets/time_input.html'
def __init__(self, attrs=None, format='%H:%M'):
default_attrs = {'autocomplete': 'off'}
if attrs:
default_attrs.update(attrs)
super().__init__(attrs=default_attrs, format=format)
class AdminDateTimeInput(widgets.DateTimeInput):
template_name = 'wagtailadmin/widgets/datetime_input.html'
def __init__(self, attrs=None, format=None):
default_attrs = {'autocomplete': 'off'}
fmt = format
if attrs:
default_attrs.update(attrs)
if fmt is None:
fmt = getattr(settings, 'WAGTAIL_DATETIME_FORMAT', DEFAULT_DATETIME_FORMAT)
self.js_format = to_datetimepicker_format(fmt)
super().__init__(attrs=default_attrs, format=fmt)
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
config = {
'dayOfWeekStart': get_format('FIRST_DAY_OF_WEEK'),
'format': self.js_format,
}
context['widget']['config_json'] = json.dumps(config)
return context
class AdminTagWidget(TagWidget):
template_name = 'wagtailadmin/widgets/tag_widget.html'
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
context['widget']['autocomplete_url'] = reverse('wagtailadmin_tag_autocomplete')
context['widget']['tag_spaces_allowed'] = getattr(settings, 'TAG_SPACES_ALLOWED', True)
return context
class AdminChooser(WidgetWithScript, widgets.Input):
input_type = 'hidden'
choose_one_text = _("Choose an item")
choose_another_text = _("Choose another item")
clear_choice_text = _("Clear choice")
link_to_chosen_text = _("Edit this item")
show_edit_link = True
# when looping over form fields, this one should appear in visible_fields, not hidden_fields
# despite the underlying input being type="hidden"
is_hidden = False
def get_instance(self, model_class, value):
# helper method for cleanly turning 'value' into an instance object
if value is None:
return None
try:
return model_class.objects.get(pk=value)
except model_class.DoesNotExist:
return None
def get_instance_and_id(self, model_class, value):
if value is None:
return (None, None)
elif isinstance(value, model_class):
return (value, value.pk)
else:
try:
return (model_class.objects.get(pk=value), value)
except model_class.DoesNotExist:
return (None, None)
def value_from_datadict(self, data, files, name):
# treat the empty string as None
result = super().value_from_datadict(data, files, name)
if result == '':
return None
else:
return result
def __init__(self, **kwargs):
# allow choose_one_text / choose_another_text to be overridden per-instance
if 'choose_one_text' in kwargs:
self.choose_one_text = kwargs.pop('choose_one_text')
if 'choose_another_text' in kwargs:
self.choose_another_text = kwargs.pop('choose_another_text')
if 'clear_choice_text' in kwargs:
self.clear_choice_text = kwargs.pop('clear_choice_text')
if 'link_to_chosen_text' in kwargs:
self.link_to_chosen_text = kwargs.pop('link_to_chosen_text')
if 'show_edit_link' in kwargs:
self.show_edit_link = kwargs.pop('show_edit_link')
super().__init__(**kwargs)
class AdminPageChooser(AdminChooser):
choose_one_text = _('Choose a page')
choose_another_text = _('Choose another page')
link_to_chosen_text = _('Edit this page')
def __init__(self, target_models=None, can_choose_root=False, user_perms=None, **kwargs):
super().__init__(**kwargs)
if target_models:
models = ', '.join([model._meta.verbose_name.title() for model in target_models if model is not Page])
if models:
self.choose_one_text += ' (' + models + ')'
self.user_perms = user_perms
self.target_models = list(target_models or [Page])
self.can_choose_root = can_choose_root
def _get_lowest_common_page_class(self):
"""
Return a Page class that is an ancestor for all Page classes in
``target_models``, and is also a concrete Page class itself.
"""
if len(self.target_models) == 1:
# Shortcut for a single page type
return self.target_models[0]
else:
return Page
def render_html(self, name, value, attrs):
model_class = self._get_lowest_common_page_class()
instance, value = self.get_instance_and_id(model_class, value)
original_field_html = super().render_html(name, value, attrs)
return render_to_string("wagtailadmin/widgets/page_chooser.html", {
'widget': self,
'original_field_html': original_field_html,
'attrs': attrs,
'value': value,
'page': instance,
})
def render_js_init(self, id_, name, value):
if isinstance(value, Page):
page = value
else:
# Value is an ID look up object
model_class = self._get_lowest_common_page_class()
page = self.get_instance(model_class, value)
parent = page.get_parent() if page else None
return "createPageChooser({id}, {model_names}, {parent}, {can_choose_root}, {user_perms});".format(
id=json.dumps(id_),
model_names=json.dumps([
'{app}.{model}'.format(
app=model._meta.app_label,
model=model._meta.model_name)
for model in self.target_models
]),
parent=json.dumps(parent.id if parent else None),
can_choose_root=('true' if self.can_choose_root else 'false'),
user_perms=json.dumps(self.user_perms),
)
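# Editorial note (illustrative): for the plain Page model the snippet rendered above
# looks roughly like
#     createPageChooser("id_page", ["wagtailcore.page"], null, false, null);
# the exact arguments depend on target_models, the chosen page's parent,
# can_choose_root and user_perms.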
class Media:
js = [
'wagtailadmin/js/page-chooser-modal.js',
'wagtailadmin/js/page-chooser.js',
]
@total_ordering
class Button:
show = True
def __init__(self, label, url, classes=set(), attrs={}, priority=1000):
self.label = label
self.url = url
self.classes = classes
self.attrs = attrs.copy()
self.priority = priority
def render(self):
attrs = {'href': self.url, 'class': ' '.join(sorted(self.classes))}
attrs.update(self.attrs)
return format_html('<a{}>{}</a>', flatatt(attrs), self.label)
def __str__(self):
return self.render()
def __repr__(self):
return '<Button: {}>'.format(self.label)
def __lt__(self, other):
if not isinstance(other, Button):
return NotImplemented
return (self.priority, self.label) < (other.priority, other.label)
def __eq__(self, other):
if not isinstance(other, Button):
return NotImplemented
return (self.label == other.label
and self.url == other.url
and self.classes == other.classes
and self.attrs == other.attrs
and self.priority == other.priority)
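# Editorial note: together with the @total_ordering decorator, __lt__ and __eq__ above
# give buttons a full ordering keyed on (priority, label); ButtonWithDropdownFromHook
# below relies on this when it sorts the buttons returned by the registered hooks.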
class PageListingButton(Button):
def __init__(self, label, url, classes=set(), **kwargs):
classes = {'button', 'button-small', 'button-secondary'} | set(classes)
super().__init__(label, url, classes=classes, **kwargs)
class BaseDropdownMenuButton(Button):
def __init__(self, *args, **kwargs):
super().__init__(*args, url=None, **kwargs)
@cached_property
def dropdown_buttons(self):
raise NotImplementedError
def render(self):
return render_to_string(self.template_name, {
'buttons': self.dropdown_buttons,
'label': self.label,
'title': self.attrs.get('title'),
'is_parent': self.is_parent})
class ButtonWithDropdownFromHook(BaseDropdownMenuButton):
template_name = 'wagtailadmin/pages/listing/_button_with_dropdown.html'
def __init__(self, label, hook_name, page, page_perms, is_parent, **kwargs):
self.hook_name = hook_name
self.page = page
self.page_perms = page_perms
self.is_parent = is_parent
super().__init__(label, **kwargs)
@property
def show(self):
return bool(self.dropdown_buttons)
@cached_property
def dropdown_buttons(self):
button_hooks = hooks.get_hooks(self.hook_name)
return sorted(itertools.chain.from_iterable(
hook(self.page, self.page_perms, self.is_parent)
for hook in button_hooks))
|
the-stack_106_26905 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
from collections import OrderedDict
import pandas as pd
from evaluator.evaluator_helpers import Categories, Sub_categories, Metrics
class Table(object):
"""docstring for Table"""
def __init__(self, arg=None):
super(Table, self).__init__()
self.entries = {}
self.sub_entries = {}
self.arg = arg
self.results = {}
self.sub_results = {}
self.collision_test = {}
def add_collision_entry(self, name, result):
self.collision_test[name] = result
def add_entry(self, name, results):
final_results = []
sub_final_results = []
## Overall metrics ADE, FDE, ColI, ColII, Topk_ade, Topk_fde, NLL
table_metrics = Metrics(*([0]*8))
## Metrics for the 4 types of trajectories and interactions
table_categories = Categories(*[Metrics(*([0]*8)) for i in range(1,5)])
table_sub_categories = Sub_categories(*[Metrics(*([0]*8)) for i in range(1,5)])
for dataset, (metrics, categories, sub_categories) in results.items():
## Overall
table_metrics += metrics
## Main Types
table_categories.static_scenes += categories.static_scenes
table_categories.linear_scenes += categories.linear_scenes
table_categories.forced_non_linear_scenes += categories.forced_non_linear_scenes
table_categories.non_linear_scenes += categories.non_linear_scenes
## Sub Types
table_sub_categories.lf += sub_categories.lf
table_sub_categories.ca += sub_categories.ca
table_sub_categories.grp += sub_categories.grp
table_sub_categories.others += sub_categories.others
final_results += table_categories.static_scenes.avg_vals_to_list()
final_results += table_categories.linear_scenes.avg_vals_to_list()
final_results += table_categories.forced_non_linear_scenes.avg_vals_to_list()
final_results += table_categories.non_linear_scenes.avg_vals_to_list()
final_results += table_metrics.avg_vals_to_list()
sub_final_results += table_sub_categories.lf.avg_vals_to_list()
sub_final_results += table_sub_categories.ca.avg_vals_to_list()
sub_final_results += table_sub_categories.grp.avg_vals_to_list()
sub_final_results += table_sub_categories.others.avg_vals_to_list()
self.results[name] = final_results
self.sub_results[name] = sub_final_results
return final_results, sub_final_results
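# Editorial note: every Metrics block averages out to 8 values, so final_results is laid
# out as [static | linear | forced non-linear | non-linear | overall] (40 values) and
# sub_final_results as [LF | CA | Grp | Others] (32 values); print_table() below slices
# these ranges by index.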
def add_result(self, name, final_results, sub_final_results):
self.results[name] = final_results
self.sub_results[name] = sub_final_results
def render_mpl_table(self, data, col_width=3.0, row_height=0.625, font_size=14,
header_color='#40466e', row_colors=['#f1f1f2', 'w'], edge_color='w',
bbox=[0, 0, 1, 1], header_columns=0,
ax=None, **kwargs):
if ax is None:
size = (np.array(data.shape[::-1]) + np.array([0, 1])) * np.array([col_width, row_height])
fig, ax = plt.subplots(figsize=size)
ax.axis('off')
mpl_table = ax.table(cellText=data.values, bbox=bbox, colLabels=data.columns, cellLoc='center', **kwargs)
for (row, col), cell in mpl_table.get_celld().items():
if (row == 0) or (col == 1) or (col == 0):
cell.set_text_props(fontproperties=FontProperties(weight='bold'))
mpl_table.auto_set_font_size(False)
mpl_table.set_fontsize(font_size)
return ax
def print_table(self):
fig = plt.figure(figsize=(20, 20))
# ------------------------------------------ TABLES -------------------------------------------
# Overall Table #
ax1 = fig.add_subplot(311)
ax1.axis('tight')
ax1.axis('off')
df = pd.DataFrame(columns=['', 'Model', 'No.', 'ADE', 'FDE', 'Col I', 'Col II', 'Top3 ADE', 'Top3 FDE', 'NLL', 'Col_test'])
it = 0
len_name = 10
for key in self.results:
df.loc[it] = ['Overall'] + [key[:len_name]] + [self.results[key][index].__format__('.2f') for index in range(32, 40)] + [self.collision_test[key]]
it += 1
ax1 = self.render_mpl_table(df, header_columns=0, col_width=2.0, bbox=[0, 0.9, 1, 0.1*len(self.results)], ax=ax1)
ax2 = fig.add_subplot(312)
ax2.axis('tight')
ax2.axis('off')
# Overall Table #
df = pd.DataFrame(columns=['Type', 'Sub-Type', 'Model', 'No.', 'ADE', 'FDE', 'Col I', 'Col II', 'Top3 ADE', 'Top3 FDE', 'NLL'])
type_list = [['I', ''], ['II', ''], ['III', ''], ['III', 'LF'], ['III', 'CA'], ['III', 'Grp'], ['III', 'Oth'], ['IV', '']]
it = 0
##Type I
for key in self.results:
df.loc[it] = type_list[0] + [key[:len_name]] + [self.results[key][index].__format__('.2f') for index in range(8)]
it += 1
df.loc[it] = ['Type', 'Sub-Type', 'Model', 'No.', 'ADE', 'FDE', 'Col I', 'Col II', 'Top3 ADE', 'Top3 FDE', 'NLL']
it += 1
##Type II
for key in self.results:
df.loc[it] = type_list[1] + [key[:len_name]] + [self.results[key][index].__format__('.2f') for index in range(8, 16)]
it += 1
df.loc[it] = ['Type', 'Sub-Type', 'Model', 'No.', 'ADE', 'FDE', 'Col I', 'Col II', 'Top3 ADE', 'Top3 FDE', 'NLL']
it += 1
##Type III
for key in self.results:
df.loc[it] = type_list[2] + [key[:len_name]] + [self.results[key][index].__format__('.2f') for index in range(16, 24)]
it += 1
df.loc[it] = ['Type', 'Sub-Type', 'Model', 'No.', 'ADE', 'FDE', 'Col I', 'Col II', 'Top3 ADE', 'Top3 FDE', 'NLL']
it += 1
##Type III: LF
for key in self.results:
df.loc[it] = type_list[3] + [key[:len_name]] + [self.sub_results[key][index].__format__('.2f') for index in range(8)]
it += 1
df.loc[it] = ['Type', 'Sub-Type', 'Model', 'No.', 'ADE', 'FDE', 'Col I', 'Col II', 'Top3 ADE', 'Top3 FDE', 'NLL']
it += 1
##Type III: CA
for key in self.results:
df.loc[it] = type_list[4] + [key[:len_name]] + [self.sub_results[key][index].__format__('.2f') for index in range(8, 16)]
it += 1
df.loc[it] = ['Type', 'Sub-Type', 'Model', 'No.', 'ADE', 'FDE', 'Col I', 'Col II', 'Top3 ADE', 'Top3 FDE', 'NLL']
it += 1
##Type III: Grp
for key in self.results:
df.loc[it] = type_list[5] + [key[:len_name]] + [self.sub_results[key][index].__format__('.2f') for index in range(16, 24)]
it += 1
df.loc[it] = ['Type', 'Sub-Type', 'Model', 'No.', 'ADE', 'FDE', 'Col I', 'Col II', 'Top3 ADE', 'Top3 FDE', 'NLL']
it += 1
##Type III: Others
for key in self.results:
df.loc[it] = type_list[6] + [key[:len_name]] + [self.sub_results[key][index].__format__('.2f') for index in range(24, 32)]
it += 1
df.loc[it] = ['Type', 'Sub-Type', 'Model', 'No.', 'ADE', 'FDE', 'Col I', 'Col II', 'Top3 ADE', 'Top3 FDE', 'NLL']
it += 1
##Type IV
for key in self.results:
df.loc[it] = type_list[7] + [key[:len_name]] + [self.results[key][index].__format__('.2f') for index in range(24, 32)]
it += 1
ax2 = self.render_mpl_table(df, header_columns=0, col_width=2.0, bbox=[0, -1.6, 1, 0.6*len(self.results)], ax=ax2)
fig.savefig('Results.png')
|
the-stack_106_26908 | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import oneflow as flow
def test_deadlock(test_case):
if flow.eager_execution_enabled():
return
flow.config.gpu_device_num(2)
func_config = flow.FunctionConfig()
func_config.enable_inplace(False)
@flow.global_function(func_config)
def DistributeConcat():
with flow.scope.placement("gpu", "0:0"):
w = flow.get_variable(
"w", (2, 5), initializer=flow.constant_initializer(10)
)
x = w + 1
y = w + 1
ret = flow.advanced.distribute_concat([x, y])
# return ret
DistributeConcat()
|
the-stack_106_26909 | import asyncio
import sys
import toml
from Server import Server
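# Editorial note: the expected shape of config.toml, reconstructed from the keys read
# below (the concrete values are illustrative assumptions):
#
#   n = 4
#   t = 1
#
#   [[servers]]
#   host = "127.0.0.1"
#   http_port = 8080
#
#   [[servers]]
#   host = "127.0.0.1"
#   http_port = 8081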
async def main(node_id, config_file):
config = toml.load(config_file)
n = config['n']
t = config['t']
server_config = config["servers"][node_id]
server = Server(n, t, node_id, server_config["host"], server_config["http_port"])
tasks = []
tasks.append(asyncio.ensure_future(server.http_server()))
for task in tasks:
await task
if __name__ == "__main__":
server_id = int(sys.argv[1])
config_file = "Scripts/hbswap/conf/config.toml"
asyncio.run(main(server_id, config_file)) |
the-stack_106_26911 | # -*- coding: utf-8 -*-
"""
Created on 2018/10/31
@author: gaoan
"""
import six
import pandas as pd
from tigeropen.common.util.string_utils import get_string
from tigeropen.common.response import TigerResponse
COLUMNS = ['symbol', 'settlement_date', 'short_interest', 'avg_daily_volume', 'days_to_cover', 'percent_of_float']
SHORT_INTEREST_FIELD_MAPPINGS = {'settlementDate': 'settlement_date', 'shortInterest': 'short_interest',
'avgDailyVolume': 'avg_daily_volume', 'daysToCover': 'days_to_cover',
'percentOfFloat': 'percent_of_float'}
class ShortInterestResponse(TigerResponse):
def __init__(self):
super(ShortInterestResponse, self).__init__()
self.short_interests = None
self._is_success = None
def parse_response_content(self, response_content):
response = super(ShortInterestResponse, self).parse_response_content(response_content)
if 'is_success' in response:
self._is_success = response['is_success']
if self.data and isinstance(self.data, list):
short_interest_items = []
for symbol_item in self.data:
symbol = symbol_item.get('symbol')
items = symbol_item.get('items')
for item in items:
item_values = {'symbol': symbol}
for key, value in item.items():
if value is None:
continue
if isinstance(value, six.string_types):
value = get_string(value)
tag = SHORT_INTEREST_FIELD_MAPPINGS[key] if key in SHORT_INTEREST_FIELD_MAPPINGS else key
item_values[tag] = value
short_interest_items.append([item_values.get(tag) for tag in COLUMNS])
self.short_interests = pd.DataFrame(short_interest_items, columns=COLUMNS)
|
the-stack_106_26912 | import sys
import os
os.environ["OMP_NUM_THREADS"] = "1"
import pyDNMFk.config as config
#import pytest
config.init(0)
from pyDNMFk.pyDNMF import *
from pyDNMFk.dist_comm import *
#@pytest.mark.mpi
def test_dist_nmf_2d():
np.random.seed(100)
comm = MPI.COMM_WORLD
m, k, n = 24, 2, 12
W = np.random.rand(m, k)
H = np.random.rand(k, n)
A = W @ H
for grid in ([[2, 1]]):
p_r, p_c = grid[0], grid[1]
comms = MPI_comm(comm, p_r, p_c)
comm1 = comms.comm
rank = comm.rank
size = comm.size
args = parse()
args.size, args.rank, args.comm1, args.comm, args.p_r, args.p_c = size, rank, comm1, comms, p_r, p_c
args.m, args.n, args.k = m, n, k
args.itr, args.init = 2000, 'rand'
args.row_comm, args.col_comm, args.comm1 = comms.cart_1d_row(), comms.cart_1d_column(), comm1
args.verbose = True
dtr_blk_shp = determine_block_params(rank, (p_r, p_c), A.shape)
blk_indices = dtr_blk_shp.determine_block_index_range_asymm()
A_ij = A[blk_indices[0][0]:blk_indices[1][0] + 1, blk_indices[0][1]:blk_indices[1][1] + 1]
        for mthd in ['mu', 'bcd', 'hals']:  # MU, BCD and HALS update schemes; the inner loop covers the Frobenius and KL objectives
for norm in ['fro', 'kl']:
args.method, args.norm = mthd, norm
if norm == 'kl' and mthd != 'mu':
continue
W_ij, H_ij, rel_error = PyNMF(A_ij, factors=None, params=args).fit()
if rank == 0: print('working on grid=', grid, 'with norm = ', norm, ' method= ', mthd, 'rel error=',
rel_error)
assert rel_error < 1e-4
def main():
test_dist_nmf_2d()
if __name__ == '__main__':
main()
|
the-stack_106_26913 | """
Implementation tips:
- from lab2 import Dfa
- create 3 Dfa objects for A3, A4 and A5, starting from their string descriptions
- create a Lexer class that receives a list of Dfas and a name for each one
- create the method Lexer.longest_prefix, which receives a word (string) and
finds the longest prefix accepted by one of the Dfas
- create the method Lexer.parse, which keeps calling longest_prefix until the
whole word has been consumed
"""
from lab1 import Dfa
A3_text = '0\n0 a 0\n0 b 1\n1 a 1\n1 b 1\n0\n'
A4_text = '0\n0 b 0\n0 a 1\n1 a 1\n1 b 1\n0\n'
A5_text = '0\n0 a 1\n0 b 3\n1 a 3\n1 b 2\n2 a 3\n2 b 0\n3 a 3\n3 b 3\n1\n'
class Lexer:
_dfas = []
#parse string and create each dfa
def __init__(self,dfas):
self._dfas = []
l = dfas.split("\n\n")
for x in l:
self._dfas.append(Dfa(x))
def longest_prefix(self,word):
current_prefix = ""
identifer = ""
index = -1
#reverse list in order to get first priority if prefix length is equal
for i in range(1,len(word) + 1):
for x in reversed(self._dfas):
if x.accepted(word[:i]):
#save accepted word
current_prefix = word[:i]
#save afd name
identifer = x._token
#save index to delete prefix from word
index = i
return (identifer,current_prefix,index)
def parse(self,word):
text = ""
#consume word by finding longest prefix
while word != "":
tup = self.longest_prefix(word)
#build string
ide = str(tup[0])
prefix = tup[1]
index_for_delete = tup[2]
text = text + ide + ' ' + prefix + '\n'
#delete prefix
if len(word) == 1:
word = ""
else:
word = word[index_for_delete:]
#delete last '\n'
text = text[:-1]
return text
def runlexer(dfa_name,input_name,output_name):
dfa_file = open(dfa_name,"r")
input_file = open(input_name,"r")
output_file = open(output_name,"w")
dfa_data = dfa_file.read()
input_data = input_file.read()
x = Lexer(dfa_data)
output_data = x.parse(input_data)
output_file.write(output_data)
dfa_file.close()
input_file.close()
output_file.close()
if __name__ == "__main__":
runlexer("T1.1.lex","T1.1.1.in","test.txt")
|
the-stack_106_26914 | import sys
sys.path.insert(0, '/home/apprenant/simplon_projects/personal_diary/')
from src.config import USER, PASSWORD
from datetime import datetime
import streamlit as st
import pandas as pd
import matplotlib.pyplot as plt
import mysql.connector
import requests
import locale
locale.setlocale(locale.LC_ALL, 'fr_FR.UTF-8')
def call_connector(db="none"):
'''
Function which connects to MySQL
'''
if db == "none":
db_connection = mysql.connector.connect(
host="localhost",
user=USER,
passwd=PASSWORD)
else:
db_connection = mysql.connector.connect(
host="localhost",
user=USER,
passwd=PASSWORD,
database=db
)
db_cursor = db_connection.cursor(buffered=True, dictionary=False)
return db_connection, db_cursor
def get_emotion(text):
'''
Get the emotion in the text
'''
res = requests.get(
f"http://0.0.0.0:8080/emotion/{text}"
)
path = res.json()
emotion = path['label']
probas = path['probas']
return emotion, probas
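# Illustrative note (not part of the original module): get_emotion() assumes the /emotion
# endpoint returns JSON of the form {'label': ..., 'probas': ...}; the values below are hypothetical.
#
#   emotion, probas = get_emotion("Quelle belle journée !")
#   # emotion -> e.g. "joy"
#   # probas  -> a mapping that includes a 'Probabilities' column (as used by display_entries below)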
def get_users():
'''
    Get a JSON list of all users
'''
res = requests.get(
f"http://0.0.0.0:8080/users"
)
path = res.json()
print(path)
return path
def get_user_info(id):
'''
    Get the name, first name and email address from the user id
'''
res = requests.get(
f"http://0.0.0.0:8080/user_id/{id}"
)
path = res.json()
user = path['user_id'][0]
name = user[1]
first_name = user[2]
email = user[3]
return name, first_name, email
def add_user(user):
'''
Add a new user
'''
response = requests.post(
"http://0.0.0.0:8080/user", json=user
)
return response
def update_user(user_id, user):
'''
Update a user
'''
response = requests.put(
f"http://0.0.0.0:8080/update_user/{user_id}", json=user
)
return response
def add_entry(diary_entry):
'''
Add a new diary entry
'''
requests.post(
"http://0.0.0.0:8080/add_entry", json=diary_entry
)
def get_entries(user_id):
'''
Get all the entries from a user's id
'''
res = requests.get(
f"http://0.0.0.0:8080/user_id/entries/{user_id}"
)
path = res.json()
entries = path['entries']
return entries
def get_entries_date(user_id, date):
'''
Get all the entries from a user's id
'''
res = requests.get(f"http://0.0.0.0:8080/user_id/entries/{user_id}/{date}")
print('get entries')
print(date)
path = res.json()
entries = path['entries']
return entries
def get_all_entries_dates(date_1, date_2):
'''
Get all the entries between 2 dates
'''
res = requests.get(f"http://0.0.0.0:8080/entries/{date_1}/{date_2}")
print('get entries')
print(date_1)
print(date_2)
path = res.json()
print(path)
entries = path['entries']
return entries
def delete_user(id):
requests.delete(f"http://0.0.0.0:8080/delete_user/{id}")
def date_to_datetime(date):
return datetime.combine(date, datetime.min.time())
def save_img(user_id):
response = requests.get("https://thispersondoesnotexist.com/image")
file = open("./avatars/{}.png".format(user_id), "wb")
file.write(response.content)
file.close()
def get_image_path(user_id):
return "./avatars/{}.png".format(user_id)
def display_entries(entries):
for item in range(len(entries)):
d = datetime.strptime(entries[item][2], '%Y-%m-%dT%H:%M:%S')
st.write("Date: {0:%d} {0:%B} {0:%Y}".format(d))
text = entries[item][3]
st.write("Phrase: ", text)
emotion = get_emotion(text)[0]
st.write("Émotion dominante: ", emotion)
probas = get_emotion(text)[1]
probas = pd.DataFrame(probas)
probas.sort_values(by='Probabilities', ascending=False, inplace=True)
st.write("Probabilités: ")
st.dataframe(probas)
st.markdown("<hr />", unsafe_allow_html=True)
def display_pie_chart(entries):
entries_df = pd.DataFrame(entries)
wheel_labels = pd.DataFrame()
    # value_counts() sorts by frequency, so take labels and rates from the same series to keep them aligned
    emotion_counts = entries_df[4].value_counts(normalize=True)
    wheel_labels['emotions'] = emotion_counts.index
    wheel_labels['rates'] = emotion_counts.values
# Pie chart, where the slices will be
# ordered and plotted counter-clockwise:
labels = wheel_labels['emotions']
sizes = wheel_labels['rates']
length = len(labels.unique())
# only "explode" the 2nd slice (i.e. 'Hogs')
explode = ([0.01 for i in range(length)])
fig1, ax1 = plt.subplots()
ax1.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%',
shadow=False, startangle=90)
# Equal aspect ratio ensures that pie is drawn as a circle.
ax1.axis('equal')
st.pyplot(fig1)
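# Illustrative sketch (not part of the original module): one way these helpers could be
# combined in a Streamlit page. It assumes the FastAPI backend used above is running
# locally; the user id below is hypothetical.
def _demo_user_dashboard(user_id=1):
    name, first_name, email = get_user_info(user_id)
    st.title("Journal de {} {}".format(first_name, name))
    st.write(email)
    st.image(get_image_path(user_id))
    entries = get_entries(user_id)
    if entries:
        display_entries(entries)
        display_pie_chart(entries)
    else:
        st.write("Aucune entrée pour le moment.")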
|
the-stack_106_26918 | # coding: utf-8
# Copyright (c) 2018-2019, Taku MURAKAMI. All rights reserved.
# Distributed under the terms of the BSD 3-clause License.
from setuptools import setup
from setuptools import find_packages
with open("README.md") as file:
readme = file.read()
with open('LICENSE') as file:
license = file.read()
setup(
name="pythroughput",
version="1.0.1",
description="Python module to perform high-throughput first-principles calculation in 'Xenonpy' package.",
long_description=readme,
author="Taku MURAKAMI",
author_email="[email protected]",
url="https://github.com/murakami17/pythroughput",
license=license,
packages=find_packages(exclude=("tests", "docs"))
)
|
the-stack_106_26919 | # --------------
#Importing header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Reading the file
data=pd.read_csv(path)
#Code starts here
#Creating a new variable to store the value counts
loan_status = data['Loan_Status'].value_counts()
print(loan_status)
#Plotting bar plot
plt.figure(figsize=[10,6])
# label the axes
plt.xlabel("Loan Status")
plt.ylabel("No of Loans")
# title the plot
plt.title("loans vs loan status ")
loan_status.plot(kind = 'bar')
# bar chart
plt.show()
# Step 2
#Plotting an unstacked bar plot
property_and_loan = data.groupby(['Property_Area' ,'Loan_Status']).size().unstack()
property_and_loan.plot(kind='bar', stacked=False, figsize=(15,10))
# Label X-axes and Y-axes
plt.xlabel('Property Area')
plt.ylabel('Loan Status')
# Rotate X-axes labels
plt.xticks(rotation=45)
# Display plot
plt.show()
# Step 3
#Plotting a stacked bar plot
education_and_loan = data.groupby(['Education' ,'Loan_Status']).size().unstack()
education_and_loan.plot(kind='bar', stacked=True, figsize=(15,10))
# Label X-axes and Y-axes
plt.xlabel('Education')
plt.ylabel('Loan Status')
# Rotate X-axes labels
plt.xticks(rotation=45)
# Display
#Changing the x-axis label
#Changing the y-axis label
#Rotating the ticks of X-axis
# Step 4
#Subsetting the dataframe based on 'Education' column
graduate = data[data['Education'] == 'Graduate']
#Subsetting the dataframe based on 'Education' column
not_graduate= data[data['Education'] == 'Not Graduate']
#Plotting density plot for 'Graduate'
graduate['LoanAmount'].plot(kind = "density", label = "Graduate")
not_graduate['LoanAmount'].plot(kind = "density", label = "Not Graduate")
#For automatic legend display
plt.legend()
# Step 5
#Setting up the subplots
fig ,(ax_1,ax_2,ax_3) = plt.subplots(3,1, figsize=(15,8))
#Plotting scatter plot
data.plot.scatter(x='ApplicantIncome', y='LoanAmount', ax=ax_1)
ax_1.set_title('Applicant Income')
plt.show()
#Setting the subplot axis title
data.plot.scatter(x='CoapplicantIncome', y='LoanAmount', ax=ax_2)
ax_2.set_title('Coapplicant Income')
plt.show()
data['TotalIncome'] = data['ApplicantIncome'] + data['CoapplicantIncome']
data.plot.scatter(x='TotalIncome', y='LoanAmount', ax=ax_3)
ax_3.set_title('Total Income')
plt.show()
#Plotting scatter plot
#Setting the subplot axis title
#Creating a new column 'TotalIncome'
#Plotting scatter plot
#Setting the subplot axis title
|
the-stack_106_26922 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utility for fetching a resource (e.g. a template) from a URL."""
from oslo_config import cfg
from oslo_log import log as logging
import requests
from requests import exceptions
from six.moves import urllib
from heat.common import exception
from heat.common.i18n import _
cfg.CONF.import_opt('max_template_size', 'heat.common.config')
LOG = logging.getLogger(__name__)
class URLFetchError(exception.Error, IOError):
pass
def get(url, allowed_schemes=('http', 'https')):
"""Get the data at the specified URL.
The URL must use the http: or https: schemes.
The file: scheme is also supported if you override
the allowed_schemes argument.
Raise an IOError if getting the data fails.
"""
LOG.info('Fetching data from %s', url)
components = urllib.parse.urlparse(url)
if components.scheme not in allowed_schemes:
raise URLFetchError(_('Invalid URL scheme %s') % components.scheme)
if components.scheme == 'file':
try:
return urllib.request.urlopen(url).read()
except urllib.error.URLError as uex:
raise URLFetchError(_('Failed to retrieve template: %s') % uex)
try:
resp = requests.get(url, stream=True)
resp.raise_for_status()
# We cannot use resp.text here because it would download the
# entire file, and a large enough file would bring down the
# engine. The 'Content-Length' header could be faked, so it's
        # necessary to download the content in chunks until
# max_template_size is reached. The chunk_size we use needs
# to balance CPU-intensive string concatenation with accuracy
# (eg. it's possible to fetch 1000 bytes greater than
# max_template_size with a chunk_size of 1000).
reader = resp.iter_content(chunk_size=1000)
result = b""
for chunk in reader:
result += chunk
if len(result) > cfg.CONF.max_template_size:
raise URLFetchError(_("Template exceeds maximum allowed size "
"(%s bytes)") %
cfg.CONF.max_template_size)
return result
except exceptions.RequestException as ex:
LOG.info('Failed to retrieve template: %s', ex)
raise URLFetchError(_('Failed to retrieve template from %s') % url)
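# Example usage (illustrative, not part of the original module): fetch a template over
# HTTPS, or from local disk by explicitly allowing the file: scheme. URLs are placeholders.
#
#   body = get('https://example.com/template.yaml')
#   local = get('file:///tmp/template.yaml',
#               allowed_schemes=('http', 'https', 'file'))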
|
the-stack_106_26930 | """
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import List, Optional, Text, Tuple
import tensorflow as tf
from examples.common.datasets import augment
# Calculated from the ImageNet training set
MEAN_RGB = (0.485 * 255, 0.456 * 255, 0.406 * 255)
STDDEV_RGB = (0.229 * 255, 0.224 * 255, 0.225 * 255)
IMAGE_SIZE = 224
CROP_PADDING = 32
def mean_image_subtraction(
image_bytes: tf.Tensor,
means: Tuple[float, ...],
num_channels: int = 3,
dtype: tf.dtypes.DType = tf.float32,
) -> tf.Tensor:
"""Subtracts the given means from each image channel.
For example:
means = [123.68, 116.779, 103.939]
image_bytes = mean_image_subtraction(image_bytes, means)
Note that the rank of `image` must be known.
Args:
image_bytes: a tensor of size [height, width, C].
means: a C-vector of values to subtract from each channel.
num_channels: number of color channels in the image that will be distorted.
dtype: the dtype to convert the images to. Set to `None` to skip conversion.
Returns:
the centered image.
Raises:
ValueError: If the rank of `image` is unknown, if `image` has a rank other
than three or if the number of channels in `image` doesn't match the
number of values in `means`.
"""
if image_bytes.get_shape().ndims != 3:
raise ValueError('Input must be of size [height, width, C>0]')
if len(means) != num_channels:
raise ValueError('len(means) must match the number of channels')
# We have a 1-D tensor of means; convert to 3-D.
# Note(b/130245863): we explicitly call `broadcast` instead of simply
# expanding dimensions for better performance.
means = tf.broadcast_to(means, tf.shape(image_bytes))
if dtype is not None:
means = tf.cast(means, dtype)
return image_bytes - means
def standardize_image(
image_bytes: tf.Tensor,
stddev: Tuple[float, ...],
num_channels: int = 3,
dtype: tf.dtypes.DType = tf.float32,
) -> tf.Tensor:
"""Divides the given stddev from each image channel.
For example:
stddev = [123.68, 116.779, 103.939]
image_bytes = standardize_image(image_bytes, stddev)
Note that the rank of `image` must be known.
Args:
image_bytes: a tensor of size [height, width, C].
stddev: a C-vector of values to divide from each channel.
num_channels: number of color channels in the image that will be distorted.
dtype: the dtype to convert the images to. Set to `None` to skip conversion.
Returns:
the centered image.
Raises:
ValueError: If the rank of `image` is unknown, if `image` has a rank other
than three or if the number of channels in `image` doesn't match the
number of values in `stddev`.
"""
if image_bytes.get_shape().ndims != 3:
raise ValueError('Input must be of size [height, width, C>0]')
if len(stddev) != num_channels:
raise ValueError('len(stddev) must match the number of channels')
# We have a 1-D tensor of stddev; convert to 3-D.
# Note(b/130245863): we explicitly call `broadcast` instead of simply
# expanding dimensions for better performance.
stddev = tf.broadcast_to(stddev, tf.shape(image_bytes))
if dtype is not None:
stddev = tf.cast(stddev, dtype)
return image_bytes / stddev
def normalize_images(features: tf.Tensor,
mean_rgb: Tuple[float, ...] = MEAN_RGB,
stddev_rgb: Tuple[float, ...] = STDDEV_RGB,
num_channels: int = 3,
dtype: tf.dtypes.DType = tf.float32,
data_format: Text = 'channels_last') -> tf.Tensor:
"""Normalizes the input image channels with the given mean and stddev.
Args:
features: `Tensor` representing decoded images in float format.
mean_rgb: the mean of the channels to subtract.
stddev_rgb: the stddev of the channels to divide.
num_channels: the number of channels in the input image tensor.
dtype: the dtype to convert the images to. Set to `None` to skip conversion.
data_format: the format of the input image tensor
['channels_first', 'channels_last'].
Returns:
A normalized image `Tensor`.
"""
# TODO(allencwang) - figure out how to use mean_image_subtraction and
# standardize_image on batches of images and replace the following.
if data_format == 'channels_first':
stats_shape = [num_channels, 1, 1]
else:
stats_shape = [1, 1, num_channels]
if dtype is not None:
features = tf.image.convert_image_dtype(features, dtype=dtype)
if mean_rgb is not None:
mean_rgb = tf.constant(mean_rgb,
shape=stats_shape,
dtype=features.dtype)
mean_rgb = tf.broadcast_to(mean_rgb, tf.shape(features))
features = features - mean_rgb
if stddev_rgb is not None:
stddev_rgb = tf.constant(stddev_rgb,
shape=stats_shape,
dtype=features.dtype)
stddev_rgb = tf.broadcast_to(stddev_rgb, tf.shape(features))
features = features / stddev_rgb
return features
def decode_and_center_crop(image_bytes: tf.Tensor,
image_size: int = IMAGE_SIZE,
crop_padding: int = CROP_PADDING) -> tf.Tensor:
"""Crops to center of image with padding then scales image_size.
Args:
image_bytes: `Tensor` representing an image binary of arbitrary size.
image_size: image height/width dimension.
crop_padding: the padding size to use when centering the crop.
Returns:
A decoded and cropped image `Tensor`.
"""
decoded = image_bytes.dtype != tf.string
shape = (tf.shape(image_bytes) if decoded
else tf.image.extract_jpeg_shape(image_bytes))
image_height = shape[0]
image_width = shape[1]
padded_center_crop_size = tf.cast(
((image_size / (image_size + crop_padding)) *
tf.cast(tf.minimum(image_height, image_width), tf.float32)),
tf.int32)
offset_height = ((image_height - padded_center_crop_size) + 1) // 2
offset_width = ((image_width - padded_center_crop_size) + 1) // 2
crop_window = tf.stack([offset_height, offset_width,
padded_center_crop_size, padded_center_crop_size])
if decoded:
image = tf.image.crop_to_bounding_box(
image_bytes,
offset_height=offset_height,
offset_width=offset_width,
target_height=padded_center_crop_size,
target_width=padded_center_crop_size)
else:
image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3)
image = resize_image(image_bytes=image,
height=image_size,
width=image_size)
return image
def decode_crop_and_flip(image_bytes: tf.Tensor) -> tf.Tensor:
"""Crops an image to a random part of the image, then randomly flips.
Args:
image_bytes: `Tensor` representing an image binary of arbitrary size.
Returns:
A decoded and cropped image `Tensor`.
"""
decoded = image_bytes.dtype != tf.string
bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
shape = (tf.shape(image_bytes) if decoded
else tf.image.extract_jpeg_shape(image_bytes))
sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
shape,
bounding_boxes=bbox,
min_object_covered=0.1,
aspect_ratio_range=[0.75, 1.33],
area_range=[0.05, 1.0],
max_attempts=100,
use_image_if_no_bounding_boxes=True)
bbox_begin, bbox_size, _ = sample_distorted_bounding_box
# Reassemble the bounding box in the format the crop op requires.
offset_height, offset_width, _ = tf.unstack(bbox_begin)
target_height, target_width, _ = tf.unstack(bbox_size)
crop_window = tf.stack([offset_height, offset_width,
target_height, target_width])
if decoded:
cropped = tf.image.crop_to_bounding_box(
image_bytes,
offset_height=offset_height,
offset_width=offset_width,
target_height=target_height,
target_width=target_width)
else:
cropped = tf.image.decode_and_crop_jpeg(image_bytes,
crop_window,
channels=3)
# Flip to add a little more random distortion in.
cropped = tf.image.random_flip_left_right(cropped)
return cropped
def resize_image(image_bytes: tf.Tensor,
height: int = IMAGE_SIZE,
width: int = IMAGE_SIZE) -> tf.Tensor:
"""Resizes an image to a given height and width.
Args:
image_bytes: `Tensor` representing an image binary of arbitrary size.
height: image height dimension.
width: image width dimension.
Returns:
A tensor containing the resized image.
"""
return tf.compat.v1.image.resize(
image_bytes, [height, width], method=tf.image.ResizeMethod.BILINEAR,
align_corners=False)
def preprocess_for_eval(
image_bytes: tf.Tensor,
image_size: int = IMAGE_SIZE,
num_channels: int = 3,
mean_subtract: bool = False,
standardize: bool = False,
dtype: tf.dtypes.DType = tf.float32,
preprocess_fn=None
) -> tf.Tensor:
"""Preprocesses the given image for evaluation.
Args:
image_bytes: `Tensor` representing an image binary of arbitrary size.
image_size: image height/width dimension.
num_channels: number of image input channels.
mean_subtract: whether or not to apply mean subtraction.
standardize: whether or not to apply standardization.
dtype: the dtype to convert the images to. Set to `None` to skip conversion.
preprocess_fn: function which applies value scaling preprocessing to image (tf.Tensor),
if preprocess_fn is specified, mean_subtract, standardize and dtype parameters will be ignored
Returns:
A preprocessed and normalized image `Tensor`.
"""
image = tf.image.decode_jpeg(image_bytes, channels=num_channels)
image = tf.image.central_crop(image, central_fraction=0.875)
image = tf.compat.v1.image.resize(
image, [image_size, image_size], method=tf.image.ResizeMethod.BILINEAR,
align_corners=False)
image.set_shape([image_size, image_size, num_channels])
image = image/255
return tf.image.convert_image_dtype(image, dtype=tf.float32)
def preprocess_for_train(image_bytes: tf.Tensor,
image_size: int = IMAGE_SIZE,
augmenter: Optional[augment.ImageAugment] = None,
mean_subtract: bool = False,
standardize: bool = False,
dtype: tf.dtypes.DType = tf.float32,
preprocess_fn=None) -> tf.Tensor:
"""Preprocesses the given image for training.
Args:
image_bytes: `Tensor` representing an image binary of
arbitrary size of dtype tf.uint8.
image_size: image height/width dimension.
augmenter: the image augmenter to apply.
mean_subtract: whether or not to apply mean subtraction.
standardize: whether or not to apply standardization.
dtype: the dtype to convert the images to. Set to `None` to skip conversion.
preprocess_fn: function which applies value scaling preprocessing to image (tf.Tensor),
if preprocess_fn is specified, augmenter, mean_subtract, standardize and dtype parameters will be ignored
Returns:
A preprocessed and normalized image `Tensor`.
"""
image = tf.image.decode_jpeg(image_bytes, channels=3)
image = tf.image.central_crop(image, central_fraction=0.875)
image = tf.image.random_flip_left_right(image)
image = tf.compat.v1.image.resize(
image, [image_size, image_size], method=tf.image.ResizeMethod.BILINEAR,
align_corners=False)
image.set_shape([image_size, image_size, 3])
image = image / 255
return tf.image.convert_image_dtype(image, dtype=tf.float32)
def get_preprocess_fn(model_name, dataset_name):
preprocess_fn_name_mapping = {
'imagenet2012': {
'InceptionV3': 'inception_v3',
'MobileNetV2': 'mobilenet_v2',
'ResNet50': 'resnet50',
'ResNet50V2': 'resnet_v2'
}
}
preprocess_fn = None
if model_name in preprocess_fn_name_mapping.get(dataset_name, []):
preprocess_fn_name = preprocess_fn_name_mapping[dataset_name][model_name]
preprocess_fn = tf.keras.applications.__dict__[preprocess_fn_name].preprocess_input
return preprocess_fn
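# Illustrative sketch (not part of the original module): resolve the Keras preprocessing
# function for a model/dataset pair and fall back to the local pipeline when none is
# registered. The names mirror the mapping above; `image_bytes` is assumed to be a JPEG string tensor.
#
#   preprocess_fn = get_preprocess_fn('ResNet50', 'imagenet2012')
#   image = preprocess_for_eval(image_bytes, image_size=IMAGE_SIZE,
#                               preprocess_fn=preprocess_fn)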
|
the-stack_106_26931 | from django.urls import path
from . import views
urlpatterns = [
path("", views.index, name="index"),
path("edit/<str:title>", views.edit, name="edit"),
path("create", views.create, name="create"),
path("search", views.search, name="search"),
path("random", views.randompage, name="random"),
path("<str:title>", views.entry, name="entry"),
] |
the-stack_106_26934 | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class AllowDbUserPrivilegeRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'x_language': 'str',
'instance_id': 'str',
'body': 'GrantRequest'
}
attribute_map = {
'x_language': 'X-Language',
'instance_id': 'instance_id',
'body': 'body'
}
def __init__(self, x_language=None, instance_id=None, body=None):
"""AllowDbUserPrivilegeRequest - a model defined in huaweicloud sdk"""
self._x_language = None
self._instance_id = None
self._body = None
self.discriminator = None
if x_language is not None:
self.x_language = x_language
self.instance_id = instance_id
if body is not None:
self.body = body
@property
def x_language(self):
"""Gets the x_language of this AllowDbUserPrivilegeRequest.
        Language
:return: The x_language of this AllowDbUserPrivilegeRequest.
:rtype: str
"""
return self._x_language
@x_language.setter
def x_language(self, x_language):
"""Sets the x_language of this AllowDbUserPrivilegeRequest.
        Language
:param x_language: The x_language of this AllowDbUserPrivilegeRequest.
:type: str
"""
self._x_language = x_language
@property
def instance_id(self):
"""Gets the instance_id of this AllowDbUserPrivilegeRequest.
        Instance ID.
:return: The instance_id of this AllowDbUserPrivilegeRequest.
:rtype: str
"""
return self._instance_id
@instance_id.setter
def instance_id(self, instance_id):
"""Sets the instance_id of this AllowDbUserPrivilegeRequest.
        Instance ID.
:param instance_id: The instance_id of this AllowDbUserPrivilegeRequest.
:type: str
"""
self._instance_id = instance_id
@property
def body(self):
"""Gets the body of this AllowDbUserPrivilegeRequest.
:return: The body of this AllowDbUserPrivilegeRequest.
:rtype: GrantRequest
"""
return self._body
@body.setter
def body(self, body):
"""Sets the body of this AllowDbUserPrivilegeRequest.
:param body: The body of this AllowDbUserPrivilegeRequest.
:type: GrantRequest
"""
self._body = body
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AllowDbUserPrivilegeRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
the-stack_106_26935 | q = int(input())
soma = 0
conta = input()   # 'S' -> print the sum of row q; anything else -> print the row average
for i in range(12):
for j in range(12):
valor = float(input())
if(i == q):
soma += valor
if(conta == 'S'):
print("%.1f" %soma)
else:
print("%.1f" %(soma/12.0)) |
the-stack_106_26936 | from dku_utils.access import _default_if_blank, _default_if_property_blank
import dataiku
from dataiku.core.intercom import backend_json_call
from dku_utils.access import _has_not_blank_property
import json, logging
def make_overrides(config, kube_config, kube_config_path):
# alter the spark configurations to put the cluster master and image repo in the properties
container_settings = {
'executionConfigsGenericOverrides': {
'kubeCtlContext': kube_config["current-context"], # has to exist, it's a config file we just built
'kubeConfigPath': kube_config_path, # the config is not merged into the main config file, so we need to pass the config file pth
'baseImage': _default_if_property_blank(config, "baseImage", None),
'repositoryURL': _default_if_property_blank(config, "repositoryURL", None)
}
}
return {'container':container_settings}
def get_cluster_from_dss_cluster(dss_cluster_id):
# get the public API client
client = dataiku.api_client()
# get the cluster object in DSS
found = False
for c in client.list_clusters():
if c['name'] == dss_cluster_id:
found = True
if not found:
raise Exception("DSS cluster %s doesn't exist" % dss_cluster_id)
dss_cluster = client.get_cluster(dss_cluster_id)
# get the settings in it
dss_cluster_settings = dss_cluster.get_settings()
dss_cluster_config = dss_cluster_settings.get_raw()['params']['config']
# resolve since we get the config with the raw preset setup
dss_cluster_config = backend_json_call('plugins/get-resolved-settings', data={'elementConfig':json.dumps(dss_cluster_config), 'elementType':dss_cluster_settings.get_raw()['type']})
logging.info("Resolved cluster config : %s" % json.dumps(dss_cluster_config))
cluster_data = dss_cluster_settings.get_plugin_data()
return cluster_data, dss_cluster_settings, dss_cluster_config
def get_cluster_generic_property(dss_cluster_settings, key, default_value=None):
props = dss_cluster_settings.settings['containerSettings']['executionConfigsGenericOverrides']['properties']
found_value = default_value
for prop in props:
if prop['key'] == key:
found_value = prop['value']
return found_value
def set_cluster_generic_property(dss_cluster_settings, key, value, replace_if_exists=False):
props = dss_cluster_settings.settings['containerSettings']['executionConfigsGenericOverrides']['properties']
found_prop = None
for prop in props:
if prop['key'] == key:
found_prop = prop
if found_prop is None:
props.append({'key':key, 'value':value})
dss_cluster_settings.save()
elif replace_if_exists:
found_prop['value'] = value
dss_cluster_settings.save()
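# Illustrative sketch (not part of the original module): a typical call sequence, with a
# hypothetical DSS cluster id and property key.
#
#   cluster_data, dss_cluster_settings, dss_cluster_config = get_cluster_from_dss_cluster('my-k8s-cluster')
#   set_cluster_generic_property(dss_cluster_settings, 'kubernetes.managed', 'true', replace_if_exists=True)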
|
the-stack_106_26937 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 17 15:29:18 2020
@author: mike_ubuntu
"""
import time
import numpy as np
from copy import deepcopy, copy
from functools import reduce
from moea_dd.src.moeadd_supplementary import fast_non_dominated_sorting,\
slow_non_dominated_sorting, NDL_update, Equality, Inequality, acute_angle
class moeadd_solution(object):
def __init__(self, x: np.ndarray, obj_funs: list):
        self.vals = x # genotype; for an individual this is the set of tokens plus the regularization coefficient
        self.obj_funs = obj_funs # criterion functions - each one maps the genotype to a number
self._obj_fun = None
self._domain = None
self.precomputed_value = False
self.precomputed_domain = False
@property
def obj_fun(self) -> np.ndarray:
if self.precomputed_value:
return self._obj_fun
else:
            # taken together this builds the float vector onto which the individual is mapped -
            # the multi-dimensional fitness, i.e. the phenotype; it is formed by
            # concatenating the values produced by each criterion
self._obj_fun = np.fromiter(map(lambda obj_fun: obj_fun(self.vals), self.obj_funs), dtype=float)
self.precomputed_value = True
return self._obj_fun
def get_domain(self, weights) -> int:
if self.precomputed_domain:
return self._domain
else:
self._domain = get_domain_idx(self, weights)
self.precomputed_domain = True
return self._domain
def __eq__(self, other):
if isinstance(other, type(self)):
return self.vals == other.vals
return False
def __call__(self):
return self.obj_fun
# def __hash__(self):
# raise NotImplementedError('The hash needs to be defined in the subclass')
def get_domain_idx(solution, weights) -> int:
if type(solution) == np.ndarray:
return np.fromiter(map(lambda x: acute_angle(x, solution), weights), dtype=float).argmin()
elif type(solution.obj_fun) == np.ndarray:
return np.fromiter(map(lambda x: acute_angle(x, solution.obj_fun), weights), dtype=float).argmin()
else:
raise ValueError('Can not detect the vector of objective function for individ')
def penalty_based_intersection(sol_obj, weight, ideal_obj, penalty_factor = 1) -> float:
d_1 = np.dot((sol_obj.obj_fun - ideal_obj), weight) / np.linalg.norm(weight)
d_2 = np.linalg.norm(sol_obj.obj_fun - (ideal_obj + d_1 * weight/np.linalg.norm(weight)))
return d_1 + penalty_factor * d_2
def population_to_sectors(population, weights): # expensive call (heavy memory/CPU use)
solution_selection = lambda weight_idx: [solution for solution in population if solution.get_domain(weights) == weight_idx]
return list(map(solution_selection, np.arange(len(weights))))
def clear_list_of_lists(inp_list) -> list:
return [elem for elem in inp_list if len(elem) > 0]
class pareto_levels(object):
def __init__(self, population, sorting_method = fast_non_dominated_sorting, update_method = NDL_update):
self._sorting_method = sorting_method
self.population = population
self._update_method = update_method
self.levels = self._sorting_method(self.population)
def sort(self):
self.levels = self._sorting_method(self.population)
def update(self, point):
self.levels = self._update_method(point, self.levels)
self.population.append(point)
    def delete_point(self, point): # TODO: review the deletion logic - potentially a bug
# print('deleting', point.vals)
new_levels = []
print('New pareto')
for level in self.levels:
print('New level processing')
# temp = deepcopy(level)
temp = []
for element in level:
if element is not point:
print('found point')
temp.append(element)
if not len(temp) == 0:
                new_levels.append(temp) # a point may belong to several levels
# print(point, point.vals, type(point), '\n')
# print('population vals:', [individ.vals for individ in self.population], '\n')
# print('population objects:', [individ for individ in self.population], '\n')
population_cleared = []
for elem in self.population:
if elem is not point:
population_cleared.append(elem)
if len(population_cleared) != sum([len(level) for level in new_levels]):
print('initial population', [solution.vals for solution in self.population],'\n')
print('cleared population', [solution.vals for solution in population_cleared],'\n')
print(point.vals)
raise Exception('Deleted something extra')
# new_ind = deepcopy(point)
# new_ind.vals.structure = []
# new_ind.vals.fitness = None
# new_ind.vals.change_all_fixes(False)
population_cleared.append(point)
self.levels = new_levels
self.population = population_cleared
# self.population.remove(point)
def locate_pareto_worst(levels, weights, best_obj, penalty_factor = 1.):
domain_solutions = population_to_sectors(levels.population, weights)
most_crowded_count = np.max([len(domain) for domain in domain_solutions]); crowded_domains = [domain_idx for domain_idx in np.arange(len(weights)) if
len(domain_solutions[domain_idx]) == most_crowded_count]
if len(crowded_domains) == 1:
most_crowded_domain = crowded_domains[0]
else:
PBI = lambda domain_idx: np.sum([penalty_based_intersection(sol_obj, weights[domain_idx], best_obj, penalty_factor) for sol_obj in domain_solutions[domain_idx]])
PBIS = np.fromiter(map(PBI, crowded_domains), dtype = float)
most_crowded_domain = crowded_domains[np.argmax(PBIS)]
worst_NDL_section = []
domain_solution_NDL_idxs = np.empty(most_crowded_count)
for solution_idx, solution in enumerate(domain_solutions[most_crowded_domain]):
domain_solution_NDL_idxs[solution_idx] = [level_idx for level_idx in np.arange(len(levels.levels))
if np.any([solution == level_solution for level_solution in levels.levels[level_idx]])][0]
max_level = np.max(domain_solution_NDL_idxs)
worst_NDL_section = [domain_solutions[most_crowded_domain][sol_idx] for sol_idx in np.arange(len(domain_solutions[most_crowded_domain]))
if domain_solution_NDL_idxs[sol_idx] == max_level]
PBIS = np.fromiter(map(lambda solution: penalty_based_intersection(solution, weights[most_crowded_domain], best_obj, penalty_factor), worst_NDL_section), dtype = float)
return worst_NDL_section[np.argmax(PBIS)]
class moeadd_optimizer(object):
'''
Solving multiobjective optimization problem (minimizing set of functions)
'''
def __init__(self, pop_constructor, weights_num, pop_size, optimized_functionals, solution_params, delta, neighbors_number,
NDS_method = fast_non_dominated_sorting, NDL_update = NDL_update):
population = []
for solution_idx in range(pop_size):
while True:
temp_solution = pop_constructor.create(solution_params)
# if not np.any([temp_solution == solution for solution in population]):
if temp_solution not in population:
population.append(temp_solution)
break
self.pareto_levels = pareto_levels(population, sorting_method=NDS_method, update_method=NDL_update)
self.opt_functionals = optimized_functionals
self.weights = []
weights_size = len(optimized_functionals) #np.empty((pop_size, len(optimized_functionals)))
for weights_idx in range(weights_num):
while True:
temp_weights = self.weights_generation(weights_size, delta)
if temp_weights not in self.weights:
self.weights.append(temp_weights)
break
self.weights = np.array(self.weights)
self.neighborhood_lists = []
for weights_idx in range(weights_num):
self.neighborhood_lists.append([elem_idx for elem_idx, _ in sorted(
list(zip(np.arange(weights_num), [np.linalg.norm(self.weights[weights_idx, :] - self.weights[weights_idx_inner, :]) for weights_idx_inner in np.arange(weights_num)])),
                key = lambda pair: pair[1])][:neighbors_number+1]) # the list slice defines the "proximity" region
self.best_obj = None
@staticmethod
def weights_generation(weights_num, delta) -> list:
weights = np.empty(weights_num)
assert 1./delta == round(1./delta) # check, if 1/delta is integer number
m = np.zeros(weights_num)
for weight_idx in np.arange(weights_num):
weights[weight_idx] = np.random.choice([div_idx * delta for div_idx in np.arange(1./delta + 1 - np.sum(m[:weight_idx + 1]))])
m[weight_idx] = weights[weight_idx]/delta
weights[-1] = 1 - np.sum(weights[:-1])
assert (weights[-1] <= 1 and weights[-1] >= 0)
        return list(weights) # TODO: rework - this is a workaround
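    # Illustrative note (not part of the original class): with weights_num = 3 and
    # delta = 0.25, weights_generation() returns a 3-element list whose entries are
    # multiples of 0.25 and sum to 1, e.g. [0.5, 0.25, 0.25] (the exact vector is random).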
def pass_best_objectives(self, *args) -> None:
assert len(args) == len(self.opt_functionals)
self.best_obj = np.empty(len(self.opt_functionals))
for arg_idx, arg in enumerate(args):
            self.best_obj[arg_idx] = arg if isinstance(arg, int) or isinstance(arg, float) else arg() # TODO: rework to handle more types (more elif branches)
def set_evolutionary(self, operator) -> None:
        # TODO: add the ability to test/validate the operator
self.evolutionary_operator = operator
@staticmethod
def mating_selection(weight_idx, weights, neighborhood_vectors, population, neighborhood_selector, neighborhood_selector_params, delta) -> list:
        # parents_number = int(len(population)/4.) # an odd simplification
parents_number = 4
        if np.random.uniform() < delta: # restricted (neighborhood-based) selection
selected_regions_idxs = neighborhood_selector(neighborhood_vectors[weight_idx], *neighborhood_selector_params)
candidate_solution_domains = list(map(lambda x: x.get_domain(weights), [candidate for candidate in population]))
solution_mask = [(population[solution_idx].get_domain(weights) in selected_regions_idxs) for solution_idx in candidate_solution_domains]
available_in_proximity = sum(solution_mask)
parent_idxs = np.random.choice([idx for idx in np.arange(len(population)) if solution_mask[idx]],
size = min(available_in_proximity, parents_number),
replace = False)
if available_in_proximity < parents_number:
parent_idxs_additional = np.random.choice([idx for idx in np.arange(len(population)) if not solution_mask[idx]],
size = parents_number - available_in_proximity,
replace = False)
parent_idxs_temp = np.empty(shape = parent_idxs.size + parent_idxs_additional.size)
parent_idxs_temp[:parent_idxs.size] = parent_idxs; parent_idxs_temp[parent_idxs.size:] = parent_idxs_additional
parent_idxs = parent_idxs_temp
        else: # otherwise simply select from the whole population
parent_idxs = np.random.choice(np.arange(len(population)), size=parents_number, replace=False)
return parent_idxs
def update_population(self, offspring, PBI_penalty):
'''
Update population to get the pareto-nondomiated levels with the worst element removed.
Here, "worst" means the individ with highest PBI value (penalty-based boundary intersection)
'''
# domain = get_domain_idx(offspring, self.weights)
self.pareto_levels.update(offspring) #levels_updated = NDL_update(offspring, levels)
if len(self.pareto_levels.levels) == 1:
worst_solution = locate_pareto_worst(self.pareto_levels, self.weights, self.best_obj, PBI_penalty)
else:
if self.pareto_levels.levels[len(self.pareto_levels.levels) - 1] == 1:
domain_solutions = population_to_sectors(self.pareto_levels.population, self.weights)
reference_solution = self.pareto_levels.levels[len(self.pareto_levels.levels) - 1][0]
reference_solution_domain = [idx for idx in np.arange(domain_solutions) if reference_solution in domain_solutions[idx]]
if len(domain_solutions[reference_solution_domain] == 1):
worst_solution = locate_pareto_worst(self.pareto_levels.levels, self.weights, self.best_obj, PBI_penalty)
else:
worst_solution = reference_solution
else:
last_level_by_domains = population_to_sectors(self.pareto_levels.levels[len(self.pareto_levels.levels)-1], self.weights)
most_crowded_count = np.max([len(domain) for domain in last_level_by_domains]);
crowded_domains = [domain_idx for domain_idx in np.arange(len(self.weights)) if len(last_level_by_domains[domain_idx]) == most_crowded_count]
if len(crowded_domains) == 1:
most_crowded_domain = crowded_domains[0]
else:
PBI = lambda domain_idx: np.sum([penalty_based_intersection(sol_obj, self.weights[domain_idx], self.best_obj, PBI_penalty)
for sol_obj in last_level_by_domains[domain_idx]])
PBIS = np.fromiter(map(PBI, crowded_domains), dtype = float)
most_crowded_domain = crowded_domains[np.argmax(PBIS)]
if len(last_level_by_domains[most_crowded_domain]) == 1:
worst_solution = locate_pareto_worst(self.pareto_levels, self.weights, self.best_obj, PBI_penalty)
else:
PBIS = np.fromiter(map(lambda solution: penalty_based_intersection(solution, self.weights[most_crowded_domain], self.best_obj, PBI_penalty),
last_level_by_domains[most_crowded_domain]), dtype = float)
worst_solution = last_level_by_domains[most_crowded_domain][np.argmax(PBIS)]
self.pareto_levels.delete_point(worst_solution)
def optimize(self, neighborhood_selector, delta, neighborhood_selector_params, epochs, PBI_penalty):
assert not type(self.best_obj) == type(None)
for epoch_idx in np.arange(epochs):
for weight_idx in np.arange(len(self.weights)):
print('\n\n\n')
print(epoch_idx, weight_idx)
print('\n\n\n')
# time.sleep(2)
parent_idxs = self.mating_selection(weight_idx, self.weights, self.neighborhood_lists, self.pareto_levels.population,
neighborhood_selector, neighborhood_selector_params, delta)
                offsprings = self.evolutionary_operator.crossover([self.pareto_levels.population[int(idx)] for idx in parent_idxs]) # crossover should be exposed as a separate step of the evolutionary operator object
# try:
for offspring_idx, offspring in enumerate(offsprings):
while True:
temp_offspring = self.evolutionary_operator.mutation(offspring)
if not np.any([temp_offspring == solution for solution in self.pareto_levels.population]):
# if temp_offspring not in self.pareto_levels.population:
break
self.update_population(temp_offspring, PBI_penalty)
# except TypeError:
# while True:
# temp_offspring = self.evolutionary_operator.mutation(offsprings)
# if not np.any([temp_offspring == solution for solution in self.pareto_levels.population]):
# break
# self.update_population(temp_offspring, PBI_penalty)
if len(self.pareto_levels.levels) == 1:
break
class moeadd_optimizer_constrained(moeadd_optimizer):
def set_constraints(self, *args) -> None:
self.constraints = args
def constaint_violation(self, solution) -> float:
summ = 0
x = solution.vals
for constraint in self.constraints:
summ += constraint(x)
return summ
# return np.sum(np.fromiter(map(lambda constr: constr(individ.vals), self.constraints), dtype = float))
def tournament_selection(self, candidate_1, candidate_2):
if self.constaint_violation(candidate_1) < self.constaint_violation(candidate_2):
return candidate_1
elif self.constaint_violation(candidate_1) > self.constaint_violation(candidate_2):
return candidate_2
else:
return np.random.choice((candidate_1, candidate_2))
def update_population(self, offspring, PBI_penalty):
self.pareto_levels.update(offspring)
cv_values = np.zeros(len(self.pareto_levels.population))
for sol_idx, solution in enumerate(self.pareto_levels.population):
cv_val = self.constaint_violation(solution)
if cv_val > 0:
cv_values[sol_idx] = cv_val
if sum(cv_values) == 0:
if len(self.pareto_levels.levels) == 1:
worst_solution = locate_pareto_worst(self.pareto_levels, self.weights, self.best_obj, PBI_penalty)
else:
if self.pareto_levels.levels[len(self.pareto_levels.levels) - 1] == 1:
domain_solutions = population_to_sectors(self.pareto_levels.population, self.weights)
reference_solution = self.pareto_levels.levels[len(self.pareto_levels.levels) - 1][0]
reference_solution_domain = [idx for idx in np.arange(domain_solutions) if reference_solution in domain_solutions[idx]]
if len(domain_solutions[reference_solution_domain] == 1):
worst_solution = locate_pareto_worst(self.pareto_levels.levels, self.weights, self.best_obj, PBI_penalty)
else:
worst_solution = reference_solution
else:
last_level_by_domains = population_to_sectors(self.pareto_levels.levels[len(self.pareto_levels.levels)-1], self.weights)
most_crowded_count = np.max([len(domain) for domain in last_level_by_domains]);
crowded_domains = [domain_idx for domain_idx in np.arange(len(self.weights)) if len(last_level_by_domains[domain_idx]) == most_crowded_count]
if len(crowded_domains) == 1:
most_crowded_domain = crowded_domains[0]
else:
PBI = lambda domain_idx: np.sum([penalty_based_intersection(sol_obj, self.weights[domain_idx], self.best_obj, PBI_penalty)
for sol_obj in last_level_by_domains[domain_idx]])
PBIS = np.fromiter(map(PBI, crowded_domains), dtype = float)
most_crowded_domain = crowded_domains[np.argmax(PBIS)]
if len(last_level_by_domains[most_crowded_domain]) == 1:
worst_solution = locate_pareto_worst(self.pareto_levels, self.weights, self.best_obj, PBI_penalty)
else:
# print('the most crowded domain', most_crowded_domain)
PBIS = np.fromiter(map(lambda solution: penalty_based_intersection(solution, self.weights[most_crowded_domain], self.best_obj, PBI_penalty),
last_level_by_domains[most_crowded_domain]), dtype = float)
# print('PBIS', PBIS, last_level_by_domains)
worst_solution = last_level_by_domains[most_crowded_domain][np.argmax(PBIS)]
else:
infeasible = [solution for solution, _ in sorted(list(zip(self.pareto_levels.population, cv_values)), key = lambda pair: pair[1])]
infeasible.reverse()
# print(np.nonzero(cv_values))
infeasible = infeasible[:np.nonzero(cv_values)[0].size]
deleted = False
domain_solutions = population_to_sectors(self.pareto_levels.population, self.weights)
for infeasable_element in infeasible:
domain_idx = [domain_idx for domain_idx, domain in enumerate(domain_solutions) if infeasable_element in domain][0]
if len(domain_solutions[domain_idx]) > 1:
deleted = True
worst_solution = infeasable_element
break
if not deleted:
worst_solution = infeasible[0]
self.pareto_levels.delete_point(worst_solution)
def optimize(self, neighborhood_selector, delta, neighborhood_selector_params, epochs, PBI_penalty):
assert not type(self.best_obj) == type(None)
self.train_hist = []
for epoch_idx in np.arange(epochs):
for weight_idx in np.arange(len(self.weights)):
print(epoch_idx, weight_idx)
obj_fun = np.array([solution.obj_fun for solution in self.pareto_levels.population])
self.train_hist.append(np.mean(obj_fun, axis=0))
parent_idxs = self.mating_selection(weight_idx, self.weights, self.neighborhood_lists, self.pareto_levels.population,
neighborhood_selector, neighborhood_selector_params, delta)
if len(parent_idxs) % 2:
parent_idxs = parent_idxs[:-1]
np.random.shuffle(parent_idxs)
parents_selected = [self.tournament_selection(self.pareto_levels.population[int(parent_idxs[2*p_metaidx])],
self.pareto_levels.population[int(parent_idxs[2*p_metaidx+1])]) for
p_metaidx in np.arange(int(len(parent_idxs)/2.))]
                offsprings = self.evolutionary_operator.crossover(parents_selected) # crossover should be exposed as a separate step of the evolutionary operator object
try:
for offspring_idx, offspring in enumerate(offsprings):
while True:
temp_offspring = self.evolutionary_operator.mutation(offspring)
if not np.any([temp_offspring == solution for solution in self.pareto_levels.population]):
break
self.update_population(temp_offspring, PBI_penalty)
except TypeError:
while True:
temp_offspring = self.evolutionary_operator.mutation(offsprings)
if not np.any([temp_offspring == solution for solution in self.pareto_levels.population]):
break
self.update_population(temp_offspring, PBI_penalty)
if len(self.pareto_levels.levels) == 1:
break |
the-stack_106_26938 | # helper functions for shuffling bits around various formats
from __future__ import absolute_import, division
import binascii
import hashlib
import struct
import bitstring
__b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
__b58index = { k: n for n, k in enumerate(__b58chars) }
def base58_decode(b58):
'''Take a base58 encoded string and return the represented value as an integer.'''
value = 0
for c in b58:
value = value * 58 + __b58index[c]
return value
def base58_encode(value):
'''Take an integer and return the shortest base58 encoded string representation.'''
b58 = ''
while value > 0:
b58 = __b58chars[value % 58] + b58
value = value // 58
return b58
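# Illustrative note (not part of the original module): for any positive integer n,
# base58_decode(base58_encode(n)) == n. base58_encode(0) returns the empty string, so
# leading zero bytes are not preserved by this pair on their own.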
def _get_5bit_checksum(value):
'''Take an unsigned 64-bit integer and return an unsigned 5-bit checksum.
The checksum is calculated by running SHA256 on the integer and extracting
the five leading bits of the resulting hash.'''
# Convert the number to an 8 byte string and run SHA256
checksumhash = hashlib.sha256(struct.pack('<Q', value)).digest()
# Extract the five most significant bits of the first byte
return ord(checksumhash[0:1]) >> 3
def block7_encode(value):
'''Take an integer < 2^36 (36 bits worth of data), add a 5 bit checksum, and
return a 7 character base58 encoded string (41 bits worth of data).
The checksum is used purely to guard against accidental mistyping of the
code block. It is not relevant from a cryptographical perspective.
'''
# The checksum is calculated by treating the passed integer as a 64-bit value,
# running SHA256 and extracting the five leading bits of the resulting hash.
checksum = _get_5bit_checksum(value)
# The 5 checksum bits are appended (as LSB) to the 36 data bits. This results
# in a 41 bit value, which fits the 7 character base 58 block nearly exactly:
# 2^41 = 2 199 023 255 552 < 2 207 984 167 552 = 58^7
block7 = base58_encode((value << 5) + checksum)
# blocks shorter than 7 characters (= values < 58^6 / 32 = 1 189 646 642) are
# prefixed with leading '1's to reach 7 characters.
block7 = '1' * (7 - len(block7)) + block7
return block7
def block7_decode(block7):
'''Take a 7 character base58 encoded string (41 bits), separate into data
and checksum, verify said checksum, and return a dictionary containing
an unsigned integer representation of the data portion, and a boolean
indicating whether the checksum was valid or not.
'''
# Decode and split numeric value into data and checksum
value = base58_decode(block7)
checksum = value & 0b00011111
value = value >> 5
# Calculate the expected checksum for the data portion, see block7_encode().
expchecksum = _get_5bit_checksum(value)
return { 'value': value, \
'valid': checksum == expchecksum }
def block7_split(data):
'''Take a byte string of a multiple of 36 bits (which, as 36 is not divisible
by 8, really means 72 bits or 9 bytes), and split it into encoded block7
strings.
'''
assert (len(data) * 8) % 36 == 0, \
"Function expects a multiple of 36 bits. %d bits provided" % (len(data) * 8)
return [ block7_encode(block.uint) \
for block in bitstring.BitArray(bytes=data).cut(36) ]
def block7_merge(data):
'''Take a list of block7 strings and merge them back into a byte string.
Returns a dictionary containing the byte string and a field indicating
the validity of all passed block7 strings.
'''
key = bitstring.BitArray()
valid = True
for block7 in data:
block = block7_decode(block7)
valid &= block['valid']
key += bitstring.BitArray(uint=block['value'], length=36)
return { 'key': key.bytes, 'valid': valid }
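# Illustrative sketch (not part of the original module): block7_split / block7_merge
# round-trip a 9-byte (72-bit) secret as two 7-character base58 blocks.
#
#   blocks = block7_split(b'\x00' * 9)   # two 7-character strings, each 36 data bits + 5 checksum bits
#   merged = block7_merge(blocks)
#   assert merged['key'] == b'\x00' * 9 and merged['valid']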
def crc8(data):
'''Generate an 8 bit non-cryptographical checksum for any string.
This is used only for direct user feedback to avoid input errors.
'''
return "%02x" % (binascii.crc32(bytearray(data, 'ascii')) & 0xff)
|
the-stack_106_26939 | import ast
import errno
import glob
import importlib
import os
import py_compile
import stat
import sys
import textwrap
import zipfile
from functools import partial
import py
import _pytest._code
import pytest
from _pytest.assertion import util
from _pytest.assertion.rewrite import _get_assertion_exprs
from _pytest.assertion.rewrite import AssertionRewritingHook
from _pytest.assertion.rewrite import get_cache_dir
from _pytest.assertion.rewrite import PYC_TAIL
from _pytest.assertion.rewrite import PYTEST_TAG
from _pytest.assertion.rewrite import rewrite_asserts
from _pytest.config import ExitCode
from _pytest.pathlib import Path
def setup_module(mod):
mod._old_reprcompare = util._reprcompare
_pytest._code._reprcompare = None
def teardown_module(mod):
util._reprcompare = mod._old_reprcompare
del mod._old_reprcompare
def rewrite(src):
tree = ast.parse(src)
rewrite_asserts(tree, src.encode())
return tree
def getmsg(f, extra_ns=None, must_pass=False):
"""Rewrite the assertions in f, run it, and get the failure message."""
src = "\n".join(_pytest._code.Code(f).source().lines)
mod = rewrite(src)
code = compile(mod, "<test>", "exec")
ns = {}
if extra_ns is not None:
ns.update(extra_ns)
exec(code, ns)
func = ns[f.__name__]
try:
func()
except AssertionError:
if must_pass:
pytest.fail("shouldn't have raised")
s = str(sys.exc_info()[1])
if not s.startswith("assert"):
return "AssertionError: " + s
return s
else:
if not must_pass:
pytest.fail("function didn't raise at all")
class TestAssertionRewrite:
def test_place_initial_imports(self):
s = """'Doc string'\nother = stuff"""
m = rewrite(s)
assert isinstance(m.body[0], ast.Expr)
for imp in m.body[1:3]:
assert isinstance(imp, ast.Import)
assert imp.lineno == 2
assert imp.col_offset == 0
assert isinstance(m.body[3], ast.Assign)
s = """from __future__ import division\nother_stuff"""
m = rewrite(s)
assert isinstance(m.body[0], ast.ImportFrom)
for imp in m.body[1:3]:
assert isinstance(imp, ast.Import)
assert imp.lineno == 2
assert imp.col_offset == 0
assert isinstance(m.body[3], ast.Expr)
s = """'doc string'\nfrom __future__ import division"""
m = rewrite(s)
assert isinstance(m.body[0], ast.Expr)
assert isinstance(m.body[1], ast.ImportFrom)
for imp in m.body[2:4]:
assert isinstance(imp, ast.Import)
assert imp.lineno == 2
assert imp.col_offset == 0
s = """'doc string'\nfrom __future__ import division\nother"""
m = rewrite(s)
assert isinstance(m.body[0], ast.Expr)
assert isinstance(m.body[1], ast.ImportFrom)
for imp in m.body[2:4]:
assert isinstance(imp, ast.Import)
assert imp.lineno == 3
assert imp.col_offset == 0
assert isinstance(m.body[4], ast.Expr)
s = """from . import relative\nother_stuff"""
m = rewrite(s)
for imp in m.body[:2]:
assert isinstance(imp, ast.Import)
assert imp.lineno == 1
assert imp.col_offset == 0
assert isinstance(m.body[3], ast.Expr)
def test_dont_rewrite(self):
s = """'PYTEST_DONT_REWRITE'\nassert 14"""
m = rewrite(s)
assert len(m.body) == 2
assert m.body[1].msg is None
def test_dont_rewrite_plugin(self, testdir):
contents = {
"conftest.py": "pytest_plugins = 'plugin'; import plugin",
"plugin.py": "'PYTEST_DONT_REWRITE'",
"test_foo.py": "def test_foo(): pass",
}
testdir.makepyfile(**contents)
result = testdir.runpytest_subprocess()
assert "warning" not in "".join(result.outlines)
def test_rewrites_plugin_as_a_package(self, testdir):
pkgdir = testdir.mkpydir("plugin")
pkgdir.join("__init__.py").write(
"import pytest\n"
"@pytest.fixture\n"
"def special_asserter():\n"
" def special_assert(x, y):\n"
" assert x == y\n"
" return special_assert\n"
)
testdir.makeconftest('pytest_plugins = ["plugin"]')
testdir.makepyfile("def test(special_asserter): special_asserter(1, 2)\n")
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*assert 1 == 2*"])
def test_honors_pep_235(self, testdir, monkeypatch):
# note: couldn't make it fail on macos with a single `sys.path` entry
# note: these modules are named `test_*` to trigger rewriting
testdir.tmpdir.join("test_y.py").write("x = 1")
xdir = testdir.tmpdir.join("x").ensure_dir()
xdir.join("test_Y").ensure_dir().join("__init__.py").write("x = 2")
testdir.makepyfile(
"import test_y\n"
"import test_Y\n"
"def test():\n"
" assert test_y.x == 1\n"
" assert test_Y.x == 2\n"
)
monkeypatch.syspath_prepend(xdir)
testdir.runpytest().assert_outcomes(passed=1)
def test_name(self, request):
def f():
assert False
assert getmsg(f) == "assert False"
def f():
f = False
assert f
assert getmsg(f) == "assert False"
def f():
assert a_global # noqa
assert getmsg(f, {"a_global": False}) == "assert False"
def f():
assert sys == 42
verbose = request.config.getoption("verbose")
msg = getmsg(f, {"sys": sys})
if verbose > 0:
assert msg == (
"assert <module 'sys' (built-in)> == 42\n"
" +<module 'sys' (built-in)>\n"
" -42"
)
else:
assert msg == "assert sys == 42"
def f():
assert cls == 42 # noqa: F821
class X:
pass
msg = getmsg(f, {"cls": X}).splitlines()
if verbose > 1:
assert msg == ["assert {!r} == 42".format(X), " +{!r}".format(X), " -42"]
elif verbose > 0:
assert msg == [
"assert <class 'test_...e.<locals>.X'> == 42",
" +{!r}".format(X),
" -42",
]
else:
assert msg == ["assert cls == 42"]
def test_assertrepr_compare_same_width(self, request):
"""Should use same width/truncation with same initial width."""
def f():
assert "1234567890" * 5 + "A" == "1234567890" * 5 + "B"
msg = getmsg(f).splitlines()[0]
if request.config.getoption("verbose") > 1:
assert msg == (
"assert '12345678901234567890123456789012345678901234567890A' "
"== '12345678901234567890123456789012345678901234567890B'"
)
else:
assert msg == (
"assert '123456789012...901234567890A' "
"== '123456789012...901234567890B'"
)
def test_dont_rewrite_if_hasattr_fails(self, request):
class Y:
""" A class whos getattr fails, but not with `AttributeError` """
def __getattr__(self, attribute_name):
raise KeyError()
def __repr__(self):
return "Y"
def __init__(self):
self.foo = 3
def f():
assert cls().foo == 2 # noqa
# XXX: looks like the "where" should also be there in verbose mode?!
message = getmsg(f, {"cls": Y}).splitlines()
if request.config.getoption("verbose") > 0:
assert message == ["assert 3 == 2", " +3", " -2"]
else:
assert message == [
"assert 3 == 2",
" + where 3 = Y.foo",
" + where Y = cls()",
]
def test_assert_already_has_message(self):
def f():
assert False, "something bad!"
assert getmsg(f) == "AssertionError: something bad!\nassert False"
def test_assertion_message(self, testdir):
testdir.makepyfile(
"""
def test_foo():
assert 1 == 2, "The failure message"
"""
)
result = testdir.runpytest()
assert result.ret == 1
result.stdout.fnmatch_lines(
["*AssertionError*The failure message*", "*assert 1 == 2*"]
)
def test_assertion_message_multiline(self, testdir):
testdir.makepyfile(
"""
def test_foo():
assert 1 == 2, "A multiline\\nfailure message"
"""
)
result = testdir.runpytest()
assert result.ret == 1
result.stdout.fnmatch_lines(
["*AssertionError*A multiline*", "*failure message*", "*assert 1 == 2*"]
)
def test_assertion_message_tuple(self, testdir):
testdir.makepyfile(
"""
def test_foo():
assert 1 == 2, (1, 2)
"""
)
result = testdir.runpytest()
assert result.ret == 1
result.stdout.fnmatch_lines(
["*AssertionError*%s*" % repr((1, 2)), "*assert 1 == 2*"]
)
def test_assertion_message_expr(self, testdir):
testdir.makepyfile(
"""
def test_foo():
assert 1 == 2, 1 + 2
"""
)
result = testdir.runpytest()
assert result.ret == 1
result.stdout.fnmatch_lines(["*AssertionError*3*", "*assert 1 == 2*"])
def test_assertion_message_escape(self, testdir):
testdir.makepyfile(
"""
def test_foo():
assert 1 == 2, 'To be escaped: %'
"""
)
result = testdir.runpytest()
assert result.ret == 1
result.stdout.fnmatch_lines(
["*AssertionError: To be escaped: %", "*assert 1 == 2"]
)
def test_assertion_messages_bytes(self, testdir):
testdir.makepyfile("def test_bytes_assertion():\n assert False, b'ohai!'\n")
result = testdir.runpytest()
assert result.ret == 1
result.stdout.fnmatch_lines(["*AssertionError: b'ohai!'", "*assert False"])
def test_boolop(self):
def f():
f = g = False
assert f and g
assert getmsg(f) == "assert (False)"
def f():
f = True
g = False
assert f and g
assert getmsg(f) == "assert (True and False)"
def f():
f = False
g = True
assert f and g
assert getmsg(f) == "assert (False)"
def f():
f = g = False
assert f or g
assert getmsg(f) == "assert (False or False)"
def f():
f = g = False
assert not f and not g
getmsg(f, must_pass=True)
def x():
return False
def f():
assert x() and x()
assert (
getmsg(f, {"x": x})
== """assert (False)
+ where False = x()"""
)
def f():
assert False or x()
assert (
getmsg(f, {"x": x})
== """assert (False or False)
+ where False = x()"""
)
def f():
assert 1 in {} and 2 in {}
assert getmsg(f) == "assert (1 in {})"
def f():
x = 1
y = 2
assert x in {1: None} and y in {}
assert getmsg(f) == "assert (1 in {1: None} and 2 in {})"
def f():
f = True
g = False
assert f or g
getmsg(f, must_pass=True)
def f():
f = g = h = lambda: True
assert f() and g() and h()
getmsg(f, must_pass=True)
def test_short_circuit_evaluation(self):
def f():
assert True or explode # noqa
getmsg(f, must_pass=True)
def f():
x = 1
assert x == 1 or x == 2
getmsg(f, must_pass=True)
def test_unary_op(self):
def f():
x = True
assert not x
assert getmsg(f) == "assert not True"
def f():
x = 0
assert ~x + 1
assert getmsg(f) == "assert (~0 + 1)"
def f():
x = 3
assert -x + x
assert getmsg(f) == "assert (-3 + 3)"
def f():
x = 0
assert +x + x
assert getmsg(f) == "assert (+0 + 0)"
def test_binary_op(self):
def f():
x = 1
y = -1
assert x + y
assert getmsg(f) == "assert (1 + -1)"
def f():
assert not 5 % 4
assert getmsg(f) == "assert not (5 % 4)"
def test_boolop_percent(self):
def f():
assert 3 % 2 and False
assert getmsg(f) == "assert ((3 % 2) and False)"
def f():
assert False or 4 % 2
assert getmsg(f) == "assert (False or (4 % 2))"
def test_at_operator_issue1290(self, testdir):
testdir.makepyfile(
"""
class Matrix(object):
def __init__(self, num):
self.num = num
def __matmul__(self, other):
return self.num * other.num
def test_multmat_operator():
assert Matrix(2) @ Matrix(3) == 6"""
)
testdir.runpytest().assert_outcomes(passed=1)
def test_starred_with_side_effect(self, testdir):
"""See #4412"""
testdir.makepyfile(
"""\
def test():
f = lambda x: x
x = iter([1, 2, 3])
assert 2 * next(x) == f(*[next(x)])
"""
)
testdir.runpytest().assert_outcomes(passed=1)
def test_call(self):
def g(a=42, *args, **kwargs):
return False
ns = {"g": g}
def f():
assert g()
assert (
getmsg(f, ns)
== """assert False
+ where False = g()"""
)
def f():
assert g(1)
assert (
getmsg(f, ns)
== """assert False
+ where False = g(1)"""
)
def f():
assert g(1, 2)
assert (
getmsg(f, ns)
== """assert False
+ where False = g(1, 2)"""
)
def f():
assert g(1, g=42)
assert (
getmsg(f, ns)
== """assert False
+ where False = g(1, g=42)"""
)
def f():
assert g(1, 3, g=23)
assert (
getmsg(f, ns)
== """assert False
+ where False = g(1, 3, g=23)"""
)
def f():
seq = [1, 2, 3]
assert g(*seq)
assert (
getmsg(f, ns)
== """assert False
+ where False = g(*[1, 2, 3])"""
)
def f():
x = "a"
assert g(**{x: 2})
assert (
getmsg(f, ns)
== """assert False
+ where False = g(**{'a': 2})"""
)
def test_attribute(self):
class X:
g = 3
ns = {"x": X}
def f():
assert not x.g # noqa
assert (
getmsg(f, ns)
== """assert not 3
+ where 3 = x.g"""
)
def f():
x.a = False # noqa
assert x.a # noqa
assert (
getmsg(f, ns)
== """assert False
+ where False = x.a"""
)
def test_comparisons(self):
def f():
a, b = range(2)
assert b < a
assert getmsg(f) == """assert 1 < 0"""
def f():
a, b, c = range(3)
assert a > b > c
assert getmsg(f) == """assert 0 > 1"""
def f():
a, b, c = range(3)
assert a < b > c
assert getmsg(f) == """assert 1 > 2"""
def f():
a, b, c = range(3)
assert a < b <= c
getmsg(f, must_pass=True)
def f():
a, b, c = range(3)
assert a < b
assert b < c
getmsg(f, must_pass=True)
def test_len(self, request):
def f():
values = list(range(10))
assert len(values) == 11
msg = getmsg(f)
if request.config.getoption("verbose") > 0:
assert msg == "assert 10 == 11\n +10\n -11"
else:
assert msg == "assert 10 == 11\n + where 10 = len([0, 1, 2, 3, 4, 5, ...])"
def test_custom_reprcompare(self, monkeypatch):
def my_reprcompare(op, left, right):
return "42"
monkeypatch.setattr(util, "_reprcompare", my_reprcompare)
def f():
assert 42 < 3
assert getmsg(f) == "assert 42"
def my_reprcompare(op, left, right):
return "{} {} {}".format(left, op, right)
monkeypatch.setattr(util, "_reprcompare", my_reprcompare)
def f():
assert 1 < 3 < 5 <= 4 < 7
assert getmsg(f) == "assert 5 <= 4"
def test_assert_raising_nonzero_in_comparison(self):
def f():
class A:
def __nonzero__(self):
raise ValueError(42)
def __lt__(self, other):
return A()
def __repr__(self):
return "<MY42 object>"
def myany(x):
return False
assert myany(A() < 0)
assert "<MY42 object> < 0" in getmsg(f)
def test_formatchar(self):
def f():
assert "%test" == "test"
assert getmsg(f).startswith("assert '%test' == 'test'")
def test_custom_repr(self, request):
def f():
class Foo:
a = 1
def __repr__(self):
return "\n{ \n~ \n}"
f = Foo()
assert 0 == f.a
lines = util._format_lines([getmsg(f)])
if request.config.getoption("verbose") > 0:
assert lines == ["assert 0 == 1\n +0\n -1"]
else:
assert lines == ["assert 0 == 1\n + where 1 = \\n{ \\n~ \\n}.a"]
def test_custom_repr_non_ascii(self):
def f():
class A:
name = "ä"
def __repr__(self):
return self.name.encode("UTF-8") # only legal in python2
a = A()
assert not a.name
msg = getmsg(f)
assert "UnicodeDecodeError" not in msg
assert "UnicodeEncodeError" not in msg
class TestRewriteOnImport:
def test_pycache_is_a_file(self, testdir):
testdir.tmpdir.join("__pycache__").write("Hello")
testdir.makepyfile(
"""
def test_rewritten():
assert "@py_builtins" in globals()"""
)
assert testdir.runpytest().ret == 0
def test_pycache_is_readonly(self, testdir):
cache = testdir.tmpdir.mkdir("__pycache__")
old_mode = cache.stat().mode
cache.chmod(old_mode ^ stat.S_IWRITE)
testdir.makepyfile(
"""
def test_rewritten():
assert "@py_builtins" in globals()"""
)
try:
assert testdir.runpytest().ret == 0
finally:
cache.chmod(old_mode)
def test_zipfile(self, testdir):
z = testdir.tmpdir.join("myzip.zip")
z_fn = str(z)
f = zipfile.ZipFile(z_fn, "w")
try:
f.writestr("test_gum/__init__.py", "")
f.writestr("test_gum/test_lizard.py", "")
finally:
f.close()
z.chmod(256)
testdir.makepyfile(
"""
import sys
sys.path.append(%r)
import test_gum.test_lizard"""
% (z_fn,)
)
assert testdir.runpytest().ret == ExitCode.NO_TESTS_COLLECTED
def test_readonly(self, testdir):
sub = testdir.mkdir("testing")
sub.join("test_readonly.py").write(
b"""
def test_rewritten():
assert "@py_builtins" in globals()
""",
"wb",
)
old_mode = sub.stat().mode
sub.chmod(320)
try:
assert testdir.runpytest().ret == 0
finally:
sub.chmod(old_mode)
def test_dont_write_bytecode(self, testdir, monkeypatch):
testdir.makepyfile(
"""
import os
def test_no_bytecode():
assert "__pycache__" in __cached__
assert not os.path.exists(__cached__)
assert not os.path.exists(os.path.dirname(__cached__))"""
)
monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", "1")
assert testdir.runpytest_subprocess().ret == 0
def test_orphaned_pyc_file(self, testdir):
testdir.makepyfile(
"""
import orphan
def test_it():
assert orphan.value == 17
"""
)
testdir.makepyfile(
orphan="""
value = 17
"""
)
py_compile.compile("orphan.py")
os.remove("orphan.py")
# Python 3 puts the .pyc files in a __pycache__ directory, and will
# not import from there without source. It will import a .pyc from
# the source location though.
if not os.path.exists("orphan.pyc"):
pycs = glob.glob("__pycache__/orphan.*.pyc")
assert len(pycs) == 1
os.rename(pycs[0], "orphan.pyc")
assert testdir.runpytest().ret == 0
def test_cached_pyc_includes_pytest_version(self, testdir, monkeypatch):
"""Avoid stale caches (#1671)"""
monkeypatch.delenv("PYTHONDONTWRITEBYTECODE", raising=False)
testdir.makepyfile(
test_foo="""
def test_foo():
assert True
"""
)
result = testdir.runpytest_subprocess()
assert result.ret == 0
found_names = glob.glob(
"__pycache__/*-pytest-{}.pyc".format(pytest.__version__)
)
assert found_names, "pyc with expected tag not found in names: {}".format(
glob.glob("__pycache__/*.pyc")
)
@pytest.mark.skipif('"__pypy__" in sys.modules')
def test_pyc_vs_pyo(self, testdir, monkeypatch):
testdir.makepyfile(
"""
import pytest
def test_optimized():
"hello"
assert test_optimized.__doc__ is None"""
)
p = py.path.local.make_numbered_dir(
prefix="runpytest-", keep=None, rootdir=testdir.tmpdir
)
tmp = "--basetemp=%s" % p
monkeypatch.setenv("PYTHONOPTIMIZE", "2")
monkeypatch.delenv("PYTHONDONTWRITEBYTECODE", raising=False)
assert testdir.runpytest_subprocess(tmp).ret == 0
tagged = "test_pyc_vs_pyo." + PYTEST_TAG
assert tagged + ".pyo" in os.listdir("__pycache__")
monkeypatch.undo()
monkeypatch.delenv("PYTHONDONTWRITEBYTECODE", raising=False)
assert testdir.runpytest_subprocess(tmp).ret == 1
assert tagged + ".pyc" in os.listdir("__pycache__")
def test_package(self, testdir):
pkg = testdir.tmpdir.join("pkg")
pkg.mkdir()
pkg.join("__init__.py").ensure()
pkg.join("test_blah.py").write(
"""
def test_rewritten():
assert "@py_builtins" in globals()"""
)
assert testdir.runpytest().ret == 0
def test_translate_newlines(self, testdir):
content = "def test_rewritten():\r\n assert '@py_builtins' in globals()"
b = content.encode("utf-8")
testdir.tmpdir.join("test_newlines.py").write(b, "wb")
assert testdir.runpytest().ret == 0
def test_package_without__init__py(self, testdir):
pkg = testdir.mkdir("a_package_without_init_py")
pkg.join("module.py").ensure()
testdir.makepyfile("import a_package_without_init_py.module")
assert testdir.runpytest().ret == ExitCode.NO_TESTS_COLLECTED
def test_rewrite_warning(self, testdir):
testdir.makeconftest(
"""
import pytest
pytest.register_assert_rewrite("_pytest")
"""
)
# needs to be a subprocess because pytester explicitly disables this warning
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines(["*Module already imported*: _pytest"])
def test_rewrite_module_imported_from_conftest(self, testdir):
testdir.makeconftest(
"""
import test_rewrite_module_imported
"""
)
testdir.makepyfile(
test_rewrite_module_imported="""
def test_rewritten():
assert "@py_builtins" in globals()
"""
)
assert testdir.runpytest_subprocess().ret == 0
def test_remember_rewritten_modules(self, pytestconfig, testdir, monkeypatch):
"""
AssertionRewriteHook should remember rewritten modules so it
doesn't give false positives (#2005).
"""
monkeypatch.syspath_prepend(testdir.tmpdir)
testdir.makepyfile(test_remember_rewritten_modules="")
warnings = []
hook = AssertionRewritingHook(pytestconfig)
monkeypatch.setattr(
hook, "_warn_already_imported", lambda code, msg: warnings.append(msg)
)
spec = hook.find_spec("test_remember_rewritten_modules")
module = importlib.util.module_from_spec(spec)
hook.exec_module(module)
hook.mark_rewrite("test_remember_rewritten_modules")
hook.mark_rewrite("test_remember_rewritten_modules")
assert warnings == []
def test_rewrite_warning_using_pytest_plugins(self, testdir):
testdir.makepyfile(
**{
"conftest.py": "pytest_plugins = ['core', 'gui', 'sci']",
"core.py": "",
"gui.py": "pytest_plugins = ['core', 'sci']",
"sci.py": "pytest_plugins = ['core']",
"test_rewrite_warning_pytest_plugins.py": "def test(): pass",
}
)
testdir.chdir()
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines(["*= 1 passed in *=*"])
result.stdout.no_fnmatch_line("*pytest-warning summary*")
def test_rewrite_warning_using_pytest_plugins_env_var(self, testdir, monkeypatch):
monkeypatch.setenv("PYTEST_PLUGINS", "plugin")
testdir.makepyfile(
**{
"plugin.py": "",
"test_rewrite_warning_using_pytest_plugins_env_var.py": """
import plugin
pytest_plugins = ['plugin']
def test():
pass
""",
}
)
testdir.chdir()
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines(["*= 1 passed in *=*"])
result.stdout.no_fnmatch_line("*pytest-warning summary*")
class TestAssertionRewriteHookDetails:
def test_sys_meta_path_munged(self, testdir):
testdir.makepyfile(
"""
def test_meta_path():
import sys; sys.meta_path = []"""
)
assert testdir.runpytest().ret == 0
def test_write_pyc(self, testdir, tmpdir, monkeypatch):
from _pytest.assertion.rewrite import _write_pyc
from _pytest.assertion import AssertionState
config = testdir.parseconfig([])
state = AssertionState(config, "rewrite")
source_path = str(tmpdir.ensure("source.py"))
pycpath = tmpdir.join("pyc").strpath
assert _write_pyc(state, [1], os.stat(source_path), pycpath)
if sys.platform == "win32":
from contextlib import contextmanager
@contextmanager
def atomic_write_failed(fn, mode="r", overwrite=False):
e = OSError()
e.errno = 10
raise e
yield
monkeypatch.setattr(
_pytest.assertion.rewrite, "atomic_write", atomic_write_failed
)
else:
def raise_oserror(*args):
raise OSError()
monkeypatch.setattr("os.rename", raise_oserror)
assert not _write_pyc(state, [1], os.stat(source_path), pycpath)
def test_resources_provider_for_loader(self, testdir):
"""
Attempts to load resources from a package should succeed normally,
even when the AssertionRewriteHook is used to load the modules.
See #366 for details.
"""
pytest.importorskip("pkg_resources")
testdir.mkpydir("testpkg")
contents = {
"testpkg/test_pkg": """
import pkg_resources
import pytest
from _pytest.assertion.rewrite import AssertionRewritingHook
def test_load_resource():
assert isinstance(__loader__, AssertionRewritingHook)
res = pkg_resources.resource_string(__name__, 'resource.txt')
res = res.decode('ascii')
assert res == 'Load me please.'
"""
}
testdir.makepyfile(**contents)
testdir.maketxtfile(**{"testpkg/resource": "Load me please."})
result = testdir.runpytest_subprocess()
result.assert_outcomes(passed=1)
def test_read_pyc(self, tmpdir):
"""
Ensure that the `_read_pyc` can properly deal with corrupted pyc files.
In those circumstances it should just give up instead of generating
an exception that is propagated to the caller.
"""
import py_compile
from _pytest.assertion.rewrite import _read_pyc
source = tmpdir.join("source.py")
pyc = source + "c"
source.write("def test(): pass")
py_compile.compile(str(source), str(pyc))
contents = pyc.read(mode="rb")
strip_bytes = 20 # header is around 8 bytes, strip a little more
assert len(contents) > strip_bytes
pyc.write(contents[:strip_bytes], mode="wb")
assert _read_pyc(str(source), str(pyc)) is None # no error
def test_reload_is_same(self, testdir):
# A file that will be picked up during collecting.
testdir.tmpdir.join("file.py").ensure()
testdir.tmpdir.join("pytest.ini").write(
textwrap.dedent(
"""
[pytest]
python_files = *.py
"""
)
)
testdir.makepyfile(
test_fun="""
import sys
try:
from imp import reload
except ImportError:
pass
def test_loader():
import file
assert sys.modules["file"] is reload(file)
"""
)
result = testdir.runpytest("-s")
result.stdout.fnmatch_lines(["* 1 passed*"])
def test_reload_reloads(self, testdir):
"""Reloading a module after change picks up the change."""
testdir.tmpdir.join("file.py").write(
textwrap.dedent(
"""
def reloaded():
return False
def rewrite_self():
with open(__file__, 'w') as self:
self.write('def reloaded(): return True')
"""
)
)
testdir.tmpdir.join("pytest.ini").write(
textwrap.dedent(
"""
[pytest]
python_files = *.py
"""
)
)
testdir.makepyfile(
test_fun="""
import sys
try:
from imp import reload
except ImportError:
pass
def test_loader():
import file
assert not file.reloaded()
file.rewrite_self()
reload(file)
assert file.reloaded()
"""
)
result = testdir.runpytest("-s")
result.stdout.fnmatch_lines(["* 1 passed*"])
def test_get_data_support(self, testdir):
"""Implement optional PEP302 api (#808).
"""
path = testdir.mkpydir("foo")
path.join("test_foo.py").write(
textwrap.dedent(
"""\
class Test(object):
def test_foo(self):
import pkgutil
data = pkgutil.get_data('foo.test_foo', 'data.txt')
assert data == b'Hey'
"""
)
)
path.join("data.txt").write("Hey")
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*1 passed*"])
def test_issue731(testdir):
testdir.makepyfile(
"""
class LongReprWithBraces(object):
def __repr__(self):
return 'LongReprWithBraces({' + ('a' * 80) + '}' + ('a' * 120) + ')'
def some_method(self):
return False
def test_long_repr():
obj = LongReprWithBraces()
assert obj.some_method()
"""
)
result = testdir.runpytest()
result.stdout.no_fnmatch_line("*unbalanced braces*")
class TestIssue925:
def test_simple_case(self, testdir):
testdir.makepyfile(
"""
def test_ternary_display():
assert (False == False) == False
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*E*assert (False == False) == False"])
def test_long_case(self, testdir):
testdir.makepyfile(
"""
def test_ternary_display():
assert False == (False == True) == True
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*E*assert (False == True) == True"])
def test_many_brackets(self, testdir):
testdir.makepyfile(
"""
def test_ternary_display():
assert True == ((False == True) == True)
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*E*assert True == ((False == True) == True)"])
class TestIssue2121:
def test_rewrite_python_files_contain_subdirs(self, testdir):
testdir.makepyfile(
**{
"tests/file.py": """
def test_simple_failure():
assert 1 + 1 == 3
"""
}
)
testdir.makeini(
"""
[pytest]
python_files = tests/**.py
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*E*assert (1 + 1) == 3"])
@pytest.mark.skipif(
sys.maxsize <= (2 ** 31 - 1), reason="Causes OverflowError on 32bit systems"
)
@pytest.mark.parametrize("offset", [-1, +1])
def test_source_mtime_long_long(testdir, offset):
"""Support modification dates after 2038 in rewritten files (#4903).
pytest would crash with:
fp.write(struct.pack("<ll", mtime, size))
E struct.error: argument out of range
"""
p = testdir.makepyfile(
"""
def test(): pass
"""
)
# use unsigned long timestamp which overflows signed long,
# which was the cause of the bug
# +1 offset also tests masking of 0xFFFFFFFF
timestamp = 2 ** 32 + offset
os.utime(str(p), (timestamp, timestamp))
result = testdir.runpytest()
assert result.ret == 0
def test_rewrite_infinite_recursion(testdir, pytestconfig, monkeypatch):
"""Fix infinite recursion when writing pyc files: if an import happens to be triggered when writing the pyc
file, this would cause another call to the hook, which would trigger another pyc writing, which could
trigger another import, and so on. (#3506)"""
from _pytest.assertion import rewrite
testdir.syspathinsert()
testdir.makepyfile(test_foo="def test_foo(): pass")
testdir.makepyfile(test_bar="def test_bar(): pass")
original_write_pyc = rewrite._write_pyc
write_pyc_called = []
def spy_write_pyc(*args, **kwargs):
# make a note that we have called _write_pyc
write_pyc_called.append(True)
# try to import a module at this point: we should not try to rewrite this module
assert hook.find_spec("test_bar") is None
return original_write_pyc(*args, **kwargs)
monkeypatch.setattr(rewrite, "_write_pyc", spy_write_pyc)
monkeypatch.setattr(sys, "dont_write_bytecode", False)
hook = AssertionRewritingHook(pytestconfig)
spec = hook.find_spec("test_foo")
assert spec is not None
module = importlib.util.module_from_spec(spec)
hook.exec_module(module)
assert len(write_pyc_called) == 1
class TestEarlyRewriteBailout:
@pytest.fixture
def hook(self, pytestconfig, monkeypatch, testdir):
"""Returns a patched AssertionRewritingHook instance so we can configure its initial paths and track
if PathFinder.find_spec has been called.
"""
import importlib.machinery
self.find_spec_calls = []
self.initial_paths = set()
class StubSession:
_initialpaths = self.initial_paths
def isinitpath(self, p):
return p in self._initialpaths
def spy_find_spec(name, path):
self.find_spec_calls.append(name)
return importlib.machinery.PathFinder.find_spec(name, path)
hook = AssertionRewritingHook(pytestconfig)
# use default patterns, otherwise we inherit pytest's testing config
hook.fnpats[:] = ["test_*.py", "*_test.py"]
monkeypatch.setattr(hook, "_find_spec", spy_find_spec)
hook.set_session(StubSession())
testdir.syspathinsert()
return hook
def test_basic(self, testdir, hook):
"""
Ensure we avoid calling PathFinder.find_spec when we know for sure a certain
module will not be rewritten to optimize assertion rewriting (#3918).
"""
testdir.makeconftest(
"""
import pytest
@pytest.fixture
def fix(): return 1
"""
)
testdir.makepyfile(test_foo="def test_foo(): pass")
testdir.makepyfile(bar="def bar(): pass")
foobar_path = testdir.makepyfile(foobar="def foobar(): pass")
self.initial_paths.add(foobar_path)
# conftest files should always be rewritten
assert hook.find_spec("conftest") is not None
assert self.find_spec_calls == ["conftest"]
# files matching "python_files" mask should always be rewritten
assert hook.find_spec("test_foo") is not None
assert self.find_spec_calls == ["conftest", "test_foo"]
# file does not match "python_files": early bailout
assert hook.find_spec("bar") is None
assert self.find_spec_calls == ["conftest", "test_foo"]
# file is an initial path (passed on the command-line): should be rewritten
assert hook.find_spec("foobar") is not None
assert self.find_spec_calls == ["conftest", "test_foo", "foobar"]
def test_pattern_contains_subdirectories(self, testdir, hook):
"""If one of the python_files patterns contain subdirectories ("tests/**.py") we can't bailout early
because we need to match with the full path, which can only be found by calling PathFinder.find_spec
"""
p = testdir.makepyfile(
**{
"tests/file.py": """\
def test_simple_failure():
assert 1 + 1 == 3
"""
}
)
testdir.syspathinsert(p.dirpath())
hook.fnpats[:] = ["tests/**.py"]
assert hook.find_spec("file") is not None
assert self.find_spec_calls == ["file"]
@pytest.mark.skipif(
sys.platform.startswith("win32"), reason="cannot remove cwd on Windows"
)
def test_cwd_changed(self, testdir, monkeypatch):
# Setup conditions for py's fspath trying to import pathlib on py34
# always (previously triggered via xdist only).
# Ref: https://github.com/pytest-dev/py/pull/207
monkeypatch.syspath_prepend("")
monkeypatch.delitem(sys.modules, "pathlib", raising=False)
testdir.makepyfile(
**{
"test_setup_nonexisting_cwd.py": """\
import os
import shutil
import tempfile
d = tempfile.mkdtemp()
os.chdir(d)
shutil.rmtree(d)
""",
"test_test.py": """\
def test():
pass
""",
}
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["* 1 passed in *"])
class TestAssertionPass:
def test_option_default(self, testdir):
config = testdir.parseconfig()
assert config.getini("enable_assertion_pass_hook") is False
@pytest.fixture
def flag_on(self, testdir):
testdir.makeini("[pytest]\nenable_assertion_pass_hook = True\n")
@pytest.fixture
def hook_on(self, testdir):
testdir.makeconftest(
"""\
def pytest_assertion_pass(item, lineno, orig, expl):
raise Exception("Assertion Passed: {} {} at line {}".format(orig, expl, lineno))
"""
)
def test_hook_call(self, testdir, flag_on, hook_on):
testdir.makepyfile(
"""\
def test_simple():
a=1
b=2
c=3
d=0
assert a+b == c+d
# cover failing assertions with a message
def test_fails():
assert False, "assert with message"
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(
"*Assertion Passed: a+b == c+d (1 + 2) == (3 + 0) at line 7*"
)
def test_hook_call_with_parens(self, testdir, flag_on, hook_on):
testdir.makepyfile(
"""\
def f(): return 1
def test():
assert f()
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines("*Assertion Passed: f() 1")
def test_hook_not_called_without_hookimpl(self, testdir, monkeypatch, flag_on):
"""Assertion pass should not be called (and hence formatting should
not occur) if there is no hook declared for pytest_assertion_pass"""
def raise_on_assertionpass(*_, **__):
raise Exception("Assertion passed called when it shouldn't!")
monkeypatch.setattr(
_pytest.assertion.rewrite, "_call_assertion_pass", raise_on_assertionpass
)
testdir.makepyfile(
"""\
def test_simple():
a=1
b=2
c=3
d=0
assert a+b == c+d
"""
)
result = testdir.runpytest()
result.assert_outcomes(passed=1)
def test_hook_not_called_without_cmd_option(self, testdir, monkeypatch):
"""Assertion pass should not be called (and hence formatting should
not occur) if there is no hook declared for pytest_assertion_pass"""
def raise_on_assertionpass(*_, **__):
raise Exception("Assertion passed called when it shouldn't!")
monkeypatch.setattr(
_pytest.assertion.rewrite, "_call_assertion_pass", raise_on_assertionpass
)
testdir.makeconftest(
"""\
def pytest_assertion_pass(item, lineno, orig, expl):
raise Exception("Assertion Passed: {} {} at line {}".format(orig, expl, lineno))
"""
)
testdir.makepyfile(
"""\
def test_simple():
a=1
b=2
c=3
d=0
assert a+b == c+d
"""
)
result = testdir.runpytest()
result.assert_outcomes(passed=1)
@pytest.mark.parametrize(
("src", "expected"),
(
# fmt: off
pytest.param(b"", {}, id="trivial"),
pytest.param(
b"def x(): assert 1\n",
{1: "1"},
id="assert statement not on own line",
),
pytest.param(
b"def x():\n"
b" assert 1\n"
b" assert 1+2\n",
{2: "1", 3: "1+2"},
id="multiple assertions",
),
pytest.param(
# changes in encoding cause the byte offsets to be different
"# -*- coding: latin1\n"
"def ÀÀÀÀÀ(): assert 1\n".encode("latin1"),
{2: "1"},
id="latin1 encoded on first line\n",
),
pytest.param(
# using the default utf-8 encoding
"def ÀÀÀÀÀ(): assert 1\n".encode(),
{1: "1"},
id="utf-8 encoded on first line",
),
pytest.param(
b"def x():\n"
b" assert (\n"
b" 1 + 2 # comment\n"
b" )\n",
{2: "(\n 1 + 2 # comment\n )"},
id="multi-line assertion",
),
pytest.param(
b"def x():\n"
b" assert y == [\n"
b" 1, 2, 3\n"
b" ]\n",
{2: "y == [\n 1, 2, 3\n ]"},
id="multi line assert with list continuation",
),
pytest.param(
b"def x():\n"
b" assert 1 + \\\n"
b" 2\n",
{2: "1 + \\\n 2"},
id="backslash continuation",
),
pytest.param(
b"def x():\n"
b" assert x, y\n",
{2: "x"},
id="assertion with message",
),
pytest.param(
b"def x():\n"
b" assert (\n"
b" f(1, 2, 3)\n"
b" ), 'f did not work!'\n",
{2: "(\n f(1, 2, 3)\n )"},
id="assertion with message, test spanning multiple lines",
),
pytest.param(
b"def x():\n"
b" assert \\\n"
b" x\\\n"
b" , 'failure message'\n",
{2: "x"},
id="escaped newlines plus message",
),
pytest.param(
b"def x(): assert 5",
{1: "5"},
id="no newline at end of file",
),
# fmt: on
),
)
def test_get_assertion_exprs(src, expected):
assert _get_assertion_exprs(src) == expected
def test_try_makedirs(monkeypatch, tmp_path):
from _pytest.assertion.rewrite import try_makedirs
p = tmp_path / "foo"
# create
assert try_makedirs(str(p))
assert p.is_dir()
# already exist
assert try_makedirs(str(p))
# monkeypatch to simulate all error situations
def fake_mkdir(p, exist_ok=False, *, exc):
assert isinstance(p, str)
raise exc
monkeypatch.setattr(os, "makedirs", partial(fake_mkdir, exc=FileNotFoundError()))
assert not try_makedirs(str(p))
monkeypatch.setattr(os, "makedirs", partial(fake_mkdir, exc=NotADirectoryError()))
assert not try_makedirs(str(p))
monkeypatch.setattr(os, "makedirs", partial(fake_mkdir, exc=PermissionError()))
assert not try_makedirs(str(p))
err = OSError()
err.errno = errno.EROFS
monkeypatch.setattr(os, "makedirs", partial(fake_mkdir, exc=err))
assert not try_makedirs(str(p))
# unhandled OSError should raise
err = OSError()
err.errno = errno.ECHILD
monkeypatch.setattr(os, "makedirs", partial(fake_mkdir, exc=err))
with pytest.raises(OSError) as exc_info:
try_makedirs(str(p))
assert exc_info.value.errno == errno.ECHILD
class TestPyCacheDir:
@pytest.mark.parametrize(
"prefix, source, expected",
[
("c:/tmp/pycs", "d:/projects/src/foo.py", "c:/tmp/pycs/projects/src"),
(None, "d:/projects/src/foo.py", "d:/projects/src/__pycache__"),
("/tmp/pycs", "/home/projects/src/foo.py", "/tmp/pycs/home/projects/src"),
(None, "/home/projects/src/foo.py", "/home/projects/src/__pycache__"),
],
)
def test_get_cache_dir(self, monkeypatch, prefix, source, expected):
if prefix:
if sys.version_info < (3, 8):
pytest.skip("pycache_prefix not available in py<38")
monkeypatch.setattr(sys, "pycache_prefix", prefix)
assert get_cache_dir(Path(source)) == Path(expected)
@pytest.mark.skipif(
sys.version_info < (3, 8), reason="pycache_prefix not available in py<38"
)
def test_sys_pycache_prefix_integration(self, tmp_path, monkeypatch, testdir):
"""Integration test for sys.pycache_prefix (#4730)."""
pycache_prefix = tmp_path / "my/pycs"
monkeypatch.setattr(sys, "pycache_prefix", str(pycache_prefix))
monkeypatch.setattr(sys, "dont_write_bytecode", False)
testdir.makepyfile(
**{
"src/test_foo.py": """
import bar
def test_foo():
pass
""",
"src/bar/__init__.py": "",
}
)
result = testdir.runpytest()
assert result.ret == 0
test_foo = Path(testdir.tmpdir) / "src/test_foo.py"
bar_init = Path(testdir.tmpdir) / "src/bar/__init__.py"
assert test_foo.is_file()
assert bar_init.is_file()
# test file: rewritten, custom pytest cache tag
test_foo_pyc = get_cache_dir(test_foo) / ("test_foo" + PYC_TAIL)
assert test_foo_pyc.is_file()
# normal file: not touched by pytest, normal cache tag
bar_init_pyc = get_cache_dir(bar_init) / "__init__.{cache_tag}.pyc".format(
cache_tag=sys.implementation.cache_tag
)
assert bar_init_pyc.is_file()
|
the-stack_106_26940 | import os
import os.path as osp
import time
import argparse
import torch
import torch.distributed as dist
from .logger import get_logger
from utils.pyt_utils import load_model, parse_devices, extant_file, link_file, \
ensure_dir
logger = get_logger()
# Training state container. Registers the pieces of training state
# (epoch, iteration, dataloader, model, optimizer) used by the Engine class.
class State(object):
def __init__(self):
self.epoch = 0
self.iteration = 0
self.dataloader = None
self.model = None
self.optimizer = None
def register(self, **kwargs):
for k, v in kwargs.items():
assert k in ['epoch', 'iteration', 'dataloader', 'model',
'optimizer']
setattr(self, k, v)
# Training engine. It manages the whole training process, including both
# distributed and non-distributed training.
class Engine(object):
def __init__(self, custom_parser=None):
logger.info(
"PyTorch Version {}".format(torch.__version__))
self.state = State()
self.devices = None
self.distributed = False
if custom_parser is None:
self.parser = argparse.ArgumentParser()
else:
assert isinstance(custom_parser, argparse.ArgumentParser)
self.parser = custom_parser
self.inject_default_parser()
self.args = self.parser.parse_args()
self.continue_state_object = self.args.continue_fpath
# if 'WORLD_SIZE' in os.environ:
# self.distributed = int(os.environ['WORLD_SIZE']) > 1
# if self.distributed:
# self.local_rank = self.args.local_rank
# self.world_size = int(os.environ['WORLD_SIZE'])
# torch.cuda.set_device(self.local_rank)
# dist.init_process_group(backend="nccl", init_method='env://')
# self.devices = [i for i in range(self.world_size)]
# else:
# self.devices = parse_devices(self.args.devices)
# Register default arguments
def inject_default_parser(self):
p = self.parser
p.add_argument('-d', '--devices', default='',
help='set data parallel training')
p.add_argument('-c', '--continue', type=extant_file,
metavar="FILE",
dest="continue_fpath",
help='continue from one certain checkpoint')
p.add_argument('--local_rank', default=0, type=int,
help='process rank on node')
# Register current state
def register_state(self, **kwargs):
self.state.register(**kwargs)
def update_iteration(self, epoch, iteration):
self.state.epoch = epoch
self.state.iteration = iteration
    # Save a checkpoint (model, optimizer, epoch and iteration) to the given path.
def save_checkpoint(self, path):
logger.info("Saving checkpoint to file {}".format(path))
t_start = time.time()
state_dict = {}
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in self.state.model.state_dict().items():
key = k
if k.split('.')[0] == 'module':
key = k[7:]
new_state_dict[key] = v
state_dict['model'] = new_state_dict
state_dict['optimizer'] = self.state.optimizer.state_dict()
state_dict['epoch'] = self.state.epoch
state_dict['iteration'] = self.state.iteration
t_iobegin = time.time()
torch.save(state_dict, path)
del state_dict
del new_state_dict
t_end = time.time()
logger.info(
"Save checkpoint to file {}, "
"Time usage:\n\tprepare snapshot: {}, IO: {}".format(
path, t_iobegin - t_start, t_end - t_iobegin))
    # Save the current training model parameters and create a symbolic link to the latest checkpoint under the snapshot folder.
def save_and_link_checkpoint(self, snapshot_dir, log_dir, log_dir_link):
ensure_dir(snapshot_dir)
if not osp.exists(log_dir_link):
link_file(log_dir, log_dir_link)
current_epoch_checkpoint = osp.join(snapshot_dir, 'epoch-{}.pth'.format(
self.state.epoch))
self.save_checkpoint(current_epoch_checkpoint)
last_epoch_checkpoint = osp.join(snapshot_dir,
'epoch-last.pth')
link_file(current_epoch_checkpoint, last_epoch_checkpoint)
    # Restore a checkpoint so training can continue after being interrupted.
def restore_checkpoint(self):
t_start = time.time()
if self.distributed:
tmp = torch.load(self.continue_state_object,
map_location=lambda storage, loc: storage.cuda(
self.local_rank))
else:
tmp = torch.load(self.continue_state_object)
t_ioend = time.time()
self.state.model = load_model(self.state.model, tmp['model'],
True)
self.state.optimizer.load_state_dict(tmp['optimizer'])
self.state.epoch = tmp['epoch'] + 1
self.state.iteration = tmp['iteration']
del tmp
t_end = time.time()
logger.info(
"Load checkpoint from file {}, "
"Time usage:\n\tIO: {}, restore snapshot: {}".format(
self.continue_state_object, t_ioend - t_start, t_end - t_ioend))
def __enter__(self):
return self
def __exit__(self, type, value, tb):
torch.cuda.empty_cache()
if type is not None:
            logger.warning(
                "An exception occurred while the Engine was running; "
                "giving up the process")
return False
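# --- Illustrative usage sketch (not part of the original module). The model,
# optimizer, dataloader and epoch-count names below are placeholders, not
# objects defined in this file:
#
# with Engine(custom_parser=my_parser) as engine:
#     engine.register_state(dataloader=train_loader, model=model,
#                           optimizer=optimizer)
#     if engine.continue_state_object:
#         engine.restore_checkpoint()          # resume an interrupted run
#     for epoch in range(engine.state.epoch, total_epochs):
#         for it, batch in enumerate(train_loader):
#             engine.update_iteration(epoch, it)
#             ...                               # forward / backward / step
#         engine.save_and_link_checkpoint(snapshot_dir, log_dir, log_dir_link)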
|
the-stack_106_26941 | # -*- coding: utf-8 -*-
"""
Tests for discord.ext.tasks
"""
import asyncio
import datetime
import pytest
import sys
from discord import utils
from discord.ext import tasks
@pytest.mark.asyncio
async def test_explicit_initial_runs_tomorrow_single():
now = utils.utcnow()
if not ((0, 4) < (now.hour, now.minute) < (23, 59)):
await asyncio.sleep(5 * 60) # sleep for 5 minutes
now = utils.utcnow()
has_run = False
async def inner():
nonlocal has_run
has_run = True
time = utils.utcnow() - datetime.timedelta(minutes=1)
# a loop that should have an initial run tomorrow
loop = tasks.loop(time=datetime.time(hour=time.hour, minute=time.minute))(inner)
loop.start()
await asyncio.sleep(1)
try:
assert not has_run
finally:
loop.cancel()
@pytest.mark.asyncio
async def test_explicit_initial_runs_tomorrow_multi():
now = utils.utcnow()
if not ((0, 4) < (now.hour, now.minute) < (23, 59)):
await asyncio.sleep(5 * 60) # sleep for 5 minutes
now = utils.utcnow()
# multiple times that are in the past for today
times = []
for _ in range(3):
now -= datetime.timedelta(minutes=1)
times.append(datetime.time(hour=now.hour, minute=now.minute))
has_run = False
async def inner():
nonlocal has_run
has_run = True
# a loop that should have an initial run tomorrow
loop = tasks.loop(time=times)(inner)
loop.start()
await asyncio.sleep(1)
try:
assert not has_run
finally:
loop.cancel()
def test_task_regression_issue7659():
jst = datetime.timezone(datetime.timedelta(hours=9))
# 00:00, 03:00, 06:00, 09:00, 12:00, 15:00, 18:00, 21:00
times = [datetime.time(hour=h, tzinfo=jst) for h in range(0, 24, 3)]
@tasks.loop(time=times)
async def loop():
pass
before_midnight = datetime.datetime(2022, 3, 12, 23, 50, 59, tzinfo=jst)
after_midnight = before_midnight + datetime.timedelta(minutes=9, seconds=2)
expected_before_midnight = datetime.datetime(2022, 3, 13, 0, 0, 0, tzinfo=jst)
expected_after_midnight = datetime.datetime(2022, 3, 13, 3, 0, 0, tzinfo=jst)
assert loop._get_next_sleep_time(before_midnight) == expected_before_midnight
assert loop._get_next_sleep_time(after_midnight) == expected_after_midnight
today = datetime.date.today()
minute_before = [datetime.datetime.combine(today, time, tzinfo=jst) - datetime.timedelta(minutes=1) for time in times]
for before, expected_time in zip(minute_before, times):
expected = datetime.datetime.combine(today, expected_time, tzinfo=jst)
actual = loop._get_next_sleep_time(before)
assert actual == expected
def test_task_regression_issue7676():
jst = datetime.timezone(datetime.timedelta(hours=9))
# 00:00, 03:00, 06:00, 09:00, 12:00, 15:00, 18:00, 21:00
times = [datetime.time(hour=h, tzinfo=jst) for h in range(0, 24, 3)]
@tasks.loop(time=times)
async def loop():
pass
# Create pseudo UTC times
now = utils.utcnow()
today = now.date()
times_before_in_utc = [
datetime.datetime.combine(today, time, tzinfo=jst).astimezone(datetime.timezone.utc) - datetime.timedelta(minutes=1)
for time in times
]
for before, expected_time in zip(times_before_in_utc, times):
actual = loop._get_next_sleep_time(before)
actual_time = actual.timetz()
assert actual_time == expected_time
@pytest.mark.skipif(sys.version_info < (3, 9), reason="zoneinfo requires 3.9")
def test_task_is_imaginary():
import zoneinfo
tz = zoneinfo.ZoneInfo('America/New_York')
# 2:30 AM was skipped
dt = datetime.datetime(2022, 3, 13, 2, 30, tzinfo=tz)
assert tasks.is_imaginary(dt)
now = utils.utcnow()
# UTC time is never imaginary or ambiguous
assert not tasks.is_imaginary(now)
@pytest.mark.skipif(sys.version_info < (3, 9), reason="zoneinfo requires 3.9")
def test_task_is_ambiguous():
import zoneinfo
tz = zoneinfo.ZoneInfo('America/New_York')
# 1:30 AM happened twice
dt = datetime.datetime(2022, 11, 6, 1, 30, tzinfo=tz)
assert tasks.is_ambiguous(dt)
now = utils.utcnow()
# UTC time is never imaginary or ambiguous
assert not tasks.is_imaginary(now)
@pytest.mark.skipif(sys.version_info < (3, 9), reason="zoneinfo requires 3.9")
@pytest.mark.parametrize(
('dt', 'key', 'expected'),
[
(datetime.datetime(2022, 11, 6, 1, 30), 'America/New_York', datetime.datetime(2022, 11, 6, 1, 30, fold=1)),
(datetime.datetime(2022, 3, 13, 2, 30), 'America/New_York', datetime.datetime(2022, 3, 13, 3, 30)),
(datetime.datetime(2022, 4, 8, 2, 30), 'America/New_York', datetime.datetime(2022, 4, 8, 2, 30)),
(datetime.datetime(2023, 1, 7, 12, 30), 'UTC', datetime.datetime(2023, 1, 7, 12, 30)),
],
)
def test_task_date_resolve(dt, key, expected):
import zoneinfo
tz = zoneinfo.ZoneInfo(key)
actual = tasks.resolve_datetime(dt.replace(tzinfo=tz))
expected = expected.replace(tzinfo=tz)
assert actual == expected
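# --- Illustrative pattern exercised by the regression tests above (not part of
# the original test module). The timezone and hours are placeholders:
#
# tz = datetime.timezone(datetime.timedelta(hours=9))
#
# @tasks.loop(time=[datetime.time(hour=h, tzinfo=tz) for h in (0, 12)])
# async def report():
#     ...  # runs at 00:00 and 12:00 in the given timezone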
|
the-stack_106_26943 | import argparse
import os
from random import seed
import torch
from allennlp.data.iterators import BucketIterator
from allennlp.data.vocabulary import DEFAULT_OOV_TOKEN, DEFAULT_PADDING_TOKEN
from allennlp.data.vocabulary import Vocabulary
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from gector.bert_token_embedder import PretrainedBertEmbedder
from gector.datareader import Seq2LabelsDatasetReader
from gector.seq2labels_model import Seq2Labels
from gector.trainer import Trainer
from gector.tokenizer_indexer import PretrainedBertIndexer
from utils.helpers import get_weights_name
def fix_seed():
torch.manual_seed(1)
torch.backends.cudnn.enabled = False
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
seed(43)
def get_token_indexers(model_name, max_pieces_per_token=5, lowercase_tokens=True, special_tokens_fix=0):
bert_token_indexer = PretrainedBertIndexer(
pretrained_model=model_name,
max_pieces_per_token=max_pieces_per_token,
do_lowercase=lowercase_tokens,
special_tokens_fix=special_tokens_fix
)
return {'bert': bert_token_indexer}
def get_token_embedders(model_name, tune_bert=False, special_tokens_fix=0):
take_grads = True if tune_bert > 0 else False
bert_token_emb = PretrainedBertEmbedder(
pretrained_model=model_name,
top_layer_only=True, requires_grad=take_grads,
special_tokens_fix=special_tokens_fix)
token_embedders = {'bert': bert_token_emb}
embedder_to_indexer_map = {"bert": ["bert", "bert-offsets"]}
text_filed_emd = BasicTextFieldEmbedder(token_embedders=token_embedders,
embedder_to_indexer_map=embedder_to_indexer_map,
allow_unmatched_keys=True)
return text_filed_emd
def get_data_reader(model_name, max_len, skip_correct=False, skip_complex=0,
test_mode=False, tag_strategy="keep_one",
broken_dot_strategy="keep", lowercase_tokens=True,
max_pieces_per_token=3, tn_prob=0, tp_prob=1, special_tokens_fix=0,):
token_indexers = get_token_indexers(model_name,
max_pieces_per_token=max_pieces_per_token,
lowercase_tokens=lowercase_tokens,
special_tokens_fix=special_tokens_fix
)
reader = Seq2LabelsDatasetReader(token_indexers=token_indexers,
max_len=max_len,
skip_correct=skip_correct,
skip_complex=skip_complex,
test_mode=test_mode,
tag_strategy=tag_strategy,
broken_dot_strategy=broken_dot_strategy,
lazy=True,
tn_prob=tn_prob,
tp_prob=tp_prob)
return reader
def get_model(model_name, vocab, tune_bert=False,
predictor_dropout=0,
label_smoothing=0.0,
confidence=0,
special_tokens_fix=0):
token_embs = get_token_embedders(model_name, tune_bert=tune_bert, special_tokens_fix=special_tokens_fix)
model = Seq2Labels(vocab=vocab,
text_field_embedder=token_embs,
predictor_dropout=predictor_dropout,
label_smoothing=label_smoothing,
confidence=confidence)
return model
def main(args):
fix_seed()
if not os.path.exists(args.model_dir):
os.mkdir(args.model_dir)
weights_name = get_weights_name(args.transformer_model, args.lowercase_tokens)
# read datasets
reader = get_data_reader(weights_name, args.max_len, skip_correct=bool(args.skip_correct),
skip_complex=args.skip_complex,
test_mode=False,
tag_strategy=args.tag_strategy,
lowercase_tokens=args.lowercase_tokens,
max_pieces_per_token=args.pieces_per_token,
tn_prob=args.tn_prob,
tp_prob=args.tp_prob,
special_tokens_fix=args.special_tokens_fix)
train_data = reader.read(args.train_set)
dev_data = reader.read(args.dev_set)
default_tokens = [DEFAULT_OOV_TOKEN, DEFAULT_PADDING_TOKEN]
namespaces = ['labels', 'd_tags']
tokens_to_add = {x: default_tokens for x in namespaces}
# build vocab
if args.vocab_path:
vocab = Vocabulary.from_files(args.vocab_path)
else:
vocab = Vocabulary.from_instances(train_data,
max_vocab_size={'tokens': 30000,
'labels': args.target_vocab_size,
'd_tags': 2},
tokens_to_add=tokens_to_add)
vocab.save_to_files(os.path.join(args.model_dir, 'vocabulary'))
print("Data is loaded")
model = get_model(weights_name, vocab,
tune_bert=args.tune_bert,
predictor_dropout=args.predictor_dropout,
label_smoothing=args.label_smoothing,
special_tokens_fix=args.special_tokens_fix)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
if torch.cuda.is_available():
if torch.cuda.device_count() > 1:
cuda_device = list(range(torch.cuda.device_count()))
else:
cuda_device = 0
else:
cuda_device = -1
if args.pretrain:
model.load_state_dict(
torch.load(os.path.join(args.pretrain_folder, args.pretrain + '.th')),
strict=False,
)
model = model.to(device)
print("Model is set")
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
optimizer, factor=0.1, patience=10)
instances_per_epoch = None if not args.updates_per_epoch else \
int(args.updates_per_epoch * args.batch_size * args.accumulation_size)
iterator = BucketIterator(batch_size=args.batch_size,
sorting_keys=[("tokens", "num_tokens")],
biggest_batch_first=True,
max_instances_in_memory=instances_per_epoch,
instances_per_epoch=instances_per_epoch,
)
iterator.index_with(vocab)
val_iterator = BucketIterator(batch_size=args.batch_size,
sorting_keys=[("tokens", "num_tokens")],
instances_per_epoch=None)
val_iterator.index_with(vocab)
trainer = Trainer(model=model,
optimizer=optimizer,
scheduler=scheduler,
iterator=iterator,
validation_iterator=val_iterator,
train_dataset=train_data,
validation_dataset=dev_data,
serialization_dir=args.model_dir,
patience=args.patience,
num_epochs=args.n_epoch,
cuda_device=cuda_device,
shuffle=False,
accumulated_batch_count=args.accumulation_size,
cold_step_count=args.cold_steps_count,
cold_lr=args.cold_lr,
cuda_verbose_step=int(args.cuda_verbose_steps)
if args.cuda_verbose_steps else None
)
print("Start training")
trainer.train()
# Here's how to save the model.
out_model = os.path.join(args.model_dir, 'model.th')
with open(out_model, 'wb') as f:
torch.save(model.state_dict(), f)
print("Model is dumped")
if __name__ == '__main__':
# read parameters
parser = argparse.ArgumentParser()
parser.add_argument('--train_set',
help='Path to the train data', required=True)
parser.add_argument('--dev_set',
help='Path to the dev data', required=True)
parser.add_argument('--model_dir',
help='Path to the model dir', required=True)
parser.add_argument('--vocab_path',
help='Path to the model vocabulary directory.'
                             ' If not set then build vocab from data',
default='')
parser.add_argument('--batch_size',
type=int,
help='The size of the batch.',
default=32)
parser.add_argument('--max_len',
type=int,
                        help='The max sentence length '
                             '(all longer sentences will be truncated)',
default=50)
parser.add_argument('--target_vocab_size',
type=int,
help='The size of target vocabularies.',
default=1000)
parser.add_argument('--n_epoch',
type=int,
                        help='The number of epochs for training the model.',
default=20)
parser.add_argument('--patience',
type=int,
                        help='The number of epochs without any improvement'
                             ' on the validation set.',
default=3)
parser.add_argument('--skip_correct',
type=int,
                        help='If set then correct sentences will be skipped '
'by data reader.',
default=1)
parser.add_argument('--skip_complex',
type=int,
                        help='If set then complex corrections will be skipped '
'by data reader.',
choices=[0, 1, 2, 3, 4, 5],
default=0)
parser.add_argument('--tune_bert',
type=int,
                        help='If more than 0 then fine-tune BERT.',
default=1)
parser.add_argument('--tag_strategy',
choices=['keep_one', 'merge_all'],
help='The type of the data reader behaviour.',
default='keep_one')
parser.add_argument('--accumulation_size',
type=int,
                        help='How many batches you want to accumulate.',
default=4)
parser.add_argument('--lr',
type=float,
help='Set initial learning rate.',
default=1e-5)
parser.add_argument('--cold_steps_count',
type=int,
help='Whether to train only classifier layers first.',
default=4)
parser.add_argument('--cold_lr',
type=float,
help='Learning rate during cold_steps.',
default=1e-3)
parser.add_argument('--predictor_dropout',
type=float,
help='The value of dropout for predictor.',
default=0.0)
parser.add_argument('--lowercase_tokens',
type=int,
help='Whether to lowercase tokens.',
default=0)
parser.add_argument('--pieces_per_token',
type=int,
help='The max number for pieces per token.',
default=5)
parser.add_argument('--cuda_verbose_steps',
help='Number of steps after which CUDA memory information is printed. '
'Makes sense for local testing. Usually about 1000.',
default=None)
parser.add_argument('--label_smoothing',
type=float,
help='The value of parameter alpha for label smoothing.',
default=0.0)
parser.add_argument('--tn_prob',
type=float,
help='The probability to take TN from data.',
default=0)
parser.add_argument('--tp_prob',
type=float,
help='The probability to take TP from data.',
default=1)
parser.add_argument('--updates_per_epoch',
type=int,
                        help='If set then each epoch will contain exactly this number of updates.',
default=0)
parser.add_argument('--pretrain_folder',
help='The name of the pretrain folder.')
parser.add_argument('--pretrain',
help='The name of the pretrain weights in pretrain_folder param.',
default='')
parser.add_argument('--transformer_model',
choices=['bert', 'distilbert', 'gpt2', 'roberta', 'transformerxl', 'xlnet', 'albert',
'bert-large', 'roberta-large', 'xlnet-large'],
help='Name of the transformer model.',
default='roberta')
parser.add_argument('--special_tokens_fix',
type=int,
help='Whether to fix problem with [CLS], [SEP] tokens tokenization.',
default=1)
args = parser.parse_args()
main(args)
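# --- Example invocation (illustrative only; the script name, paths and sizes
# are placeholders, not files shipped with this module):
#
#   python train.py \
#       --train_set data/train.txt --dev_set data/dev.txt \
#       --model_dir model_out --transformer_model roberta \
#       --batch_size 32 --n_epoch 20 --special_tokens_fix 1
#
# --vocab_path may point to an existing vocabulary directory to skip building
# the vocabulary from the training data.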
|
the-stack_106_26944 | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Implements a thin wrapper around Translator to compute BLEU scores on (a sample of) validation data during training.
"""
import logging
import os
import random
import time
from typing import Dict, Optional
import mxnet as mx
import sockeye.output_handler
from . import evaluate
from . import constants as C
from . import data_io
from . import inference
from . import utils
logger = logging.getLogger(__name__)
class CheckpointDecoder:
"""
Decodes a (random sample of a) dataset using parameters at given checkpoint and computes BLEU against references.
:param context: MXNet context to bind the model to.
:param inputs: Path to file containing input sentences.
:param references: Path to file containing references.
:param model: Model to load.
:param max_input_len: Maximum input length.
:param beam_size: Size of the beam.
:param bucket_width_source: Source bucket width.
:param length_penalty_alpha: Alpha factor for the length penalty
:param length_penalty_beta: Beta factor for the length penalty
:param softmax_temperature: Optional parameter to control steepness of softmax distribution.
:param max_output_length_num_stds: Number of standard deviations as safety margin for maximum output length.
:param ensemble_mode: Ensemble mode: linear or log_linear combination.
:param sample_size: Maximum number of sentences to sample and decode. If <=0, all sentences are used.
:param random_seed: Random seed for sampling. Default: 42.
"""
def __init__(self,
context: mx.context.Context,
inputs: str,
references: str,
model: str,
max_input_len: Optional[int] = None,
beam_size: int = C.DEFAULT_BEAM_SIZE,
bucket_width_source: int = 10,
length_penalty_alpha: float = 1.0,
length_penalty_beta: float = 0.0,
softmax_temperature: Optional[float] = None,
max_output_length_num_stds: int = C.DEFAULT_NUM_STD_MAX_OUTPUT_LENGTH,
ensemble_mode: str = 'linear',
sample_size: int = -1,
random_seed: int = 42) -> None:
self.context = context
self.max_input_len = max_input_len
self.max_output_length_num_stds = max_output_length_num_stds
self.ensemble_mode = ensemble_mode
self.beam_size = beam_size
self.batch_size = 16
self.bucket_width_source = bucket_width_source
self.length_penalty_alpha = length_penalty_alpha
self.length_penalty_beta = length_penalty_beta
self.softmax_temperature = softmax_temperature
self.model = model
with data_io.smart_open(inputs) as inputs_fin, data_io.smart_open(references) as references_fin:
input_sentences = inputs_fin.readlines()
target_sentences = references_fin.readlines()
utils.check_condition(len(input_sentences) == len(target_sentences), "Number of sentence pairs do not match")
if sample_size <= 0:
sample_size = len(input_sentences)
if sample_size < len(input_sentences):
# custom random number generator to guarantee the same samples across runs in order to be able to
# compare metrics across independent runs
random_gen = random.Random(random_seed)
self.input_sentences, self.target_sentences = zip(
*random_gen.sample(list(zip(input_sentences, target_sentences)),
sample_size))
else:
self.input_sentences, self.target_sentences = input_sentences, target_sentences
logger.info("Created CheckpointDecoder(max_input_len=%d, beam_size=%d, model=%s, num_sentences=%d)",
max_input_len if max_input_len is not None else -1,
beam_size, model, len(self.input_sentences))
with data_io.smart_open(os.path.join(self.model, C.DECODE_REF_NAME), 'w') as trg_out, \
data_io.smart_open(os.path.join(self.model, C.DECODE_IN_NAME), 'w') as src_out:
[trg_out.write(s) for s in self.target_sentences]
[src_out.write(s) for s in self.input_sentences]
def decode_and_evaluate(self,
checkpoint: Optional[int] = None,
output_name: str = os.devnull) -> Dict[str, float]:
"""
        Decodes the data set and evaluates it given a checkpoint.
:param checkpoint: Checkpoint to load parameters from.
:param output_name: Filename to write translations to. Defaults to /dev/null.
:return: Mapping of metric names to scores.
"""
models, vocab_source, vocab_target = inference.load_models(self.context,
self.max_input_len,
self.beam_size,
self.batch_size,
[self.model],
[checkpoint],
softmax_temperature=self.softmax_temperature,
max_output_length_num_stds=self.max_output_length_num_stds)
translator = inference.Translator(self.context,
self.ensemble_mode,
self.bucket_width_source,
inference.LengthPenalty(self.length_penalty_alpha, self.length_penalty_beta),
models,
vocab_source,
vocab_target)
trans_wall_time = 0.0
translations = []
with data_io.smart_open(output_name, 'w') as output:
handler = sockeye.output_handler.StringOutputHandler(output)
tic = time.time()
# trans_inputs = [translator.ctx_make_input(i, line) for i, line in enumerate(self.input_sentences)]
trans_inputs = []
for i, line in enumerate(self.input_sentences):
contents = line.split('!@#$')
if len(contents) == 1:
contents = ['', contents[0]]
trans_inputs.append(translator.ctx_make_input(i, contents))
trans_outputs = translator.translate(trans_inputs)
trans_wall_time = time.time() - tic
for trans_input, trans_output in zip(trans_inputs, trans_outputs):
handler.handle(trans_input, trans_output)
translations.append(trans_output.translation)
avg_time = trans_wall_time / len(self.input_sentences)
# TODO(fhieber): eventually add more metrics (METEOR etc.)
return {C.BLEU_VAL: evaluate.raw_corpus_bleu(hypotheses=translations,
references=self.target_sentences,
offset=0.01),
C.CHRF_VAL: evaluate.raw_corpus_chrf(hypotheses=translations,
references=self.target_sentences),
C.AVG_TIME: avg_time}
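# Illustrative usage sketch (not part of the original module; paths, device, and
# checkpoint number are placeholders):
#   decoder = CheckpointDecoder(context=mx.cpu(),
#                               inputs='dev.source', references='dev.target',
#                               model='model_dir', sample_size=500)
#   metrics = decoder.decode_and_evaluate(checkpoint=10)
#   # metrics[C.BLEU_VAL] then holds the sampled-validation BLEU for that checkpoint.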
|
the-stack_106_26945 | #!/usr/bin/env python3
import requests
from bs4 import BeautifulSoup
from utils import convert_num, display_num
from tweet import twitter_post
hashtags = "\n@The Weeknd #weeknd #music #r&b #streams"
module = "Kworb Charts"
def kworb_data(group):
"""Gets Spotify charts data of an artist
    It starts all the tasks needed to get the latest data and eventually tweet updates.
Args:
- group: dictionary that contains all the data about the group
Returns:
the same group dictionary with updated data
"""
fetched = get_artist_charts(group)
group = check_new_goal(group, fetched)
return group
def get_artist_charts(artist):
"""Gets charts of an artist
It scrapes the page https://kworb.net/spotify/artist/xxxxx.html by parsing the table
containing all the tracks of that artist with the number of streams
Args:
- artist: dictionary that contains all the data about the single artist
Returns:
a list of dictionaries with track id, name and number of streams
"""
URL = 'https://kworb.net/spotify/artist/' + artist["spotify"]["id"]+ '.html'
page = requests.get(URL)
soup = BeautifulSoup(page.content, 'html.parser')
table = soup.find('table')
results = []
z = 0
for row in table.findAll('tr'):
elem = row.findAll('td')
if len(elem) > 3:
track = {
"id": elem[1].a.get('href')[9:-5],
"name": elem[1].text,
"streams": int (elem[3].text.replace(",",""))
}
results.append(track)
z+=1
print("[{}] ({}) Fetched {} songs".format(module, artist["name"], z))
return results
def check_new_goal(artist, new):
"""Checks if a track hits a new stream goal (fixed to 10 million)
It tweets if a track reaches a new goal
Args:
- artist: dictionary that contains all the data about the single artist
- new: a list of dictionaries with track id, name and number of streams
Returns:
an artist dictionary with updated data
"""
if "kworb" in artist:
old = artist["kworb"]
for old_song in old:
for new_song in new:
if new_song["id"] == old_song["id"]:
if convert_num("10M", old_song["streams"]) != convert_num("10M", new_song["streams"]):
twitter_post(
"{} reached {} streams on #Spotify\n{}\n{}"
.format(new_song["name"], display_num(new_song["streams"]), link_song(new_song["id"]), hashtags)
)
artist["kworb"] = new
return artist
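# Worked example of the goal check above (assumption: utils.convert_num("10M", n)
# effectively floors n to the nearest 10-million multiple): with old streams of
# 39,800,000 and new streams of 40,100,000 the floored values differ
# (30,000,000 vs 40,000,000), so a new 10M goal was crossed and a tweet is posted.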
def link_song(track_id):
"""Generates a song link on Spotify, given the id
Args:
- track_id: id of the song
Returns:
a link to that song on Spotify
"""
return "https://open.spotify.com/track/" + track_id
|
the-stack_106_26946 | from sympy import gcd
__all__ = ('phi', 'partition_sequences')
def phi(n):
""" Euler's totient function."""
assert n >= 0, 'Negative integer.'
out = 0
for i in range(1, n + 1):
if gcd(n, i) == 1:
out += 1
return out
def partition_sequences(k):
""" Generates a set of partition_sequences of size :math:`k`, i.e.
sequences :math:`(n_i)` such that :math:`\sum_{i=1}^k i n_i = k`."""
assert k >= 0, 'Negative integer.'
def f(k, c, i):
if k == 0:
yield [0] * c
elif i == k:
yield [1] + [0] * (c - 1)
elif i < k:
for n in range(0, k // i + 1):
for ps in f(k - (i * n), c - 1, i + 1):
yield [n] + ps
return f(k, k, 1)
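# Minimal usage sketch (added for illustration, not part of the original module):
if __name__ == '__main__':
    # phi(12) == 4 because 1, 5, 7 and 11 are the integers in [1, 12] coprime to 12.
    print(phi(12))
    # Each sequence (n_1, ..., n_4) below satisfies 1*n_1 + 2*n_2 + 3*n_3 + 4*n_4 == 4;
    # there are 5 of them, one per integer partition of 4.
    for seq in partition_sequences(4):
        assert sum(i * n for i, n in enumerate(seq, start=1)) == 4
        print(seq)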
|
the-stack_106_26953 | """
Simple Example: Query Something
--------------------------------------------------------------------
Loads Something from disk
"""
from typing import Union, Optional
from datetime import datetime, timezone
from hopeit.app.api import event_api
from hopeit.app.context import EventContext, PostprocessHook
from hopeit.app.logger import app_extra_logger
from hopeit.fs_storage import FileStorage
from model import Something, StatusType, Status, SomethingNotFound
__steps__ = ['load', 'update_status_history']
__api__ = event_api(
summary="Simple Example: Query Something",
query_args=[
('item_id', str, 'Item Id to read')
],
responses={
200: (Something, "Something object returned when found"),
404: (SomethingNotFound, "Information about not found object")
}
)
logger, extra = app_extra_logger()
fs: Optional[FileStorage] = None
async def __init_event__(context):
global fs
if fs is None:
fs = FileStorage(path=str(context.env['fs']['data_path']))
async def load(payload: None, context: EventContext, *,
item_id: str, update_status: bool = False) -> Union[Something, SomethingNotFound]:
"""
Loads json file from filesystem as `Something` instance
:param payload: unused
:param context: EventContext
:param item_id: str, item id to load
:return: Loaded `Something` object or SomethingNotFound if not found or validation fails
"""
assert fs
logger.info(context, "load", extra=extra(something_id=item_id, path=fs.path))
something = await fs.get(key=item_id, datatype=Something)
if something is None:
logger.warning(context, "item not found", extra=extra(something_id=item_id, path=fs.path))
return SomethingNotFound(str(fs.path), item_id)
return something
async def update_status_history(payload: Something, context: EventContext) -> Something:
if payload.status:
payload.history.append(payload.status)
payload.status = Status(
ts=datetime.now(tz=timezone.utc),
type=StatusType.LOADED
)
return payload
async def __postprocess__(payload: Union[Something, SomethingNotFound],
context: EventContext,
response: PostprocessHook) -> Union[Something, SomethingNotFound]:
if isinstance(payload, SomethingNotFound):
response.status = 404
return payload
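# Rough request flow for this event (illustrative only; the actual route and port
# depend on the hopeit app and server configuration, and the event/route name is
# an assumption here):
#   GET .../query-something?item_id=<existing id>  -> 200 with a Something payload
#   GET .../query-something?item_id=<unknown id>   -> 404 with a SomethingNotFound payload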
|
the-stack_106_26956 | # Copyright (c) 2017 VisualDL Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =======================================================================
from __future__ import print_function
import random
import subprocess
import six
def crepr(v):
if type(v) is six.text_type:
return '"%s"' % v
return str(v)
class Rank(object):
def __init__(self, kind, name, priority):
'''
kind: str
name: str
priority: int
'''
self.kind = kind
self.name = name
self.priority = priority
self.nodes = []
def __str__(self):
if not self.nodes:
return ''
return '{' + 'rank={};'.format(self.kind) + \
','.join([node.name for node in self.nodes]) + '}'
# The python graphviz package is too limited for this, so we generate the DOT source by hand.
class Graph(object):
rank_counter = 0
def __init__(self, title, **attrs):
self.title = title
self.attrs = attrs
self.nodes = []
self.edges = []
self.rank_groups = {}
def code(self):
return self.__str__()
def rank_group(self, kind, priority):
name = "rankgroup-%d" % Graph.rank_counter
Graph.rank_counter += 1
rank = Rank(kind, name, priority)
self.rank_groups[name] = rank
return name
def node(self, label, prefix, **attrs):
node = Node(label, prefix, **attrs)
if 'rank' in attrs:
rank = self.rank_groups[attrs['rank']]
del attrs['rank']
rank.nodes.append(node)
self.nodes.append(node)
return node
def edge(self, source, target, **attrs):
edge = Edge(source, target, **attrs)
self.edges.append(edge)
return edge
def display(self, dot_path):
        # Write and close the dot file before invoking `dot`, so its content is
        # flushed to disk when the subprocess reads it.
        with open(dot_path, 'w') as dot_file:
            dot_file.write(self.__str__())
image_path = dot_path[:-3] + "jpg"
cmd = ["dot", "-Tjpg", dot_path, "-o", image_path]
subprocess.Popen(
cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
return image_path
def show(self, dot_path):
image = self.display(dot_path)
cmd = ["feh", image]
subprocess.Popen(
cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
def _rank_repr(self):
        # Python 3's sorted() has no `cmp` argument; order rank groups by priority.
        ranks = sorted(
            self.rank_groups.items(),
            key=lambda item: item[1].priority)
repr = []
for x in ranks:
repr.append(str(x[1]))
return '\n'.join(repr) + '\n'
def __str__(self):
reprs = [
'digraph G {',
'title = {}'.format(crepr(self.title)),
]
for attr in self.attrs:
reprs.append("{key}={value};".format(
key=attr, value=crepr(self.attrs[attr])))
reprs.append(self._rank_repr())
random.shuffle(self.nodes)
reprs += [str(node) for node in self.nodes]
for x in self.edges:
reprs.append(str(x))
reprs.append('}')
return '\n'.join(reprs)
class Node(object):
counter = 1
def __init__(self, label, prefix, **attrs):
self.label = label
self.name = "%s_%d" % (prefix, Node.counter)
self.attrs = attrs
Node.counter += 1
def __str__(self):
reprs = '{name} [label={label} {extra} ];'.format(
name=self.name,
label=self.label,
extra=',' + ','.join("%s=%s" % (key, crepr(value))
for key, value in self.attrs.items())
if self.attrs else "")
return reprs
class Edge(object):
def __init__(self, source, target, **attrs):
'''
Link source to target.
:param source: Node
:param target: Node
:param graph: Graph
:param attrs: dic
'''
self.source = source
self.target = target
self.attrs = attrs
def __str__(self):
repr = "{source} -> {target} {extra}".format(
source=self.source.name,
target=self.target.name,
extra="" if not self.attrs else
"[" + ','.join("{}={}".format(attr[0], crepr(attr[1]))
for attr in self.attrs.items()) + "]")
return repr
g_graph = Graph(title="some model")
def add_param(label, graph=None):
if not graph:
graph = g_graph
return graph.node(label=label, prefix='param', color='blue')
def add_op(label, graph=None):
if not graph:
graph = g_graph
label = '\n'.join([
'<table border="0">',
' <tr>',
' <td>',
label,
' </td>'
' </tr>',
'</table>',
])
return graph.node(label=label, prefix='op', shape="none")
def add_edge(source, target):
return g_graph.edge(source, target)
if __name__ == '__main__':
n0 = add_param(crepr("layer/W0.w"))
n1 = add_param(crepr("layer/W0.b"))
n2 = add_op("sum")
add_edge(n0, n2)
add_edge(n1, n2)
print(g_graph.code())
g_graph.display('./1.dot')
|
the-stack_106_26957 | # importing the requests and base64 library
import requests
import base64
import pprint
with open('file.jpg', 'rb') as image:
img = base64.b64encode(image.read()).decode("utf-8")
headers = { 'Content-Type': 'application/json', 'Accept': '*/*'}
rf = requests.post(
    url='http://localhost:8080/selfie',
    headers=headers,
    json={"img": img},
)
pp = pprint.PrettyPrinter(indent=4)
data = rf.json()
pp.pprint(data)
|
the-stack_106_26960 | import logging
from connexion import problem
from flask import url_for
from rhub.auth.keycloak import (
KeycloakClient, KeycloakGetError, problem_from_keycloak_error,
)
from rhub.auth.utils import route_require_admin
logger = logging.getLogger(__name__)
def _role_href(role):
return {
'role': url_for('.rhub_api_auth_role_get_role',
role_id=role['id']),
}
# These are "realm-level" roles; "client"-level roles can be implemented
# separately later if needed.
def list_roles(keycloak: KeycloakClient):
try:
return [
role | {'_href': _role_href(role)}
for role in keycloak.role_list()
]
except KeycloakGetError as e:
logger.exception(e)
return problem_from_keycloak_error(e)
except Exception as e:
logger.exception(e)
return problem(500, 'Unknown Error', str(e))
@route_require_admin
def create_role(keycloak: KeycloakClient, body, user):
try:
role_id = keycloak.role_create(body)
logger.info(f'Create role {role_id}')
role_data = keycloak.role_get(role_id)
return role_data | {'_href': _role_href(role_data)}
except KeycloakGetError as e:
logger.exception(e)
return problem_from_keycloak_error(e)
except Exception as e:
logger.exception(e)
return problem(500, 'Unknown Error', str(e))
def get_role(keycloak: KeycloakClient, role_id):
try:
role_data = keycloak.role_get(role_id)
return role_data | {'_href': _role_href(role_data)}
except KeycloakGetError as e:
logger.exception(e)
return problem_from_keycloak_error(e)
except Exception as e:
logger.exception(e)
return problem(500, 'Unknown Error', str(e))
@route_require_admin
def update_role(keycloak: KeycloakClient, role_id, body, user):
try:
keycloak.role_update(role_id, body)
role_name = body['name']
logger.info(f'Updated role {role_id}')
role_data = keycloak.role_get(role_name)
return role_data | {'_href': _role_href(role_data)}
except KeycloakGetError as e:
logger.exception(e)
return problem_from_keycloak_error(e)
except Exception as e:
logger.exception(e)
return problem(500, 'Unknown Error', str(e))
@route_require_admin
def delete_role(keycloak: KeycloakClient, role_id, user):
try:
keycloak.role_delete(role_id)
logger.info(f'Deleted role {role_id}')
return {}, 200
except KeycloakGetError as e:
logger.exception(e)
return problem_from_keycloak_error(e)
except Exception as e:
logger.exception(e)
return problem(500, 'Unknown Error', str(e))
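# Illustrative shape of the payload returned by list_roles/get_role (the exact
# fields come from Keycloak and the '_href' URL depends on how the Flask
# blueprint is mounted, so this is only a sketch):
#   {"id": "...", "name": "admin", ..., "_href": {"role": "/.../role/<role_id>"}}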
|
the-stack_106_26961 | #!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all line items that are missing creatives.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
def main(client):
# Initialize appropriate service.
line_item_service = client.GetService('LineItemService', version='v202005')
# Create a statement to select line items.
statement = (ad_manager.StatementBuilder(version='v202005')
.Where('isMissingCreatives = :isMissingCreatives')
.WithBindVariable('isMissingCreatives', True))
# Retrieve a small amount of line items at a time, paging
# through until all line items have been retrieved.
while True:
response = line_item_service.getLineItemsByStatement(statement.ToStatement(
))
if 'results' in response and len(response['results']):
for line_item in response['results']:
# Print out some information for each line item.
print('Line item with ID "%d" and name "%s" was found.\n' %
(line_item['id'], line_item['name']))
statement.offset += statement.limit
else:
break
print('\nNumber of results found: %s' % response['totalResultSetSize'])
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client)
|
the-stack_106_26962 | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import argparse
import logging
import os
import typing
from typing import Text
from typing import Tuple
from typing import Optional
from rasa_nlu.components import ComponentBuilder
from rasa_nlu.model import Interpreter
from rasa_nlu.model import Trainer
from rasa_nlu.config import RasaNLUConfig
from rasa_nlu.training_data import load_data
logger = logging.getLogger(__name__)
if typing.TYPE_CHECKING:
from rasa_nlu.persistor import Persistor
def create_argparser():
parser = argparse.ArgumentParser(
description='train a custom language parser')
parser.add_argument('-p', '--pipeline', default=None,
help="Pipeline to use for the message processing.")
parser.add_argument('-o', '--path', default=None,
help="Path where model files will be saved")
parser.add_argument('-d', '--data', default=None,
help="File containing training data")
parser.add_argument('-c', '--config', required=True,
help="Rasa NLU configuration file")
parser.add_argument('-l', '--language', default=None, choices=['de', 'en'],
help="Model and data language")
parser.add_argument('-t', '--num_threads', default=None, type=int,
help="Number of threads to use during model training")
parser.add_argument('--fixed_model_name',
help="If present, a model will always be persisted "
"in the specified directory instead of creating "
"a folder like 'model_20171020-160213'")
parser.add_argument('-m', '--mitie_file', default=None,
help='File with mitie total_word_feature_extractor')
return parser
class TrainingException(Exception):
"""Exception wrapping lower level exceptions that may happen while training
Attributes:
failed_target_project -- name of the failed project
        message -- explanation of why training failed
"""
def __init__(self, failed_target_project=None, exception=None):
        self.failed_target_project = failed_target_project
        self.message = ""
        if exception:
            self.message = exception.args[0]
def __str__(self):
return self.message
def create_persistor(config):
# type: (RasaNLUConfig) -> Optional[Persistor]
"""Create a remote persistor to store the model if configured."""
if config.get("storage") is not None:
from rasa_nlu.persistor import get_persistor
return get_persistor(config)
else:
return None
def init(): # pragma: no cover
# type: () -> RasaNLUConfig
"""Combines passed arguments to create rasa NLU config."""
parser = create_argparser()
args = parser.parse_args()
config = RasaNLUConfig(args.config, os.environ, vars(args))
return config
def do_train_in_worker(config):
# type: (RasaNLUConfig) -> Text
"""Loads the trainer and the data and runs the training in a worker."""
try:
_, _, persisted_path = do_train(config)
return persisted_path
except Exception as e:
raise TrainingException(config.get("project"), e)
def do_train(config, # type: RasaNLUConfig
component_builder=None # type: Optional[ComponentBuilder]
):
# type: (...) -> Tuple[Trainer, Interpreter, Text]
"""Loads the trainer and the data and runs the training of the model."""
# Ensure we are training a model that we can save in the end
# WARN: there is still a race condition if a model with the same name is
# trained in another subprocess
trainer = Trainer(config, component_builder)
persistor = create_persistor(config)
training_data = load_data(config['data'], config['language'])
interpreter = trainer.train(training_data)
persisted_path = trainer.persist(config['path'], persistor,
config['project'],
config['fixed_model_name'])
return trainer, interpreter, persisted_path
if __name__ == '__main__':
config = init()
logging.basicConfig(level=config['log_level'])
do_train(config)
logger.info("Finished training")
|
the-stack_106_26963 | # Copyright (c) 2021 NVIDIA Corporation. All rights reserved.
# This work is licensed under the NVIDIA Source Code License - Non-commercial.
# Full text can be found in LICENSE.md
"""
We develop our own evaluation script from the official example evaluation script for the
Objectron dataset. It reads our re-sorted tfrecords, runs evaluation, and outputs a summary report with the
name specified in the report_file argument. When adapting this for your own model, you
have to modify the Evaluator.predict() function, which essentially takes an image and produces
a 3D bounding box.
"""
import eigenpy
eigenpy.switchToNumpyArray()
import math
import os
import warnings
import copy
import argparse
import glob
import numpy as np
import scipy
from scipy.spatial.transform import Rotation as rotation_util
import tensorflow as tf
import tqdm
import objectron.dataset.iou as IoU3D
import objectron.dataset.box as Box
import objectron.dataset.metrics_nvidia as metrics
import objectron.dataset.parser as parser
import sys
sys.path.insert(0, '../..')
from lib.utils.pnp.cuboid_pnp_shell import pnp_shell
import shutil
import simplejson as json
from lib.detectors.detector_factory import detector_factory
from lib.opts import opts
import cv2
from os.path import exists
from eval_opts import eval_opts
import functools
import collections
import eval_utils
import multiprocessing as mp
_MAX_PIXEL_ERROR = 0.1
_MAX_AZIMUTH_ERROR = 30.
_MAX_POLAR_ERROR = 20.
_MAX_SCALE_ERROR = 2.
_MAX_DISTANCE = 1.0 # In meters
_NUM_BINS = 21
# Not used yet
dimension_ref = {
'bike': [[0.65320896, 1.021797894, 1.519635599, 0.6520559199, 1.506392621],
[0.1179380561, 0.176747817, 0.2981715678, 0.1667947895, 0.3830536275]],
'book': [[0.225618019, 0.03949624326, 0.1625821624, 7.021850281, 5.064694187],
[0.1687487664, 0.07391230822, 0.06436673199, 3.59629568, 2.723290812]],
'bottle': [
[0.07889784977450116, 0.24127451915330908, 0.0723714257114412, 0.33644069262302545, 0.3091134992864717, ],
[0.02984649578071775, 0.06381390122918497, 0.03088144838560917, 0.11052240441921059,
0.13327627592012867, ]],
'camera': [[0.11989848700326843, 0.08226238775595619, 0.09871718158089632, 1.507216484439368, 1.1569407159290284, ],
[0.021177290310316968, 0.02158788017191602, 0.055673710278419844, 0.28789183678046854,
0.5342094080365904, ]],
'cereal_box': [
[0.19202754401417296, 0.2593114001714919, 0.07723794925413519, 0.7542602699204104, 0.29441151268928173, ],
[0.08481640897407464, 0.09999915952084068, 0.09495429981036707, 0.19829004029411457, 0.2744797990483879, ]],
'chair': [[0.5740664085137888, 0.8434027515832329, 0.6051523831888338, 0.6949691013776601, 0.7326891354260606, ],
[0.12853104253707456, 0.14852086453095492, 0.13428881418587957, 0.16897092539619352,
0.18636134566748525, ]],
'cup': [[0.08587637391801063, 0.12025228955138188, 0.08486836104868696, 0.7812126934904675, 0.7697895244331658, ],
[0.05886805978497525, 0.06794896438246326, 0.05875681990718713, 0.2887038681446475, 0.283821205157399, ]],
'mug': [[0.14799136566553112, 0.09729087667918128, 0.08845449667169905, 1.3875694883045138, 1.0224997119392225, ],
[1.0488828523223728, 0.2552672927963539, 0.039095350310480705, 0.3947832854104711, 0.31089415283872546, ]],
'laptop': [[0.33685059747485196, 0.1528068814247063, 0.2781020624738614, 35.920214652427696, 23.941173992376903, ],
[0.03529983948867832, 0.07017080198389423, 0.0665823136876069, 391.915687801732, 254.21325950495455, ]],
'shoe': [[0.10308848289662519, 0.10932616184503478, 0.2611737789760352, 1.0301976264129833, 2.6157393112424328, ],
[0.02274768925924402, 0.044958380226590516, 0.04589720205423542, 0.3271000267177176,
0.8460337534776092, ]],
}
epnp_alpha_default = np.array([4.0, -1.0, -1.0, -1.0, 2.0, -1.0, -1.0, 1.0, 2.0,
-1.0, 1.0, -1.0, 0.0, -1.0, 1.0, 1.0, 2.0, 1.0, -1.0, -1.0,
0.0, 1.0, -1.0, 1.0, 0.0, 1.0, 1.0, -1.0, -2.0, 1.0, 1.0,
1.0]).reshape(8, 4)
def CHECK_EQ(a, b):
if a != b:
print('Error!')
exit()
def safe_divide(i1, i2):
divisor = float(i2) if i2 > 0 else 1e-6
return i1 / divisor
def rotation_y_matrix(theta):
M_R = np.array([[np.cos(theta), 0, np.sin(theta), 0],
[0, 1, 0, 0],
[-np.sin(theta), 0, np.cos(theta), 0], [0, 0, 0, 1]])
return M_R
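# Sanity check for the convention above: rotation_y_matrix(np.pi / 2) maps the
# homogeneous +x axis (1, 0, 0, 1) to (0, 0, -1, 1) and +z to +x, i.e. the
# standard rotation about the +y axis.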
def bounding_box(points):
x_coordinates, y_coordinates = zip(*points)
return [min(x_coordinates), min(y_coordinates), max(x_coordinates), max(y_coordinates)]
# For debug
import matplotlib.pyplot as plt
RADIUS = 10
colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (128, 128, 0), (128, 0, 128),
(0, 128, 128), (255, 255, 255), (0, 0, 0)]
def Dataloader(opt):
print('Creating video index!')
videos = [os.path.splitext(os.path.basename(i))[0] for i in glob.glob(f'video_tfrecord_sorted/{opt.c}/*.tfrecord')]
# Sort according to video names
def compfunc(item1, item2):
# E.g., book_batch-1_8
item1_num1 = int(item1[item1.find('-') + 1:item1.rfind('_')])
item1_num2 = int(item1[item1.rfind('_') + 1:])
item2_num1 = int(item2[item2.find('-') + 1:item2.rfind('_')])
item2_num2 = int(item2[item2.rfind('_') + 1:])
if item1_num1 > item2_num1:
return 1 # larger
elif item1_num1 == item2_num1:
if item1_num2 > item2_num2:
return 1
else:
return -1 # smaller
else:
return -1
videos = sorted(videos, key=functools.cmp_to_key(compfunc))
return videos
class Evaluator(object):
"""Class for evaluating a deep pursuit model."""
def __init__(self, opt, height=1920, width=1440):
self.opt = opt
self.height, self.width = int(height / self.opt.eval_resolution_ratio), int(
width / self.opt.eval_resolution_ratio)
self.encoder = parser.ObjectronParser(self.opt.c, self.height, self.width)
self._vis_thresh = 0.1
self._error_scale = 0.
self._error_2d = 0.
self._matched = 0
self._iou_3d = 0.
self._azimuth_error = 0.
self._polar_error = 0.
self._scale_thresholds = np.linspace(0.0, 1., num=_NUM_BINS)
self._iou_thresholds = np.linspace(0.0, 1., num=_NUM_BINS)
self._pixel_thresholds = np.linspace(0.0, _MAX_PIXEL_ERROR, num=_NUM_BINS)
self._azimuth_thresholds = np.linspace(
0.0, _MAX_AZIMUTH_ERROR, num=_NUM_BINS)
self._polar_thresholds = np.linspace(0.0, _MAX_POLAR_ERROR, num=_NUM_BINS)
self._add_thresholds = np.linspace(0.0, _MAX_DISTANCE, num=_NUM_BINS)
self._adds_thresholds = np.linspace(0.0, _MAX_DISTANCE, num=_NUM_BINS)
self._scale_ap = metrics.AveragePrecision(_NUM_BINS)
self._iou_ap = metrics.AveragePrecision(_NUM_BINS)
self._pixel_ap = metrics.AveragePrecision(_NUM_BINS)
self._azimuth_ap = metrics.AveragePrecision(_NUM_BINS)
self._polar_ap = metrics.AveragePrecision(_NUM_BINS)
self._add_ap = metrics.AveragePrecision(_NUM_BINS)
self._adds_ap = metrics.AveragePrecision(_NUM_BINS)
self.dict_consistency = {}
self.consistency_score = 0
# Init the detector
os.environ['CUDA_VISIBLE_DEVICES'] = self.opt.gpus_str
Detector = detector_factory[self.opt.task]
if self.opt.c != 'cup':
self.detector = Detector(self.opt)
else:
# Two detectors for the cup category
if self.opt.tracking_task == True:
self.opt.load_model = f"../../../models/CenterPoseTrack/cup_mug_15.pth"
self.detector_mug = Detector(self.opt)
self.opt.load_model = f"../../../models/CenterPoseTrack/cup_cup_sym_12_15.pth"
self.detector_cup = Detector(self.opt)
else:
if 'v1' in self.opt.arch:
self.opt.load_model = f"../../../models/CenterPose/cup_mug_v1_140.pth"
self.detector_mug = Detector(self.opt)
self.opt.load_model = f"../../../models/CenterPose/cup_cup_v1_sym_12_140.pth"
self.detector_cup = Detector(self.opt)
else:
self.opt.load_model = f"../../../models/CenterPose/cup_mug_140.pth"
self.detector_mug = Detector(self.opt)
self.opt.load_model = f"../../../models/CenterPose/cup_cup_sym_12_140.pth"
self.detector_cup = Detector(self.opt)
if self.opt.eval_CenterPose_initialization:
# Create a new detector
opt_CenterPose = opts().parser.parse_args([])
opt_CenterPose = opts().parse(opt_CenterPose)
opt_CenterPose.c = self.opt.c
opt_CenterPose.debug = 0
# opt_CenterPose.debug = 4
opt_CenterPose.arch = 'dlav1_34'
opt_CenterPose.use_pnp = True
opt_CenterPose.rep_mode = 1
opt_CenterPose.obj_scale = True
opt_CenterPose = opts().init(opt_CenterPose)
# Init the detector
Detector_CenterPose = detector_factory[opt_CenterPose.task]
if opt_CenterPose.c != 'cup':
# Todo: path to be updated
# Path is related to the entry script
if opt_CenterPose.c != 'bottle':
opt_CenterPose.load_model = f"../../../models/CenterPose/{opt_CenterPose.c}_v1_140.pth"
else:
opt_CenterPose.load_model = f"../../../models/CenterPose/bottle_v1_sym_12_140.pth"
self.detector_CenterPose = Detector_CenterPose(opt_CenterPose)
else:
# Two detectors for the cup category
opt_CenterPose.load_model = f"../../../models/CenterPose/cup_mug_v1_140.pth"
self.detector_mug_CenterPose = Detector_CenterPose(opt_CenterPose)
opt_CenterPose.load_model = f"../../../models/CenterPose/cup_cup_v1_sym_12_140.pth"
self.detector_cup_CenterPose = Detector_CenterPose(opt_CenterPose)
if self.opt.c == 'cup' and self.opt.mug_only:
self.opt.dimension_ref = dimension_ref['mug']
else:
self.opt.dimension_ref = dimension_ref[self.opt.c]
if self.opt.eval_use_absolute_scale:
self.opt.dimension_ref = self.opt.dimension_ref[0][0:3]
else:
# Relative scale
self.opt.dimension_ref = [self.opt.dimension_ref[0][3], 1, self.opt.dimension_ref[0][4]]
# Save the external input for the last frame
self.last_frame_info = None
def reset(self):
# Reset AP stuff
self._scale_ap = metrics.AveragePrecision(_NUM_BINS)
self._iou_ap = metrics.AveragePrecision(_NUM_BINS)
self._pixel_ap = metrics.AveragePrecision(_NUM_BINS)
self._azimuth_ap = metrics.AveragePrecision(_NUM_BINS)
self._polar_ap = metrics.AveragePrecision(_NUM_BINS)
self._add_ap = metrics.AveragePrecision(_NUM_BINS)
self._adds_ap = metrics.AveragePrecision(_NUM_BINS)
# Reset mean related
self._error_scale = 0.
self._error_2d = 0.
self._matched = 0
self._iou_3d = 0.
self._azimuth_error = 0.
self._polar_error = 0.
# Reset consistency evaluation
self.dict_consistency = {}
self.consistency_score = 0
def predict_CenterPose(self, image, label, camera_matrix, projection_matrix, filename, sample_id,
MugFlag_instance=[]):
"""
        Predict the box's 2D and 3D keypoints from the input image.
        Note that the predicted 3D bounding boxes are correct up to a scale.
        Provides CenterPose initialization, as used for the CenterPoseTrack paper.
"""
meta = {}
meta['camera_matrix'] = camera_matrix
meta['id'] = int(sample_id)
# Change to BGR space
image = cv2.cvtColor(image.copy(), cv2.COLOR_RGB2BGR)
if MugFlag_instance == []:
ret = self.detector_CenterPose.run(image, filename=filename + f'_{sample_id}', meta_inp=meta)
elif MugFlag_instance == True:
ret = self.detector_mug_CenterPose.run(image, filename=filename + f'_{sample_id}', meta_inp=meta)
elif MugFlag_instance == False:
ret = self.detector_cup_CenterPose.run(image, filename=filename + f'_{sample_id}', meta_inp=meta)
boxes = ret['boxes']
return boxes
def predict(self, image, label, camera_matrix, projection_matrix, filename, sample_id, MugFlag_instance=[]):
"""
        Predict the box's 2D and 3D keypoints from the input image.
        Note that the predicted 3D bounding boxes are correct up to a scale.
"""
meta = {}
meta['camera_matrix'] = camera_matrix
meta['id'] = int(sample_id)
# Provide the detector with the gt input
if self.opt.gt_pre_hm_hmhp == True or self.opt.gt_pre_hm_hmhp_first == True or \
self.opt.eval_fake_output:
# Go with
# self.opt.gt_pre_hm_hmhp_first == True and int(sample_id) == 0
# self.opt.gt_pre_hm_hmhp == True
# self.opt.eval_fake_output
if not (self.opt.gt_pre_hm_hmhp_first == True and int(sample_id) != 0):
pre_dets = []
if self.opt.eval_CenterPose_initialization:
boxes_CenterPose = self.predict_CenterPose(image, label, camera_matrix, projection_matrix, filename,
label['image_id'], MugFlag_instance)
for box_CenterPose in boxes_CenterPose:
# Correspond to one prediction in one image
# box_point_2d_ori from kps is not used yet
box_point_2d, box_point_3d, relative_scale, box_point_2d_ori, result_ori = box_CenterPose
kps_ori = box_point_2d
scale_ori = relative_scale
# Todo:
# Not scaled yet, not important for now
kps_3d_ori = box_point_3d
kps = copy.deepcopy(kps_ori)
kps[:, 0] = kps_ori[:, 0] * image.shape[1]
kps[:, 1] = kps_ori[:, 1] * image.shape[0]
bbox = bounding_box(kps) # not normalized 1*4
kps = kps[1:].flatten() # not normalized 1*16
pre_det = {'bbox': bbox, # 4
'score': 1,
'cls': 0,
'obj_scale': scale_ori, # 3
'obj_scale_uncertainty': np.ones(3) * 1e-4, # 3
'tracking': np.zeros(2), # 2
'tracking_hp': np.zeros(16), # 16
'kps': kps, # 16
'kps_displacement_mean': kps, # 16
'kps_displacement_std': np.ones(16) * 1e-4, # 16
'kps_heatmap_mean': kps, # 16
'kps_heatmap_std': np.ones(16) * 1e-4, # 16
'kps_heatmap_height': np.ones(8), # 8
'kps_fusion_mean': kps, # 16
'kps_fusion_std': np.ones(16) * 1e-4, # 16
'kps_pnp': kps_ori, # 9*2
'kps_gt': kps_ori, # 9*2
'kps_3d_cam': kps_3d_ori, # 9*3
'kps_ori': kps_ori, # 9*2
}
pre_dets.append(pre_det)
else:
for idx in range(len(label['2d_instance'])):
scale_ori = label['scale_instance'][idx]
kps_ori = label['2d_instance'][idx] # normalized 9*2
kps_3d_ori = label['3d_instance'][idx] # 9*3, unit: m
scale = scale_ori / scale_ori[1] # normalized 1*3
if self.opt.eval_add_noise:
# Add scale noise (within self.opt.eval_noise_scale)
scale_noise = (-1) ** np.random.randint(2) * np.random.rand(1,
3) * self.opt.eval_noise_scale / 100
scale_ori = (scale_ori * (scale_noise + 1)).flatten()
scale = scale_ori / scale_ori[1] # normalized 1*3
instances_Mo2c = label['Mo2c_instance']
kps_3d_o = Box.UNIT_BOX * scale_ori
kps_3d_ori = instances_Mo2c[idx] @ np.hstack((kps_3d_o, np.ones((kps_3d_o.shape[0], 1)))).T
kps_3d_ori = kps_3d_ori[:3, ].T # 9*3
# Add translation noise
translation_noise = self.opt.eval_noise_translation * np.random.randn(1, 3)
translation_noise = np.tile(translation_noise, (9, 1))
kps_3d_ori = kps_3d_ori + translation_noise
# Add rotation noise
rot_noise = np.deg2rad(np.random.randn() * self.opt.eval_noise_rot)
kps_3d_ori = self._get_rotated_box(kps_3d_ori, rot_noise)
# Override kps_ori
kps_ori = projection_matrix @ np.hstack((kps_3d_ori, np.ones((kps_3d_ori.shape[0], 1)))).T
pp2 = (kps_ori / kps_ori[3])[:2]
viewport_point = (pp2 + 1.0) / 2.0
viewport_point[[0, 1]] = viewport_point[[1, 0]]
kps_ori = viewport_point.T
kps = copy.deepcopy(kps_ori)
kps[:, 0] = kps_ori[:, 0] * image.shape[1]
kps[:, 1] = kps_ori[:, 1] * image.shape[0]
bbox = bounding_box(kps) # not normalized 1*4
kps = kps[1:].flatten() # not normalized 1*16
pre_det = {'bbox': bbox, # 4
'score': 1,
'cls': 0,
'obj_scale': scale, # 3
'obj_scale_uncertainty': np.ones(3) * 1e-4, # 3
'tracking': np.zeros(2), # 2
'tracking_hp': np.zeros(16), # 16
'kps': kps, # 16
'kps_displacement_mean': kps, # 16
'kps_displacement_std': np.ones(16) * 1e-4, # 16
'kps_heatmap_mean': kps, # 16
'kps_heatmap_std': np.ones(16) * 1e-4, # 16
'kps_heatmap_height': np.ones(8), # 8
'kps_fusion_mean': kps, # 16
'kps_fusion_std': np.ones(16) * 1e-4, # 16
'kps_pnp': kps_ori, # 9*2
'kps_gt': kps_ori, # 9*2
'kps_3d_cam': kps_3d_ori, # 9*3
'kps_ori': kps_ori, # 9*2
}
pre_dets.append(pre_det)
if int(sample_id) != 0:
meta['pre_dets'] = self.last_frame_info
else:
meta['pre_dets'] = pre_dets
self.last_frame_info = pre_dets
if not self.opt.eval_fake_output:
# Change to BGR space
image = cv2.cvtColor(image.copy(), cv2.COLOR_RGB2BGR)
if MugFlag_instance == []:
ret = self.detector.run(image, filename=filename + f'_{sample_id}', meta_inp=meta)
elif MugFlag_instance == True:
ret = self.detector_mug.run(image, filename=filename + f'_{sample_id}', meta_inp=meta)
elif MugFlag_instance == False:
ret = self.detector_cup.run(image, filename=filename + f'_{sample_id}', meta_inp=meta)
boxes = ret['boxes']
else:
boxes = []
for box in meta['pre_dets']:
# keypoint_2d_pnp, keypoint_3d, predicted_scale, keypoint_2d_ori, result_ori for debug
keypoint_2d_pnp = box['kps_gt']
keypoint_2d_ori = box['kps_gt']
keypoint_3d = box['kps_3d_cam']
predicted_scale = box['obj_scale']
result_ori = box
boxes.append([keypoint_2d_pnp, keypoint_3d, predicted_scale, keypoint_2d_ori, result_ori])
return boxes
def predict_gt_scale(self, bbox, scale, camera_matrix):
"""
        Predict the box's 2D and 3D keypoints from the input image.
        Note that the predicted 3D bounding boxes are correct up to a scale.
        Uses the ground-truth scale, as in the CenterPose paper evaluation.
"""
meta = {}
meta['camera_matrix'] = camera_matrix
meta['width'] = self.width
meta['height'] = self.height
points = np.array(bbox['kps']).reshape(-1, 2)
points = [(x[0], x[1]) for x in points]
ret = pnp_shell(self.opt, meta, bbox, points, np.array(scale) / scale[1])
if ret is not None:
return ret
else:
return None
def evaluate(self, batch):
"""
Evaluates a batch of serialized tf.Example protos.
"""
images, labels, projs, cam_intrinsics, planes, views, filenames = [], [], [], [], [], [], []
for serialized in batch:
example = tf.train.Example.FromString(serialized)
image, label, filename = self.encoder.parse_example(example)
proj, view, cam_intrinsic = self.encoder.parse_camera(example)
plane = self.encoder.parse_plane(example)
images.append(image)
labels.append(label)
filenames.append(filename)
projs.append(proj)
views.append(view)
cam_intrinsics.append(cam_intrinsic)
planes.append(plane)
# Create sub folders for each video
if self.opt.eval_debug == True or self.opt.eval_debug_json == True:
if filenames:
if os.path.isdir(f'{self.opt.outf}/{self.opt.c}_{self.opt.eval_save_id}/{filenames[0]}'):
print(f'folder {self.opt.outf}/{self.opt.c}_{self.opt.eval_save_id}/{filenames[0]} exists')
else:
os.mkdir(f'{self.opt.outf}/{self.opt.c}_{self.opt.eval_save_id}/{filenames[0]}')
print(f'created folder {self.opt.outf}/{self.opt.c}_{self.opt.eval_save_id}/{filenames[0]}')
# It can be incorporated into the next for block if we support batch processing.
# Since we use pnp here, not valid for now.
# local_id = 0
results = []
for image, label, cam_intrinsic, filename, projection_matrix, view in zip(images, labels, cam_intrinsics,
filenames, projs, views):
# The camera intrinsics have to be updated
cam_intrinsic[:2, :3] = cam_intrinsic[:2, :3] / self.opt.eval_resolution_ratio
cx = cam_intrinsic[0, 2]
cy = cam_intrinsic[1, 2]
cam_intrinsic[0, 2] = cy
cam_intrinsic[1, 2] = cx
if self.opt.c == 'cup':
if all(label['MugFlag_instance']) == True:
results.append(
self.predict(image, label, cam_intrinsic, projection_matrix, filename, label['image_id'], True))
elif all(np.invert(label['MugFlag_instance'])) == True:
results.append(
self.predict(image, label, cam_intrinsic, projection_matrix, filename, label['image_id'],
False))
else:
# For convenience, if mug & cup co-exist, we assume all of them are mug
results.append(
self.predict(image, label, cam_intrinsic, projection_matrix, filename, label['image_id'], True))
else:
results.append(
self.predict(image, label, cam_intrinsic, projection_matrix, filename, label['image_id']))
# Initialization on the first frame
if int(labels[0]['image_id']) == 0:
for i in range(labels[0]['ORI_NUM_INSTANCE']):
self.dict_consistency[i] = []
for boxes, label, plane, image, filename, cam_intrinsic, projection_matrix, view in zip(results, labels, planes,
images, filenames,
cam_intrinsics, projs,
views):
# Extract gt info
instances_scale = label['scale_instance']
instances_2d = label['2d_instance']
instances_3d = label['3d_instance']
instances_Mo2c = label['Mo2c_instance']
instances_ori_index = label['ORI_INDEX']
if self.opt.c == 'cup':
instances_MugFlag = label['MugFlag_instance']
if self.opt.mug_only == True:
# Only count the case with mug
if all(np.invert(label['MugFlag_instance'])) == True:
continue
elif self.opt.mug_only == False:
# Only count the case with cup
if all(np.invert(label['MugFlag_instance'])) == False:
continue
visibilities = label['visibility']
num_instances = 0
for instance, instance_3d, visibility in zip(
instances_2d, instances_3d, visibilities):
if (visibility > self._vis_thresh and
self._is_visible(instance[0]) and instance_3d[0, 2] < 0):
num_instances += 1
# We don't have negative examples in evaluation.
if num_instances == 0:
continue
scale_hit_miss = metrics.HitMiss(self._scale_thresholds)
iou_hit_miss = metrics.HitMiss(self._iou_thresholds)
azimuth_hit_miss = metrics.HitMiss(self._azimuth_thresholds)
polar_hit_miss = metrics.HitMiss(self._polar_thresholds)
pixel_hit_miss = metrics.HitMiss(self._pixel_thresholds)
add_hit_miss = metrics.HitMiss(self._add_thresholds)
adds_hit_miss = metrics.HitMiss(self._adds_thresholds)
# For debug
pred_box_list = []
gt_box_list = []
# Save gt info for Stephen
M_c2w = np.linalg.inv(view)
dict_save = {
'filename': filename,
'camera_pose': M_c2w.tolist(), # M_c2w
'camera_intrinsics': cam_intrinsic.tolist(), # has been transformed to list
'image_id': int(label['image_id']),
"objects": [],
}
num_matched = 0
for idx_box, box in enumerate(boxes):
# Correspond to one prediction in one image
# box_point_2d_ori from kps is not used yet
box_point_2d, box_point_3d, relative_scale, box_point_2d_ori, result_ori = box
if self.opt.eval_MobilePose_postprocessing == True:
box_point_2d, box_point_3d = self.Lift2DTo3D(projection_matrix, result_ori, image.shape[0],
image.shape[1])
index = self.match_box(box_point_2d, instances_2d, visibilities)
if index >= 0:
num_matched += 1
# Apply gt_scale to recalculate pnp
if self.opt.eval_gt_scale == True:
result_gt_scale = self.predict_gt_scale(result_ori, instances_scale[index], cam_intrinsic)
if result_gt_scale is not None:
box_point_2d, box_point_3d, _, _, _ = result_gt_scale
# Todo:
# Sometimes, the gt annotation may be missing. We may have some "false positive" .
# Not so important if we set up a high threshold
# If you only compute the 3D bounding boxes from RGB images,
# your 3D keypoints may be upto scale. However the ground truth
# is at metric scale. There is a hack to re-scale your box using
# the ground planes (assuming your box is sitting on the ground).
# However many models learn to predict depths and scale correctly.
if not self.opt.use_absolute_scale:
scale = self.compute_scale(box_point_3d, plane)
# Read the scale directly, worse than the current idea
# scale2 = instances_scale[index][1]
box_point_3d = box_point_3d * scale
boxes[idx_box] = list(boxes[idx_box])
boxes[idx_box].append(box_point_3d)
frame_id = int(label['image_id'])
print(f'Frame {frame_id}')
print(f'GT: {instances_scale[index] / instances_scale[index][1]}')
print(f'Pred: {relative_scale / relative_scale[1]}')
if self.opt.c == 'cup':
pixel_error = self.evaluate_2d(box_point_2d, instances_2d[index], instances_3d[index],
instances_Mo2c[index], projection_matrix,
instances_MugFlag[index])
azimuth_error, polar_error, iou, pred_box, gt_box, add, adds = self.evaluate_3d(box_point_3d,
instances_3d[
index],
instances_Mo2c[
index],
instances_MugFlag[
index])
else:
pixel_error = self.evaluate_2d(box_point_2d, instances_2d[index], instances_3d[index],
instances_Mo2c[index], projection_matrix)
azimuth_error, polar_error, iou, pred_box, gt_box, add, adds = self.evaluate_3d(box_point_3d,
instances_3d[
index],
instances_Mo2c[
index])
# Record some predictions & gt
M_o2w = M_c2w @ instances_Mo2c[index]
instances_3d_w = M_c2w @ np.hstack(
(instances_3d[index], np.ones((instances_3d[index].shape[0], 1)))).T
instances_3d_w = instances_3d_w[:3, :].T
keypoint_3d_pred_unscaled_c = np.array(boxes[idx_box][1]).reshape(-1, 3)
keypoint_3d_pred_unscaled_w = M_c2w @ np.hstack(
(keypoint_3d_pred_unscaled_c, np.ones((keypoint_3d_pred_unscaled_c.shape[0], 1)))).T
keypoint_3d_pred_unscaled_w = keypoint_3d_pred_unscaled_w[:3, :].T
keypoint_3d_pred_scaled_c = np.array(boxes[idx_box][5]).reshape(-1, 3)
keypoint_3d_pred_scaled_w = M_c2w @ np.hstack(
(keypoint_3d_pred_scaled_c, np.ones((keypoint_3d_pred_scaled_c.shape[0], 1)))).T
keypoint_3d_pred_scaled_w = keypoint_3d_pred_scaled_w[:3, :].T
keypoint_2d_gt = [np.multiply(keypoint, np.asarray([self.width, self.height], np.float32)) for
keypoint in instances_2d[index]]
result_pnp = [np.multiply(keypoint, np.asarray([self.width, self.height], np.float32)) for
keypoint in box_point_2d]
scale_error = self.evaluate_scale(relative_scale, instances_scale[index])
print(f'Scale_error: {scale_error}')
print('\n')
dict_obj = {
'class': self.opt.c,
'index_gt': int(instances_ori_index[index]),
'conf': result_ori['score'],
'location': result_ori['location'],
'quaternion_xyzw': np.array(result_ori['quaternion_xyzw']).tolist(),
'keypoint_2d_pred_displacement': np.array(result_ori['kps_displacement_mean']).reshape(1,
-1).tolist(),
'keypoint_2d_pred_heatmap': np.array(result_ori['kps_heatmap_mean']).reshape(1, -1).tolist(),
'keypoint_2d_pred_pnp': np.array(result_pnp).reshape(1, -1).tolist(),
'keypoint_2d_gt': np.array(keypoint_2d_gt).reshape(1, -1).tolist(),
'relative_scale': relative_scale.tolist(),
'relative_scale_gt': instances_scale[index].tolist(),
'object_pose_gt_w': M_o2w.tolist(), # 4x4 matrix
'keypoint_3d_gt_w': instances_3d_w.tolist(), # 9x3 array
'keypoint_3d_pred_unscaled_w': keypoint_3d_pred_unscaled_w.tolist(), # 9x3 array
'keypoint_3d_pred_scaled_w': keypoint_3d_pred_scaled_w.tolist(), # 9x3 array
'3DIoU': iou,
'error_2Dpixel': pixel_error,
'error_azimuth': azimuth_error,
'error_polar_error': polar_error,
'error_scale': scale_error
}
dict_save['objects'].append(dict_obj)
pred_box_list.append(pred_box)
gt_box_list.append(gt_box)
# Append image_id, keypoint_3d_pred_scaled_w
try:
self.dict_consistency[int(instances_ori_index[index])].append(
(int(label['image_id']), keypoint_3d_pred_scaled_w))
except:
print('s')
conf = result_ori['score']
else:
conf = 0
pixel_error = _MAX_PIXEL_ERROR
azimuth_error = _MAX_AZIMUTH_ERROR
polar_error = _MAX_POLAR_ERROR
iou = 0.
add = _MAX_DISTANCE
adds = _MAX_DISTANCE
scale_error = _MAX_SCALE_ERROR
# New
scale_hit_miss.record_hit_miss([scale_error, conf], greater=False)
iou_hit_miss.record_hit_miss([iou, conf])
add_hit_miss.record_hit_miss([add, conf], greater=False)
adds_hit_miss.record_hit_miss([adds, conf], greater=False)
pixel_hit_miss.record_hit_miss([pixel_error, conf], greater=False)
azimuth_hit_miss.record_hit_miss([azimuth_error, conf], greater=False)
polar_hit_miss.record_hit_miss([polar_error, conf], greater=False)
# # Old
# scale_hit_miss.record_hit_miss(scale_error, greater=False)
# iou_hit_miss.record_hit_miss(iou)
# add_hit_miss.record_hit_miss(add, greater=False)
# adds_hit_miss.record_hit_miss(adds, greater=False)
# pixel_hit_miss.record_hit_miss(pixel_error, greater=False)
# azimuth_hit_miss.record_hit_miss(azimuth_error, greater=False)
# polar_hit_miss.record_hit_miss(polar_error, greater=False)
image_id = label['image_id']
if self.opt.eval_debug_json == True:
json_filename = f'{self.opt.outf}/{self.opt.c}_{self.opt.eval_save_id}/{filename}/{str(image_id).zfill(4)}_record.json'
with open(json_filename, 'w+') as fp:
json.dump(dict_save, fp, indent=4, sort_keys=True)
# For debug
if self.opt.eval_debug == True:
# if self.opt.eval_debug == True and iou<self.opt.eval_debug_save_thresh:
self.debug(image.copy(), num_instances, instances_2d, instances_3d, projection_matrix, boxes,
instances_scale, filename, pred_box_list, gt_box_list, image_id)
# assert num_matched == len(instances_2d)
self._scale_ap.append(scale_hit_miss, len(instances_2d))
self._iou_ap.append(iou_hit_miss, len(instances_2d))
self._pixel_ap.append(pixel_hit_miss, len(instances_2d))
self._azimuth_ap.append(azimuth_hit_miss, len(instances_2d))
self._polar_ap.append(polar_hit_miss, len(instances_2d))
self._add_ap.append(add_hit_miss, len(instances_2d))
self._adds_ap.append(adds_hit_miss, len(instances_2d))
self._matched += num_matched
# Evaluate consistency score here
self.consistency_score = self.evaluate_consistency()
print('Consistency score:', self.consistency_score)
def evaluate_consistency(self):
score_list = []
for key in self.dict_consistency:
iou_sum = 0
count = 0
# Sometimes, it is empty
if len(self.dict_consistency[key]) == 0:
continue
keypoint_3d_list = np.array(self.dict_consistency[key])[:, 1]
# consistency_matrix=np.zeros((len(self.dict_consistency[key][:,1]),len(self.dict_consistency[key][:,1])))
for i in range(len(keypoint_3d_list)):
# Loop over the K nearest frames. Ideally they are consecutive, but some frames may be missing.
for j in range(i + 1, min(i + 1 + self.opt.eval_consistency_local_window, len(keypoint_3d_list))):
# If some frame is missing, its id responding to j will be larger.
if self.dict_consistency[key][j][0] - self.dict_consistency[key][i][
0] > self.opt.eval_consistency_local_window:
continue
box_point_3d = keypoint_3d_list[i]
instance_3d = keypoint_3d_list[j]
iou, pred_box, gt_box = self.evaluate_iou(box_point_3d, instance_3d)
iou_sum += iou
count += 1
if count != 0:
score = iou_sum / count
score_list.append(score)
if len(score_list) > 0:
score_avg = np.mean(score_list)
else:
score_avg = None
return score_avg
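    # The consistency score above is the mean, over tracked object instances, of
    # the average 3D IoU between each frame's predicted box and the predictions
    # for the same instance in the next `eval_consistency_local_window` frames.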
def draw_boxes(self, filename, sample_id, boxes=[], clips=[], colors=['b', 'g', 'r', 'k']):
"""
Draw a list of boxes.
The boxes are defined as a list of vertices
"""
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(111, projection='3d')
for i, b in enumerate(boxes):
x, y, z = b[:, 0], b[:, 1], b[:, 2]
ax.scatter(x, y, z, c='r')
for idx, pos in enumerate(zip(x, y, z)):
ax.text(pos[0], pos[1], pos[2], f'{idx}')
for e in Box.EDGES:
ax.plot(x[e], y[e], z[e], linewidth=2, c=colors[i % len(colors)])
for e in Box.BOTTOM:
ax.plot(x[e], y[e], z[e], linewidth=2, c=colors[i % len(colors)])
if (len(clips)):
points = np.array(clips)
ax.scatter(points[:, 0], points[:, 1], points[:, 2], s=100, c='k')
plt.gca().patch.set_facecolor('white')
ax.w_xaxis.set_pane_color((0.8, 0.8, 0.8, 1.0))
ax.w_yaxis.set_pane_color((0.8, 0.8, 0.8, 1.0))
ax.w_zaxis.set_pane_color((0.8, 0.8, 0.8, 1.0))
# rotate the axes and update
ax.view_init(30, 12)
ax.set_box_aspect((1, 1, 1))
plt.draw()
if self.opt.eval_debug_display:
plt.show()
plt.savefig(f'{self.opt.outf}/{self.opt.c}/{filename}/{sample_id}_3DIoU.png', bbox_inches='tight')
def debug(self, image_src, num_instances, instances_2d, instances_3d, projection_matrix, boxes, instances_scale,
filename, pred_box_list, gt_box_list, sample_id, GT_only=False):
# cv2.imwrite(f'{self.opt.outf}/{self.opt.c}/{filename}/{str(sample_id).zfill(4)}_output_ori.png',
# cv2.cvtColor(image_debug, cv2.COLOR_RGB2BGR))
image_debug = image_src.copy()
# GT label Green
for object_id in range(num_instances):
for kp_id in range(1, 9):
kp_pixel = instances_2d[object_id, kp_id, :]
cv2.circle(image_debug,
(int(image_debug.shape[1] * kp_pixel[0]), int(image_debug.shape[0] * kp_pixel[1])),
RADIUS, (0, 255, 0), -1)
for edge in Box.EDGES:
start_kp = instances_2d[object_id, edge[0], :]
start_x = int(image_debug.shape[1] * start_kp[0])
start_y = int(image_debug.shape[0] * start_kp[1])
end_kp = instances_2d[object_id, edge[1], :]
end_x = int(image_debug.shape[1] * end_kp[0])
end_y = int(image_debug.shape[0] * end_kp[1])
cv2.line(image_debug, (start_x, start_y), (end_x, end_y),
(0, 255, 0), 3)
# Draw pose axes
eval_utils.draw_axes(image_debug, instances_3d[object_id], projection_matrix, image_debug.shape[0],
image_debug.shape[1], self.opt.c)
cv2.imwrite(
f'{self.opt.outf}/{self.opt.c}_{self.opt.eval_save_id}/{filename}/{str(sample_id).zfill(4)}_output_gt.png',
cv2.cvtColor(image_debug, cv2.COLOR_RGB2BGR))
if GT_only == True:
cv2.imwrite(
f'{self.opt.outf}/{self.opt.c}_{self.opt.eval_save_id}/{filename}/{str(sample_id).zfill(4)}_output_pred.png',
cv2.cvtColor(image_src, cv2.COLOR_RGB2BGR))
return
image_debug = image_src.copy()
# PnP results Blue
for object_id in range(len(boxes)):
for kp_id in range(1, 9):
kp_pixel = boxes[object_id][0][kp_id, :]
# cv2.circle(image_debug,
# (int(image_debug.shape[1] * kp_pixel[0]), int(image_debug.shape[0] * kp_pixel[1])),
# RADIUS, (0, 255, 255), -1)
# cv2.circle(image_debug,
# (int(image_debug.shape[1] * kp_pixel[0]), int(image_debug.shape[0] * kp_pixel[1])),
# RADIUS, (255, 165, 0), -1)
cv2.circle(image_debug,
(int(image_debug.shape[1] * kp_pixel[0]), int(image_debug.shape[0] * kp_pixel[1])),
RADIUS, (255, 0, 0), -1)
for edge in Box.EDGES:
start_kp = boxes[object_id][0][edge[0], :]
start_x = int(image_debug.shape[1] * start_kp[0])
start_y = int(image_debug.shape[0] * start_kp[1])
end_kp = boxes[object_id][0][edge[1], :]
end_x = int(image_debug.shape[1] * end_kp[0])
end_y = int(image_debug.shape[0] * end_kp[1])
# cv2.line(image_debug, (start_x, start_y), (end_x, end_y),
# (0, 255, 255), 3)
# cv2.line(image_debug, (start_x, start_y), (end_x, end_y),
# (255, 165, 0), 3)
cv2.line(image_debug, (start_x, start_y), (end_x, end_y),
(255, 0, 0), 3)
# Sometimes, the predicted result does not have a match, e.g., a chair who is only partially visible
if len(boxes[object_id]) >= 5:
eval_utils.draw_axes(image_debug, boxes[object_id][5], projection_matrix, image_debug.shape[0],
image_debug.shape[1], self.opt.c)
cv2.imwrite(
f'{self.opt.outf}/{self.opt.c}_{self.opt.eval_save_id}/{filename}/{str(sample_id).zfill(4)}_output_pred.png',
cv2.cvtColor(image_debug, cv2.COLOR_RGB2BGR))
# image_debug = image_src.copy()
# # Original 2D points Red
# for object_id in range(len(boxes)):
# for kp_id in range(9):
# kp_pixel = boxes[object_id][3][kp_id, :]
# cv2.circle(image_debug,
# (int(image_debug.shape[1] * kp_pixel[0]), int(image_debug.shape[0] * kp_pixel[1])),
# RADIUS, (255, 0, 0), -1)
#
# for edge in Box.EDGES:
# start_kp = boxes[object_id][3][edge[0], :]
# start_x = int(image_debug.shape[1] * start_kp[0])
# start_y = int(image_debug.shape[0] * start_kp[1])
#
# end_kp = boxes[object_id][3][edge[1], :]
# end_x = int(image_debug.shape[1] * end_kp[0])
# end_y = int(image_debug.shape[0] * end_kp[1])
#
# cv2.line(image_debug, (start_x, start_y), (end_x, end_y),
# (255, 0, 0), 2)
# Save output with
# cv2.imwrite(f'{self.opt.outf}/{self.opt.c}_{self.opt.eval_save_id}/{filename}/{sample_id}_output.png', cv2.cvtColor(image_debug, cv2.COLOR_RGB2BGR))
#
#
# # Show/Save 3D IoU
# for b1, b2 in zip(pred_box_list, gt_box_list):
# if b1 is not None and b2 is not None:
# self.draw_boxes(filename,sample_id, [b1.vertices, b2.vertices])
def evaluate_scale(self, relative_scale, instance):
relative_scale_normalized = relative_scale / relative_scale[1]
instance_normalized = instance / instance[1]
error = np.sum(np.absolute(relative_scale_normalized - instance_normalized) / instance_normalized)
# error = np.mean(np.linalg.norm(relative_scale_normalized - instance_normalized))
self._error_scale += error
return error
def evaluate_2d(self, box, instance_2d, instance_3d, Mo2c, proj, instances_MugFlag=[]):
"""
Evaluates a pair of 2D projections of 3D boxes.
It computes the mean normalized distances of eight vertices of a box.
Args:
box: A 9*2 array of a predicted box.
instance_2d: A 9*2 array of an annotated box.
instance_3d: A 9*3 array of an annotated box.
Mo2c: A gt transformation matrix from object frame to camera frame
proj: Projection matrix
instances_MugFlag: A Flag if the object is a mug or not
Returns:
Pixel error
"""
# error = np.mean(np.linalg.norm(box[1:] - instance_2d[1:], axis=1))
# self._error_2d += error
#
# return error
Mc2o = np.linalg.inv(Mo2c)
error_best = np.inf
for id_symmetry in range(self.opt.eval_num_symmetry):
theta = 2 * np.pi / self.opt.eval_num_symmetry
M_R = rotation_y_matrix(theta * id_symmetry)
M_trans = proj @ Mo2c @ M_R @ Mc2o
instance_new = M_trans @ np.hstack((instance_3d, np.ones((instance_3d.shape[0], 1)))).T
pp2 = (instance_new / instance_new[3])[:2]
viewport_point = (pp2 + 1.0) / 2.0
viewport_point[[0, 1]] = viewport_point[[1, 0]]
instance_new = viewport_point.T
error = np.mean(np.linalg.norm(box[1:] - instance_new[1:], axis=1))
if error_best > error:
print(f'{id_symmetry}: {error}')
error_best = error
if self.opt.eval_mug_symmetric == False:
# If instances_MugFlag == [] or False, loop with eval_num_symmetry
if instances_MugFlag == True:
break
self._error_2d += error_best
return error_best
def _get_rotated_box(self, box_point_3d, angle):
"""Rotate a box along its vertical axis.
Args:
box: Input box.
angle: Rotation angle in rad.
Returns:
A rotated box
"""
CENTER = 0
BACK_TOP_LEFT = 3
BACK_BOTTOM_LEFT = 1
up_vector = box_point_3d[BACK_TOP_LEFT] - box_point_3d[BACK_BOTTOM_LEFT]
rot_vec = angle * up_vector / np.linalg.norm(up_vector)
rotation = rotation_util.from_rotvec(rot_vec).as_dcm()
box_center = box_point_3d[CENTER]
box_point_3d_rotated = np.matmul((box_point_3d - box_center), rotation) + box_center
return box_point_3d_rotated
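    # _get_rotated_box builds the rotation axis from the back-bottom-left to
    # back-top-left edge (the box's up direction), rotates all nine keypoints
    # about the box centre, and is used when sweeping candidate symmetry angles.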
def evaluate_3d(self, box_point_3d, instance_3d, Mo2c, instances_MugFlag=[]):
"""Evaluates a box in 3D.
It computes metrics of view angle and 3D IoU.
Args:
box: A predicted box.
instance_3d: A 9*3 array of an annotated box, in metric level.
Mo2c: A transformation matrix from object frame to camera frame
instances_MugFlag: A flag indicating whether the object is a mug
Returns:
The 3D IoU (float)
"""
# azimuth_error, polar_error = self.evaluate_viewpoint(box_point_3d, instance)
# iou = self.evaluate_iou(box_point_3d, instance)
# return azimuth_error, polar_error, iou
azimuth_error, polar_error = self.evaluate_viewpoint(box_point_3d, instance_3d)
avg_distance, avg_sym_distance = self.compute_average_distance(box_point_3d,
instance_3d)
Mc2o = np.linalg.inv(Mo2c)
iou_best = 0
pred_box_best = None
gt_box_best = None
avg_distance_best = _MAX_DISTANCE
avg_sym_distance_best = _MAX_DISTANCE
# Adapted from the official evaluation code: rotate the estimated box across the symmetry angles
for id_symmetry, theta in enumerate(np.linspace(0, np.pi * 2, self.opt.eval_num_symmetry)):
box_point_3d_rotated = self._get_rotated_box(box_point_3d, theta)
iou, pred_box, gt_box = self.evaluate_iou(box_point_3d_rotated, instance_3d)
if iou > iou_best:
azimuth_error, polar_error = self.evaluate_viewpoint(box_point_3d_rotated,
instance_3d)
avg_distance, avg_sym_distance = self.compute_average_distance(box_point_3d_rotated,
instance_3d)
print(f'{id_symmetry}: {iou}/{azimuth_error}/{polar_error}/{avg_distance}/{avg_sym_distance}')
iou_best = iou
pred_box_best = pred_box
gt_box_best = gt_box
avg_distance_best = avg_distance
avg_sym_distance_best = avg_sym_distance
if self.opt.eval_mug_symmetric == False:
# If instances_MugFlag == [] or False, loop with eval_num_symmetry
if instances_MugFlag == True:
break
self._iou_3d += iou_best
self._azimuth_error += azimuth_error
self._polar_error += polar_error
return azimuth_error, polar_error, iou_best, pred_box_best, gt_box_best, avg_distance_best, avg_sym_distance_best
def compute_scale(self, box, plane):
"""Computes scale of the given box sitting on the plane."""
center, normal = plane
vertex_dots = [np.dot(vertex, normal) for vertex in box[1:]]
vertex_dots = np.sort(vertex_dots)
center_dot = np.dot(center, normal)
scales = center_dot / vertex_dots[:4]
# Todo: Idea not working here
# aa=np.sum(vertex_dots[-4:]-center_dot)/4/np.linalg.norm(normal)
return np.mean(scales)
def Lift2DTo3D(self, projection_matrix, estimated_box, height, width, epnp_alpha_=epnp_alpha_default):
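# Lifts the 8 predicted 2D keypoints back to a 3D bounding box with an
# EPnP-style linear system: each keypoint contributes two equations on the
# four control points (weighted by epnp_alpha_), and the box is recovered from
# the eigenvector of m^T m associated with the smallest eigenvalue.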
fx = projection_matrix[0, 0]
fy = projection_matrix[1, 1]
cx = projection_matrix[0, 2]
cy = projection_matrix[1, 2]
m = np.zeros((16, 12))
u = None
v = None
keypoint2d_list = estimated_box['kps'].flatten()
for i in range(8):
v = (keypoint2d_list[i * 2] / width) * 2 - 1
u = keypoint2d_list[i * 2 + 1] / height * 2 - 1
for j in range(4):
# For each of the 4 control points, formulate two rows of the
# m matrix (two equations).
control_alpha = epnp_alpha_[i, j]
m[i * 2, j * 3] = fx * control_alpha
m[i * 2, j * 3 + 2] = (cx + u) * control_alpha
m[i * 2 + 1, j * 3 + 1] = fy * control_alpha
m[i * 2 + 1, j * 3 + 2] = (cy + v) * control_alpha
mt_m = m.transpose() @ m
es = eigenpy.SelfAdjointEigenSolver(mt_m)
V = es.eigenvectors()
D = es.eigenvalues()
CHECK_EQ(12, len(D))
eigen_vec = V[:, 0]
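# The solver returns eigenvalues in ascending order, so V[:, 0] spans the
# (approximate) null space of mt_m; np.linalg.eigh(mt_m) would yield the same
# eigenpair if eigenpy were unavailable.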
control_matrix = eigen_vec.reshape(4, 3)
if control_matrix[0, 2] > 0:
control_matrix = -control_matrix
keypoint3d_list = []
keypoint3d_list.append([control_matrix[0, 0], control_matrix[0, 1], control_matrix[0, 2]])
vertices = epnp_alpha_ @ control_matrix
for i in range(8):
keypoint3d_list.append([vertices[i, 0], vertices[i, 1], vertices[i, 2]])
keypoint2d_list = []
for keypoint3d in keypoint3d_list:
# Official OpenGL way
k_3d = np.array([keypoint3d[0], keypoint3d[1], keypoint3d[2], 1])
pp2 = np.matmul(projection_matrix, k_3d.reshape(4, 1))
pp2 = (pp2 / pp2[3])[:3]
viewport_point = (pp2 + 1.0) / 2.0
viewport_point = [viewport_point[1][0], viewport_point[0][0]]
keypoint2d_list.append(viewport_point)
return np.array(keypoint2d_list), np.array(keypoint3d_list)
def compute_ray(self, box):
"""Computes a ray from camera to box centroid in box frame.
For vertex in camera frame V^c, and object unit frame V^o, we have
R * Vc + T = S * Vo,
where S is a 3*3 diagonal matrix, which scales the unit box to its real size.
In fact, the camera coordinates we get have scale ambiguity. That is, we have
Vc' = 1/beta * Vc, and S' = 1/beta * S
where beta is unknown. Since all box vertices should have negative Z values,
we can assume beta is always positive.
To update the equation,
R * beta * Vc' + T = beta * S' * Vo.
To simplify,
R * Vc' + T' = S' * Vo,
where Vc', S', and Vo are known. The problem is to compute
T' = 1/beta * T,
which is a point with scale ambiguity. It forms a ray from camera to the
centroid of the box.
By using homogeneous coordinates, we have
M * Vc'_h = (S' * Vo)_h,
where M = [R|T'] is a 4*4 transformation matrix.
To solve M, we have
M = ((S' * Vo)_h * Vc'_h^T) * (Vc'_h * Vc'_h^T)_inv.
And T' = M[:3, 3:].
Args:
box: A 9*3 array of a 3D bounding box.
Returns:
A ray represented as [x, y, z].
"""
if box[0, -1] > 0:
warnings.warn('Box should have negative Z values.')
size_x = np.linalg.norm(box[5] - box[1])
size_y = np.linalg.norm(box[3] - box[1])
size_z = np.linalg.norm(box[2] - box[1])
size = np.asarray([size_x, size_y, size_z])
box_o = Box.UNIT_BOX * size
box_oh = np.ones((4, 9))
box_oh[:3] = np.transpose(box_o)
box_ch = np.ones((4, 9))
box_ch[:3] = np.transpose(box)
box_cht = np.transpose(box_ch)
box_oct = np.matmul(box_oh, box_cht)
try:
box_cct_inv = np.linalg.inv(np.matmul(box_ch, box_cht))
except np.linalg.LinAlgError:
box_cct_inv = np.linalg.pinv(np.matmul(box_ch, box_cht))
transform = np.matmul(box_oct, box_cct_inv)
return transform[:3, 3:].reshape((3))
def compute_average_distance(self, box, instance):
"""Computes Average Distance (ADD) metric."""
add_distance = 0.
for i in range(Box.NUM_KEYPOINTS):
delta = np.linalg.norm(box[i, :] - instance[i, :])
add_distance += delta
add_distance /= Box.NUM_KEYPOINTS
# Computes the symmetric version of the average distance metric.
# From PoseCNN https://arxiv.org/abs/1711.00199
# For each keypoint in the prediction, search for the point in ground truth
# that minimizes the distance between the two.
add_sym_distance = 0.
for i in range(Box.NUM_KEYPOINTS):
# Find nearest vertex in instance
distance = np.linalg.norm(box[i, :] - instance[0, :])
for j in range(Box.NUM_KEYPOINTS):
d = np.linalg.norm(box[i, :] - instance[j, :])
if d < distance:
distance = d
add_sym_distance += distance
add_sym_distance /= Box.NUM_KEYPOINTS
return add_distance, add_sym_distance
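# For reference, the symmetric distance above has an equivalent vectorized
# form (sketch only, not used by this code):
# add_sym_distance = np.mean(np.min(np.linalg.norm(
#     box[:, None, :] - instance[None, :, :], axis=-1), axis=1))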
def compute_viewpoint(self, box):
"""Computes viewpoint of a 3D bounding box.
We use the definition of polar angles in spherical coordinates
(http://mathworld.wolfram.com/PolarAngle.html), except that the
frame is rotated such that Y-axis is up, and Z-axis is out of screen.
Args:
box: A 9*3 array of a 3D bounding box.
Returns:
Two polar angles (azimuth and elevation) in degrees. The range is between
-180 and 180.
"""
x, y, z = self.compute_ray(box)
theta = math.degrees(math.atan2(z, x))
phi = math.degrees(math.atan2(y, math.hypot(x, z)))
return theta, phi
def evaluate_viewpoint(self, box, instance):
"""Evaluates a 3D box by viewpoint.
Args:
box: A 9*3 array of a predicted box.
instance: A 9*3 array of an annotated box, in metric level.
Returns:
Two viewpoint angle errors.
"""
predicted_azimuth, predicted_polar = self.compute_viewpoint(box)
gt_azimuth, gt_polar = self.compute_viewpoint(instance)
polar_error = abs(predicted_polar - gt_polar)
# Azimuth is from (-180,180) and a spherical angle so angles -180 and 180
# are equal. E.g. the azimuth error for -179 and 180 degrees is 1'.
# azimuth_error = abs(predicted_azimuth - gt_azimuth)
# Todo: May need further updates, e.g., inf symmetry
azimuth_error = abs(predicted_azimuth - gt_azimuth) % (360 / self.opt.eval_num_symmetry)
if azimuth_error > 180:
azimuth_error = 360 - azimuth_error
# Add later
# self._azimuth_error += azimuth_error
# self._polar_error += polar_error
return azimuth_error, polar_error
def evaluate_rotation(self, box, instance):
"""Evaluates rotation of a 3D box.
1. The L2 norm of rotation angles
2. The rotation angle computed from rotation matrices
trace(R_1^T R_2) = 1 + 2 cos(theta)
theta = arccos((trace(R_1^T R_2) - 1) / 2)
3. The rotation angle computed from quaternions. Similar to the above,
except instead of computing the trace, we compute the dot product of two
quaternion.
theta = 2 * arccos(| p.q |)
Note the distance between quaternions is not the same as distance between
rotations.
4. Rotation distance from "3D Bounding box estimation using deep learning
and geometry""
d(R1, R2) = || log(R_1^T R_2) ||_F / sqrt(2)
Args:
box: A 9*3 array of a predicted box.
instance: A 9*3 array of an annotated box, in metric level.
Returns:
Magnitude of the rotation angle difference between the box and instance.
"""
prediction = Box.Box(box)
annotation = Box.Box(instance)
gt_rotation_inverse = np.linalg.inv(annotation.rotation)
rotation_error = np.matmul(prediction.rotation, gt_rotation_inverse)
error_angles = np.array(
rotation_util.from_dcm(rotation_error).as_euler('zxy'))
abs_error_angles = np.absolute(error_angles)
abs_error_angles = np.minimum(
abs_error_angles, np.absolute(math.pi * np.ones(3) - abs_error_angles))
error = np.linalg.norm(abs_error_angles)
# Compute the error as the angle between the two rotation
rotation_error_trace = abs(np.matrix.trace(rotation_error))
angular_distance = math.acos((rotation_error_trace - 1.) / 2.)
# angle = 2 * acos(|q1.q2|)
box_quat = np.array(rotation_util.from_dcm(prediction.rotation).as_quat())
gt_quat = np.array(rotation_util.from_dcm(annotation.rotation).as_quat())
quat_distance = 2 * math.acos(np.dot(box_quat, gt_quat))
# The rotation measure from "3D Bounding box estimation using deep learning
# and geometry"
rotation_error_log = scipy.linalg.logm(rotation_error)
rotation_error_frob_norm = np.linalg.norm(rotation_error_log, ord='fro')
rotation_distance = rotation_error_frob_norm / np.sqrt(2.)
return (error, quat_distance, angular_distance, rotation_distance)
def evaluate_iou(self, box, instance):
"""Evaluates a 3D box by 3D IoU.
It computes 3D IoU of predicted and annotated boxes.
Args:
box: A 9*3 array of a predicted box.
instance: A 9*3 array of an annotated box, in metric level.
Returns:
3D Intersection over Union (float)
"""
# Computes 3D IoU of the two boxes.
prediction = Box.Box(box)
annotation = Box.Box(instance)
iou = IoU3D.IoU(prediction, annotation)
try:
iou_result = iou.iou()
except Exception:
iou_result = 0
# Add values in the end
# self._iou_3d += iou_result
return iou_result, prediction, annotation
def match_box(self, box, instances, visibilities):
"""Matches a detected box with annotated instances.
For a predicted box, finds the nearest annotation in instances. This means
we always assume a match for a prediction. If the nearest annotation is
below the visibility threshold, the match can be skipped.
Args:
box: A 9*2 array of a predicted box.
instances: A 9*2 array of annotated instances. Each instance is a 9*2
array.
visibilities: An array of the visibilities of the instances.
Returns:
Index of the matched instance; otherwise -1.
"""
norms = np.linalg.norm(instances[:, 1:, :] - box[1:, :], axis=(1, 2))
i_min = np.argmin(norms)
if visibilities[i_min] < self._vis_thresh:
return -1
return i_min
def write_report(self, report_file=None):
"""Writes a report of the evaluation."""
def report_array(f, label, array):
f.write(label)
for val in array:
f.write('{:.4f},\t'.format(val))
f.write('\n')
if report_file is None:
report_file = self.opt.report_file
with open(report_file, 'w') as f:
f.write('Mean Error Scale: {}\n'.format(
safe_divide(self._error_scale, self._matched)))
f.write('Mean Error 2D: {}\n'.format(
safe_divide(self._error_2d, self._matched)))
f.write('Mean 3D IoU: {}\n'.format(
safe_divide(self._iou_3d, self._matched)))
f.write('Mean Azimuth Error: {}\n'.format(
safe_divide(self._azimuth_error, self._matched)))
f.write('Mean Polar Error: {}\n'.format(
safe_divide(self._polar_error, self._matched)))
f.write('\n')
f.write('Scale Thresh: ')
for threshold in self._scale_thresholds:
f.write('{:.4f},\t'.format(threshold))
f.write('\n')
report_array(f, 'AP @Scale : ', self._scale_ap.aps)
f.write('\n')
f.write('IoU Thresholds: ')
for threshold in self._iou_thresholds:
f.write('{:.4f},\t'.format(threshold))
f.write('\n')
report_array(f, 'AP @3D IoU : ', self._iou_ap.aps)
f.write('\n')
f.write('2D Thresholds : ')
for threshold in self._pixel_thresholds:
f.write('{:.4f},\t'.format(
threshold)) # We calculate 0-0.1, which is a more reasonable range than the original setting in the Objectron repo
f.write('\n')
report_array(f, 'AP @2D Pixel : ', self._pixel_ap.aps)
f.write('\n')
f.write('Azimuth Thresh: ')
for threshold in self._azimuth_thresholds:
f.write('{:.4f},\t'.format(threshold * 0.1)) # For better visualization in the txt file
f.write('\n')
report_array(f, 'AP @Azimuth : ', self._azimuth_ap.aps)
f.write('\n')
f.write('Polar Thresh : ')
for threshold in self._polar_thresholds:
f.write('{:.4f},\t'.format(threshold * 0.1))
f.write('\n')
report_array(f, 'AP @Polar : ', self._polar_ap.aps)
f.write('\n')
f.write('ADD Thresh : ')
for threshold in self._add_thresholds:
f.write('{:.4f},\t'.format(threshold))
f.write('\n')
report_array(f, 'AP @ADD : ', self._add_ap.aps)
f.write('\n')
f.write('ADDS Thresh : ')
for threshold in self._adds_thresholds:
f.write('{:.4f},\t'.format(threshold))
f.write('\n')
report_array(f, 'AP @ADDS : ', self._adds_ap.aps)
f.write('\n')
f.write('Consistency score: {}\n'.format(self.consistency_score))
def finalize(self):
"""Computes average precision curves."""
self._scale_ap.compute_ap_curve()
self._iou_ap.compute_ap_curve()
self._pixel_ap.compute_ap_curve()
self._azimuth_ap.compute_ap_curve()
self._polar_ap.compute_ap_curve()
self._add_ap.compute_ap_curve()
self._adds_ap.compute_ap_curve()
def _is_visible(self, point):
"""Determines if a 2D point is visible."""
return point[0] > 0 and point[0] < 1 and point[1] > 0 and point[1] < 1
def stats_save(self, report_file):
def save(results, var, name):
results[name] = {}
results[name].update({
'tp': var.true_positive,
'fp': var.false_positive,
'num': var._total_instances,
})
dict_save = {
'error_scale': self._error_scale,
'error_2d': self._error_2d,
'iou_3d': self._iou_3d,
'azimuth_error': self._azimuth_error,
'polar_error': self._polar_error,
'matched': self._matched,
}
save(dict_save, self._scale_ap, 'scale')
save(dict_save, self._iou_ap, 'iou')
save(dict_save, self._pixel_ap, 'pixel')
save(dict_save, self._azimuth_ap, 'azimuth')
save(dict_save, self._polar_ap, 'polar')
save(dict_save, self._add_ap, 'add')
save(dict_save, self._adds_ap, 'adds')
with open(os.path.splitext(report_file)[0] + '.json', 'w+') as fp:
json.dump(dict_save, fp, indent=4, sort_keys=True)
def main(opt):
evaluator = Evaluator(opt)
if evaluator.opt.eval_hard_case == 1:
if os.path.exists('hard_cases.json'):
with open('hard_cases.json') as fp:
dict_hard = json.load(fp)
if evaluator.opt.c not in dict_hard:
print('No hard cases saved. Exit.')
exit(1)
else:
hard_case_list = dict_hard[evaluator.opt.c]
else:
print('No hard cases saved. Exit.')
exit(1)
elif evaluator.opt.eval_hard_case == 2:
if len(evaluator.opt.eval_hard_case_list) > 0:
hard_case_list = evaluator.opt.eval_hard_case_list
else:
print('No hard cases saved. Exit.')
exit(1)
# Read data
videos = Dataloader(evaluator.opt)
for idx, key in enumerate(videos):
print(f'Video {idx}, {key}:')
if evaluator.opt.eval_continue:
if glob.glob(f'{evaluator.opt.reportf}/{evaluator.opt.c}_{evaluator.opt.eval_save_id}/{key}_*'):
print('Have been evaluated.')
continue
if evaluator.opt.eval_hard_case and key not in hard_case_list:
continue
# Read the TFRecordDataset
ds = tf.data.TFRecordDataset(f'video_tfrecord_sorted/{opt.c}/{key}.tfrecord').take(-1)
batch = []
for serialized in tqdm.tqdm(ds):
batch.append(serialized.numpy())
evaluator.evaluate(batch)
if evaluator._scale_ap._total_instances == 0:
print('No instances in the computation. Skip the report.')
else:
evaluator.finalize()
# Save stats for each video
evaluator.stats_save(os.path.join(f'{evaluator.opt.reportf}', f'{evaluator.opt.c}_{evaluator.opt.eval_save_id}',
f'{key}_{evaluator.opt.report_file}'))
evaluator.write_report(
os.path.join(f'{evaluator.opt.reportf}', f'{evaluator.opt.c}_{evaluator.opt.eval_save_id}',
f'{key}_{evaluator.opt.report_file}'))
# Reset evaluator and detector tracking
evaluator.reset()
if evaluator.opt.tracking_task == True:
if evaluator.opt.c != 'cup':
evaluator.detector.reset_tracking()
else:
evaluator.detector_mug.reset_tracking()
evaluator.detector_cup.reset_tracking()
# # Todo: Save stats for all the videos for verification
# evaluator.write_report(os.path.join('report',f'{evaluator.opt.c}',f'combined.txt'))
def main_multiprocessing(opt, idx, key):
# Todo: Resize the image to what we use, easier to preprocess the camera intrinsics directly
evaluator = Evaluator(opt)
print(f'Video {idx}, {key}:')
# Read the TFRecordDataset
ds = tf.data.TFRecordDataset(f'video_tfrecord_sorted/{opt.c}/{key}.tfrecord').take(-1)
batch = []
for serialized in tqdm.tqdm(ds):
batch.append(serialized.numpy())
evaluator.evaluate(batch)
if evaluator._scale_ap._total_instances == 0:
print('No instances in the computation. Skip the report.')
else:
evaluator.finalize()
# Todo: Save stats for each video
evaluator.stats_save(
os.path.join(f'{evaluator.opt.reportf}', f'{evaluator.opt.c}_{evaluator.opt.eval_save_id}',
f'{key}_{evaluator.opt.report_file}'))
evaluator.write_report(
os.path.join(f'{evaluator.opt.reportf}', f'{evaluator.opt.c}_{evaluator.opt.eval_save_id}',
f'{key}_{evaluator.opt.report_file}'))
# Reset evaluator and detector tracking
evaluator.reset()
if evaluator.opt.tracking_task == True:
if evaluator.opt.c != 'cup':
evaluator.detector.reset_tracking()
else:
evaluator.detector_mug.reset_tracking()
evaluator.detector_cup.reset_tracking()
if __name__ == '__main__':
# multi_processing = False
multi_processing = True
n_proc = 6
# Param setting for opt_eval
opt_eval = eval_opts().parser.parse_args()
# Param setting for opt_detector
opt_detector = opts().parser.parse_args([])
# Basic options:
# opt_eval.outf = 'debug/CenterPoseTrack'
# opt_eval.reportf = 'report/CenterPoseTrack'
opt_eval.eval_confidence_thresh = 0.3 # The lower limit conf setting
# opt_eval.eval_rep_mode = 1
# opt_eval.eval_num_symmetry = 100
# opt_eval.eval_R = 20
# opt_eval.eval_c = 'cup'
# opt_eval.eval_tracking_task = True
# opt_eval.eval_kalman = True
# opt_eval.eval_scale_pool = True
# opt_eval.eval_pre_hm = True
# opt_eval.eval_pre_hm_hp = True
# opt_eval.eval_gt_pre_hm_hmhp_first = True
# opt_eval.eval_add_noise = True
# opt_eval.eval_CenterPose_initialization = True
# opt_eval.eval_arch = 'dlav1_34'
# opt_eval.eval_refined_Kalman = True # Only for CenterPose
# opt_eval.eval_exp_id = 6 # The weight group id for CenterPose
# More options:
opt_detector.nms = True
# opt_detector.eval_gt_pre_hm_hmhp = True
# opt_eval.eval_fake_output = True
# opt_eval.eval_MobilePose_postprocessing = True
# opt_eval.eval_gt_scale = True
# Debug options:
# opt_eval.eval_hard_case = 1 # Only evaluate videos from hard_cases.json
# opt_eval.eval_hard_case = 2 # Only evaluate videos from a list
# opt_eval.eval_hard_case_list = ['bike_batch-0_0']
# opt_eval.eval_continue = True # Skip the evaluated video
# opt_eval.eval_debug = True # Whether to save imgs for debug
# opt_detector.debug = 6 # save extra visualization in demo/ for debug, e.g., heatmap
opt_eval.eval_debug_json = True # Whether to save json for debug
opt_eval.eval_debug_clean = True
# Objectron paper https://arxiv.org/abs/2012.09988 assumes mug is also symmetric, for fair comparison we also have this option
opt_eval.eval_mug_symmetric = True
# True: only evaluate mug case, False: only evaluate cup case, None: Evaluate them all
opt_eval.mug_only = None
# Align opts from opt_detector with ones from opt_eval
opt_detector.tracking_task = opt_eval.eval_tracking_task
opt_detector.c = opt_eval.eval_c
opt_detector.arch = opt_eval.eval_arch
opt_detector.rep_mode = opt_eval.eval_rep_mode
opt_detector.vis_thresh = opt_eval.eval_confidence_thresh
opt_detector.R = opt_eval.eval_R
opt_detector.kalman = opt_eval.eval_kalman
opt_detector.scale_pool = opt_eval.eval_scale_pool
opt_detector.pre_hm = opt_eval.eval_pre_hm
opt_detector.pre_hm_hp = opt_eval.eval_pre_hm_hp
opt_detector.gt_pre_hm_hmhp_first = opt_eval.eval_gt_pre_hm_hmhp_first
opt_detector.refined_Kalman = opt_eval.eval_refined_Kalman
opt_detector.empty_pre_hm = opt_eval.eval_empty_pre_hm
if opt_detector.refined_Kalman == True:
opt_detector.tracking_task = False
opt_detector.kalman = True
opt_detector.scale_pool = True
# Tracking related
if opt_detector.tracking_task == True:
# Fixed in the evaluation
opt_detector.obj_scale_uncertainty = True
opt_detector.hps_uncertainty = True
opt_detector.pre_img = True
# opt_detector.pre_hm = True
opt_detector.tracking = True
# opt_detector.pre_hm_hp = True
opt_detector.tracking_hp = True
opt_detector.track_thresh = 0.1
print('Running tracking')
opt_detector.vis_thresh = max(opt_detector.track_thresh, opt_detector.vis_thresh)
opt_detector.pre_thresh = max(opt_detector.track_thresh, opt_detector.pre_thresh)
opt_detector.new_thresh = max(opt_detector.track_thresh, opt_detector.new_thresh)
# No symmetry
if 'v1' in opt_detector.arch:
opt_eval.report_file = f'{opt_detector.c}_v1_report_{opt_eval.eval_confidence_thresh}.txt'
opt_detector.load_model = f"../../../models/CenterPose/{opt_detector.c}_v1_140.pth"
# Not implemented yet
if opt_detector.tracking_task == True:
opt_detector.load_model = f"../../../models/CenterPoseTrack/{opt_detector.c}_15.pth"
else:
opt_eval.report_file = f'{opt_detector.c}_report_{opt_eval.eval_confidence_thresh}.txt'
opt_detector.load_model = f"../../../models/CenterPose/{opt_detector.c}_140.pth"
if opt_detector.tracking_task == True:
opt_detector.load_model = f"../../../models/CenterPoseTrack/{opt_detector.c}_15.pth"
# Symmetry exists: only 'bottle' is handled here, while 'cup' has been hard-coded
if opt_detector.c == 'bottle':
if 'v1' in opt_detector.arch:
opt_eval.report_file = f'{opt_detector.c}_v1_{opt_eval.eval_num_symmetry}_sym_report_{opt_eval.eval_confidence_thresh}.txt'
opt_detector.load_model = f"../../../models/CenterPose/{opt_detector.c}_v1_sym_12_140.pth"
if opt_detector.tracking_task == True:
opt_detector.load_model = f"../../../models/CenterPoseTrack/{opt_detector.c}_v1_sym_12_15.pth"
else:
opt_eval.report_file = f'{opt_detector.c}_report_{opt_eval.eval_confidence_thresh}.txt'
opt_detector.load_model = f"../../../models/CenterPose/{opt_detector.c}_sym_12_140.pth"
if opt_detector.tracking_task == True:
opt_detector.load_model = f"../../../models/CenterPoseTrack/{opt_detector.c}_sym_12_15.pth"
# Some exp naming rules
if opt_detector.nms == True:
opt_eval.report_file = os.path.splitext(opt_eval.report_file)[0] + '_nms.txt'
if opt_detector.rep_mode == 0:
opt_eval.report_file = os.path.splitext(opt_eval.report_file)[0] + '_8rep.txt'
elif opt_detector.rep_mode == 1:
opt_eval.report_file = os.path.splitext(opt_eval.report_file)[0] + '_16rep.txt'
elif opt_detector.rep_mode == 2:
opt_eval.report_file = os.path.splitext(opt_eval.report_file)[0] + '_samplerep.txt'
elif opt_detector.rep_mode == 3:
opt_eval.report_file = os.path.splitext(opt_eval.report_file)[0] + '_disrep.txt'
elif opt_detector.rep_mode == 4:
opt_eval.report_file = os.path.splitext(opt_eval.report_file)[0] + '_hmrep.txt'
if opt_eval.eval_MobilePose_postprocessing == True:
opt_eval.report_file = os.path.splitext(opt_eval.report_file)[0] + '_MobilePose.txt'
if opt_detector.gt_pre_hm_hmhp == True and opt_detector.gt_pre_hm_hmhp_first == False:
opt_eval.report_file = os.path.splitext(opt_eval.report_file)[0] + '_gtexternal.txt'
if opt_detector.gt_pre_hm_hmhp_first == True:
if opt_eval.eval_add_noise == True:
opt_eval.report_file = os.path.splitext(opt_eval.report_file)[0] + '_gtnoiseInit.txt'
elif opt_eval.eval_CenterPose_initialization == True:
opt_eval.report_file = os.path.splitext(opt_eval.report_file)[0] + '_CenterPoseInit.txt'
else:
opt_eval.report_file = os.path.splitext(opt_eval.report_file)[0] + '_gtInit.txt'
if opt_eval.eval_gt_scale == True:
opt_eval.report_file = os.path.splitext(opt_eval.report_file)[0] + '_gtscale.txt'
if opt_eval.eval_mug_symmetric == False:
opt_eval.report_file = os.path.splitext(opt_eval.report_file)[0] + '_partsymmetry.txt'
# For saving records, which is different from eval_image
# CenterPoseTrack
if 'Track' in opt_eval.outf:
# CenterPose
if opt_eval.eval_arch == 'dlav1_34' and opt_detector.refined_Kalman == False:
opt_eval.eval_save_id = 0
# CenterPose with filtering process
elif opt_eval.eval_arch == 'dlav1_34' and opt_detector.refined_Kalman == True:
opt_eval.eval_save_id = 1
# No input to CenterPoseTrack
elif opt_eval.eval_arch == 'dla_34' and opt_detector.tracking_task == True and opt_detector.gt_pre_hm_hmhp_first == False \
and opt_detector.kalman == True and opt_detector.scale_pool == True and opt_detector.pre_hm == True and opt_detector.pre_hm_hp == True:
opt_eval.eval_save_id = 2
# CenterPoseTrack with GT
elif opt_eval.eval_arch == 'dla_34' and opt_detector.tracking_task == True and opt_detector.gt_pre_hm_hmhp_first == True \
and opt_detector.kalman == True and opt_detector.scale_pool == True and opt_detector.pre_hm == True and opt_detector.pre_hm_hp == True \
and opt_eval.eval_add_noise == False and opt_eval.eval_CenterPose_initialization == False and opt_detector.empty_pre_hm == False:
opt_eval.eval_save_id = 3
# CenterPoseTrack with GT & noise
elif opt_eval.eval_arch == 'dla_34' and opt_detector.tracking_task == True and opt_detector.gt_pre_hm_hmhp_first == True \
and opt_detector.kalman == True and opt_detector.scale_pool == True and opt_detector.pre_hm == True and opt_detector.pre_hm_hp == True \
and opt_eval.eval_add_noise == True:
opt_eval.eval_save_id = 4
# CenterPoseTrack with CenterPose as initialization
elif opt_eval.eval_arch == 'dla_34' and opt_detector.tracking_task == True and opt_detector.gt_pre_hm_hmhp_first == True \
and opt_detector.kalman == True and opt_detector.scale_pool == True and opt_detector.pre_hm == True and opt_detector.pre_hm_hp == True \
and opt_eval.eval_CenterPose_initialization == True:
opt_eval.eval_save_id = 5
# CenterPoseTrack without filtering process
elif opt_eval.eval_arch == 'dla_34' and opt_detector.tracking_task == True and opt_detector.gt_pre_hm_hmhp_first == True \
and opt_detector.kalman == False and opt_detector.scale_pool == False and opt_detector.pre_hm == True and opt_detector.pre_hm_hp == True:
opt_eval.eval_save_id = 6
# CenterPoseTrack without previous heatmap
elif opt_eval.eval_arch == 'dla_34' and opt_detector.tracking_task == True and opt_detector.gt_pre_hm_hmhp_first == True \
and opt_detector.kalman == True and opt_detector.scale_pool == True and opt_detector.pre_hm == False and opt_detector.pre_hm_hp == False:
opt_eval.eval_save_id = 7
# CenterPoseTrack with empty heatmap
elif opt_eval.eval_arch == 'dla_34' and opt_detector.tracking_task == True and opt_detector.gt_pre_hm_hmhp_first == True \
and opt_detector.kalman == True and opt_detector.scale_pool == True and opt_detector.pre_hm == True and opt_detector.pre_hm_hp == True \
and opt_detector.empty_pre_hm == True:
opt_eval.eval_save_id = 8
else:
# CenterPose
if opt_eval.eval_arch == 'dlav1_34' and opt_detector.rep_mode == 0:
opt_eval.eval_save_id = 0
elif opt_eval.eval_arch == 'dlav1_34' and opt_detector.rep_mode == 1 \
and opt_eval.eval_MobilePose_postprocessing == False and opt_eval.eval_gt_scale == False:
opt_eval.eval_save_id = 1
elif opt_eval.eval_arch == 'dlav1_34' and opt_detector.rep_mode == 2:
opt_eval.eval_save_id = 2
elif opt_eval.eval_arch == 'dlav1_34' and opt_detector.rep_mode == 3:
opt_eval.eval_save_id = 3
elif opt_eval.eval_arch == 'dlav1_34' and opt_detector.rep_mode == 4:
opt_eval.eval_save_id = 4
elif opt_eval.eval_arch == 'dla_34' and opt_detector.rep_mode == 1:
opt_eval.eval_save_id = 5
elif opt_eval.eval_arch == 'dlav1_34' and opt_eval.eval_MobilePose_postprocessing == True:
opt_eval.eval_save_id = 6
elif opt_eval.eval_arch == 'dlav1_34' and opt_eval.eval_gt_scale == True:
opt_eval.eval_save_id = 7
if opt_eval.eval_debug == True or opt_eval.eval_debug_json == True:
if opt_eval.eval_debug_clean == True and opt_eval.eval_continue != True:
# Clean up debug/
if os.path.isdir(f'{opt_eval.outf}/{opt_detector.c}_{opt_eval.eval_save_id}'):
shutil.rmtree(f'{opt_eval.outf}/{opt_detector.c}_{opt_eval.eval_save_id}')
# Clean up report/
if os.path.isdir(f'{opt_eval.reportf}/{opt_detector.c}_{opt_eval.eval_save_id}'):
shutil.rmtree(f'{opt_eval.reportf}/{opt_detector.c}_{opt_eval.eval_save_id}')
# Clean up demo/
if os.path.exists(
os.path.join('demo/', f'{os.path.splitext(os.path.basename(opt_detector.load_model))[0]}')):
shutil.rmtree(
os.path.join('demo/', f'{os.path.splitext(os.path.basename(opt_detector.load_model))[0]}'))
if os.path.isdir(f'{opt_eval.outf}'):
print(f'folder {opt_eval.outf}/ exists')
else:
os.mkdir(f'{opt_eval.outf}')
print(f'created folder {opt_eval.outf}/')
if os.path.isdir(f'{opt_eval.outf}/{opt_detector.c}_{opt_eval.eval_save_id}'):
print(f'folder {opt_eval.outf}/{opt_detector.c}_{opt_eval.eval_save_id} exists')
else:
os.mkdir(f'{opt_eval.outf}/{opt_detector.c}_{opt_eval.eval_save_id}')
print(f'created folder {opt_eval.outf}/{opt_detector.c}_{opt_eval.eval_save_id}')
if os.path.isdir(f'demo'):
print(f'folder demo/ exists')
else:
os.mkdir(f'demo')
print(f'created folder demo/')
if os.path.isdir(f'{opt_eval.reportf}'):
print(f'folder {opt_eval.reportf} exists')
else:
os.mkdir(f'{opt_eval.reportf}')
print(f'created folder {opt_eval.reportf}')
if os.path.isdir(f'{opt_eval.reportf}/{opt_detector.c}_{opt_eval.eval_save_id}'):
print(f'folder {opt_eval.reportf}/{opt_detector.c}_{opt_eval.eval_save_id} exists')
else:
os.mkdir(f'{opt_eval.reportf}/{opt_detector.c}_{opt_eval.eval_save_id}')
print(f'created folder {opt_eval.reportf}/{opt_detector.c}_{opt_eval.eval_save_id}')
opt_detector.obj_scale = True
opt_detector.use_pnp = True
# Update default configurations
opt_detector = opts().parse(opt_detector)
opt_detector = opts().init(opt_detector)
opt_combined = argparse.Namespace(**vars(opt_eval), **vars(opt_detector))
if not multi_processing:
# Single process
main(opt_combined)
else:
# Multi-process
# Read data
videos = Dataloader(opt_combined)
mp = mp.get_context('spawn')
pool = []
# Some machines may support more processes
if opt_combined.eval_hard_case == 1:
if os.path.exists('hard_cases.json'):
with open('hard_cases.json') as fp:
dict_hard = json.load(fp)
if opt_combined.c not in dict_hard:
print('No hard cases saved. Exit.')
exit(1)
else:
# dict_hard[opt_combined.c] is like ['XXX',XX]
hard_case_list = [i[0] for i in dict_hard[opt_combined.c]]
else:
print('No hard cases saved. Exit.')
exit(1)
elif opt_combined.eval_hard_case == 2:
if len(opt_combined.eval_hard_case_list) > 0:
hard_case_list = opt_combined.eval_hard_case_list
else:
print('No hard cases saved. Exit.')
exit(1)
# Pack data
opt_multiprocess = []
for idx, key in enumerate(videos):
if opt_combined.eval_hard_case and key not in hard_case_list:
continue
if opt_combined.eval_continue:
if glob.glob(f'{opt_combined.reportf}/{opt_combined.c}_{opt_combined.eval_save_id}/{key}_*'):
print(f'Video {idx}, {key}:')
print('Have been evaluated.')
continue
opt_multiprocess.append((opt_combined, idx, key))
for idx in range(0, len(opt_multiprocess), n_proc):
for i in range(n_proc):
if i + idx < len(opt_multiprocess):
process = mp.Process(target=main_multiprocessing, args=opt_multiprocess[i + idx])
process.start()
pool.append(process)
for p in pool:
p.join()
print('Done!')
|
the-stack_106_26965 | # pylint: disable=function-redefined,no-name-in-module
from behave import given, when, then
import grpc
from google.protobuf.empty_pb2 import Empty
from client.topic_pb2 import *
@when(u'I fetch all topics')
def step_impl(context):
context.stubs.try_call(
context.stubs.topics.AllTopics,
Empty(),
)
@when(u'I fetch topics related to "{name}"')
def step_impl(context, name):
context.stubs.try_call(
context.stubs.topics.Related,
TopicRequest(name=name),
)
@then(u'I get {num:d} topics')
def step_impl(context, num):
topics = context.stubs.call_res.topics
assert len(topics) == num, 'got {} topics'.format(len(topics))
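# A matching Gherkin scenario might look like this (illustrative only; the
# actual .feature files live elsewhere in the repository):
#
#   Scenario: related topics
#     When I fetch topics related to "python"
#     Then I get 3 topics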
|
the-stack_106_26966 | import re
import os
import sys
import operator
import commands
from commands import *
SLUG = re.compile(r'\\(ex|in)tslug\[(?P<time>.*)\]\{(?P<location>.*)\}(\s+\% WITH (?P<transition>.*))?')
ACTOR = re.compile(r"\\@ifdefinable\{\\(?P<alias>[\w\d]+)\}{\\def\\.+\/\{(?P<actor>.+)\}\}")
DIALOGUE = re.compile(r'\\begin\{dialogue\}\{\\(?P<actor>\w+)\/(\s\((?P<blocking>.*)\))?\}\n\t?(?P<line>.*)\n\\end\{dialogue\}')
FADE_IN = re.compile(r'\\fadein')
FADE_OUT = re.compile(r'\\fadeout')
STRIP_PAREN = re.compile(r'\\paren\{[^\}]+\}') # temp
def format_line(s):
s = re.sub("--", u"\u2014", s) # em dash
s = re.sub(f"\"", "\\\"", s) # quotes
s = re.sub(STRIP_PAREN, '', s) # temp
s = re.sub("'", u"\u2019", s) # apostrophe to single quote
s = re.sub("\.\.\.", u"\u2026", s) # ellipsis
# clean up accidental tabs in LaTeX
s = s.strip()
return s
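# For reference, the LaTeX fragments the regexes above are written against
# look roughly like this (illustrative reconstruction from the patterns, not
# taken from a real screenplay source):
#
#   \intslug[DAY]{KITCHEN} % WITH FADE IN
#   \begin{dialogue}{\alice/ (nervous)}
#       Hello there...
#   \end{dialogue}
#   \fadeout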
if __name__ == '__main__':
fn_in = sys.argv[1]
fn_out = sys.argv[2]
command_builders = []
actor_resolver = {}
with open(fn_in, 'r') as file:
data = file.read()
for m in re.finditer(ACTOR, data):
command_builders.append(ActorBuilder(m.start(), m.group("alias"), m.group("actor")))
actor_resolver[m.group("alias")] = m.group("actor")
for m in re.finditer(FADE_IN, data):
command_builders.append(CodeBuilder(m.start(), "label start:"))
for m in re.finditer(SLUG, data):
command_builders.append(SlugBuilder(m.start(), m.group("time"), m.group("location"), m.group("transition")))
for m in re.finditer(DIALOGUE, data):
command_builders.append(LineBuilder(m.start(), m.group("actor"), m.group("blocking"), format_line(m.group("line"))))
for m in re.finditer(FADE_OUT, data):
command_builders.append(CodeBuilder(m.start(), " return\n"))
# update all scenes
for b in command_builders:
if isinstance(b, commands.SlugBuilder):
b.location = re.sub("[^A-Za-z]", '', b.location) # temp
# update all character shorthands
for b in command_builders:
if isinstance(b, commands.LineBuilder):
for k,v in actor_resolver.items():
b.line = re.sub(F"\\\\{k}\/", v, b.line)
command_builders.sort(key=lambda x: x.cindex)
r = '\n'.join([x.__str__() for x in command_builders])
with open(fn_out, 'w') as outscr:
outscr.write(r) |
the-stack_106_26967 | # -*- coding: utf-8 -*-
from setuptools import setup
scripts = ['bin/borgcron']
packages = ['borgcron']
data_files = [('/etc/borgcron/', ['etc/cfg_example.yml'])]
install_requires = ['PyYAML']
tests_require = ['nose']
setup(name = 'borgcron',
version = '0.3',
description = 'execute borgbackup without user interaction',
url = '',
author = 'Thomas Kärgel',
author_email = 'kaergel at b1-systems.de',
license = 'MIT',
scripts = scripts,
packages = packages,
data_files = data_files,
install_requires = install_requires,
zip_safe = False,
test_suite = 'nose.collector',
tests_require = tests_require)
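# Typical usage from a source checkout (standard setuptools workflow):
#   python setup.py sdist   # build a source distribution
#   pip install .           # install the package, the borgcron script and the example config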
|
the-stack_106_26969 | import os
from functools import lru_cache
import numpy as np
from .connection import Connection
from forest.exceptions import SearchFail
__all__ = [
"Locator"
]
class Locator(Connection):
"""Query database for path and index related to fields"""
def __init__(self, connection, directory=None):
self.directory = directory
self.connection = connection
self.cursor = self.connection.cursor()
def locate(
self,
pattern,
variable,
initial_time,
valid_time,
pressure=None,
tolerance=0.001):
valid_time64 = np.datetime64(valid_time, 's')
for file_name in self.file_names(
pattern,
variable,
initial_time,
valid_time):
if self.directory is not None:
# HACK: consider refactor
path = os.path.join(self.directory, os.path.basename(file_name))
else:
path = file_name
ta, pa = self.axes(file_name, variable)
if (ta is None) and (pa is None):
return path, ()
elif (ta is None) and (pa is not None):
if pressure is None:
raise SearchFail("Need pressure to search pressure axis")
pressures = self.coordinate(file_name, variable, "pressure")
i = np.where(np.abs(pressures - pressure) < tolerance)[0][0]
return path, (i,)
elif (ta is not None) and (pa is None):
times = self.coordinate(file_name, variable, "time")
i = np.where(times == valid_time64)[0][0]
return path, (i,)
elif (ta is not None) and (pa is not None):
if pressure is None:
raise SearchFail("Need pressure to search pressure axis")
times = self.coordinate(file_name, variable, "time")
pressures = self.coordinate(file_name, variable, "pressure")
if (ta == 0) and (pa == 0):
pts = np.where(
(times == valid_time64) &
(np.abs(pressures - pressure) < tolerance))
i = pts[0][0]
return path, (i,)
else:
ti = np.where(times == valid_time64)[0][0]
pi = np.where(np.abs(pressures - pressure) < tolerance)[0][0]
return path, (ti, pi)
raise SearchFail("Could not locate: {}".format(pattern))
@lru_cache()
def file_names(self, pattern, variable, initial_time, valid_time):
self.cursor.execute("""
SELECT DISTINCT(f.name)
FROM file AS f
JOIN variable AS v
ON v.file_id = f.id
JOIN variable_to_time AS vt
ON vt.variable_id = v.id
JOIN time AS t
ON t.id = vt.time_id
WHERE f.name GLOB :pattern
AND f.reference = :initial_time
AND v.name = :variable
AND t.value = :valid_time
""", dict(
pattern=pattern,
variable=variable,
initial_time=initial_time,
valid_time=valid_time,
))
return [file_name for file_name, in self.cursor.fetchall()]
@lru_cache()
def coordinate(self, file_name, variable, coord):
if coord == "pressure":
self.cursor.execute("""
SELECT p.i, p.value
FROM file AS f
JOIN variable AS v
ON v.file_id = f.id
JOIN variable_to_pressure AS vp
ON vp.variable_id = v.id
JOIN pressure AS p
ON p.id = vp.pressure_id
WHERE f.name = :file_name
AND v.name = :variable
ORDER BY p.i
""", dict(
file_name=file_name,
variable=variable
))
rows = self.cursor.fetchall()
elif coord == "time":
self.cursor.execute("""
SELECT t.i, t.value
FROM file AS f
JOIN variable AS v
ON v.file_id = f.id
JOIN variable_to_time AS vt
ON vt.variable_id = v.id
JOIN time AS t
ON t.id = vt.time_id
WHERE f.name = :file_name
AND v.name = :variable
ORDER BY t.i
""", dict(
file_name=file_name,
variable=variable
))
rows = self.cursor.fetchall()
else:
raise Exception("unknown coordinate: {}".format(coord))
if coord == "time":
dtype = "datetime64[s]"
else:
dtype = "f"
index, values = zip(*rows)
array = np.empty(np.max(index) + 1, dtype=dtype)
for i, v in zip(index, values):
array[i] = v
return array
@lru_cache()
def axes(self, file_name, variable):
"""Time/pressure axis information
:returns: (time_axis, pressure_axis)
"""
self.cursor.execute("""
SELECT v.time_axis, v.pressure_axis
FROM file AS f
JOIN variable AS v
ON v.file_id = f.id
WHERE f.name = :file_name
AND v.name = :variable
""", dict(
file_name=file_name,
variable=variable
))
return self.cursor.fetchone()
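# Minimal usage sketch (the database path, glob pattern, variable name and
# times below are illustrative, not shipped defaults):
#
#   import sqlite3
#   connection = sqlite3.connect("file_index.db")
#   locator = Locator(connection, directory="/data/forest")
#   path, pts = locator.locate(
#       pattern="*global*.nc",
#       variable="air_temperature",
#       initial_time="2019-01-01 00:00:00",
#       valid_time="2019-01-01 12:00:00",
#       pressure=850.0)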
|
the-stack_106_26970 | # Multithreading: timing comparison
from threading import *
import time
def d2(n):
for x in n:
time.sleep(1)
print(x/10)
def d3(n):
for x in n:
time.sleep(1)
print(x*10)
# Without multithreading
n = [10,20,30,40,50]
s = time.time()
d2(n)
d3(n)
e = time.time()
print(f"Tempo gasto: {round(e-s,2)}s\n")
#Com Multithreading
st = time.time()
t1 = Thread(target=d2,args=(n,))
t2 = Thread(target=d3,args=(n,))
t1.start()
t2.start()
t1.join()
t2.join()
et = time.time()
print(f"Tempo gasto com Multithreading: {round(et-st,2)}s") |
the-stack_106_26971 | #!/usr/bin/env python
#
# Copyright 2015 Airbus
# Copyright 2017 Fraunhofer Institute for Manufacturing Engineering and Automation (IPA)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import traceback
import copy
from contextlib import contextmanager
import smach
import rospy
__all__ = ['Concurrence']
class ssmConcurrence(smach.Concurrence):
"""Concurrence Container
This state allows for simple split-join concurrency. The user adds a set of
states which are all executed simultaneously. The concurrent split state
can only transition once all contained states are ready to transition.
This container can be configured to return a given outcome as a function of
the outcomes of the contained states. This is specified in the constructor
of the class, or after construction with L{Concurrence.add_outcome_map}.
While a concurrence will not terminate until all of its children terminate,
it is possible for it to preempt a subset of states when any of the following occurs:
- All child states terminate
- At least one child state terminates
- A user-defined callback signals termination
Given these causes of termination, the outcome can be determined in four ways:
- A user-defined callback returns an outcome
- A child-outcome map which requires ALL states to terminate is satisfied
- A child-outcome map which requires ONE state to terminate is satisfied
- No maps are satisfied, so the default outcome is returned
The specification of the outcome maps and the outcome callback are
described in the constructor documentation below. More than one policy can
be supplied, and each policy has the potential to not be satisfied. In the
situation in which multiple policies are provided, and a given policy is
not satisfied, the outcome choice precedence is as follows:
- Outcome callback
- First-triggered outcome map
- Last-triggered outcome map
- Default outcome
In practice it is best to try to accomplish your task with just ONE outcome
policy.
"""
def __init__(self,
outcomes,
input_keys = [],
output_keys = [],
outcome_map = {},
outcome_cb = None,
child_termination_cb = None
):
input_keys.append("logfile")
output_keys.append("logfile")
outcomes.append('preempt')
default_outcome = 'preempt'
smach.Concurrence.__init__(self, outcomes, default_outcome, input_keys, output_keys, outcome_map, outcome_cb, child_termination_cb)
self._datamodel = {}
self._onEntry = None
self._onExit = None
self._tree_view = None
self._tree_view_Lock = threading.Lock()
self._tree_view_cb = None
### State interface
def execute(self, parent_ud = smach.UserData()):
"""Overridden execute method.
This starts all the threads.
"""
# Clear the ready event
self._ready_event.clear()
# Reset child outcomes
self._child_outcomes = {}
# Copy input keys
self._copy_input_keys(parent_ud, self.userdata)
## Copy the datamodel's value into the userData
for data in self._datamodel:
if(self._datamodel[data] != ""):
self.userdata[data] = self._datamodel[data]
## Do the <onentry>
if(self._onEntry is not None):
try:
self._onEntry.execute(self.userdata)
except Exception as ex:
rospy.logerr('%s::onEntry::execute() raised | %s'
%(self.__class__.__name__,str(ex)))
return "preempt"
# Spew some info
smach.loginfo("Concurrence starting with userdata: \n\t%s" %
(str(list(self.userdata.keys()))))
# Call start callbacks
self.call_start_cbs()
# Create all the threads
for (label, state) in ((k,self._states[k]) for k in self._states):
# Initialize child outcomes
self._child_outcomes[label] = None
self._threads[label] = threading.Thread(
name='concurrent_split:'+label,
target=self._state_runner,
args=(label,))
# Launch threads
for thread in self._threads.values():
thread.start()
# Wait for done notification
self._done_cond.acquire()
# Notify all threads ready to go
self._ready_event.set()
# Wait for a done notification from a thread
self._done_cond.wait()
self._done_cond.release()
# Preempt any running states
smach.logdebug("SMACH Concurrence preempting running states.")
for label in self._states:
if self._child_outcomes[label] == None:
self._states[label].request_preempt()
# Wait for all states to terminate
while not smach.is_shutdown():
if all([not t.is_alive() for t in self._threads.values()]):
break
self._done_cond.acquire()
self._done_cond.wait(0.1)
self._done_cond.release()
# Check for user code exception
if self._user_code_exception:
self._user_code_exception = False
raise smach.InvalidStateError("A concurrent state raised an exception during execution.")
# Check for preempt
if self.preempt_requested():
# initialized serviced flag
children_preempts_serviced = True
# Service this preempt if
for (label,state) in ((k,self._states[k]) for k in self._states):
if state.preempt_requested():
# Reset the flag
children_preempts_serviced = False
# Complain
smach.logwarn("State '%s' in concurrence did not service preempt." % label)
# Recall the preempt if it hasn't been serviced
state.recall_preempt()
if children_preempts_serviced:
smach.loginfo("Concurrence serviced preempt.")
self.service_preempt()
# Spew some debug info
smach.loginfo("Concurrent Outcomes: "+str(self._child_outcomes))
# Initialize the outcome
outcome = self._default_outcome
# Determine the outcome from the outcome map
smach.logdebug("SMACH Concurrence determining contained state outcomes.")
for (container_outcome, outcomes) in ((k,self._outcome_map[k]) for k in self._outcome_map):
if all([self._child_outcomes[label] == outcomes[label] for label in outcomes]):
smach.logdebug("Terminating concurrent split with mapped outcome.")
outcome = container_outcome
# Check outcome callback
if self._outcome_cb:
try:
cb_outcome = self._outcome_cb(copy.copy(self._child_outcomes))
if cb_outcome:
if cb_outcome == str(cb_outcome):
outcome = cb_outcome
else:
smach.logerr("Outcome callback returned a non-string '%s', using default outcome '%s'" % (str(cb_outcome), self._default_outcome))
else:
smach.logwarn("Outcome callback returned None, using outcome '%s'" % outcome)
except:
raise smach.InvalidUserCodeError(("Could not execute outcome callback '%s': " % self._outcome_cb)+traceback.format_exc())
# Cleanup
self._threads = {}
self._child_outcomes = {}
# Call termination callbacks
self.call_termination_cbs(list(self._states.keys()), outcome)
## Do the <onexit>
if(self._onExit is not None):
try:
outcome = self._onExit.execute(self.userdata,outcome)
except Exception as ex:
rospy.logerr('%s::onExit::execute() raised | %s'
%(self.__class__.__name__,str(ex)))
return "preempt"
# Copy output keys
self._copy_output_keys(self.userdata, parent_ud)
return outcome
def _state_runner(self,label):
"""Runs the states in parallel threads."""
# Wait until all threads are ready to start before beginnging
self._ready_event.wait()
self.call_transition_cbs()
# Execute child state
self._tree_view_enable_state(label)
try:
self._child_outcomes[label] = self._states[label].execute(smach.Remapper(
self.userdata,
self._states[label].get_registered_input_keys(),
self._states[label].get_registered_output_keys(),
self._remappings[label]))
except:
self._user_code_exception = True
with self._done_cond:
self._done_cond.notify_all()
raise smach.InvalidStateError(("Could not execute child state '%s': " % label)+traceback.format_exc())
self._tree_view_disable_state(label)
# Make sure the child returned an outcome
if self._child_outcomes[label] is None:
raise smach.InvalidStateError("Concurrent state '%s' returned no outcome on termination." % label)
else:
smach.loginfo("Concurrent state '%s' returned outcome '%s' on termination." % (label, self._child_outcomes[label]))
# Check if all of the states have completed
with self._done_cond:
# initialize preemption flag
preempt_others = False
# Call transition cb's
self.call_transition_cbs()
# Call child termination cb if it's defined
if self._child_termination_cb:
try:
preempt_others = self._child_termination_cb(self._child_outcomes)
except:
raise smach.InvalidUserCodeError("Could not execute child termination callback: "+traceback.format_exc())
## Check if we have finished one outcome
for (container_outcome, outcomes) in ((k,self._outcome_map[k]) for k in self._outcome_map):
if all([self._child_outcomes[label] == outcomes[label] for label in outcomes]):
preempt_others = True
# Notify the container to terminate (and preempt other states if necessary)
if preempt_others or all([o is not None for o in self._child_outcomes.values()]):
self._done_cond.notify_all()
def _create_tree_view(self):
self._tree_view = {}
for child in self.get_children():
self._tree_view[child] = 0
def _tree_view_enable_state(self, label):
if(self._tree_view is not None):
self._tree_view_Lock.acquire()
self._tree_view[label] = 1
self.call_update_tree_view_cb()
self._tree_view_Lock.release()
def _tree_view_disable_state(self, label):
if(self._tree_view is not None):
self._tree_view_Lock.acquire()
self._tree_view[label] = 0
self.call_update_tree_view_cb()
self._tree_view_Lock.release()
def get_tree_view(self):
return self._tree_view
def register_tree_view_cb(self, callback):
self._tree_view_cb = callback
def call_update_tree_view_cb(self):
if(self._tree_view_cb is not None):
try:
self._tree_view_cb()
except:
smach.logerr("Could not execute treeview callback: "+traceback.format_exc())
|
the-stack_106_26973 | # Copyright (C) 2017 Beijing Didi Infinity Technology and Development Co.,Ltd.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
''' loss implementation'''
import tensorflow as tf
from delta.utils.loss.base_loss import Loss
from delta.utils.loss.loss_utils import cross_entropy
from delta.utils.loss.loss_utils import mask_sequence_loss
from delta.utils.loss.loss_utils import ctc_lambda_loss
from delta.utils.loss.loss_utils import crf_log_likelihood
from delta.utils.loss.loss_utils import focal_loss
from delta.utils.register import registers
@registers.loss.register
class CrossEntropyLoss(Loss):
''' cross entropy loss for classfication and sequence classfication '''
def __init__(self, config):
super().__init__(config)
self.smoothing = self.config['solver']['optimizer']['label_smoothing']
#pylint: disable=too-many-arguments
def call(self,
logits=None,
input_length=None,
labels=None,
label_length=None,
**kwargs):
loss = cross_entropy(
logits=logits,
input_length=input_length,
labels=labels,
label_length=label_length,
smoothing=self.smoothing)
return loss
@registers.loss.register
class DistillationLoss(Loss):
''' Distilling the Knowledge in a Neural Network, arXiv:1503.02531 '''
def __init__(self, config):
super().__init__(config)
self.smoothing = self.config['solver']['optimizer']['label_smoothing']
self.temperature = self.config['solver']['distilling']['temperature']
self.alpha = self.config['solver']['distilling']['alpha']
assert self.alpha >= 0.0, "alpha : {}".format(self.alpha)
assert self.alpha <= 1.0, "alpha : {}".format(self.alpha)
assert self.temperature >= 1, "temperature : {}".format(self.temperature)
self.T = tf.convert_to_tensor(self.temperature, dtype=tf.float32) #pylint: disable=invalid-name
#pylint: disable=too-many-arguments
def call(self,
logits=None,
input_length=None,
labels=None,
label_length=None,
**kwargs):
assert "soft_lables" in kwargs
soft_labels = kwargs["soft_labels"]
loss_standard = cross_entropy(
logits=logits,
input_length=input_length,
labels=labels,
label_length=label_length,
smoothing=self.smoothing)
loss_soft = cross_entropy(
logits=logits / self.T,
input_length=input_length,
labels=soft_labels,
label_length=label_length,
smoothing=self.smoothing)
# Since the magnitudes of the gradients produced by the soft targets
# scale as 1/T2 , it is important to multiply them by T2 when using
# both hard and soft targets
total_loss = self.alpha * tf.square(
self.T) * loss_soft + (1 - self.alpha) * loss_standard
return total_loss
@registers.loss.register
class CTCLoss(Loss):
''' ctc loss '''
def __init__(self, config): #pylint: disable=useless-super-delegation
super().__init__(config)
#pylint: disable=too-many-arguments
def call(self,
logits=None,
input_length=None,
labels=None,
label_length=None,
**kwargs):
blank_index = kwargs.get('blank_index', 0)
return ctc_lambda_loss(
logits=logits,
input_length=input_length,
labels=labels,
label_length=label_length,
blank_index=blank_index)
@registers.loss.register
class CrfLoss(Loss):
'''crf loss for sequence labeling'''
def __init__(self, config):
super().__init__(config)
# pylint: disable=too-many-arguments
def call(self,
logits=None,
input_length=None,
labels=None,
label_length=None,
**kwargs):
assert "model" in kwargs
model = kwargs["model"]
tags_scores = tf.reshape(
logits, [-1, model.max_len, model.seq_num_classes], name="scores")
loss, _ = crf_log_likelihood(tags_scores, labels, input_length,
model.transitions)
return loss
@registers.loss.register
class SequenceCrossEntropyLoss(Loss):
''' cross entropy loss for sequence to sequence '''
def __init__(self, config):
super().__init__(config)
#pylint: disable=too-many-arguments
def call(self,
logits=None,
input_length=None,
labels=None,
label_length=None,
**kwargs):
loss = mask_sequence_loss(logits, labels, input_length, label_length)
return loss
@registers.loss.register
class FocalLoss(Loss):
def __init__(self, config):
super().__init__(config)
self.gamma = 2
if 'gamma' in self._config['solver']['optimizer']:
self.gamma = self._config['solver']['optimizer']['gamma']
assert self.gamma >= 0, 'gamma must greater than or equal to zero'
def call(self,
logits=None,
input_length=None,
labels=None,
label_length=None,
**kwargs):
del input_length
del label_length
return focal_loss(
logits=logits,
labels=labels,
gamma=self.gamma,
name='focal_loss')
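# Minimal usage sketch (the config dict below only carries the keys read by
# CrossEntropyLoss; a real delta solver config contains much more):
#
#   config = {'solver': {'optimizer': {'label_smoothing': 0.0}}}
#   loss_fn = CrossEntropyLoss(config)
#   loss = loss_fn.call(logits=logits, labels=labels,
#                       input_length=None, label_length=None)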
|
the-stack_106_26974 | #!/usr/bin/env python
"""Plot ROC curve of variant called data"""
########################################################################
# File: plot_variant_accuracy.py
# executable: plot_variant_accuracy.py
#
# Author: Andrew Bailey
# History: Created 01/07/19
########################################################################
from argparse import ArgumentParser
import pandas as pd
import pickle
import os
import sys
import platform
import matplotlib as mpl
if os.environ.get('DISPLAY', '') == '':
print('no display found. Using non-interactive Agg backend')
mpl.use('Agg')
if platform.system() == "Darwin":
mpl.use("macosx")
from py3helpers.utils import list_dir
from py3helpers.classification import ClassificationMetrics
from py3helpers.utils import load_json, create_dot_dict
from signalalign.filter_reads import find_fast5s_from_ids_readdb, write_readdb, copy_files_from_readdb
from signalalign.variantCaller import AggregateOverReadsFull
from signalalign.utils.sequenceTools import CustomAmbiguityPositions
from timeit import default_timer as timer
def parse_args():
parser = ArgumentParser(description=__doc__)
# required arguments
parser.add_argument('--config', '-c', action='store',
dest='config', required=True, type=str, default=None,
help="Path to json config file")
args = parser.parse_args()
return args
def plot_roc_from_config(config):
"""Plotting function to handle logic of the config file. Mainly created to test function"""
config = create_dot_dict(config)
variants = config.variants
samples = config.samples
threshold = 0.500000001
if isinstance(config.jobs, int):
n_processes = config.jobs
else:
n_processes = 2
save_fig_dir = config.save_fig_dir
assert len(samples) > 0, "Must include samples in order to do comparison"
aor_handles = []
gwa_lables_list = []
per_site_label_list = []
plot_per_read = False
plot_genome_position_aggregate = False
plot_per_call = False
# process samples
for sample in samples:
tsvs = sample.full_tsvs
positions = sample.positions_file
label = sample.label
aor_h = AggregateOverReadsFull(tsvs, variants, verbose=True, processes=n_processes)
aor_h.marginalize_over_all_reads()
aor_handles.append(aor_h)
assert positions or label, "Must provide either a label: {} or a positions file: {}".format(label,
positions)
# use character as label if given
if label:
plot_genome_position_aggregate = True
plot_per_call = True
plot_per_read = True
aor_h.aggregate_position_probs = aor_h.generate_labels2(predicted_data=aor_h.aggregate_position_probs,
true_char=label)
aor_h.per_read_data = aor_h.generate_labels2(predicted_data=aor_h.per_read_data,
true_char=label)
aor_h.per_position_data = aor_h.generate_labels2(predicted_data=aor_h.per_position_data,
true_char=label)
# if positions file is given, check accuracy from that
elif positions:
plot_genome_position_aggregate = True
plot_per_call = True
genome_position_labels = CustomAmbiguityPositions.parseAmbiguityFile(positions)
aor_h.aggregate_position_probs = aor_h.generate_labels(labelled_positions=genome_position_labels,
predicted_data=aor_h.aggregate_position_probs)
aor_h.per_position_data = aor_h.generate_labels(labelled_positions=genome_position_labels,
predicted_data=aor_h.per_position_data)
# plot per read ROC curve
if plot_per_read:
all_per_read_labels = pd.concat([x.per_read_data for x in aor_handles], ignore_index=True)
data_type_name = "per_read"
plot_all_roc_curves(all_per_read_labels, variants, save_fig_dir, data_type_name, threshold=threshold)
# plot per call ROC curve
if plot_per_call:
all_site_labels = pd.concat([x.per_position_data for x in aor_handles], ignore_index=True)
data_type_name = "per_site_per_read"
plot_all_roc_curves(all_site_labels, variants, save_fig_dir, data_type_name, threshold=threshold)
# plot genome position calls
if plot_genome_position_aggregate:
all_genome_positions_labels = pd.concat([x.aggregate_position_probs for x in aor_handles], ignore_index=True)
data_type_name = "per_genomic_site"
plot_all_roc_curves(all_genome_positions_labels, variants, save_fig_dir, data_type_name, label_key="contig",
threshold=threshold)
return 0
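# Illustrative sketch of the config JSON implied by the attribute accesses above.
# The keys come from the code; the values are placeholders only.
#   {
#     "variants": "CE",
#     "jobs": 4,
#     "save_fig_dir": "/path/to/output_dir",
#     "samples": [
#       {"full_tsvs": "/path/to/full_tsvs", "positions_file": null, "label": "C"}
#     ]
#   }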
def plot_roc_and_precision_and_save_data(per_read_labels_only, per_read_probs_only, name, variants, save_fig_dir,
label_ids=None, threshold=0.5):
roc_h = ClassificationMetrics(per_read_labels_only, per_read_probs_only, label_ids=label_ids)
for variant in variants:
roc_path = None
precision_recall_path = None
confusion_recall_path = None
plot_probability_hist_path = None
if save_fig_dir:
roc_path = os.path.join(save_fig_dir, "{}_roc_{}".format(name, variant))
precision_recall_path = os.path.join(save_fig_dir, "{}_pr_{}".format(name, variant))
confusion_recall_path = os.path.join(save_fig_dir, "{}_confusion_{}".format(name, variant))
plot_probability_hist_path = os.path.join(save_fig_dir, "{}_prob_hist_{}".format(name, variant))
plot_1 = roc_h.plot_roc(variant, title="{} ROC for {}".format(name, variant), save_fig_path=roc_path)
plot_1.close()
plot_1 = roc_h.plot_precision_recall(variant, title="{} Precison Recall for {}".format(name, variant),
save_fig_path=precision_recall_path)
plot_1.close()
plot_1 = roc_h.plot_confusion_matrix(title="{} Confusion Matrix for {}".format(name, variant),
save_fig_path=confusion_recall_path, threshold=threshold, class_n=variant)
plot_1.close()
bins = max(int(len(roc_h.class_probabilities[variant]) / 30), 10)
plot_1 = roc_h.plot_probability_hist(variant, save_fig_path=plot_probability_hist_path, bins=bins,
normalize=False)
plot_1.close()
# save pickle of classification metrics class
if save_fig_dir:
path = os.path.join(save_fig_dir, "{}_classificationMetrics.pkl".format(name))
with open(path, "wb") as f:
pickle.dump(roc_h, f)
return 0
def plot_all_roc_curves(all_labels, variants, save_fig_dir, data_type_name, label_key="read_name", threshold=0.5):
all_per_read_labels_template = all_labels[all_labels["strand"] == 't']
all_per_read_labels_complement = all_labels[all_labels["strand"] == 'c']
names = ["{}_template".format(data_type_name), "{}_complement".format(data_type_name),
"{}_total".format(data_type_name)]
for name, data in zip(names, [all_per_read_labels_template, all_per_read_labels_complement, all_labels]):
per_read_labels_only = data[[x + "_label" for x in variants]]
per_read_probs_only = data[list(variants)]
label_ids = list(data[label_key])
per_read_labels_only.columns = list(variants)
if len(per_read_labels_only) > 0 and len(per_read_probs_only) > 0:
plot_roc_and_precision_and_save_data(per_read_labels_only, per_read_probs_only, name, variants,
save_fig_dir, label_ids=label_ids, threshold=threshold)
if save_fig_dir is not None:
all_labels.to_csv(os.path.join(save_fig_dir, data_type_name + ".tsv"), sep='\t', index=False)
def log_tp_fn_overlap(tp_classifications, fn_classifications, class_name):
"""Write out ids that overlap between true positives and false negatves"""
tp_ids = set(tp_classifications.get_tp_ids(class_name))
fn_ids = set(fn_classifications.get_fn_ids(class_name))
ids = tp_ids & fn_ids
return ids
def load_classifcation_metrics_pkl(classification_pkl):
"""Load a classificationMetrics pickle file"""
with open(classification_pkl, 'rb') as fh:
cm_h = pickle.load(fh)
return cm_h
def write_tp_fn_overlap_readdb(readdb, tp_pkl, fn_pkl, class_n, out_path, read_dirs, recursive=False):
"""Write a readdb file of reads which were true positives in one experiment and false negatives
in another experiment
:param readdb: read db file with ids for all data in both experiments
:param tp_pkl: path to ClassificationMetrics pkl data where true positives are going to be gathered
:param fn_pkl: path to ClassificationMetrics pkl data where false negatives are going to be gathered
:param class_n: name of the class to inspect
:param out_path: output path for readdb file
"""
tp_metrics = load_classifcation_metrics_pkl(tp_pkl)
fn_metrics = load_classifcation_metrics_pkl(fn_pkl)
overlap_ids = [x.split(".")[0] for x in log_tp_fn_overlap(tp_metrics, fn_metrics, class_n)]
data = [[id_name, f5_path] for id_name, f5_path in find_fast5s_from_ids_readdb(readdb, overlap_ids,
read_dirs, recursive=recursive)]
write_readdb(data, out_path)
print("{} tp in {} and fp in {}".format(len(overlap_ids), tp_pkl, fn_pkl))
return len(overlap_ids)
def main(config=None):
start = timer()
if config is None:
args = parse_args()
# load model files
assert os.path.exists(args.config), "Config file does not exist: {}".format(args.config)
config = load_json(args.config)
plot_roc_from_config(config)
stop = timer()
print("Running Time = {} seconds".format(stop - start), file=sys.stderr)
if __name__ == '__main__':
main()
|
the-stack_106_26979 | from manimlib.imports import *
import os
import pyclbr
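# Usage note: each scene below can be rendered individually from the command line,
# mirroring the __main__ block at the bottom of this file, e.g.
#   python -m manim manim_tutorial_P37.py Shapes -l
# where "Shapes" is any of the scene class names defined here.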
class Shapes(Scene):
#A few simple shapes
#Python 2.7 version runs in Python 3.7 without changes
def construct(self):
circle = Circle()
square = Square()
line=Line(np.array([3,0,0]),np.array([5,0,0]))
triangle=Polygon(np.array([0,0,0]),np.array([1,1,0]),np.array([1,-1,0]))
self.play(ShowCreation(circle))
self.play(FadeOut(circle))
self.play(GrowFromCenter(square))
self.play(Transform(square,triangle))
self.add(line)
class MoreShapes(Scene):
#A few more simple shapes
#2.7 version runs in 3.7 without any changes
#Note: I fixed my 'play command not found' issue by installing sox
def construct(self):
circle = Circle(color=PURPLE_A)
square = Square(fill_color=GOLD_B, fill_opacity=1, color=GOLD_A)
square.move_to(UP+LEFT)
circle.surround(square)
rectangle = Rectangle(height=2, width=3)
ellipse=Ellipse(width=3, height=1, color=RED)
ellipse.shift(2*DOWN+2*RIGHT)
pointer = CurvedArrow(2*RIGHT,5*RIGHT,color=MAROON_C)
arrow = Arrow(LEFT,UP)
arrow.next_to(circle,DOWN+LEFT)
rectangle.next_to(arrow,DOWN+LEFT)
ring=Annulus(inner_radius=.5, outer_radius=1, color=BLUE)
ring.next_to(ellipse, RIGHT)
self.add(pointer)
self.play(FadeIn(square))
self.play(Rotating(square),FadeIn(circle))
self.play(GrowArrow(arrow))
self.play(GrowFromCenter(rectangle), GrowFromCenter(ellipse), GrowFromCenter(ring))
class MovingShapes(Scene):
#Show the difference between .shift() and .move_to
def construct(self):
circle=Circle(color=TEAL_A)
circle.move_to(LEFT)
square=Circle()
square.move_to(LEFT+3*DOWN)
self.play(GrowFromCenter(circle), GrowFromCenter(square), rate=5)
self.play(ApplyMethod(circle.move_to,RIGHT), ApplyMethod(square.shift,RIGHT))
self.play(ApplyMethod(circle.move_to,RIGHT+UP), ApplyMethod(square.shift,RIGHT+UP))
self.play(ApplyMethod(circle.move_to,LEFT+UP), ApplyMethod(square.shift,LEFT+UP))
class AddingText(Scene):
#Adding text on the screen
def construct(self):
my_first_text=TextMobject("Writing with manim is fun")
second_line=TextMobject("and easy to do!")
second_line.next_to(my_first_text,DOWN)
third_line=TextMobject("for me and you!")
third_line.next_to(my_first_text,DOWN)
self.add(my_first_text, second_line)
self.wait(2)
self.play(Transform(second_line,third_line))
self.wait(2)
second_line.shift(3*DOWN)
self.play(ApplyMethod(my_first_text.shift,3*UP))
###Try uncommenting the following###
#self.play(ApplyMethod(second_line.move_to, LEFT_SIDE-2*LEFT))
#self.play(ApplyMethod(my_first_text.next_to,second_line))
class AddingMoreText(Scene):
#Playing around with text properties
def construct(self):
quote = TextMobject("Imagination is more important than knowledge")
quote.set_color(RED)
quote.to_edge(UP)
quote2 = TextMobject("A person who never made a mistake never tried anything new")
quote2.set_color(YELLOW)
author=TextMobject("-Albert Einstein")
author.scale(0.75)
author.next_to(quote.get_corner(DOWN+RIGHT),DOWN)
self.add(quote)
self.add(author)
self.wait(2)
self.play(Transform(quote,quote2),ApplyMethod(author.move_to,quote2.get_corner(DOWN+RIGHT)+DOWN+2*LEFT))
self.play(ApplyMethod(author.scale,1.5))
author.match_color(quote2)
self.play(FadeOut(quote))
class RotateAndHighlight(Scene):
#Rotation of text and highlighting with surrounding geometries
def construct(self):
square=Square(side_length=5,fill_color=YELLOW, fill_opacity=1)
label=TextMobject("Text at an angle")
label.bg=BackgroundRectangle(label,fill_opacity=1)
label_group=VGroup(label.bg,label) #Order matters
label_group.rotate(TAU/8)
label2=TextMobject("Boxed text",color=BLACK)
label2.bg=SurroundingRectangle(label2,color=BLUE,fill_color=RED, fill_opacity=.5)
label2_group=VGroup(label2,label2.bg)
label2_group.next_to(label_group,DOWN)
label3=TextMobject("Rainbow")
label3.scale(2)
label3.set_color_by_gradient(RED, ORANGE, YELLOW, GREEN, BLUE, PURPLE)
label3.to_edge(DOWN)
self.add(square)
self.play(FadeIn(label_group))
self.play(FadeIn(label2_group))
self.play(FadeIn(label3))
class BasicEquations(Scene):
#A short script showing how to use Latex commands
def construct(self):
eq1=TextMobject("$\\vec{X}_0 \\cdot \\vec{Y}_1 = 3$")
eq1.shift(2*UP)
eq2=TexMobject(r"\vec{F}_{net} = \sum_i \vec{F}_i")
eq2.shift(2*DOWN)
self.play(Write(eq1))
self.play(Write(eq2))
class ColoringEquations(Scene):
#Grouping and coloring parts of equations
def construct(self):
line1=TexMobject(r"\text{The vector } \vec{F}_{net} \text{ is the net }",r"\text{force }",r"\text{on object of mass }")
line1.set_color_by_tex("force", BLUE)
line2=TexMobject("m", "\\text{ and acceleration }", "\\vec{a}", ". ")
line2.set_color_by_tex_to_color_map({
"m": YELLOW,
"{a}": RED
})
sentence=VGroup(line1,line2)
sentence.arrange_submobjects(DOWN, buff=MED_LARGE_BUFF)
self.play(Write(sentence))
class UsingBraces(Scene):
#Using braces to group text together
def construct(self):
eq1A = TextMobject("4x + 3y")
eq1B = TextMobject("=")
eq1C = TextMobject("0")
eq2A = TextMobject("5x -2y")
eq2B = TextMobject("=")
eq2C = TextMobject("3")
eq1B.next_to(eq1A,RIGHT)
eq1C.next_to(eq1B,RIGHT)
eq2A.shift(DOWN)
eq2B.shift(DOWN)
eq2C.shift(DOWN)
eq2A.align_to(eq1A,LEFT)
eq2B.align_to(eq1B,LEFT)
eq2C.align_to(eq1C,LEFT)
eq_group=VGroup(eq1A,eq2A)
braces=Brace(eq_group,LEFT)
eq_text = braces.get_text("A pair of equations")
self.add(eq1A, eq1B, eq1C)
self.add(eq2A, eq2B, eq2C)
self.play(GrowFromCenter(braces),Write(eq_text))
class UsingBracesConcise(Scene):
#A more concise block of code with all columns aligned
def construct(self):
eq1_text=["4","x","+","3","y","=","0"]
eq2_text=["5","x","-","2","y","=","3"]
eq1_mob=TexMobject(*eq1_text)
eq2_mob=TexMobject(*eq2_text)
eq1_mob.set_color_by_tex_to_color_map({
"x":RED_B,
"y":GREEN_C
})
eq2_mob.set_color_by_tex_to_color_map({
"x":RED_B,
"y":GREEN_C
})
for i,item in enumerate(eq2_mob):
item.align_to(eq1_mob[i],LEFT)
eq1=VGroup(*eq1_mob)
eq2=VGroup(*eq2_mob)
eq2.shift(DOWN)
eq_group=VGroup(eq1,eq2)
braces=Brace(eq_group,LEFT)
eq_text = braces.get_text("A pair of equations")
self.play(Write(eq1),Write(eq2))
self.play(GrowFromCenter(braces),Write(eq_text))
class PlotFunctions(GraphScene):
CONFIG = {
"x_min" : -10,
"x_max" : 10.3,
"y_min" : -1.5,
"y_max" : 1.5,
"graph_origin" : ORIGIN ,
"function_color" : RED ,
"axes_color" : GREEN,
"x_labeled_nums" :range(-10,12,2),
}
def construct(self):
self.setup_axes(animate=True)
func_graph=self.get_graph(self.func_to_graph,self.function_color)
func_graph2=self.get_graph(self.func_to_graph2)
vert_line = self.get_vertical_line_to_graph(TAU,func_graph,color=YELLOW)
graph_lab = self.get_graph_label(func_graph, label = "\\cos(x)")
graph_lab2=self.get_graph_label(func_graph2,label = "\\sin(x)", x_val=-10, direction=UP/2)
two_pi = TexMobject("x = 2 \\pi")
label_coord = self.input_to_graph_point(TAU,func_graph)
two_pi.next_to(label_coord,RIGHT+UP)
self.play(ShowCreation(func_graph),ShowCreation(func_graph2))
self.play(ShowCreation(vert_line), ShowCreation(graph_lab), ShowCreation(graph_lab2),ShowCreation(two_pi))
def func_to_graph(self,x):
return np.cos(x)
def func_to_graph2(self,x):
return np.sin(x)
class ExampleApproximation(GraphScene):
CONFIG = {
"function" : lambda x : np.cos(x),
"function_color" : BLUE,
"taylor" : [lambda x: 1, lambda x: 1-x**2/2, lambda x: 1-x**2/math.factorial(2)+x**4/math.factorial(4), lambda x: 1-x**2/2+x**4/math.factorial(4)-x**6/math.factorial(6),
lambda x: 1-x**2/math.factorial(2)+x**4/math.factorial(4)-x**6/math.factorial(6)+x**8/math.factorial(8), lambda x: 1-x**2/math.factorial(2)+x**4/math.factorial(4)-x**6/math.factorial(6)+x**8/math.factorial(8) - x**10/math.factorial(10)],
"center_point" : 0,
"approximation_color" : GREEN,
"x_min" : -10,
"x_max" : 10,
"y_min" : -1,
"y_max" : 1,
"graph_origin" : ORIGIN ,
"x_labeled_nums" :range(-10,12,2),
}
def construct(self):
self.setup_axes(animate=True)
func_graph = self.get_graph(
self.function,
self.function_color,
)
approx_graphs = [
self.get_graph(
f,
self.approximation_color
)
for f in self.taylor
]
term_num = [
TexMobject("n = " + str(n),aligned_edge=TOP)
for n in range(0,8)]
#[t.to_edge(BOTTOM,buff=SMALL_BUFF) for t in term_num]
#term = TexMobject("")
#term.to_edge(BOTTOM,buff=SMALL_BUFF)
term = VectorizedPoint(3*DOWN)
approx_graph = VectorizedPoint(
self.input_to_graph_point(self.center_point, func_graph)
)
self.play(
ShowCreation(func_graph),
)
for n,graph in enumerate(approx_graphs):
self.play(
Transform(approx_graph, graph, run_time = 2),
Transform(term,term_num[n])
)
self.wait()
class DrawAnAxis(Scene):
CONFIG = { "plane_kwargs" : {
"x_line_frequency" : 2,
"y_line_frequency" :2
}
}
def construct(self):
my_plane = NumberPlane(**self.plane_kwargs)
my_plane.add(my_plane.get_axis_labels())
self.add(my_plane)
#self.wait()
class SimpleField(Scene):
CONFIG = {
"plane_kwargs" : {
"color" : RED
},
}
def construct(self):
plane = NumberPlane(**self.plane_kwargs) #Create axes and grid
plane.add(plane.get_axis_labels()) #add x and y label
self.add(plane) #Place grid on screen
points = [x*RIGHT+y*UP
for x in np.arange(-5,5,1)
for y in np.arange(-5,5,1)
] #List of vectors pointing to each grid point
vec_field = [] #Empty list to use in for loop
for point in points:
field = 0.5*RIGHT + 0.5*UP #Constant field up and to right
result = Vector(field).shift(point) #Create vector and shift it to grid point
vec_field.append(result) #Append to list
draw_field = VGroup(*vec_field) #Pass list of vectors to create a VGroup
self.play(ShowCreation(draw_field)) #Draw VGroup on screen
class FieldWithAxes(Scene):
CONFIG = {
"plane_kwargs" : {
"color" : RED_B
},
"point_charge_loc" : 0.5*RIGHT-1.5*UP,
}
def construct(self):
plane = NumberPlane(**self.plane_kwargs)
#plane.main_lines.fade(.9) #doesn't work in most recent commit
plane.add(plane.get_axis_labels())
self.add(plane)
field = VGroup(*[self.calc_field(x*RIGHT+y*UP)
for x in np.arange(-9,9,1)
for y in np.arange(-5,5,1)
])
self.play(ShowCreation(field))
def calc_field(self,point):
#This calculates the field at a single point.
x,y = point[:2]
Rx,Ry = self.point_charge_loc[:2]
r = math.sqrt((x-Rx)**2 + (y-Ry)**2)
efield = (point - self.point_charge_loc)/r**3
#efield = np.array((-y,x,0))/math.sqrt(x**2+y**2) #Try one of these two fields
#efield = np.array(( -2*(y%2)+1 , -2*(x%2)+1 , 0 ))/3 #Try one of these two fields
return Vector(efield).shift(point)
class ExampleThreeD(ThreeDScene):
CONFIG = {
"plane_kwargs" : {
"color" : RED_B
},
"point_charge_loc" : 0.5*RIGHT-1.5*UP,
}
def construct(self):
plane = NumberPlane(**self.plane_kwargs)
#plane.main_lines.fade(.9) #Doesn't work in most recent commit
plane.add(plane.get_axis_labels())
self.add(plane)
field2D = VGroup(*[self.calc_field2D(x*RIGHT+y*UP)
for x in np.arange(-9,9,1)
for y in np.arange(-5,5,1)
])
self.set_camera_orientation(phi=PI/3,gamma=PI/5)
self.play(ShowCreation(field2D))
self.wait()
#self.move_camera(gamma=0,run_time=1) #Doesn't work in most recent commit
#self.move_camera(phi=3/4*PI, theta=-PI/2) #Doesn't work in most recent commit
self.begin_ambient_camera_rotation(rate=0.1)
self.wait(6)
def calc_field2D(self,point):
x,y = point[:2]
Rx,Ry = self.point_charge_loc[:2]
r = math.sqrt((x-Rx)**2 + (y-Ry)**2)
efield = (point - self.point_charge_loc)/r**3
return Vector(efield).shift(point)
class EFieldInThreeD(ThreeDScene):
CONFIG = {
"plane_kwargs" : {
"color" : RED_B
},
"point_charge_loc" : 0.5*RIGHT-1.5*UP,
}
def construct(self):
plane = NumberPlane(**self.plane_kwargs)
#plane.main_lines.fade(.9) #Doesn't work in most recent commit
plane.add(plane.get_axis_labels())
self.add(plane)
field2D = VGroup(*[self.calc_field2D(x*RIGHT+y*UP)
for x in np.arange(-9,9,1)
for y in np.arange(-5,5,1)
])
field3D = VGroup(*[self.calc_field3D(x*RIGHT+y*UP+z*OUT)
for x in np.arange(-9,9,1)
for y in np.arange(-5,5,1)
for z in np.arange(-5,5,1)])
self.play(ShowCreation(field3D))
self.wait()
#self.move_camera(0.8*np.pi/2, -0.45*np.pi) #Doesn't work in most recent commit
self.begin_ambient_camera_rotation()
self.wait(6)
def calc_field2D(self,point):
x,y = point[:2]
Rx,Ry = self.point_charge_loc[:2]
r = math.sqrt((x-Rx)**2 + (y-Ry)**2)
efield = (point - self.point_charge_loc)/r**3
return Vector(efield).shift(point)
def calc_field3D(self,point):
x,y,z = point
Rx,Ry,Rz = self.point_charge_loc
r = math.sqrt((x-Rx)**2 + (y-Ry)**2+(z-Rz)**2)
efield = (point - self.point_charge_loc)/r**3
#efield = np.array((-y,x,z))/math.sqrt(x**2+y**2+z**2)
return Vector(efield).shift(point)
class MovingCharges(Scene):
CONFIG = {
"plane_kwargs" : {
"color" : RED_B
},
"point_charge_loc" : 0.5*RIGHT-1.5*UP,
}
def construct(self):
plane = NumberPlane(**self.plane_kwargs)
#plane.main_lines.fade(.9) #Doesn't work in most recent commit
plane.add(plane.get_axis_labels())
self.add(plane)
field = VGroup(*[self.calc_field(x*RIGHT+y*UP)
for x in np.arange(-9,9,1)
for y in np.arange(-5,5,1)
])
self.field=field
source_charge = self.Positron().move_to(self.point_charge_loc)
self.play(FadeIn(source_charge))
self.play(ShowCreation(field))
self.moving_charge()
def calc_field(self,point):
x,y = point[:2]
Rx,Ry = self.point_charge_loc[:2]
r = math.sqrt((x-Rx)**2 + (y-Ry)**2)
efield = (point - self.point_charge_loc)/r**3
return Vector(efield).shift(point)
def moving_charge(self):
numb_charges=4
possible_points = [v.get_start() for v in self.field]
points = random.sample(possible_points, numb_charges)
particles = VGroup(*[
self.Positron().move_to(point)
for point in points
])
for particle in particles:
particle.velocity = np.array((0,0,0))
self.play(FadeIn(particles))
self.moving_particles = particles
self.add_foreground_mobjects(self.moving_particles )
self.always_continually_update = True
self.wait(10)
def field_at_point(self,point):
x,y = point[:2]
Rx,Ry = self.point_charge_loc[:2]
r = math.sqrt((x-Rx)**2 + (y-Ry)**2)
efield = (point - self.point_charge_loc)/r**3
return efield
def continual_update(self, *args, **kwargs):
if hasattr(self, "moving_particles"):
dt = self.frame_duration
for p in self.moving_particles:
accel = self.field_at_point(p.get_center())
p.velocity = p.velocity + accel*dt
p.shift(p.velocity*dt)
class Positron(Circle):
CONFIG = {
"radius" : 0.2,
"stroke_width" : 3,
"color" : RED,
"fill_color" : RED,
"fill_opacity" : 0.5,
}
def __init__(self, **kwargs):
Circle.__init__(self, **kwargs)
plus = TexMobject("+")
plus.scale(0.7)
plus.move_to(self)
self.add(plus)
class FieldOfMovingCharge(Scene):
CONFIG = {
"plane_kwargs" : {
"color" : RED_B
},
"point_charge_start_loc" : 5.5*LEFT-1.5*UP,
}
def construct(self):
plane = NumberPlane(**self.plane_kwargs)
#plane.main_lines.fade(.9) #Doesn't work in most recent commit
plane.add(plane.get_axis_labels())
self.add(plane)
field = VGroup(*[self.create_vect_field(self.point_charge_start_loc,x*RIGHT+y*UP)
for x in np.arange(-9,9,1)
for y in np.arange(-5,5,1)
])
self.field=field
self.source_charge = self.Positron().move_to(self.point_charge_start_loc)
self.source_charge.velocity = np.array((1,0,0))
self.play(FadeIn(self.source_charge))
self.play(ShowCreation(field))
self.moving_charge()
def create_vect_field(self,source_charge,observation_point):
return Vector(self.calc_field(source_charge,observation_point)).shift(observation_point)
def calc_field(self,source_point,observation_point):
x,y,z = observation_point
Rx,Ry,Rz = source_point
r = math.sqrt((x-Rx)**2 + (y-Ry)**2 + (z-Rz)**2)
if r<0.0000001: #Prevent divide by zero
efield = np.array((0,0,0))
else:
efield = (observation_point - source_point)/r**3
return efield
def moving_charge(self):
numb_charges=3
possible_points = [v.get_start() for v in self.field]
points = random.sample(possible_points, numb_charges)
particles = VGroup(self.source_charge, *[
self.Positron().move_to(point)
for point in points
])
for particle in particles[1:]:
particle.velocity = np.array((0,0,0))
self.play(FadeIn(particles[1:]))
self.moving_particles = particles
self.add_foreground_mobjects(self.moving_particles )
self.always_continually_update = True
self.wait(10)
def continual_update(self, *args, **kwargs):
Scene.continual_update(self, *args, **kwargs)
if hasattr(self, "moving_particles"):
dt = self.frame_duration
for v in self.field:
field_vect=np.zeros(3)
for p in self.moving_particles:
field_vect = field_vect + self.calc_field(p.get_center(), v.get_start())
v.put_start_and_end_on(v.get_start(), field_vect+v.get_start())
for p in self.moving_particles:
accel = np.zeros(3)
p.velocity = p.velocity + accel*dt
p.shift(p.velocity*dt)
class Positron(Circle):
CONFIG = {
"radius" : 0.2,
"stroke_width" : 3,
"color" : RED,
"fill_color" : RED,
"fill_opacity" : 0.5,
}
def __init__(self, **kwargs):
Circle.__init__(self, **kwargs)
plus = TexMobject("+")
plus.scale(0.7)
plus.move_to(self)
self.add(plus)
HEAD_INDEX = 0
BODY_INDEX = 1
ARMS_INDEX = 2
LEGS_INDEX = 3
class StickMan(SVGMobject):
CONFIG = {
"color" : BLUE_E,
"file_name_prefix": "stick_man",
"stroke_width" : 2,
"stroke_color" : WHITE,
"fill_opacity" : 1.0,
"height" : 3,
}
def __init__(self, mode = "plain", **kwargs):
digest_config(self, kwargs)
self.mode = mode
self.parts_named = False
try:
svg_file = os.path.join(
SVG_IMAGE_DIR,
"%s_%s.svg" % (self.file_name_prefix, mode)
)
SVGMobject.__init__(self, file_name=svg_file, **kwargs)
except:
warnings.warn("No %s design with mode %s" %
(self.file_name_prefix, mode))
svg_file = os.path.join(
SVG_IMAGE_DIR,
"stick_man_plain.svg",
)
SVGMobject.__init__(self, mode="plain", file_name=svg_file, **kwargs)
def name_parts(self):
self.head = self.submobjects[HEAD_INDEX]
self.body = self.submobjects[BODY_INDEX]
self.arms = self.submobjects[ARMS_INDEX]
self.legs = self.submobjects[LEGS_INDEX]
self.parts_named = True
def init_colors(self):
SVGMobject.init_colors(self)
if not self.parts_named:
self.name_parts()
self.head.set_fill(self.color, opacity = 1)
self.body.set_fill(RED, opacity = 1)
self.arms.set_fill(YELLOW, opacity = 1)
self.legs.set_fill(BLUE, opacity = 1)
return self
class Waving(Scene):
def construct(self):
start_man = StickMan()
plain_man = StickMan()
waving_man = StickMan("wave")
self.add(start_man)
self.wait()
self.play(Transform(start_man,waving_man))
self.play(Transform(start_man,plain_man))
self.wait()
class CirclesAndSquares(SVGMobject):
CONFIG = {
"color" : BLUE_E,
"file_name_prefix": "circles_and_squares",
"stroke_width" : 2,
"stroke_color" : WHITE,
"fill_opacity" : 1.0,
"height" : 3,
"start_corner" : None,
"circle_index" : 0,
"line1_index" :1,
"line2_index" : 2,
"square1_index" : 3,
"square2_index" : 4,
}
def __init__(self, mode = "plain", **kwargs):
digest_config(self, kwargs)
self.mode = mode
self.parts_named = False
try:
svg_file = os.path.join(
SVG_IMAGE_DIR,
"%s_%s.svg" % (self.file_name_prefix, mode)
)
SVGMobject.__init__(self, file_name=svg_file, **kwargs)
except:
warnings.warn("No %s design with mode %s" %
(self.file_name_prefix, mode))
svg_file = os.path.join(
SVG_IMAGE_DIR,
"circles_and_squares_plain.svg",
)
SVGMobject.__init__(self, mode="plain", file_name=svg_file, **kwargs)
def name_parts(self):
self.circle = self.submobjects[self.circle_index]
self.line1 = self.submobjects[self.line1_index]
self.line2 = self.submobjects[self.line2_index]
self.square1 = self.submobjects[self.square1_index]
self.square2 = self.submobjects[self.square2_index]
self.parts_named = True
def init_colors(self):
SVGMobject.init_colors(self)
self.name_parts()
self.circle.set_fill(RED, opacity = 1)
self.line1.set_fill(self.color, opacity = 0)
self.line2.set_fill(self.color, opacity = 0)
self.square1.set_fill(GREEN, opacity = 1)
self.square2.set_fill(BLUE, opacity = 1)
return self
class SVGCircleAndSquare(Scene):
def construct(self):
thingy = CirclesAndSquares()
self.add(thingy)
self.wait()
if __name__ == "__main__":
# Call this file at command line to make sure all scenes work with version of manim
# type "python manim_tutorial_P37.py" at command line to run all scenes in this file
#Must have "import os" and "import pyclbr" at start of file to use this
###Using Python class browser to determine which classes are defined in this file
module_name = 'manim_tutorial_P37' #Name of current file
module_info = pyclbr.readmodule(module_name)
for item in module_info.values():
if item.module==module_name:
print(item.name)
os.system("python -m manim manim_tutorial_P37.py %s -l" % item.name) #Does not play files |
the-stack_106_26980 | from __future__ import annotations
import json
import os
import sys
from typing import TYPE_CHECKING, Any, List, Tuple, cast
from tfx.orchestration.portable import data_types
from tfx.orchestration.portable.base_executor_operator import (
BaseExecutorOperator,
)
from tfx.proto.orchestration import (
executable_spec_pb2,
execution_result_pb2,
pipeline_pb2,
)
import coalescenceml
import coalescenceml.constants
from coalescenceml.directory import Directory
from coalescenceml.io import fileio
from coalescenceml.logger import get_logger
from coalescenceml.step.utils import (
INTERNAL_EXECUTION_PARAMETER_PREFIX,
PARAM_CUSTOM_STEP_OPERATOR,
)
from coalescenceml.utils import json_utils, source_utils
if TYPE_CHECKING:
from coalescenceml.stack import Stack
from coalescenceml.step_operator import BaseStepOperator
logger = get_logger(__name__)
def _write_execution_info(
execution_info: data_types.ExecutionInfo, path: str
) -> None:
"""Writes execution information to a given path."""
execution_info_bytes = execution_info.to_proto().SerializeToString()
with fileio.open(path, "wb") as f:
f.write(execution_info_bytes)
logger.debug("Finished writing execution info to '%s'", path)
def _read_executor_output(
output_path: str,
) -> execution_result_pb2.ExecutorOutput:
"""Reads executor output from the given path.
Returns:
Executor output object.
Raises:
RuntimeError: If no output is written to the given path.
"""
if fileio.exists(output_path):
with fileio.open(output_path, "rb") as f:
return execution_result_pb2.ExecutorOutput.FromString(f.read())
else:
raise RuntimeError(
f"Unable to find executor output at path '{output_path}'."
)
class StepExecutorOperator(BaseExecutorOperator):
"""StepExecutorOperator extends TFX's BaseExecutorOperator.
This class can be passed as a custom executor operator during
a pipeline run which will then be used to call the step's
configured step operator to launch it in some environment.
"""
SUPPORTED_EXECUTOR_SPEC_TYPE = [
executable_spec_pb2.PythonClassExecutableSpec
]
SUPPORTED_PLATFORM_CONFIG_TYPE: List[Any] = []
@staticmethod
def _collect_requirements(
stack: Stack,
pipeline_node: pipeline_pb2.PipelineNode,
) -> List[str]:
"""Collects all requirements necessary to run a step.
Args:
stack: Stack on which the step is being executed.
pipeline_node: Pipeline node info for a step.
Returns:
Alphabetically sorted list of pip requirements.
"""
requirements = stack.requirements()
# Add pipeline requirements from the corresponding node context
for context in pipeline_node.contexts.contexts:
if context.type.name == "pipeline_requirements":
pipeline_requirements = context.properties[
"pipeline_requirements"
].field_value.string_value.split(" ")
requirements.update(pipeline_requirements)
break
requirements.add(f"coalescenceml=={coalescenceml.__version__}")
return sorted(requirements)
@staticmethod
def _resolve_user_modules(
pipeline_node: pipeline_pb2.PipelineNode,
) -> Tuple[str, str]:
"""Resolves the main and step module.
Args:
pipeline_node: Pipeline node info for a step.
Returns:
A tuple containing the path of the resolved main module and step
class.
"""
main_module_path = coalescenceml.constants.USER_MAIN_MODULE
if not main_module_path:
main_module_path = source_utils.get_module_source_from_module(
sys.modules["__main__"]
)
step_type = cast(str, pipeline_node.node_info.type.name)
step_module_path, step_class = step_type.rsplit(".", maxsplit=1)
if step_module_path == "__main__":
step_module_path = main_module_path
step_source_path = f"{step_module_path}.{step_class}"
return main_module_path, step_source_path
@staticmethod
def _get_step_operator(
stack: Stack, execution_info: data_types.ExecutionInfo
) -> "BaseStepOperator":
"""Fetches the step operator specified in the execution info.
Args:
stack: Stack on which the step is being executed.
execution_info: Execution info needed to run the step.
Returns:
The step operator to run a step.
"""
step_operator = stack.step_operator
# the two following errors should never happen as the stack gets
# validated before running the pipeline
if not step_operator:
raise RuntimeError(
f"No step operator specified for active stack '{stack.name}'."
)
step_operator_property_name = (
INTERNAL_EXECUTION_PARAMETER_PREFIX + PARAM_CUSTOM_STEP_OPERATOR
)
required_step_operator = json.loads(
execution_info.exec_properties[step_operator_property_name]
)
if required_step_operator != step_operator.name:
raise RuntimeError(
f"No step operator named '{required_step_operator}' in active "
f"stack '{stack.name}'."
)
return step_operator
def run_executor(
self,
execution_info: data_types.ExecutionInfo,
) -> execution_result_pb2.ExecutorOutput:
"""Invokes the executor with inputs provided by the Launcher.
Args:
execution_info: Necessary information to run the executor.
Returns:
The executor output.
"""
# Pretty sure these attributes will always be not None, assert here so
# mypy doesn't complain
assert execution_info.pipeline_node
assert execution_info.pipeline_info
assert execution_info.pipeline_run_id
assert execution_info.tmp_dir
assert execution_info.execution_output_uri
step_name = execution_info.pipeline_node.node_info.id
stack = Directory().active_stack
step_operator = self._get_step_operator(
stack=stack, execution_info=execution_info
)
requirements = self._collect_requirements(
stack=stack, pipeline_node=execution_info.pipeline_node
)
# Write the execution info to a temporary directory inside the artifact
# store so the step operator entrypoint can load it
execution_info_path = os.path.join(
execution_info.tmp_dir, "coalescenceml_execution_info.pb"
)
_write_execution_info(execution_info, path=execution_info_path)
main_module, step_source_path = self._resolve_user_modules(
pipeline_node=execution_info.pipeline_node
)
input_artifact_types_path = os.path.join(
execution_info.tmp_dir, "input_artifacts.json"
)
input_artifact_type_mapping = {
input_name: source_utils.resolve_class(artifacts[0].__class__)
for input_name, artifacts in execution_info.input_dict.items()
}
json_utils.write_json(
input_artifact_types_path, input_artifact_type_mapping
)
entrypoint_command = [
"python",
"-m",
"coalescenceml.step_operators.entrypoint",
"--main_module",
main_module,
"--step_source_path",
step_source_path,
"--execution_info_path",
execution_info_path,
"--input_artifact_types_path",
input_artifact_types_path,
]
logger.info(
"Using step operator `%s` to run step `%s`.",
step_operator.name,
step_name,
)
logger.debug(
"Step operator requirements: %s, entrypoint command: %s.",
requirements,
entrypoint_command,
)
step_operator.launch(
pipeline_name=execution_info.pipeline_info.id,
run_name=execution_info.pipeline_run_id,
requirements=requirements,
entrypoint_command=entrypoint_command,
)
return _read_executor_output(execution_info.execution_output_uri)
|
the-stack_106_26981 | from elasticsearch_dsl import analyzer
from django_elasticsearch_dsl import Document, Index, fields
from django_elasticsearch_dsl.registries import registry
from .models import Ad, Category, Car, Manufacturer
index_settings = {
'number_of_shards': 1,
'number_of_replicas': 0,
}
html_strip = analyzer(
'html_strip',
tokenizer="standard",
filter=["lowercase", "stop", "snowball"],
char_filter=["html_strip"]
)
@registry.register_document
class CarDocument(Document):
# test can override __init__
def __init__(self, *args, **kwargs):
super(CarDocument, self).__init__(*args, **kwargs)
manufacturer = fields.ObjectField(properties={
'name': fields.StringField(),
'country': fields.StringField(),
})
ads = fields.NestedField(properties={
'description': fields.StringField(analyzer=html_strip),
'title': fields.StringField(),
'pk': fields.IntegerField(),
})
categories = fields.NestedField(properties={
'title': fields.StringField(),
'slug': fields.StringField(),
'icon': fields.FileField(),
})
class Django:
model = Car
related_models = [Ad, Manufacturer, Category]
fields = [
'name',
'launched',
'type',
]
class Index:
name = 'test_cars'
settings = index_settings
def get_queryset(self):
return super(CarDocument, self).get_queryset().select_related(
'manufacturer')
def get_instances_from_related(self, related_instance):
if isinstance(related_instance, Ad):
return related_instance.car
# otherwise it's a Manufacturer or a Category
return related_instance.car_set.all()
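# Illustrative usage (the query values are made up): once the 'test_cars' index is
# populated, the document can be queried through the elasticsearch-dsl API, e.g.
#   CarDocument.search().query("match", name="Golf")
# which returns a Search object limited to this document's index.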
@registry.register_document
class ManufacturerDocument(Document):
country = fields.StringField()
class Django:
model = Manufacturer
fields = [
'name',
'created',
'country_code',
'logo',
]
class Index:
name = 'index_settings'
settings = index_settings
@registry.register_document
class CarWithPrepareDocument(Document):
manufacturer = fields.ObjectField(properties={
'name': fields.StringField(),
'country': fields.StringField(),
})
manufacturer_short = fields.ObjectField(properties={
'name': fields.StringField(),
})
class Django:
model = Car
related_models = [Manufacturer]
fields = [
'name',
'launched',
'type',
]
class Index:
name = 'car_with_prepare_index'
def prepare_manufacturer_with_related(self, car, related_to_ignore):
if (car.manufacturer is not None and car.manufacturer !=
related_to_ignore):
return {
'name': car.manufacturer.name,
'country': car.manufacturer.country(),
}
return {}
def prepare_manufacturer_short(self, car):
if car.manufacturer is not None:
return {
'name': car.manufacturer.name,
}
return {}
def get_instances_from_related(self, related_instance):
return related_instance.car_set.all()
@registry.register_document
class AdDocument(Document):
description = fields.TextField(
analyzer=html_strip,
fields={'raw': fields.KeywordField()}
)
class Django:
model = Ad
fields = [
'title',
'created',
'modified',
'url',
]
class Index:
name = 'test_ads'
settings = index_settings
@registry.register_document
class PaginatedAdDocument(Document):
class Django:
model = Ad
queryset_pagination = 2
fields = [
'title',
'created',
'modified',
'url',
]
class Index:
name = 'ad_index'
def get_queryset(self):
return Ad.objects.all().order_by('-id')
ad_index = AdDocument._index
car_index = CarDocument._index
|
the-stack_106_26984 | #-*-coding: utf-8-*-
#-*-coding: euc-kr-*-
import requests
import base64
def check(ip):
url = 'http://'+ip+':3500/ping'
r = requests.post(url)
if r.ok:
result = r.json()['result']
        if result == 'pong': # if the server is alive
return True
    return False # if the server is down
def get_pose(img_name, ip):
# 저장된 캡쳐 이미지 불러온 후base64코드로 인코드
with open(img_name, 'rb') as f:
im_b64 = base64.b64encode(f.read()).decode('utf8')
    # Build the image into a payload and POST it to the Flask server
payload = {'img_base64': im_b64}
headers = {}
url2 = 'http://'+ip+':3500/pose_detection'
r = requests.post(url2, json=payload, headers=headers)
if r.ok:
pose = r.json()
if pose[0]['probability'] > 0.8:
return pose[0]['className']
return 'none'
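# Illustrative usage (the IP address and file name are placeholders):
#   if check('192.168.0.10'):
#       pose_name = get_pose('capture.jpg', '192.168.0.10')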
|
the-stack_106_26987 | import pickle
import numpy as np
from ...ops.iou3d_nms import iou3d_nms_utils
from ...utils import box_utils
class DataBaseSampler(object):
def __init__(self, root_path, sampler_cfg, class_names, logger=None):
self.root_path = root_path
self.class_names = class_names
self.sampler_cfg = sampler_cfg
self.logger = logger
self.db_infos = {}
for class_name in class_names:
self.db_infos[class_name] = []
for db_info_path in sampler_cfg.DB_INFO_PATH:
db_info_path = self.root_path.resolve() / db_info_path
with open(str(db_info_path), 'rb') as f:
infos = pickle.load(f)
[self.db_infos[cur_class].extend(infos[cur_class]) for cur_class in class_names]
for func_name, val in sampler_cfg.PREPARE.items():
self.db_infos = getattr(self, func_name)(self.db_infos, val)
self.sample_groups = {}
self.sample_class_num = {}
self.limit_whole_scene = sampler_cfg.get('LIMIT_WHOLE_SCENE', False)
for x in sampler_cfg.SAMPLE_GROUPS:
class_name, sample_num = x.split(':')
if class_name not in class_names:
continue
self.sample_class_num[class_name] = sample_num
self.sample_groups[class_name] = {
'sample_num': sample_num,
'pointer': len(self.db_infos[class_name]),
'indices': np.arange(len(self.db_infos[class_name]))
}
def __getstate__(self):
d = dict(self.__dict__)
del d['logger']
return d
def __setstate__(self, d):
self.__dict__.update(d)
def filter_by_difficulty(self, db_infos, removed_difficulty):
new_db_infos = {}
for key, dinfos in db_infos.items():
pre_len = len(dinfos)
new_db_infos[key] = [
info for info in dinfos
if info['difficulty'] not in removed_difficulty
]
if self.logger is not None:
self.logger.info('Database filter by difficulty %s: %d => %d' % (key, pre_len, len(new_db_infos[key])))
return new_db_infos
def filter_by_min_points(self, db_infos, min_gt_points_list):
for name_num in min_gt_points_list:
name, min_num = name_num.split(':')
min_num = int(min_num)
if min_num > 0 and name in db_infos.keys():
filtered_infos = []
for info in db_infos[name]:
if info['num_points_in_gt'] >= min_num:
filtered_infos.append(info)
if self.logger is not None:
self.logger.info('Database filter by min points %s: %d => %d' %
(name, len(db_infos[name]), len(filtered_infos)))
db_infos[name] = filtered_infos
return db_infos
def sample_with_fixed_number(self, class_name, sample_group):
"""
Args:
class_name:
sample_group:
Returns:
"""
sample_num, pointer, indices = int(sample_group['sample_num']), sample_group['pointer'], sample_group['indices']
if pointer >= len(self.db_infos[class_name]):
indices = np.random.permutation(len(self.db_infos[class_name]))
pointer = 0
sampled_dict = [self.db_infos[class_name][idx] for idx in indices[pointer: pointer + sample_num]]
pointer += sample_num
sample_group['pointer'] = pointer
sample_group['indices'] = indices
return sampled_dict
@staticmethod
def put_boxes_on_road_planes(gt_boxes, road_planes, calib):
"""
Only validate in KITTIDataset
Args:
gt_boxes: (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
road_planes: [a, b, c, d]
calib:
Returns:
"""
a, b, c, d = road_planes
center_cam = calib.lidar_to_rect(gt_boxes[:, 0:3])
cur_height_cam = (-d - a * center_cam[:, 0] - c * center_cam[:, 2]) / b
center_cam[:, 1] = cur_height_cam
cur_lidar_height = calib.rect_to_lidar(center_cam)[:, 2]
mv_height = gt_boxes[:, 2] - gt_boxes[:, 5] / 2 - cur_lidar_height
gt_boxes[:, 2] -= mv_height # lidar view
return gt_boxes, mv_height
def add_sampled_boxes_to_scene(self, data_dict, sampled_gt_boxes, total_valid_sampled_dict):
gt_boxes_mask = data_dict['gt_boxes_mask']
gt_boxes = data_dict['gt_boxes'][gt_boxes_mask]
gt_names = data_dict['gt_names'][gt_boxes_mask]
# points = data_dict['points']
lidar_points = data_dict['lidar_points']
radar_points = data_dict['radar_points']
if self.sampler_cfg.get('USE_ROAD_PLANE', False):
# sampled_gt_boxes, mv_height = self.put_boxes_on_road_planes(
# sampled_gt_boxes, data_dict['road_plane'], data_dict['calib']
# )
data_dict.pop('calib')
# data_dict.pop('road_plane')
obj_points_list = []
for idx, info in enumerate(total_valid_sampled_dict):
file_path = self.root_path / info['path']
obj_points = np.fromfile(str(file_path), dtype=np.float32).reshape(
[-1, self.sampler_cfg.NUM_POINT_FEATURES])
obj_points[:, :3] += info['box3d_lidar'][:3]
# if self.sampler_cfg.get('USE_ROAD_PLANE', False):
# mv height
# obj_points[:, 2] -= mv_height[idx]
obj_points_list.append(obj_points)
obj_points = np.concatenate(obj_points_list, axis=0)
sampled_gt_names = np.array([x['name'] for x in total_valid_sampled_dict])
large_sampled_gt_boxes = box_utils.enlarge_box3d(
sampled_gt_boxes[:, 0:7], extra_width=self.sampler_cfg.REMOVE_EXTRA_WIDTH
)
# points = box_utils.remove_points_in_boxes3d(points, large_sampled_gt_boxes)
lidar_points = box_utils.remove_points_in_boxes3d(lidar_points, large_sampled_gt_boxes)
radar_points = box_utils.remove_points_in_boxes3d(radar_points, large_sampled_gt_boxes)
# points = np.concatenate([obj_points, points], axis=0)
lidar_points = np.concatenate([obj_points, lidar_points], axis=0)
radar_points = np.concatenate([obj_points, radar_points], axis=0)
gt_names = np.concatenate([gt_names, sampled_gt_names], axis=0)
gt_boxes = np.concatenate([gt_boxes, sampled_gt_boxes], axis=0)
data_dict['gt_boxes'] = gt_boxes
data_dict['gt_names'] = gt_names
# data_dict['points'] = points
data_dict['lidar_points'] = lidar_points
data_dict['radar_points'] = radar_points
return data_dict
def __call__(self, data_dict):
"""
Args:
data_dict:
gt_boxes: (N, 7 + C) [x, y, z, dx, dy, dz, heading, ...]
Returns:
"""
gt_boxes = data_dict['gt_boxes']
gt_names = data_dict['gt_names'].astype(str)
existed_boxes = gt_boxes
total_valid_sampled_dict = []
for class_name, sample_group in self.sample_groups.items():
if self.limit_whole_scene:
num_gt = np.sum(class_name == gt_names)
sample_group['sample_num'] = str(int(self.sample_class_num[class_name]) - num_gt)
if int(sample_group['sample_num']) > 0:
sampled_dict = self.sample_with_fixed_number(class_name, sample_group)
sampled_boxes = np.stack([x['box3d_lidar'] for x in sampled_dict], axis=0).astype(np.float32)
if self.sampler_cfg.get('DATABASE_WITH_FAKELIDAR', False):
sampled_boxes = box_utils.boxes3d_kitti_fakelidar_to_lidar(sampled_boxes)
iou1 = iou3d_nms_utils.boxes_bev_iou_cpu(sampled_boxes[:, 0:7], existed_boxes[:, 0:7])
iou2 = iou3d_nms_utils.boxes_bev_iou_cpu(sampled_boxes[:, 0:7], sampled_boxes[:, 0:7])
iou2[range(sampled_boxes.shape[0]), range(sampled_boxes.shape[0])] = 0
iou1 = iou1 if iou1.shape[1] > 0 else iou2
valid_mask = ((iou1.max(axis=1) + iou2.max(axis=1)) == 0).nonzero()[0]
valid_sampled_dict = [sampled_dict[x] for x in valid_mask]
valid_sampled_boxes = sampled_boxes[valid_mask]
existed_boxes = np.concatenate((existed_boxes, valid_sampled_boxes), axis=0)
total_valid_sampled_dict.extend(valid_sampled_dict)
sampled_gt_boxes = existed_boxes[gt_boxes.shape[0]:, :]
if total_valid_sampled_dict.__len__() > 0:
data_dict = self.add_sampled_boxes_to_scene(data_dict, sampled_gt_boxes, total_valid_sampled_dict)
data_dict.pop('gt_boxes_mask')
return data_dict
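# Summary of the flow in __call__ above: extra ground-truth boxes are sampled per
# class from the database, any sampled box whose BEV IoU overlaps an existing or
# previously sampled box is rejected, the cached object points are pasted at the
# accepted box locations, and original lidar/radar points falling inside the
# (slightly enlarged) sampled boxes are removed before the new boxes, names, and
# points are merged back into data_dict.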
|
the-stack_106_26988 | # Copyright (C) 2021 ServiceNow, Inc.
""" Train a mittens model """
import csv
import numpy as np
import pickle
import argparse
from mittens import Mittens
def glove2dict(glove_filename):
with open(glove_filename) as f:
reader = csv.reader(f, delimiter=' ', quoting=csv.QUOTE_NONE)
embed = {line[0]: np.array(list(map(float, line[1:])))
for line in reader}
return embed
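# Note: glove2dict expects the plain-text GloVe format, one token followed by its
# vector components per line, e.g.
#   the 0.418 0.24968 -0.41242 ...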
def run_train(
matrix_filename,
vocab_filename,
max_iter,
original_embeddings_filename,
vector_size,
mittens_filename
):
# Load cooccurrence matrix
M = np.load(matrix_filename)
# Load vocabulary
infile = open(vocab_filename, 'rb')
vocabulary = pickle.load(infile)
infile.close()
# Load pre-trained Glove embeddings
original_embeddings = glove2dict(original_embeddings_filename)
    mittens_model = Mittens(n=vector_size, max_iter=max_iter)
new_embeddings = mittens_model.fit(M, vocab=vocabulary, initial_embedding_dict=original_embeddings)
np.save(mittens_filename, new_embeddings)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--MATRIX_FILENAME', help='input matrix file', required=True)
parser.add_argument('--VOCAB_FILENAME', help='input vocab filename', required=True)
parser.add_argument('--ORIGINAL_EMBEDDINGS_PATH', help='input glove filepath', required=True)
parser.add_argument('--MAX_ITER', help='max iterations', required=True, type=int)
parser.add_argument('--VECTOR_SIZE', help='vector size', required=True, type=int)
parser.add_argument('--MITTENS_FILENAME', help='output filename', required=True)
args = parser.parse_args()
run_train(
matrix_filename=args.MATRIX_FILENAME,
vocab_filename=args.VOCAB_FILENAME,
max_iter=args.MAX_ITER,
original_embeddings_filename=args.ORIGINAL_EMBEDDINGS_PATH,
vector_size=args.VECTOR_SIZE,
mittens_filename=args.MITTENS_FILENAME
)
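# Illustrative invocation (script name and paths are placeholders; the flags come
# from the argparse definition above):
#   python train_mittens.py --MATRIX_FILENAME cooccurrence.npy \
#       --VOCAB_FILENAME vocab.pkl --ORIGINAL_EMBEDDINGS_PATH glove.6B.100d.txt \
#       --MAX_ITER 1000 --VECTOR_SIZE 100 --MITTENS_FILENAME mittens_embeddings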
|
the-stack_106_26990 | """.. Ignore pydocstyle D400.
=============
Resolwe Query
=============
.. autoclass:: resdk.ResolweQuery
:members:
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import copy
import logging
import operator
import six
class ResolweQuery(object):
"""Query resource endpoints.
A Resolwe instance (for example "res") has several endpoints:
res.data, res.collections, res.sample and res.process. Each
endpoint is an instance of the ResolweQuery class. ResolweQuery
supports queries on corresponding objects, for example:
.. code-block:: python
res.data.get(42) # return Data object with ID 42.
res.sample.filter(contributor=1) # return all samples made by contributor 1
    This object is lazily loaded, which means that the actual request is made
    only when needed. This enables composing multiple filters, for example:
.. code-block:: python
res.data.filter(contributor=1).filter(name='My object')
is the same as:
.. code-block:: python
res.data.filter(contributor=1, name='My object')
    This is especially useful because all endpoints on a Resolwe instance
    are such queries and can be filtered further before transferring
    any data.
Filters can be made with the following keywords (and operators)
* Fields (and operators) for **data** endpoint:
* slug (=, __in=)
* contributor (=)
* status (=, __in=)
        * name (=, __in=, __startswith=, __endswith=)
* created (=, __gte, __gt=, __lte=, __lt=, __year__gte=,
__month__gte=,...)
* modified (=, __gte, __gt=, __lte=, __lt=, __year__gte=,
__month__gte=,...)
* input (=)
* descriptor (=)
* started (=, __gte, __gt=, __lte=, __lt=, __year__gte=,
__month__gte=,...)
* finished (=, __gte, __gt=, __lte=, __lt=, __year__gte=,
__month__gte=,...)
* output (=)
* process (=)
* process_name (=, __in=, __startswith=)
* type (=)
* collection (=, __in=)
* Fields (and operators) for **collection** and **sample** endpoint:
* contributor (=)
* name (=, __in=, __startswith=)
* description (=)
* created (=, __gte, __gt=, __lte=, __lt=, __year__gte=,
__month__gte=,...)
* modified (=, __gte, __gt=, __lte=, __lt=, __year__gte=,
__month__gte=,...)
* slug (=, __in=)
* descriptor (=)
* data (=, __in=)
* descriptor_schema (=)
* id (=, __in=)
* Fields (and operators) for **process** endpoint:
* contributor (=)
* name (=, __in=, __startswith=)
* created (=, __gte, __gt=, __lte=, __lt=, __year__gte=,
__month__gte=,...)
* modified (=, __gte, __gt=, __lte=, __lt=, __year__gte=,
__month__gte=,...)
* slug (=, __in=)
* id (=, __in=)
Example usage:
.. code-block:: python
# Get a list of data objects with status set to OK.
res.data.filter(status='OK')
# Get a list of sample objects that contain data object 42 and
# were contributed by contributor with ID 1
res.collection.filter(data=42, contributor=1)
"""
_cache = None
_count = None # number of objects in current query (without applied limit and offset)
_limit = None
_offset = None
_filters = collections.defaultdict(list)
resolwe = None
resource = None
endpoint = None
api = None
logger = None
def __init__(self, resolwe, resource, endpoint=None, slug_field='slug'):
"""Initialize attributes."""
self.resolwe = resolwe
self.resource = resource
self.slug_field = slug_field
# Determine the endpoint to use.
if endpoint is not None:
self.endpoint = endpoint
elif resource.query_endpoint is not None:
self.endpoint = resource.query_endpoint
else:
self.endpoint = resource.endpoint
self.api = operator.attrgetter(self.endpoint)(resolwe.api)
self.logger = logging.getLogger(__name__)
def __getitem__(self, index):
"""Retrieve an item or slice from the set of results."""
# pylint: disable=protected-access
if not isinstance(index, (slice,) + six.integer_types):
raise TypeError
if ((not isinstance(index, slice) and index < 0)
or (isinstance(index, slice) and index.start is not None and index.start < 0)
or (isinstance(index, slice) and index.stop is not None and index.stop < 0)):
raise ValueError("Negative indexing is not supported.")
if isinstance(index, slice) and index.step is not None:
raise ValueError("`step` parameter in slice is not supported")
if self._cache is not None:
return self._cache[index]
new_query = self._clone()
if isinstance(index, slice):
if self._offset or self._limit:
raise NotImplementedError('You cannot slice already sliced query.')
start = 0 if index.start is None else int(index.start)
stop = 1000000 if index.stop is None else int(index.stop) # default to something big
new_query._offset = start
new_query._limit = stop - start
return new_query
new_query._offset = self._offset + index if self._offset else index
new_query._limit = 1
query_list = list(new_query)
if not query_list:
raise IndexError('list index out of range')
return query_list[0]
def __iter__(self):
"""Return iterator over the current object."""
self._fetch()
return iter(self._cache)
def __repr__(self):
"""Return string representation of the current object."""
self._fetch()
rep = '[{}]'.format(
',\n '.join(str(obj).decode('utf-8') if six.PY2 else str(obj) for obj in self._cache)
)
return rep.encode('utf-8') if six.PY2 else rep
def __len__(self):
"""Return length of results of current query."""
return self.count()
def _clone(self):
"""Return copy of current object with empty cache."""
# pylint: disable=protected-access
new_obj = ResolweQuery(self.resolwe, self.resource, self.endpoint)
new_obj._filters = copy.deepcopy(self._filters)
new_obj._limit = self._limit
new_obj._offset = self._offset
return new_obj
def _add_filter(self, filter_):
"""Add filter parameter."""
for key, value in filter_.items():
# 'sample' is called 'entity' in the backend.
key = key.replace('sample', 'entity')
if self.resource.query_method == 'GET':
self._filters[key].append(value)
elif self.resource.query_method == 'POST':
self._filters[key] = value
else:
raise NotImplementedError(
'Unsupported query_method: {}'.format(self.resource.query_method))
def _compose_filters(self):
"""Convert filters to dict and add pagination filters."""
filters = self._filters
if self._limit is not None:
filters['limit'] = self._limit
if self._offset is not None:
filters['offset'] = self._offset
return dict(filters)
def _populate_resource(self, data):
"""Populate resource with given data."""
return self.resource(resolwe=self.resolwe, **data)
def _fetch(self):
"""Make request to the server and populate cache."""
if self._cache is not None:
return # already fetched
filters = self._compose_filters()
if self.resource.query_method == 'GET':
items = self.api.get(**filters)
elif self.resource.query_method == 'POST':
items = self.api.post(filters)
else:
raise NotImplementedError(
'Unsupported query_method: {}'.format(self.resource.query_method))
# Extract data from paginated response
if isinstance(items, dict) and 'results' in items:
self._count = items['count']
items = items['results']
self._cache = [self._populate_resource(data) for data in items]
def clear_cache(self):
"""Clear cache."""
self._cache = None
self._count = None
def count(self):
"""Return number of objects in current query."""
# pylint: disable=protected-access
if self._count is None:
count_query = self._clone()
count_query._offset = 0
count_query._limit = 1
count_query._fetch()
self._count = count_query._count
if self._limit is None:
return self._count
remaining = self._count - self._offset
return max(0, min(self._limit, remaining))
def get(self, *args, **kwargs):
"""Get object that matches given parameters.
If only one non-keyworded argument is given, it is considered
as id if it is number and as slug otherwise.
:param uid: unique identifier - ID or slug
:type uid: int for ID or string for slug
:rtype: object of type self.resource
:raises ValueError: if non-keyworded and keyworded arguments
are combined or if more than one non-keyworded argument is
given
:raises LookupError: if none or more than one objects are
returned
"""
if args:
if len(args) > 1:
raise ValueError('Only one non-keyworded argument can be given')
if kwargs:
raise ValueError('Non-keyworded arguments cannot be combined with keyworded ones.')
arg = args[0]
kwargs = {'id': arg} if str(arg).isdigit() else {self.slug_field: arg}
new_query = self._clone()
new_query._add_filter(kwargs) # pylint: disable=protected-access
response = list(new_query)
if not response:
raise LookupError('Matching object does not exist.')
if len(response) > 1:
raise LookupError('get() returned more than one object.')
return response[0]
def create(self, **model_data):
"""Return new instance of current resource."""
resource = self.resource(self.resolwe, **model_data)
resource.save()
return resource
def post(self, data):
"""Post data to this endpoint.
:param dict data: Data dictionary to post
"""
return self.api.post(data) # pylint: disable=no-member
def filter(self, **filters):
"""Return clone of current query with added given filters."""
new_query = self._clone()
new_query._add_filter(filters) # pylint: disable=protected-access
return new_query
def delete(self, force=False):
"""Delete objects in current query."""
if force is not True:
user_input = six.moves.input(
'Do you really want to delete {} object(s)?[yN] '.format(self.count()))
if user_input.strip().lower() != 'y':
return
# TODO: Use bulk delete when supported on backend
for obj in self:
obj.delete(force=True)
self.clear_cache()
def all(self):
"""Return copy of the current queryset.
This is handy function to get newly created query without any
filters.
"""
return self._clone()
def search(self):
"""Full text search."""
raise NotImplementedError()
|
the-stack_106_26991 | import os
import pickle
import time
from unittest.mock import MagicMock
import pytest
from tools import skip_if_on_windows
from xonsh.commands_cache import (
SHELL_PREDICTOR_PARSER,
CommandsCache,
predict_false,
predict_shell,
predict_true,
)
def test_commands_cache_lazy(xession):
cc = xession.commands_cache
assert not cc.lazyin("xonsh")
assert 0 == len(list(cc.lazyiter()))
assert 0 == cc.lazylen()
def test_predict_threadable_unknown_command(xession):
result = xession.commands_cache.predict_threadable(["command_should_not_found"])
assert isinstance(result, bool)
@pytest.fixture
def commands_cache_tmp(xession, tmp_path, monkeypatch, patch_commands_cache_bins):
xession.env["COMMANDS_CACHE_SAVE_INTERMEDIATE"] = True
return patch_commands_cache_bins(["bin1", "bin2"])
def test_commands_cached_between_runs(commands_cache_tmp, tmp_path, tmpdir):
# 1. no pickle file
# 2. return empty result first and create a thread to populate result
# 3. once the result is available then next call to cc.all_commands returns
cc = commands_cache_tmp
# wait for thread to end
cnt = 0 # timeout waiting for thread
while True:
if cc.all_commands or cnt > 10:
break
cnt += 1
time.sleep(0.1)
assert [b.lower() for b in cc.all_commands.keys()] == ["bin1", "bin2"]
files = tmp_path.glob("*.pickle")
assert len(list(files)) == 1
# cleanup dir
for file in files:
os.remove(file)
def test_commands_cache_uses_pickle_file(commands_cache_tmp, tmp_path, monkeypatch):
cc = commands_cache_tmp
update_cmds_cache = MagicMock()
monkeypatch.setattr(cc, "_update_cmds_cache", update_cmds_cache)
file = tmp_path / CommandsCache.CACHE_FILE
bins = {
"bin1": (
"/some-path/bin1",
None,
),
"bin2": (
"/some-path/bin2",
None,
),
}
file.write_bytes(pickle.dumps(bins))
assert str(cc.cache_file) == str(file)
assert cc.all_commands == bins
assert cc._loaded_pickled
TRUE_SHELL_ARGS = [
["-c", "yo"],
["-c=yo"],
["file"],
["-i", "-l", "file"],
["-i", "-c", "yo"],
["-i", "file"],
["-i", "-c", "yo", "file"],
]
@pytest.mark.parametrize("args", TRUE_SHELL_ARGS)
def test_predict_shell_parser(args):
ns, unknown = SHELL_PREDICTOR_PARSER.parse_known_args(args)
if ns.filename is not None:
assert not ns.filename.startswith("-")
@pytest.mark.parametrize("args", TRUE_SHELL_ARGS)
def test_predict_shell_true(args):
assert predict_shell(args)
FALSE_SHELL_ARGS = [[], ["-c"], ["-i"], ["-i", "-l"]]
@pytest.mark.parametrize("args", FALSE_SHELL_ARGS)
def test_predict_shell_false(args):
assert not predict_shell(args)
PATTERN_BIN_USING_TTY_OR_NOT = [
(
False,
{10: b"isnotatty"},
),
(
False,
{12: b"isatty"},
),
(
False,
{151: b"gpm"},
),
(
False,
{10: b"isatty", 100: b"tcgetattr"},
),
(
False,
{10: b"isatty", 100: b"tcsetattr"},
),
(
True,
{10: b"isatty", 100: b"tcsetattr", 1000: b"tcgetattr"},
),
(
True,
{1000: b"libncurses"},
),
(
True,
{4094: b"libgpm"},
),
(
True,
{2045: b"tcgetattr", 4095: b"tcgetattr", 6140: b"tcsetattr", 8190: b"isatty"},
),
]
@pytest.mark.parametrize("args", PATTERN_BIN_USING_TTY_OR_NOT)
@skip_if_on_windows
def test_commands_cache_predictor_default(args, xession, tmp_path):
use_tty, patterns = args
file = tmp_path / "testfile"
where = list(patterns.keys())
where.sort()
with file.open("wb") as f:
pos = 0
for w in where:
f.write(b"\x20" * (w - pos))
f.write(patterns[w])
pos = w + len(patterns[w])
f.write(b"\x20" * (pos // 2))
result = xession.commands_cache.default_predictor_readbin(
"", str(file), timeout=1, failure=None
)
expected = predict_false if use_tty else predict_true
assert result == expected
@skip_if_on_windows
def test_cd_is_only_functional_alias(xession):
xession.aliases["cd"] = lambda args: os.chdir(args[0])
xession.env["PATH"] = []
assert xession.commands_cache.is_only_functional_alias("cd")
def test_non_exist_is_only_functional_alias(xession):
assert not xession.commands_cache.is_only_functional_alias(
"<not really a command name>"
)
@skip_if_on_windows
def test_bash_is_only_functional_alias(xession):
assert not xession.commands_cache.is_only_functional_alias("bash")
@skip_if_on_windows
def test_bash_and_is_alias_is_only_functional_alias(xession):
xession.aliases["bash"] = lambda args: os.chdir(args[0])
assert not xession.commands_cache.is_only_functional_alias("bash")
|
the-stack_106_26995 | from django.urls import path
from .views import *
urlpatterns = [
path('', deteksi_mandiri_view, name='deteksi-mandiri'),
path('<pk>/', quiz_view, name='quiz-view'),
path('<pk>/data', quiz_data_view, name='quiz-data-view'),
path('<pk>/save', save_quiz_view, name='save-quiz-view'),
path('delete/<pk>/', delete_quiz, name='delete-quiz'),
path('edit/<pk>/', edit_quiz, name='edit-quiz'),
path('create-quiz/', create_quiz, name='create-quiz'),
path('edit-questions/<pk>', edit_questions, name='edit-question' ),
path('see-questions/<pk>', see_questions, name='see-questions'),
path('see-questions/<pk>/delete/<pk2>', delete_questions, name='delete-questions'),
path('see-questions/<pk>/edit/<pk2>', edit_answers, name='edit-answers'),
]
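# Illustrative sketch of resolving these routes by name (quiz ids are made up;
# the resulting paths are relative to wherever this URLconf is included):
#
#     from django.urls import reverse
#     reverse('quiz-view', kwargs={'pk': 3})                   # '<prefix>/3/'
#     reverse('delete-questions', kwargs={'pk': 3, 'pk2': 7})  # '<prefix>/see-questions/3/delete/7'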
|
the-stack_106_26996 | # encoding: utf-8
from functools import reduce
import operator
import warnings
from haystack import connection_router, connections
from haystack.backends import SQ
from haystack.constants import DEFAULT_OPERATOR, ITERATOR_LOAD_PER_QUERY
from haystack.exceptions import NotHandled
from haystack.inputs import AutoQuery, Raw
from haystack.utils import log as logging
class SearchQuerySet(object):
"""
Provides a way to specify search parameters and lazily load results.
Supports chaining (a la QuerySet) to narrow the search.
"""
def __init__(self, using=None, query=None):
# ``_using`` should only ever be a value other than ``None`` if it's
# been forced with the ``.using`` method.
self._using = using
self.query = None
self._determine_backend()
# If ``query`` is present, it should override even what the routers
# think.
if query is not None:
self.query = query
self._result_cache = []
self._result_count = None
self._cache_full = False
self._load_all = False
self._ignored_result_count = 0
self.log = logging.getLogger("haystack")
def _determine_backend(self):
# A backend has been manually selected. Use it instead.
if self._using is not None:
self.query = connections[self._using].get_query()
return
# No backend, so rely on the routers to figure out what's right.
hints = {}
if self.query:
hints["models"] = self.query.models
backend_alias = connection_router.for_read(**hints)
# The ``SearchQuery`` might swap itself out for a different variant
# here.
if self.query:
self.query = self.query.using(backend_alias)
else:
self.query = connections[backend_alias].get_query()
def __getstate__(self):
"""
For pickling.
"""
len(self)
obj_dict = self.__dict__.copy()
obj_dict["_iter"] = None
obj_dict["log"] = None
return obj_dict
def __setstate__(self, data_dict):
"""
For unpickling.
"""
self.__dict__ = data_dict
self.log = logging.getLogger("haystack")
def __repr__(self):
return "<SearchQuerySet: query=%r, using=%r>" % (self.query, self._using)
def __len__(self):
if self._result_count is None:
self._result_count = self.query.get_count()['value']
# Some backends give weird, false-y values here. Convert to zero.
if not self._result_count:
self._result_count = 0
# This needs to return the actual number of hits, not what's in the cache.
return self._result_count - self._ignored_result_count
def __iter__(self):
if self._cache_is_full():
# We've got a fully populated cache. Let Python do the hard work.
return iter(self._result_cache)
return self._manual_iter()
def __and__(self, other):
if isinstance(other, EmptySearchQuerySet):
return other._clone()
combined = self._clone()
combined.query.combine(other.query, SQ.AND)
return combined
def __or__(self, other):
combined = self._clone()
if isinstance(other, EmptySearchQuerySet):
return combined
combined.query.combine(other.query, SQ.OR)
return combined
def _cache_is_full(self):
if not self.query.has_run():
return False
if len(self) <= 0:
return True
try:
self._result_cache.index(None)
return False
except ValueError:
# No ``None``s found in the results. Check the length of the cache.
return len(self._result_cache) > 0
def _manual_iter(self):
# If we're here, our cache isn't fully populated.
# For efficiency, fill the cache as we go if we run out of results.
# Also, this can't be part of the __iter__ method due to Python's rules
# about generator functions.
current_position = 0
current_cache_max = 0
while True:
if len(self._result_cache) > 0:
try:
current_cache_max = self._result_cache.index(None)
except ValueError:
current_cache_max = len(self._result_cache)
while current_position < current_cache_max:
yield self._result_cache[current_position]
current_position += 1
if self._cache_is_full():
return
# We've run out of results and haven't hit our limit.
# Fill more of the cache.
if not self._fill_cache(
current_position, current_position + ITERATOR_LOAD_PER_QUERY
):
return
def post_process_results(self, results):
to_cache = []
# Check if we wish to load all objects.
if self._load_all:
models_pks = {}
loaded_objects = {}
# Remember the search position for each result so we don't have to resort later.
for result in results:
models_pks.setdefault(result.model, []).append(result.pk)
# Load the objects for each model in turn.
for model in models_pks:
loaded_objects[model] = self._load_model_objects(
model, models_pks[model]
)
for result in results:
if self._load_all:
model_objects = loaded_objects.get(result.model, {})
# Try to coerce a primary key object that matches the models pk
# We have to deal with semi-arbitrary keys being cast from strings (UUID, int, etc)
if model_objects:
result_klass = type(next(iter(model_objects)))
result.pk = result_klass(result.pk)
try:
result._object = model_objects[result.pk]
except KeyError:
# The object was either deleted since we indexed or should
# be ignored for other reasons such as an overridden 'load_all_queryset';
# fail silently.
self._ignored_result_count += 1
# avoid an unfilled None at the end of the result cache
self._result_cache.pop()
continue
else:
# No objects were returned -- possible due to SQS nesting such as
# XYZ.objects.filter(id__gt=10) where the number of ignored results is
# exactly equal to ITERATOR_LOAD_PER_QUERY
del self._result_cache[: len(results)]
self._ignored_result_count += len(results)
break
to_cache.append(result)
return to_cache
def _load_model_objects(self, model, pks):
try:
ui = connections[self.query._using].get_unified_index()
index = ui.get_index(model)
objects = index.read_queryset(using=self.query._using)
return objects.in_bulk(pks)
except NotHandled:
self.log.warning("Model '%s' not handled by the routers.", model)
# Revert to old behaviour
return model._default_manager.in_bulk(pks)
def _fill_cache(self, start, end, **kwargs):
# Tell the query where to start from and how many we'd like.
self.query._reset()
if start is None:
start = 0
query_start = start
query_start += self._ignored_result_count
query_end = end
if query_end is not None:
query_end += self._ignored_result_count
self.query.set_limits(query_start, query_end)
results = self.query.get_results(**kwargs)
if results is None or len(results) == 0:
# trim missing stuff from the result cache
self._result_cache = self._result_cache[:start]
return False
# Setup the full cache now that we know how many results there are.
# We need the ``None``s as placeholders to know what parts of the
# cache we have/haven't filled.
# Using ``None`` like this takes up very little memory. In testing,
# an array of 100,000 ``None``s consumed less than .5 Mb, which ought
# to be an acceptable loss for consistent and more efficient caching.
if len(self._result_cache) == 0:
self._result_cache = [None] * self.query.get_count()['value']
fill_start, fill_end = start, end
if fill_end is None:
fill_end = self.query.get_count()['value']
cache_start = fill_start
while True:
to_cache = self.post_process_results(results)
# Assign by slice.
self._result_cache[cache_start : cache_start + len(to_cache)] = to_cache
if None in self._result_cache[start:end]:
fill_start = fill_end
fill_end += ITERATOR_LOAD_PER_QUERY
cache_start += len(to_cache)
# Tell the query where to start from and how many we'd like.
self.query._reset()
self.query.set_limits(fill_start, fill_end)
results = self.query.get_results()
if results is None or len(results) == 0:
# No more results. Trim missing stuff from the result cache
self._result_cache = self._result_cache[:cache_start]
break
else:
break
return True
def __getitem__(self, k):
"""
Retrieves an item or slice from the set of results.
"""
if not isinstance(k, (slice, int)):
raise TypeError
assert (not isinstance(k, slice) and (k >= 0)) or (
isinstance(k, slice)
and (k.start is None or k.start >= 0)
and (k.stop is None or k.stop >= 0)
), "Negative indexing is not supported."
# Remember if it's a slice or not. We're going to treat everything as
# a slice to simplify the logic and will `.pop()` at the end as needed.
if isinstance(k, slice):
is_slice = True
start = k.start
if k.stop is not None:
bound = int(k.stop)
else:
bound = None
else:
is_slice = False
start = k
bound = k + 1
# We need to check whether we need to populate more of the cache.
if len(self._result_cache) <= 0 or (
None in self._result_cache[start:bound] and not self._cache_is_full()
):
try:
self._fill_cache(start, bound)
except StopIteration:
# There's nothing left, even though the bound is higher.
pass
# Cache should be full enough for our needs.
if is_slice:
return self._result_cache[start:bound]
else:
return self._result_cache[start]
# Methods that return a SearchQuerySet.
def all(self):
"""Returns all results for the query."""
return self._clone()
def none(self):
"""Returns an empty result list for the query."""
return self._clone(klass=EmptySearchQuerySet)
def filter(self, *args, **kwargs):
"""Narrows the search based on certain attributes and the default operator."""
if DEFAULT_OPERATOR == "OR":
return self.filter_or(*args, **kwargs)
else:
return self.filter_and(*args, **kwargs)
def exclude(self, *args, **kwargs):
"""Narrows the search by ensuring certain attributes are not included."""
clone = self._clone()
clone.query.add_filter(~SQ(*args, **kwargs))
return clone
def filter_and(self, *args, **kwargs):
"""Narrows the search by looking for (and including) certain attributes."""
clone = self._clone()
clone.query.add_filter(SQ(*args, **kwargs))
return clone
def filter_or(self, *args, **kwargs):
"""Narrows the search by ensuring certain attributes are not included."""
clone = self._clone()
clone.query.add_filter(SQ(*args, **kwargs), use_or=True)
return clone
def order_by(self, *args):
"""Alters the order in which the results should appear."""
clone = self._clone()
for field in args:
clone.query.add_order_by(field)
return clone
def highlight(self, **kwargs):
"""Adds highlighting to the results."""
clone = self._clone()
clone.query.add_highlight(**kwargs)
return clone
def models(self, *models):
"""Accepts an arbitrary number of Model classes to include in the search."""
clone = self._clone()
for model in models:
if (
model
not in connections[self.query._using]
.get_unified_index()
.get_indexed_models()
):
warnings.warn("The model %r is not registered for search." % (model,))
clone.query.add_model(model)
return clone
def result_class(self, klass):
"""
Allows specifying a different class to use for results.
Overrides any previous usages. If ``None`` is provided, Haystack will
revert back to the default ``SearchResult`` object.
"""
clone = self._clone()
clone.query.set_result_class(klass)
return clone
def boost(self, term, boost):
"""Boosts a certain aspect of the query."""
clone = self._clone()
clone.query.add_boost(term, boost)
return clone
def facet(self, field, **options):
"""Adds faceting to a query for the provided field."""
clone = self._clone()
clone.query.add_field_facet(field, **options)
return clone
def within(self, field, point_1, point_2):
"""Spatial: Adds a bounding box search to the query."""
clone = self._clone()
clone.query.add_within(field, point_1, point_2)
return clone
def dwithin(self, field, point, distance):
"""Spatial: Adds a distance-based search to the query."""
clone = self._clone()
clone.query.add_dwithin(field, point, distance)
return clone
def stats(self, field):
"""Adds stats to a query for the provided field."""
return self.stats_facet(field, facet_fields=None)
def stats_facet(self, field, facet_fields=None):
"""Adds stats facet for the given field and facet_fields represents
the faceted fields."""
clone = self._clone()
stats_facets = []
try:
stats_facets.append(sum(facet_fields, []))
except TypeError:
if facet_fields:
stats_facets.append(facet_fields)
clone.query.add_stats_query(field, stats_facets)
return clone
def distance(self, field, point):
"""
Spatial: Denotes results must have distance measurements from the
provided point.
"""
clone = self._clone()
clone.query.add_distance(field, point)
return clone
def date_facet(self, field, start_date, end_date, gap_by, gap_amount=1):
"""Adds faceting to a query for the provided field by date."""
clone = self._clone()
clone.query.add_date_facet(
field, start_date, end_date, gap_by, gap_amount=gap_amount
)
return clone
def query_facet(self, field, query):
"""Adds faceting to a query for the provided field with a custom query."""
clone = self._clone()
clone.query.add_query_facet(field, query)
return clone
def narrow(self, query):
"""Pushes existing facet choices into the search."""
if isinstance(query, SQ):
# produce query string using empty query of the same class
empty_query = self.query._clone()
empty_query._reset()
query = query.as_query_string(empty_query.build_query_fragment)
clone = self._clone()
clone.query.add_narrow_query(query)
return clone
def raw_search(self, query_string, **kwargs):
"""Passes a raw query directly to the backend."""
return self.filter(content=Raw(query_string, **kwargs))
def load_all(self):
"""Efficiently populates the objects in the search results."""
clone = self._clone()
clone._load_all = True
return clone
def auto_query(self, query_string, fieldname="content"):
"""
Performs a best guess constructing the search query.
This method is somewhat naive but works well enough for the simple,
common cases.
"""
kwargs = {fieldname: AutoQuery(query_string)}
return self.filter(**kwargs)
def autocomplete(self, **kwargs):
"""
A shortcut method to perform an autocomplete search.
Must be run against fields that are either ``NgramField`` or
``EdgeNgramField``.
"""
clone = self._clone()
query_bits = []
for field_name, query in kwargs.items():
for word in query.split(" "):
bit = clone.query.clean(word.strip())
if bit:
kwargs = {field_name: bit}
query_bits.append(SQ(**kwargs))
return clone.filter(reduce(operator.__and__, query_bits))
def using(self, connection_name):
"""
Allows switching which connection the ``SearchQuerySet`` uses to
search in.
"""
clone = self._clone()
clone.query = self.query.using(connection_name)
clone._using = connection_name
return clone
# Methods that do not return a SearchQuerySet.
def count(self):
"""Returns the total number of matching results."""
return len(self)
def best_match(self):
"""Returns the best/top search result that matches the query."""
return self[0]
def latest(self, date_field):
"""Returns the most recent search result that matches the query."""
clone = self._clone()
clone.query.clear_order_by()
clone.query.add_order_by("-%s" % date_field)
return clone.best_match()
def more_like_this(self, model_instance):
"""Finds similar results to the object passed in."""
clone = self._clone()
clone.query.more_like_this(model_instance)
return clone
def facet_counts(self):
"""
Returns the facet counts found by the query.
This will cause the query to execute and should generally be used when
presenting the data.
"""
if self.query.has_run():
return self.query.get_facet_counts()
else:
clone = self._clone()
return clone.query.get_facet_counts()
def stats_results(self):
"""
Returns the stats results found by the query.
"""
if self.query.has_run():
return self.query.get_stats()
else:
clone = self._clone()
return clone.query.get_stats()
def set_spelling_query(self, spelling_query):
"""Set the exact text to be used to generate spelling suggestions
When making complicated queries, such as the alt parser mechanism
used by Solr dismax/edismax, this provides a convenient way to set
a simple text string which will be used to generate spelling
suggestions without including unnecessary syntax.
"""
clone = self._clone()
clone.query.set_spelling_query(spelling_query)
return clone
def spelling_suggestion(self, preferred_query=None):
"""
Returns the spelling suggestion found by the query.
To work, you must set ``INCLUDE_SPELLING`` within your connection's
settings dictionary to ``True``. Otherwise, ``None`` will be returned.
This will cause the query to execute and should generally be used when
presenting the data.
"""
if self.query.has_run():
return self.query.get_spelling_suggestion(preferred_query)
else:
clone = self._clone()
return clone.query.get_spelling_suggestion(preferred_query)
def values(self, *fields):
"""
Returns a list of dictionaries, each containing the key/value pairs for
the result, exactly like Django's ``ValuesQuerySet``.
"""
qs = self._clone(klass=ValuesSearchQuerySet)
qs._fields.extend(fields)
return qs
def values_list(self, *fields, **kwargs):
"""
Returns a list of field values as tuples, exactly like Django's
``QuerySet.values_list``.
Optionally accepts a ``flat=True`` kwarg, which in the case of a
single field being provided, will return a flat list of that field
rather than a list of tuples.
"""
flat = kwargs.pop("flat", False)
if flat and len(fields) > 1:
raise TypeError(
"'flat' is not valid when values_list is called with more than one field."
)
qs = self._clone(klass=ValuesListSearchQuerySet)
qs._fields.extend(fields)
qs._flat = flat
return qs
# Utility methods.
def _clone(self, klass=None):
if klass is None:
klass = self.__class__
query = self.query._clone()
clone = klass(query=query)
clone._load_all = self._load_all
return clone
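# Illustrative sketch of typical chained usage; the model passed in and the
# "pub_date"/"content" field names are placeholders for whatever the search
# index actually defines, not guarantees of this module.
def _example_chained_search(model, text):
    """Build a lazily-evaluated, chained query for ``text`` against ``model``."""
    sqs = (
        SearchQuerySet()
        .models(model)              # restrict to one indexed model
        .auto_query(text)           # best-guess parsing of raw user input
        .exclude(content="spam")    # drop unwanted matches
        .order_by("-pub_date")      # assumed date field on the index
        .load_all()                 # also fetch the database objects
    )
    return sqs[:10]                 # slicing fills the result cache lazily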
class EmptySearchQuerySet(SearchQuerySet):
"""
A stubbed SearchQuerySet that behaves as normal but always returns no
results.
"""
def __len__(self):
return 0
def _cache_is_full(self):
# Pretend the cache is always full with no results.
return True
def _clone(self, klass=None):
clone = super(EmptySearchQuerySet, self)._clone(klass=klass)
clone._result_cache = []
return clone
def _fill_cache(self, start, end):
return False
def facet_counts(self):
return {}
class ValuesListSearchQuerySet(SearchQuerySet):
"""
A ``SearchQuerySet`` which returns a list of field values as tuples, exactly
like Django's ``ValuesListQuerySet``.
"""
def __init__(self, *args, **kwargs):
super(ValuesListSearchQuerySet, self).__init__(*args, **kwargs)
self._flat = False
self._fields = []
# Removing this dependency would require refactoring much of the backend
# code (_process_results, etc.) and these aren't large enough to make it
# an immediate priority:
self._internal_fields = ["id", "django_ct", "django_id", "score"]
def _clone(self, klass=None):
clone = super(ValuesListSearchQuerySet, self)._clone(klass=klass)
clone._fields = self._fields
clone._flat = self._flat
return clone
def _fill_cache(self, start, end):
query_fields = set(self._internal_fields)
query_fields.update(self._fields)
kwargs = {"fields": query_fields}
return super(ValuesListSearchQuerySet, self)._fill_cache(start, end, **kwargs)
def post_process_results(self, results):
to_cache = []
if self._flat:
accum = to_cache.extend
else:
accum = to_cache.append
for result in results:
accum([getattr(result, i, None) for i in self._fields])
return to_cache
class ValuesSearchQuerySet(ValuesListSearchQuerySet):
"""
A ``SearchQuerySet`` which returns a list of dictionaries, each containing
the key/value pairs for the result, exactly like Django's
``ValuesQuerySet``.
"""
def _fill_cache(self, start, end):
query_fields = set(self._internal_fields)
query_fields.update(self._fields)
kwargs = {"fields": query_fields}
return super(ValuesListSearchQuerySet, self)._fill_cache(start, end, **kwargs)
def post_process_results(self, results):
to_cache = []
for result in results:
to_cache.append(dict((i, getattr(result, i, None)) for i in self._fields))
return to_cache
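# Illustrative sketch of the values()/values_list() variants above; "title" is a
# placeholder for any field stored in the search index.
def _example_values_usage():
    """Return lightweight results instead of full SearchResult objects."""
    as_dicts = SearchQuerySet().values("title")                     # yields dicts
    as_tuples = SearchQuerySet().values_list("title", "score")      # yields tuples
    flat_titles = SearchQuerySet().values_list("title", flat=True)  # yields bare values
    return as_dicts, as_tuples, flat_titles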
class RelatedSearchQuerySet(SearchQuerySet):
"""
A variant of the SearchQuerySet that can handle `load_all_queryset`s.
"""
def __init__(self, *args, **kwargs):
super(RelatedSearchQuerySet, self).__init__(*args, **kwargs)
self._load_all_querysets = {}
self._result_cache = []
def _load_model_objects(self, model, pks):
if model in self._load_all_querysets:
# Use the overriding queryset.
return self._load_all_querysets[model].in_bulk(pks)
else:
# Check the SearchIndex for the model for an override.
try:
ui = connections[self.query._using].get_unified_index()
index = ui.get_index(model)
qs = index.load_all_queryset()
return qs.in_bulk(pks)
except NotHandled:
# The model returned doesn't seem to be handled by the
# routers. We should silently fail and populate
# nothing for those objects.
return {}
def load_all_queryset(self, model, queryset):
"""
Allows for specifying a custom ``QuerySet`` that changes how ``load_all``
will fetch records for the provided model.
This is useful for post-processing the results from the query, enabling
things like adding ``select_related`` or filtering certain data.
"""
clone = self._clone()
clone._load_all_querysets[model] = queryset
return clone
def _clone(self, klass=None):
clone = super(RelatedSearchQuerySet, self)._clone(klass=klass)
clone._load_all_querysets = self._load_all_querysets
return clone
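# Illustrative sketch of load_all_queryset(); the model and the select_related()
# call are placeholders showing how a per-model queryset override is wired in.
def _example_related_search(model):
    """Attach a custom queryset used when ``load_all`` fetches ``model`` rows."""
    return (
        RelatedSearchQuerySet()
        .load_all_queryset(model, model._default_manager.select_related("author"))
        .load_all()
    )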
|
the-stack_106_27000 | import torch
def vis_density(model,bbox, L= 32):
maxs = torch.max(bbox, dim=0).values
mins = torch.min(bbox, dim=0).values
x = torch.linspace(mins[0],maxs[0],steps=L).cuda()
y = torch.linspace(mins[1],maxs[1],steps=L).cuda()
z = torch.linspace(mins[2],maxs[2],steps=L).cuda()
grid_x ,grid_y,grid_z = torch.meshgrid(x, y,z)
xyz = torch.stack([grid_x ,grid_y,grid_z], dim = -1) #(L,L,L,3)
xyz = xyz.reshape((-1,3)) #(L*L*L,3)
xyzs = xyz.split(5000, dim=0)
sigmas = []
for i in xyzs:
with torch.no_grad():
_,density = model.spacenet_fine(i, None, model.maxs, model.mins) #(L*L*L,1)
density = torch.nn.functional.relu(density)
sigmas.append(density.detach().cpu())
sigmas = torch.cat(sigmas, dim=0)
return sigmas |
the-stack_106_27002 | #!/usr/bin/env python
__all__ = ['tucao_download']
from ..common import *
# import re
import random
import time
from xml.dom import minidom
#possible raw list types
#1. <li>type=tudou&vid=199687639</li>
#2. <li>type=tudou&vid=199506910|</li>
#3. <li>type=video&file=http://xiaoshen140731.qiniudn.com/lovestage04.flv|</li>
#4 may ? <li>type=video&file=http://xiaoshen140731.qiniudn.com/lovestage04.flv|xx**type=&vid=?</li>
#5. <li>type=tudou&vid=200003098|07**type=tudou&vid=200000350|08</li>
#6. <li>vid=49454694&type=sina|</li>
#7. <li>type=189&vid=513031813243909|</li>
# re_pattern=re.compile(r"(type=(.+?)&(vid|file)=(.*?))[\|<]")
def tucao_single_download(type_link, title, output_dir=".", merge=True, info_only=False, **kwargs):
if "file" in type_link:
url=type_link[type_link.find("file=")+5:]
vtype, ext, size=url_info(url)
print_info(site_info, title, vtype, size)
if not info_only:
return download_urls([url], title, ext, size, output_dir, **kwargs)
#fix for 189 video source, see raw list types 7
elif "189" in type_link:
vid = match1(type_link, r"vid=(\d+)")
assert vid, "vid not exsits"
url = "http://api.tucao.tv/api/down/{}".format(vid)
vtype, ext, size=url_info(url)
print_info(site_info, title, vtype, size)
if not info_only:
return download_urls([url], title, ext, size, output_dir, **kwargs)
else:
u="http://www.tucao.tv/api/playurl.php?{}&key=tucao{:07x}.cc&r={}".format(type_link,random.getrandbits(28),int(time.time()*1000))
xml=minidom.parseString(get_content(u))
urls=[]
size=0
for i in xml.getElementsByTagName("url"):
urls.append(i.firstChild.nodeValue)
vtype, ext, _size=url_info(i.firstChild.nodeValue)
size+=_size
print_info(site_info, title, vtype, size)
if not info_only:
return download_urls(urls, title, ext, size, output_dir, **kwargs)
def tucao_download(url, output_dir=".", merge=True, info_only=False, **kwargs):
html=get_content(url)
title=match1(html,r'<h1 class="show_title">(.*?)<\w')
#fix for raw list that vid goes before type, see raw list types 6
raw_list=match1(html,r"<li>\s*(type=.+?|vid=.+?)</li>")
raw_l=raw_list.split("**")
if len(raw_l)==1:
format_link=raw_l[0][:-1] if raw_l[0].endswith("|") else raw_l[0]
return tucao_single_download(format_link,title,output_dir,merge,info_only, **kwargs)
else:
for i in raw_l:
format_link,sub_title=i.split("|")
tucao_single_download(format_link, title + "-" + sub_title, output_dir, merge, info_only, **kwargs)
site_info = "tucao.tv"
download = tucao_download
download_playlist = playlist_not_supported("tucao")
|
the-stack_106_27003 | # -*- coding: utf-8 -*-
"""
test_data
~~~~~~~~~
Test `data` module for `mrtool` package.
"""
import numpy as np
import pandas as pd
import xarray as xr
import pytest
from mrtool import MRData
@pytest.fixture()
def df():
num_obs = 5
df = pd.DataFrame({
'obs': np.random.randn(num_obs),
'obs_se': np.random.rand(num_obs) + 0.01,
'cov0': np.random.randn(num_obs),
'cov1': np.random.randn(num_obs),
'cov2': np.random.randn(num_obs),
})
return df
@pytest.fixture()
def xarray():
example_dataset = xr.Dataset({
"y":
xr.DataArray(
np.random.random([2, 2]),
dims=["age_group_id", "location_id"],
name="random_met_need",
coords={"age_group_id": [2, 3],
"location_id": [6, 102]}),
"y_se":
xr.DataArray(
np.ones([2, 2]),
dims=["age_group_id", "location_id"],
name="random_met_need",
coords={"age_group_id": [2, 3],
"location_id": [6, 102]}),
"sdi":
xr.DataArray(
np.ones([2, 2])*5,
dims=["age_group_id", "location_id"],
name="random_education",
coords={"age_group_id": [2, 3],
"location_id": [6, 102]}),
"sdi_se":
xr.DataArray(
np.ones([2, 2])*0,
dims=["age_group_id", "location_id"],
name="random_education",
coords={"age_group_id": [2, 3],
"location_id": [6, 102]}),
})
return example_dataset
@pytest.fixture
def data(df):
df['study_id'] = np.array([0, 0, 1, 1, 2])
d = MRData()
d.load_df(
df,
col_obs='obs',
col_obs_se='obs_se',
col_covs=[f'cov{i}' for i in range(3)],
col_study_id='study_id'
)
return d
@pytest.mark.parametrize('obs', ['obs', None])
@pytest.mark.parametrize('obs_se', ['obs_se', None])
def test_obs(df, obs, obs_se):
d = MRData()
d.load_df(df,
col_obs=obs,
col_obs_se=obs_se,
col_covs=['cov0', 'cov1', 'cov2'])
assert d.obs.size == df.shape[0]
assert d.obs_se.size == df.shape[0]
if obs is None:
assert all(np.isnan(d.obs))
@pytest.mark.parametrize('covs', [None,
['cov0', 'cov1', 'cov2']])
def test_covs(df, covs):
d = MRData()
d.load_df(df,
col_obs='obs',
col_obs_se='obs_se',
col_covs=covs)
num_covs = 0 if covs is None else len(covs)
num_covs += 1
assert d.num_covs == num_covs
@pytest.mark.parametrize('study_id', [None, np.array([0, 0, 1, 1, 2])])
def test_study_id(df, study_id):
if study_id is not None:
df['study_id'] = study_id
col_study_id = 'study_id'
else:
col_study_id = None
d = MRData()
d.load_df(df,
col_obs='obs',
col_obs_se='obs_se',
col_covs=['cov0', 'cov1', 'cov2'],
col_study_id=col_study_id)
if col_study_id is None:
assert np.all(d.study_id == 'Unknown')
assert d.num_studies == 1
assert d.studies[0] == 'Unknown'
else:
assert np.allclose(d.study_id, np.array([0, 0, 1, 1, 2]))
assert d.num_studies == 3
assert np.allclose(d.studies, np.array([0, 1, 2]))
assert np.allclose(d.study_sizes, np.array([2, 2, 1]))
@pytest.mark.parametrize('study_id', [None,
np.array([0, 0, 1, 1, 2]),
np.array([2, 0, 0, 1, 1])])
def test_data_id(df, study_id):
if study_id is not None:
df['study_id'] = study_id
col_study_id = 'study_id'
else:
col_study_id = None
d = MRData()
d.load_df(df,
col_obs='obs',
col_obs_se='obs_se',
col_covs=['cov0', 'cov1', 'cov2'],
col_study_id=col_study_id)
d._sort_by_data_id()
assert np.allclose(d.obs, df['obs'])
assert np.allclose(d.obs_se, df['obs_se'])
for i in range(3):
assert np.allclose(d.covs[f'cov{i}'], df[f'cov{i}'])
def test_is_empty(df):
d = MRData()
assert d.is_empty()
d.load_df(df,
col_obs='obs',
col_obs_se='obs_se',
col_covs=['cov0', 'cov1', 'cov2'])
assert not d.is_empty()
d.reset()
assert d.is_empty()
def test_assert_not_empty():
d = MRData()
with pytest.raises(ValueError):
d._assert_not_empty()
def test_has_covs(df):
d = MRData()
d.load_df(df,
col_obs='obs',
col_obs_se='obs_se',
col_covs=['cov0', 'cov1', 'cov2'])
assert d.has_covs(['cov0'])
assert d.has_covs(['cov0', 'cov1'])
assert not d.has_covs(['cov3'])
def test_assert_has_covs(df):
d = MRData()
d.load_df(df,
col_obs='obs',
col_obs_se='obs_se',
col_covs=['cov0', 'cov1', 'cov2'])
with pytest.raises(ValueError):
d._assert_has_covs('cov3')
def test_get_covs(df):
d = MRData()
d.load_df(df,
col_obs='obs',
col_obs_se='obs_se',
col_covs=['cov0', 'cov1', 'cov2'])
for cov_name in ['cov0', 'cov1', 'cov2']:
assert np.allclose(d.get_covs(cov_name), df[cov_name].to_numpy()[:, None])
cov_mat = d.get_covs(['cov0', 'cov1', 'cov2'])
assert np.allclose(cov_mat, df[['cov0', 'cov1', 'cov2']].to_numpy())
@pytest.mark.parametrize('covs', [None, 'cov0', ['cov0', 'cov1']])
def test_normalize_covs(df, covs):
d = MRData()
d.load_df(df,
col_obs='obs',
col_obs_se='obs_se',
col_covs=['cov0', 'cov1', 'cov2'])
d.normalize_covs(covs)
assert d.is_cov_normalized(covs)
@pytest.mark.parametrize('covs', [['cov0', 'cov1']])
def test_remove_nan_in_covs(df, covs):
df.loc[:0, covs] = np.nan
d = MRData()
with pytest.warns(Warning):
d.load_df(df,
col_obs='obs',
col_obs_se='obs_se',
col_covs=covs)
assert d.num_obs == df.shape[0] - 1
def test_load_xr(xarray):
d = MRData()
d.load_xr(xarray,
var_obs='y',
var_obs_se='y_se',
var_covs=['sdi'],
coord_study_id='location_id')
assert np.allclose(np.sort(d.obs), np.sort(xarray['y'].data.flatten()))
assert np.allclose(np.sort(d.obs_se), np.sort(xarray['y_se'].data.flatten()))
assert np.allclose(np.sort(d.covs['sdi']), np.sort(xarray['sdi'].data.flatten()))
assert np.allclose(np.sort(d.studies), np.sort(xarray.coords['location_id']))
@pytest.mark.parametrize('index', [np.array([True, True, False, False, False]),
np.array([0, 1])])
def test_get_data(index, data):
sub_data = data._get_data(index)
assert np.allclose(data.obs[index], sub_data.obs)
assert np.allclose(data.obs_se[index], sub_data.obs_se)
assert np.allclose(data.study_id[index], sub_data.study_id)
assert np.allclose(data.data_id[index], sub_data.data_id)
for cov_name in data.covs:
assert np.allclose(data.covs[cov_name][index], sub_data.covs[cov_name])
@pytest.mark.parametrize(('studies', 'result'), [([0, 1, 2], True),
([3, 4, 0], False),
(0, True),
(3, False)])
def test_has_studies(studies, result, data):
assert data.has_studies(studies) == result
def test_assert_has_studies(data):
with pytest.raises(ValueError):
data._assert_has_studies(3)
@pytest.mark.parametrize('studies', [0, [0], [0, 1], [0, 1, 2]])
def test_get_study_data(studies, data):
sub_data = data.get_study_data(studies)
if not isinstance(studies, list):
assert sub_data.num_studies == 1
else:
assert sub_data.num_studies == len(studies)
|
the-stack_106_27004 | #@+leo-ver=4-thin
#@+node:bob.20080109185406.1:@thin gtkDialogs.py
#@@language python
#@@tabwidth -4
#@<< docstring >>
#@+node:bob.20071220105852:<< docstring >>
"""Replace Tk dialogs with Gtk dialogs.
At the moment this plugin only replaces Tk's file dialogs, but
other dialogs may be replaced in future.
This plugin consists of two files, this one and runGtkDialogs.py.txt,
and obviously requires gtk2 to be available on your system.
runGtkDialogs.py.txt has a .txt extension so it can live
in the plugins folder without being mistaken for a plugin.
This python script is called to show the gtk dialogs.
@settings
=========
This plugin assumes that the command to invoke python is 'python'
but this may be changed by placing:
@string gtkdialogs_pythoncommand = your_python_command
in the @settings tree of myLeoSettings.leo.
"""
#@-node:bob.20071220105852:<< docstring >>
#@nl
#@<< version history >>
#@+node:bob.20071220123624:<< version history >>
#@@killcolor
#@+at
#
# 1.1 plumloco: Initial version
# 1.2 plumloco: Changed root node to fit in leoPlugins
# 1.3 plumloco: check for c is None in hook handler
#@-at
#@nonl
#@-node:bob.20071220123624:<< version history >>
#@nl
#@<<imports>>
#@+node:bob.20071220110146:<< imports >>
import leoGlobals as g
import leoPlugins
import os
import pickle
try:
from subprocess import *
ok = True
except:
ok = False
import re
#@-node:bob.20071220110146:<< imports >>
#@nl
__revision__ = re.sub(r'^\D+([\d\.]+)\D+$', r'\1', "$Revision: 1.3 $")
__version__ = '0.%s'% __revision__
__plugin_name__ = "GTK Dialogs"
__author__ = "[email protected]"
#@+others
#@+node:bob.20071220111856:init
def init ():
if g.unitTesting:
return False
if ok:
leoPlugins.registerHandler('start2', onStart2)
g.plugin_signon(__name__)
return ok
#@-node:bob.20071220111856:init
#@+node:bob.20071220110328:onStart2
def onStart2 (tag, keywords):
"""
Replace Tk file open/save methods with external calls to runGtkDialogs.
"""
global oldopen, oldsave
c = keywords.get('c')
if not c:
return
global pythoncommand
oldopen = g.app.gui.runOpenFileDialog
oldsave = g.app.gui.runSaveFileDialog
g.funcToMethod(runOpenFileDialog,g.app.gui)
g.funcToMethod(runSaveFileDialog,g.app.gui)
pythoncommand = c.config.getString('gtkdialogs_pythoncommand')
#@-node:bob.20071220110328:onStart2
#@+node:bob.20071220094231.10:callGtkDialogs
def callGtkDialogs(data, path='runGtkDialogs.py.txt'):
data = pickle.dumps(data)
path = g.os_path_abspath(g.os_path_join(g.app.loadDir, '..', 'plugins', path))
command = [ pythoncommand or 'python', path, data ]
try:
o = Popen(command, stdout=PIPE)
o.wait()
ok = True
except:
ok = False
if not ok:
g.es('error running gtk file chooser\nreverting to tk dialogs', color='red')
return False, None
data = o.communicate()[0].rstrip()
ret = o.returncode
if ret or not data:
return True, None
return True, pickle.loads(data)
#@-node:bob.20071220094231.10:callGtkDialogs
#@+node:bob.20071220100337:runOpenFileDialog
def runOpenFileDialog(title=None,filetypes=None,defaultextension=None,multiple=False):
"""Call runGtkDialogs open file(s) dialog."""
initialdir = g.app.globalOpenDir or g.os_path_abspath(os.getcwd())
data = {
'dialog': 'filechooser',
'title': title,
'initialdir': initialdir,
'filetypes': filetypes,
'defaultextension': defaultextension,
'multiple': multiple,
'action': 'open',
}
ok, data = callGtkDialogs(data)
if not ok:
return oldopen(title=title,filetypes=filetypes,defaultextension=defaultextension,multiple=multiple)
if data is None:
return ''
return data['result']
#@-node:bob.20071220100337:runOpenFileDialog
#@+node:bob.20071220100831:runSaveFileDialog
def runSaveFileDialog(initialfile=None,title=None,filetypes=None,defaultextension=None):
"""Call runGtkDialogs save file dialog."""
initialdir=g.app.globalOpenDir or g.os_path_abspath(os.getcwd())
data = {
'dialog': 'filechooser',
'title': title,
'initialdir': initialdir,
'filetypes': filetypes,
'defaultextension': defaultextension,
'action': 'save'
}
ok, data = callGtkDialogs(data)
if not ok:
return oldsave(initialfile=initialfile,title=title,filetypes=filetypes,defaultextension=defaultextension)
if data is None:
return ''
return data['result']
#@-node:bob.20071220100831:runSaveFileDialog
#@-others
#@nonl
#@-node:bob.20080109185406.1:@thin gtkDialogs.py
#@-leo
|
the-stack_106_27005 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from django.utils.functional import wraps
from django.utils.translation import ugettext as _
from desktop.conf import USE_NEW_EDITOR
from desktop.lib.exceptions_renderable import PopupException
from desktop.models import Document, Document2
from oozie.models import Job, Node, Dataset
LOG = logging.getLogger(__name__)
def check_document_access_permission():
def inner(view_func):
def decorate(request, *args, **kwargs):
doc_id = uuid = doc2 = None
try:
if request.REQUEST.get('workflow'):
workflow_id = request.REQUEST.get('workflow')
if workflow_id.isdigit():
doc_id = workflow_id
else:
uuid = workflow_id
elif request.GET.get('uuid'):
uuid = request.GET.get('uuid')
elif request.GET.get('coordinator'):
doc_id = request.GET.get('coordinator')
elif request.GET.get('bundle'):
doc_id = request.GET.get('bundle')
elif 'doc_id' in kwargs:
doc_id = kwargs['doc_id']
if doc_id and not doc_id.isdigit():
uuid = doc_id
doc_id = None
if doc_id is not None:
doc2 = Document2.objects.get(id=doc_id)
elif uuid is not None:
# TODO: The commented line should be used once we fully transition to doc2
# doc2 = Document2.objects.get_by_uuid(user=request.user, uuid=uuid, perm_type=None)
doc2 = Document2.objects.filter(uuid=uuid).order_by('-last_modified').first()
if doc2:
if USE_NEW_EDITOR.get():
doc2.can_read_or_exception(request.user)
else:
doc2.doc.get().can_read_or_exception(request.user)
except Document2.DoesNotExist:
raise PopupException(_('Job with %(key)s=%(value)s does not exist') %
{'key': 'id' if doc_id else 'uuid', 'value': doc_id or uuid})
return view_func(request, *args, **kwargs)
return wraps(view_func)(decorate)
return inner
def check_document_modify_permission():
def inner(view_func):
def decorate(request, *args, **kwargs):
doc_id = None
job = json.loads(request.POST.get('workflow', '{}'))
if not job:
job = json.loads(request.POST.get('coordinator', '{}'))
if not job:
job = json.loads(request.POST.get('bundle', '{}'))
if job and job.get('id'):
doc_id = job.get('id')
try:
doc2 = Document2.objects.get(id=job['id'])
if USE_NEW_EDITOR.get():
doc2.can_write_or_exception(request.user)
else:
doc2.doc.get().can_write_or_exception(request.user)
except Document.DoesNotExist:
raise PopupException(_('Job %(id)s does not exist') % {'id': doc_id})
return view_func(request, *args, **kwargs)
return wraps(view_func)(decorate)
return inner
def check_editor_access_permission(view_func):
def decorate(request, *args, **kwargs):
if not request.user.is_superuser and request.user.has_hue_permission(action="disable_editor_access", app="oozie"):
raise PopupException(_('Missing permission to access the Oozie Editor'), error_code=401)
return view_func(request, *args, **kwargs)
return wraps(view_func)(decorate)
## Oozie v1 below
def check_job_access_permission(exception_class=PopupException):
"""
Decorator ensuring that the user has access to the workflow or coordinator.
Arg: 'workflow' or 'coordinator' id.
Return: the workflow of coordinator or raise an exception
Notice: its gets an id in input and returns the full object in output (not an id).
"""
def inner(view_func):
def decorate(request, *args, **kwargs):
if 'workflow' in kwargs:
job_type = 'workflow'
elif 'coordinator' in kwargs:
job_type = 'coordinator'
else:
job_type = 'bundle'
job = kwargs.get(job_type)
if job is not None:
job = Job.objects.can_read_or_exception(request, job, exception_class=exception_class)
kwargs[job_type] = job
return view_func(request, *args, **kwargs)
return wraps(view_func)(decorate)
return inner
def check_job_edition_permission(authorize_get=False, exception_class=PopupException):
"""
Decorator ensuring that the user has the permissions to modify a workflow or coordinator.
Needs to appear below @check_job_access_permission
"""
def inner(view_func):
def decorate(request, *args, **kwargs):
if 'workflow' in kwargs:
job_type = 'workflow'
elif 'coordinator' in kwargs:
job_type = 'coordinator'
else:
job_type = 'bundle'
job = kwargs.get(job_type)
if job is not None and not (authorize_get and request.method == 'GET'):
Job.objects.can_edit_or_exception(request, job, exception_class=exception_class)
return view_func(request, *args, **kwargs)
return wraps(view_func)(decorate)
return inner
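# Illustrative sketch of how these decorators are stacked on an editor view
# (the view name and body are placeholders, not an actual Hue view):
#
#   @check_editor_access_permission
#   @check_job_access_permission()
#   @check_job_edition_permission(authorize_get=True)
#   def edit_workflow(request, workflow):
#       # ``workflow`` arrives as a permission-checked Job object, not an id
#       ...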
def check_action_access_permission(view_func):
"""
Decorator ensuring that the user has access to the workflow action.
Arg: 'workflow action' id.
Return: the workflow action or raise an exception
Notice: it gets an id as input and returns the full object as output (not an id).
"""
def decorate(request, *args, **kwargs):
action_id = kwargs.get('action')
action = Node.objects.get(id=action_id).get_full_node()
Job.objects.can_read_or_exception(request, action.workflow.id)
kwargs['action'] = action
return view_func(request, *args, **kwargs)
return wraps(view_func)(decorate)
def check_action_edition_permission(view_func):
"""
Decorator ensuring that the user has the permissions to modify a workflow action.
Needs to appear below @check_action_access_permission
"""
def decorate(request, *args, **kwargs):
action = kwargs.get('action')
Job.objects.can_edit_or_exception(request, action.workflow)
return view_func(request, *args, **kwargs)
return wraps(view_func)(decorate)
def check_dataset_access_permission(view_func):
"""
Decorator ensuring that the user has access to dataset.
Arg: 'dataset'.
Return: the dataset or raise an exception
Notice: it gets an id as input and returns the full object as output (not an id).
"""
def decorate(request, *args, **kwargs):
dataset = kwargs.get('dataset')
if dataset is not None:
dataset = Dataset.objects.can_read_or_exception(request, dataset)
kwargs['dataset'] = dataset
return view_func(request, *args, **kwargs)
return wraps(view_func)(decorate)
def check_dataset_edition_permission(authorize_get=False):
"""
Decorator ensuring that the user has the permissions to modify a dataset.
A dataset can be edited if the coordinator that owns the dataset can be edited.
Needs to appear below @check_dataset_access_permission
"""
def inner(view_func):
def decorate(request, *args, **kwargs):
dataset = kwargs.get('dataset')
if dataset is not None and not (authorize_get and request.method == 'GET'):
Job.objects.can_edit_or_exception(request, dataset.coordinator)
return view_func(request, *args, **kwargs)
return wraps(view_func)(decorate)
return inner
|
the-stack_106_27011 | from ui import UiFrame, Vect, BLACK, WHITE, YELLOW
class UiRain(UiFrame):
def __init__(self, ofs, dim):
super().__init__(ofs, dim)
def draw(self, ui, d):
# Pre-calculates some range values and draw icons bar
forecast = ui.forecast.forecast
cnt = len(forecast)
cmax = cnt - 1
block = ui.canvas.dim.x / cnt
ui.canvas.hline(Vect(0, self.dim.y - 1), self.dim.x - 1, BLACK)
for i in range(cnt):
xx = int(block * i)
weather = forecast[i]
dt = ui.forecast.time.get_date_time(weather.dt)
# Draw rain chart
p = max(weather.rain, weather.snow)
if weather.rain > 0 or weather.snow > 0:
r = int(p * 12)
_ = self.dim.y // 4
for h in (_, _ * 2, _ * 3):
if r > h:
r = h + (r - h) // 2
v = Vect(xx - int(block // 2) + 1, self.dim.y - r - 1)
d = Vect(int(block) - 2, r)
if weather.rain > 0:
ui.canvas.trect(v, d, BLACK)
else:
ui.canvas.fill_rect(v, d, YELLOW)
ui.canvas.rect( v, d, BLACK)
# Type rain text
if (i > 0) and (i < cmax):
f0 = forecast[i - 1]
f1 = forecast[i + 1]
if (max(f0.rain, f0.snow) < p) and (p > max(f1.rain, f1.snow)):
ui.text_center(10, '%.1f' % p, Vect(xx, self.dim.y - 2), BLACK, WHITE)
|
the-stack_106_27012 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from functools import lru_cache, wraps
import logging
import pandas as pd
import numpy as np
from reco_utils.common.constants import (
DEFAULT_USER_COL,
DEFAULT_ITEM_COL,
DEFAULT_RATING_COL,
DEFAULT_LABEL_COL,
)
logger = logging.getLogger(__name__)
def user_item_pairs(
user_df,
item_df,
user_col=DEFAULT_USER_COL,
item_col=DEFAULT_ITEM_COL,
user_item_filter_df=None,
shuffle=True,
seed=None,
):
"""Get all pairs of users and items data.
Args:
user_df (pd.DataFrame): User data containing unique user ids and maybe their features.
item_df (pd.DataFrame): Item data containing unique item ids and maybe their features.
user_col (str): User id column name.
item_col (str): Item id column name.
user_item_filter_df (pd.DataFrame): User-item pairs to be used as a filter.
shuffle (bool): If True, shuffles the result.
seed (int): Random seed for shuffle
Returns:
pd.DataFrame: All pairs of user-item from user_df and item_df, excepting the pairs in user_item_filter_df
"""
# Get all user-item pairs
user_df["key"] = 1
item_df["key"] = 1
users_items = user_df.merge(item_df, on="key")
user_df.drop("key", axis=1, inplace=True)
item_df.drop("key", axis=1, inplace=True)
users_items.drop("key", axis=1, inplace=True)
# Filter
if user_item_filter_df is not None:
users_items = filter_by(users_items, user_item_filter_df, [user_col, item_col])
if shuffle:
users_items = users_items.sample(frac=1, random_state=seed).reset_index(
drop=True
)
return users_items
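# Minimal sketch of user_item_pairs() on toy data; the column names reuse the
# defaults imported above, everything else is made up for illustration.
def _example_user_item_pairs():
    users = pd.DataFrame({DEFAULT_USER_COL: [1, 2]})
    items = pd.DataFrame({DEFAULT_ITEM_COL: [10, 20, 30]})
    seen = pd.DataFrame({DEFAULT_USER_COL: [1], DEFAULT_ITEM_COL: [10]})
    # 2 users x 3 items = 6 pairs, minus the single (1, 10) pair already seen
    return user_item_pairs(users, items, user_item_filter_df=seen, shuffle=False)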
def filter_by(df, filter_by_df, filter_by_cols):
"""From the input DataFrame (df), remove the records whose target column (filter_by_cols) values are
exist in the filter-by DataFrame (filter_by_df)
Args:
df (pd.DataFrame): Source dataframe.
filter_by_df (pd.DataFrame): Filter dataframe.
filter_by_cols (iterable of str): Filter columns.
Returns:
pd.DataFrame: Dataframe filtered by filter_by_df on filter_by_cols
"""
return df.loc[
~df.set_index(filter_by_cols).index.isin(
filter_by_df.set_index(filter_by_cols).index
)
]
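# Minimal sketch of filter_by(): drop the rows of ``df`` whose (user, item)
# combination already appears in ``seen`` (toy data, default column names).
def _example_filter_by():
    df = pd.DataFrame({DEFAULT_USER_COL: [1, 1, 2], DEFAULT_ITEM_COL: [10, 20, 10]})
    seen = pd.DataFrame({DEFAULT_USER_COL: [1], DEFAULT_ITEM_COL: [10]})
    return filter_by(df, seen, [DEFAULT_USER_COL, DEFAULT_ITEM_COL])  # keeps 2 of 3 rows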
class LibffmConverter(object):
"""Converts an input Dataframe (df) to another Dataframe (df) in libffm format. A text file of the converted
Dataframe is optionally generated.
Note:
The input dataframe is expected to represent the feature data in the following schema
|field-1|field-2|...|field-n|rating|
|feature-1-1|feature-2-1|...|feature-n-1|1|
|feature-1-2|feature-2-2|...|feature-n-2|0|
...
|feature-1-i|feature-2-j|...|feature-n-k|0|
Where
1. each "field-*" is the column name of the dataframe (column of lable/rating is excluded), and
2. "feature-*-*" can be either a string or a numerical value, representing the categorical variable or
actual numerical variable of the feature value in the field, respectively.
3. If there are ordinal variables represented in int types, users should make sure these columns
are properly converted to string type.
The above data will be converted to the libffm format by following the convention as explained in
https://www.csie.ntu.edu.tw/~r01922136/slides/ffm.pdf
i.e., <field_index>:<field_feature_index>:1 or <field_index>:<field_index>:<field_feature_value>, depending on
the data type of the features in the original dataframe.
Examples:
>>> import pandas as pd
>>> df_feature = pd.DataFrame({
'rating': [1, 0, 0, 1, 1],
'field1': ['xxx1', 'xxx2', 'xxx4', 'xxx4', 'xxx4'],
'field2': [3, 4, 5, 6, 7],
'field3': [1.0, 2.0, 3.0, 4.0, 5.0],
'field4': ['1', '2', '3', '4', '5']
})
>>> converter = LibffmConverter().fit(df_feature, col_rating='rating')
>>> df_out = converter.transform(df_feature)
>>> df_out
rating field1 field2 field3 field4
0 1 1:1:1 2:4:3 3:5:1.0 4:4:1
1 0 1:2:1 2:4:4 3:5:2.0 4:5:1
2 0 1:3:1 2:4:5 3:5:3.0 4:6:1
3 1 1:3:1 2:4:6 3:5:4.0 4:7:1
4 1 1:3:1 2:4:7 3:5:5.0 4:8:1
Args:
filepath (str): path to save the converted data.
Attributes:
field_count (int): count of field in the libffm format data
feature_count (int): count of feature in the libffm format data
filepath (str or None): file path where the output is stored - it can be None or a string
"""
def __init__(self, filepath=None):
self.filepath = filepath
self.col_rating = None
self.field_names = None
self.field_count = None
self.feature_count = None
def fit(self, df, col_rating=DEFAULT_RATING_COL):
"""Fit the dataframe for libffm format.
This method does nothing but check the validity of the input columns
Args:
df (pd.DataFrame): input Pandas dataframe.
col_rating (str): rating of the data.
Return:
obj: the instance of the converter
"""
# Check column types.
types = df.dtypes
if not all(
[
x == object or np.issubdtype(x, np.integer) or x == np.float
for x in types
]
):
raise TypeError("Input columns should be only object and/or numeric types.")
if col_rating not in df.columns:
raise TypeError(
"Column of {} is not in input dataframe columns".format(col_rating)
)
self.col_rating = col_rating
self.field_names = list(df.drop(col_rating, axis=1).columns)
return self
def transform(self, df):
"""Tranform an input dataset with the same schema (column names and dtypes) to libffm format
by using the fitted converter.
Args:
df (pd.DataFrame): input Pandas dataframe.
Return:
pd.DataFrame: output libffm format dataframe.
"""
if self.col_rating not in df.columns:
raise ValueError(
"Input dataset does not contain the label column {} in the fitting dataset".format(
self.col_rating
)
)
if not all([x in df.columns for x in self.field_names]):
raise ValueError(
"Not all columns in the input dataset appear in the fitting dataset"
)
# Encode field-feature.
idx = 1
self.field_feature_dict = {}
for field in self.field_names:
for feature in df[field].values:
# Check whether (field, feature) tuple exists in the dict or not.
# If not, put them into the key-values of the dict and count the index.
if (field, feature) not in self.field_feature_dict:
self.field_feature_dict[(field, feature)] = idx
if df[field].dtype == object:
idx += 1
if df[field].dtype != object:
idx += 1
self.field_count = len(self.field_names)
self.feature_count = idx - 1
def _convert(field, feature, field_index, field_feature_index_dict):
field_feature_index = field_feature_index_dict[(field, feature)]
if isinstance(feature, str):
feature = 1
return "{}:{}:{}".format(field_index, field_feature_index, feature)
for col_index, col in enumerate(self.field_names):
df[col] = df[col].apply(
lambda x: _convert(col, x, col_index + 1, self.field_feature_dict)
)
# Move rating column to the first.
column_names = self.field_names[:]
column_names.insert(0, self.col_rating)
df = df[column_names]
if self.filepath is not None:
np.savetxt(self.filepath, df.values, delimiter=" ", fmt="%s")
return df
def fit_transform(self, df, col_rating=DEFAULT_RATING_COL):
"""Do fit and transform in a row
Args:
df (pd.DataFrame): input Pandas dataframe.
col_rating (str): rating of the data.
Return:
pd.DataFrame: output libffm format dataframe.
"""
return self.fit(df, col_rating=col_rating).transform(df)
def get_params(self):
"""Get parameters (attributes) of the libffm converter
Return:
dict: parameters field count, feature count, and file path.
"""
return {
"field count": self.field_count,
"feature count": self.feature_count,
"file path": self.filepath,
}
def negative_feedback_sampler(
df,
col_user=DEFAULT_USER_COL,
col_item=DEFAULT_ITEM_COL,
col_label=DEFAULT_LABEL_COL,
ratio_neg_per_user=1,
seed=42,
):
"""Utility function to sample negative feedback from user-item interaction dataset.
This negative sampling function will take the user-item interaction data to create
binarized feedback, i.e., 1 and 0 indicate positive and negative feedback,
respectively.
Negative sampling is used in the literature frequently to generate negative samples
from a user-item interaction data.
See for example the neural collaborative filtering paper
https://www.comp.nus.edu.sg/~xiangnan/papers/ncf.pdf
Examples:
>>> import pandas as pd
>>> df = pd.DataFrame({
'userID': [1, 2, 3],
'itemID': [1, 2, 3],
'rating': [5, 5, 5]
})
>>> df_neg_sampled = negative_feedback_sampler(
df, col_user='userID', col_item='itemID', ratio_neg_per_user=1
)
>>> df_neg_sampled
userID itemID feedback
1 1 1
1 2 0
2 2 1
2 1 0
3 3 1
3 1 0
Args:
df (pandas.DataFrame): input data that contains user-item tuples.
col_user (str): user id column name.
col_item (str): item id column name.
col_label (str): label column name. It is used for the generated columns where labels
of positive and negative feedback, i.e., 1 and 0, respectively, in the output dataframe.
ratio_neg_per_user (int): ratio of negative feedback w.r.t. the number of positive feedback samples for each user.
If the samples exceed the number of total possible negative feedback samples, it will be reduced to the number
of all the possible samples.
seed (int): seed for the random state of the sampling function.
Returns:
pandas.DataFrame: data with negative feedback
"""
# Get all of the users and items.
users = df[col_user].unique()
items = df[col_item].unique()
# Create a dataframe for all user-item pairs
df_neg = user_item_pairs(
pd.DataFrame(users, columns=[col_user]),
pd.DataFrame(items, columns=[col_item]),
user_item_filter_df=df,
)
df_neg[col_label] = 0
df_pos = df.copy()
df_pos[col_label] = 1
df_all = pd.concat([df_pos, df_neg], ignore_index=True, sort=True)
df_all = df_all[[col_user, col_item, col_label]]
# Sample negative feedback from the combined dataframe.
df_sample = (
df_all.groupby(col_user)
.apply(
lambda x: pd.concat(
[
x[x[col_label] == 1],
x[x[col_label] == 0].sample(
min(
max(
round(len(x[x[col_label] == 1]) * ratio_neg_per_user), 1
),
len(x[x[col_label] == 0]),
),
random_state=seed,
replace=False,
)
if len(x[x[col_label] == 0] > 0)
else pd.DataFrame({}, columns=[col_user, col_item, col_label]),
],
ignore_index=True,
sort=True,
)
)
.reset_index(drop=True)
.sort_values(col_user)
)
return df_sample
def has_columns(df, columns):
"""Check if DataFrame has necessary columns
Args:
df (pd.DataFrame): DataFrame
columns (list(str)): columns to check for
Returns:
bool: True if DataFrame has specified columns
"""
result = True
for column in columns:
if column not in df.columns:
logger.error("Missing column: {} in DataFrame".format(column))
result = False
return result
def has_same_base_dtype(df_1, df_2, columns=None):
"""Check if specified columns have the same base dtypes across both DataFrames
Args:
df_1 (pd.DataFrame): first DataFrame
df_2 (pd.DataFrame): second DataFrame
columns (list(str)): columns to check, None checks all columns
Returns:
bool: True if DataFrames columns have the same base dtypes
"""
if columns is None:
if any(set(df_1.columns).symmetric_difference(set(df_2.columns))):
logger.error(
"Cannot test all columns because they are not all shared across DataFrames"
)
return False
columns = df_1.columns
if not (
has_columns(df=df_1, columns=columns) and has_columns(df=df_2, columns=columns)
):
return False
result = True
for column in columns:
if df_1[column].dtype.type.__base__ != df_2[column].dtype.type.__base__:
logger.error("Columns {} do not have the same base datatype".format(column))
result = False
return result
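# Minimal sketch: validate that two toy frames share columns and base dtypes
# before merging them (column names here are arbitrary examples).
#
#     df_a = pd.DataFrame({"userID": [1], "rating": [3.0]})
#     df_b = pd.DataFrame({"userID": [2], "rating": [4.5]})
#     has_columns(df_a, ["userID", "rating"])         # True
#     has_same_base_dtype(df_a, df_b, ["rating"])     # True (both float64)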
class PandasHash:
"""Wrapper class to allow pandas objects (DataFrames or Series) to be hashable"""
# reserve space just for a single pandas object
__slots__ = "pandas_object"
def __init__(self, pandas_object):
"""Initialize class
Args:
pandas_object (pd.DataFrame|pd.Series): pandas object
"""
if not isinstance(pandas_object, (pd.DataFrame, pd.Series)):
raise TypeError("Can only wrap pandas DataFrame or Series objects")
self.pandas_object = pandas_object
def __eq__(self, other):
"""Overwrite equality comparison
Args:
other (pd.DataFrame|pd.Series): pandas object to compare
Returns:
bool: whether other object is the same as this one
"""
return hash(self) == hash(other)
def __hash__(self):
"""Overwrite hash operator for use with pandas objects
Returns:
int: hashed value of object
"""
hashable = tuple(self.pandas_object.values.tobytes())
if isinstance(self.pandas_object, pd.DataFrame):
hashable += tuple(self.pandas_object.columns)
else:
hashable += tuple(self.pandas_object.name)
return hash(hashable)
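# Usage sketch (hypothetical, values illustrative): two wrappers around frames with
# identical contents and columns hash and compare equal, which is exactly what
# lru_cache_df below relies on to reuse cached results.
#   a = PandasHash(pd.DataFrame({"x": [1, 2]}))
#   b = PandasHash(pd.DataFrame({"x": [1, 2]}))
#   assert a == b and hash(a) == hash(b)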
def lru_cache_df(maxsize, typed=False):
"""Least-recently-used cache decorator
Args:
maxsize (int|None): max size of cache, if set to None cache is boundless
typed (bool): arguments of different types are cached separately
"""
def to_pandas_hash(val):
"""Return PandaHash object if input is a DataFrame otherwise return input unchanged"""
return PandasHash(val) if isinstance(val, pd.DataFrame) else val
def from_pandas_hash(val):
"""Extract DataFrame if input is PandaHash object otherwise return input unchanged"""
return val.pandas_object if isinstance(val, PandasHash) else val
def decorating_function(user_function):
@wraps(user_function)
def wrapper(*args, **kwargs):
            # convert DataFrames in args and kwargs to PandasHash objects
args = tuple([to_pandas_hash(a) for a in args])
kwargs = {k: to_pandas_hash(v) for k, v in kwargs.items()}
return cached_wrapper(*args, **kwargs)
@lru_cache(maxsize=maxsize, typed=typed)
def cached_wrapper(*args, **kwargs):
            # get DataFrames from PandasHash objects in args and kwargs
args = tuple([from_pandas_hash(a) for a in args])
kwargs = {k: from_pandas_hash(v) for k, v in kwargs.items()}
return user_function(*args, **kwargs)
# retain lru_cache attributes
wrapper.cache_info = cached_wrapper.cache_info
wrapper.cache_clear = cached_wrapper.cache_clear
return wrapper
return decorating_function
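# Usage sketch (hypothetical function name, not part of the original module):
#   @lru_cache_df(maxsize=32)
#   def expensive_summary(df):
#       return df.describe()
#   frame = pd.DataFrame({"x": range(5)})
#   expensive_summary(frame)   # computed
#   expensive_summary(frame)   # served from the cache; see expensive_summary.cache_info()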
|
the-stack_106_27013 | '''
# Sign-in Module
Signs the user in with the given URL
(appType:2)
'''
from .. import session
from bs4 import BeautifulSoup
from utils.myutils import urlparams
import logging
logger = logging.getLogger('Singin')
def NormalSingin(singin_url) -> str:
'''
    # Check-in and gesture check-in (签到、手势签到)
    Returns a string indicating whether the sign-in has succeeded or not
    ...this only works on these two sign-in types
'''
    logger.debug('Signing in with URL %s' % singin_url)
response = session.get(
'https://mobilelearn.chaoxing.com/widget/sign/pcStuSignController/signIn',
params=urlparams.GetParams(singin_url)
)
soup = BeautifulSoup(response.text,'lxml')
result = soup.find('div',{'class':'qd_Success'}).text.strip()
return result |
the-stack_106_27017 | # Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import argparse
import logging
import os
import tensorflow as tf
import keras
from keras.callbacks import EarlyStopping, TensorBoard
from keras.datasets import cifar10
from keras.optimizers import SGD, Adadelta, Adagrad, Adam, Adamax, RMSprop
from keras.utils import multi_gpu_model, to_categorical
import keras.backend.tensorflow_backend as KTF
import nni
from nni.networkmorphism_tuner.graph import json_to_graph
# set the logger format
log_format = "%(asctime)s %(message)s"
logging.basicConfig(
filename="networkmorphism.log",
filemode="a",
level=logging.INFO,
format=log_format,
datefmt="%m/%d %I:%M:%S %p",
)
# create the module logger
logger = logging.getLogger("cifar10-network-morphism-keras")
# restrict GPU memory usage (grow as needed instead of allocating all memory up front)
config = tf.ConfigProto()
# pylint: disable=E1101,W0603
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
KTF.set_session(sess)
def get_args():
""" get args from command line
"""
parser = argparse.ArgumentParser("cifar10")
parser.add_argument("--batch_size", type=int, default=128, help="batch size")
parser.add_argument("--optimizer", type=str, default="SGD", help="optimizer")
parser.add_argument("--epochs", type=int, default=200, help="epoch limit")
parser.add_argument(
"--learning_rate", type=float, default=0.001, help="learning rate"
)
parser.add_argument(
"--weight_decay",
type=float,
default=1e-5,
help="weight decay of the learning rate",
)
return parser.parse_args()
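# Example invocation (file name and values are illustrative, not prescribed by NNI):
#   python cifar10_keras.py --batch_size 64 --optimizer Adam --epochs 100 \
#       --learning_rate 0.001 --weight_decay 1e-5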
trainloader = None
testloader = None
net = None
args = get_args()
TENSORBOARD_DIR = os.environ["NNI_OUTPUT_DIR"]
def build_graph_from_json(ir_model_json):
"""build model from json representation
"""
graph = json_to_graph(ir_model_json)
logging.debug(graph.operation_history)
model = graph.produce_keras_model()
return model
def parse_rev_args(receive_msg):
""" parse reveive msgs to global variable
"""
global trainloader
global testloader
global net
# Loading Data
logger.debug("Preparing data..")
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)
x_train = x_train.astype("float32")
x_test = x_test.astype("float32")
x_train /= 255.0
x_test /= 255.0
trainloader = (x_train, y_train)
testloader = (x_test, y_test)
# Model
logger.debug("Building model..")
net = build_graph_from_json(receive_msg)
# parallel model
try:
available_devices = os.environ["CUDA_VISIBLE_DEVICES"]
gpus = len(available_devices.split(","))
if gpus > 1:
net = multi_gpu_model(net, gpus)
except KeyError:
logger.debug("parallel model not support in this config settings")
if args.optimizer == "SGD":
optimizer = SGD(lr=args.learning_rate, momentum=0.9, decay=args.weight_decay)
if args.optimizer == "Adadelta":
optimizer = Adadelta(lr=args.learning_rate, decay=args.weight_decay)
if args.optimizer == "Adagrad":
optimizer = Adagrad(lr=args.learning_rate, decay=args.weight_decay)
if args.optimizer == "Adam":
optimizer = Adam(lr=args.learning_rate, decay=args.weight_decay)
if args.optimizer == "Adamax":
optimizer = Adamax(lr=args.learning_rate, decay=args.weight_decay)
if args.optimizer == "RMSprop":
optimizer = RMSprop(lr=args.learning_rate, decay=args.weight_decay)
# Compile the model
net.compile(
loss="categorical_crossentropy", optimizer=optimizer, metrics=["accuracy"]
)
return 0
class SendMetrics(keras.callbacks.Callback):
"""
Keras callback to send metrics to NNI framework
"""
def on_epoch_end(self, epoch, logs=None):
"""
Run on end of each epoch
"""
if logs is None:
logs = dict()
logger.debug(logs)
nni.report_intermediate_result(logs["acc"])
# Training
def train_eval():
""" train and eval the model
"""
global trainloader
global testloader
global net
(x_train, y_train) = trainloader
(x_test, y_test) = testloader
# train procedure
net.fit(
x=x_train,
y=y_train,
batch_size=args.batch_size,
validation_data=(x_test, y_test),
epochs=args.epochs,
shuffle=True,
callbacks=[
SendMetrics(),
EarlyStopping(min_delta=0.001, patience=10),
TensorBoard(log_dir=TENSORBOARD_DIR),
],
)
# trial report final acc to tuner
_, acc = net.evaluate(x_test, y_test)
logger.debug("Final result is: %d", acc)
nni.report_final_result(acc)
if __name__ == "__main__":
try:
# trial get next parameter from network morphism tuner
RCV_CONFIG = nni.get_next_parameter()
logger.debug(RCV_CONFIG)
parse_rev_args(RCV_CONFIG)
train_eval()
except Exception as exception:
logger.exception(exception)
raise
|
the-stack_106_27019 | from typing import Set, Optional, Sequence, Tuple
from specs import BeaconState, VALIDATOR_REGISTRY_LIMIT, ValidatorIndex, Attestation
from eth2spec.utils.ssz.ssz_typing import Container, List, uint64
class NetworkSetIndex(uint64):
pass
class NetworkSet(Container):
validators: List[ValidatorIndex, VALIDATOR_REGISTRY_LIMIT] # up to VALIDATOR_REGISTRY_LIMIT ValidatorIndex may be included
beacon_state: BeaconState
class NetworkAttestation(Container):
item: Attestation
    info_sets: List[NetworkSetIndex, VALIDATOR_REGISTRY_LIMIT]  # stores the set indices that this attestation is known to
class Network(Container):
sets: List[NetworkSet, VALIDATOR_REGISTRY_LIMIT]
attestations: List[NetworkAttestation, VALIDATOR_REGISTRY_LIMIT]
malicious: List[ValidatorIndex, VALIDATOR_REGISTRY_LIMIT]
def get_all_sets_for_validator(network: Network, validator_index: ValidatorIndex) -> Sequence[NetworkSetIndex]:
    # Returns a sequence of NetworkSetIndex: all set indices for the sets that the validator is in.
return [i for i, s in enumerate(network.sets) if validator_index in s.validators]
def disseminate_attestation(network: Network, sender: ValidatorIndex, item: Attestation, to_sets: List[NetworkSetIndex, VALIDATOR_REGISTRY_LIMIT] = None) -> None:
broadcast_list = get_all_sets_for_validator(network, sender) if to_sets is None else to_sets
    # This stores the Attestation and the information sets that know about this attestation
networkItem = NetworkAttestation(item=item, info_sets=broadcast_list)
    # Append the NetworkAttestation to Network.attestations ...
network.attestations.append(networkItem)
def update_network(network: Network) -> None:
item_sets = [network.attestations]
for item_set in item_sets:
for item in item_set:
known_validators = set()
for info_set in item.info_sets:
known_validators = known_validators.union(set(network.sets[info_set].validators))
unknown_sets = [i for i, s in enumerate(network.sets) if i not in item.info_sets]
for unknown_set in unknown_sets:
new_validators = network.sets[unknown_set].validators
for new_validator in new_validators:
if new_validator in known_validators and new_validator not in network.malicious:
item.info_sets.append(unknown_set)
break
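# Hypothetical end-to-end sketch of how the helpers above and knowledge_set below
# compose (values illustrative):
#   network = Network(sets=[...], attestations=[], malicious=[])
#   disseminate_attestation(network, sender=ValidatorIndex(0), item=some_attestation)
#   update_network(network)                       # propagate to overlapping sets
#   knowledge_set(network, ValidatorIndex(3))     # -> {"attestations": [(index, item), ...]}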
def knowledge_set(network: Network, validator_index: ValidatorIndex) -> Sequence[Container]:
'''
For each attestation in network.attestations it is checked whether the validator (/proposer) is
part of a set that knows about the attestation in question. If yes, it is added to the list of tuples:
known_attestation = [(NetworkAttestationIndex, NetworkAttestation), ...]
Returns a dict: {"attestations": known_attestations}
'''
    # Set indices of all the sets that the validator is in
info_sets = set(get_all_sets_for_validator(network, validator_index))
    # An attestation is counted as known if at least one of the validator's sets
    # appears in its info_sets (i.e. the intersection below is non-empty)
known_attestations = [
(item_index, item) for item_index, item in enumerate(network.attestations)
if len(set(item.info_sets) & info_sets) > 0
]
return { "attestations": known_attestations } |
the-stack_106_27021 | from bokeh.io import output_file, show
from bokeh.models import ColumnDataSource
from bokeh.plotting import figure
from bokeh.transform import dodge
output_file("dodged_bars.html")
fruits = ['Apples', 'Pears', 'Nectarines', 'Plums', 'Grapes', 'Strawberries']
years = ['2015', '2016', '2017']
data = {'fruits' : fruits,
'2015' : [2, 1, 4, 3, 2, 4],
'2016' : [5, 3, 3, 2, 4, 6],
'2017' : [3, 2, 4, 4, 5, 3]}
source = ColumnDataSource(data=data)
p = figure(x_range=fruits, y_range=(0, 10), plot_height=250, title="Fruit Counts by Year",
toolbar_location=None, tools="")
p.vbar(x=dodge('fruits', -0.25, range=p.x_range), top='2015', width=0.2, source=source,
color="#c9d9d3", legend_label="2015")
p.vbar(x=dodge('fruits', 0.0, range=p.x_range), top='2016', width=0.2, source=source,
color="#718dbf", legend_label="2016")
p.vbar(x=dodge('fruits', 0.25, range=p.x_range), top='2017', width=0.2, source=source,
color="#e84d60", legend_label="2017")
p.x_range.range_padding = 0.1
p.xgrid.grid_line_color = None
p.legend.location = "top_left"
p.legend.orientation = "horizontal"
show(p)
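# Note: the dodge offsets of -0.25, 0.0 and +0.25 combined with a bar width of 0.2
# place the three yearly bars side by side inside each categorical slot without overlap.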
|
the-stack_106_27022 | import re
import json
import string
import random
from steemapi.steemclient import SteemNodeRPC
from steembase.account import PrivateKey, PublicKey, Address
import steembase.transactions as transactions
from .utils import (
resolveIdentifier,
constructIdentifier,
derivePermlink,
formatTimeString
)
from .wallet import Wallet
from .storage import configStorage as config
from datetime import datetime, timedelta
import logging
log = logging.getLogger(__name__)
prefix = "STM"
# prefix = "TST"
STEEMIT_100_PERCENT = 10000
STEEMIT_1_PERCENT = (STEEMIT_100_PERCENT / 100)
class AccountExistsException(Exception):
pass
class Post(object):
""" This object gets instanciated by Steem.streams and is used as an
abstraction layer for Comments in Steem
:param Steem steem: An instance of the Steem() object
:param object post: The post as obtained by `get_content`
"""
steem = None
def __init__(self, steem, post):
if not isinstance(steem, Steem):
raise ValueError(
"First argument must be instance of Steem()"
)
self.steem = steem
self._patch = False
# Get full Post
if isinstance(post, str): # From identifier
self.identifier = post
post_author, post_permlink = resolveIdentifier(post)
post = self.steem.rpc.get_content(post_author, post_permlink)
elif (isinstance(post, dict) and # From dictionary
"author" in post and
"permlink" in post):
# strip leading @
if post["author"][0] == "@":
post["author"] = post["author"][1:]
self.identifier = constructIdentifier(
post["author"],
post["permlink"]
)
# if there only is an author and a permlink but no body
# get the full post via RPC
if "created" not in post or "cashout_time" not in post:
post = self.steem.rpc.get_content(
post["author"],
post["permlink"]
)
else:
raise ValueError("Post expects an identifier or a dict "
"with author and permlink!")
# If this 'post' comes from an operation, it might carry a patch
if "body" in post and re.match("^@@", post["body"]):
self._patched = True
self._patch = post["body"]
# Parse Times
parse_times = ["active",
"cashout_time",
"created",
"last_payout",
"last_update",
"max_cashout_time"]
for p in parse_times:
post["%s_parsed" % p] = datetime.strptime(
post.get(p, "1970-01-01T00:00:00"), '%Y-%m-%dT%H:%M:%S'
)
# Try to properly format json meta data
meta_str = post.get("json_metadata", "")
post["_json_metadata"] = meta_str
meta = {}
try:
meta = json.loads(meta_str)
except:
pass
post["_tags"] = meta.get("tags", [])
# Retrieve the root comment
self.openingPostIdentifier, self.category = self._getOpeningPost()
# Total reward
post["total_payout_reward"] = "%.3f SBD" % (
float(post["total_payout_value"].split(" ")[0]) +
float(post["total_pending_payout_value"].split(" ")[0])
)
# Store everything as attribute
for key in post:
setattr(self, key, post[key])
def _getOpeningPost(self):
m = re.match("/([^/]*)/@([^/]*)/([^#]*).*",
getattr(self, "url", ""))
if not m:
return None, None
else:
category = m.group(1)
author = m.group(2)
permlink = m.group(3)
return constructIdentifier(
author, permlink
), category
def __getitem__(self, key):
return getattr(self, key)
def remove(self, key):
delattr(self, key)
def get(self, key, default=None):
if hasattr(self, key):
return getattr(self, key)
else:
return default
def __delitem__(self, key):
delattr(self, key)
def __contains__(self, key):
return hasattr(self, key)
def __iter__(self):
r = {}
for key in vars(self):
r[key] = getattr(self, key)
return iter(r)
def __len__(self):
return len(vars(self))
def __repr__(self):
return "<Steem.Post-%s>" % constructIdentifier(self["author"], self["permlink"])
def get_comments(self, sort="total_payout_reward"):
""" Return **first-level** comments of the post.
"""
post_author, post_permlink = resolveIdentifier(self.identifier)
posts = self.steem.rpc.get_content_replies(post_author, post_permlink)
r = []
for post in posts:
r.append(Post(self.steem, post))
if sort == "total_payout_value":
r = sorted(r, key=lambda x: float(
x["total_payout_value"].split(" ")[0]
), reverse=True)
elif sort == "total_payout_reward":
r = sorted(r, key=lambda x: float(
x["total_payout_reward"].split(" ")[0]
), reverse=True)
else:
r = sorted(r, key=lambda x: x[sort])
return(r)
def reply(self, body, title="", author="", meta=None):
""" Reply to the post
:param str body: (required) body of the reply
:param str title: Title of the reply
:param str author: Author of reply
:param json meta: JSON Meta data
"""
return self.steem.reply(self.identifier, body, title, author, meta)
def upvote(self, weight=+100, voter=None):
""" Upvote the post
:param float weight: (optional) Weight for posting (-100.0 - +100.0) defaults to +100.0
:param str voter: (optional) Voting account
"""
return self.vote(weight, voter=voter)
def downvote(self, weight=-100, voter=None):
""" Downvote the post
:param float weight: (optional) Weight for posting (-100.0 - +100.0) defaults to -100.0
:param str voter: (optional) Voting account
"""
return self.vote(weight, voter=voter)
def vote(self, weight, voter=None):
""" Vote the post
:param float weight: Weight for posting (-100.0 - +100.0)
:param str voter: Voting account
"""
return self.steem.vote(self.identifier, weight, voter=voter)
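    # Usage sketch (requires a reachable Steem node; identifiers illustrative):
    #   steem = Steem()
    #   post = Post(steem, "@some-author/some-permlink")
    #   post.upvote(weight=50, voter="some-voter")
    #   replies = post.get_comments()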
class MissingKeyError(Exception):
pass
class BroadcastingError(Exception):
pass
class Steem(object):
""" The purpose of this class it to simplify posting and dealing
with accounts, posts and categories in Steem.
The idea is to have a class that allows to do this:
.. code-block:: python
from piston.steem import Steem
steem = Steem()
steem.post("Testing piston-libs", "I am testing piston-libs", category="spam")
        All that is required is for the user to have added a posting key with
.. code-block:: bash
piston addkey
and setting a default author:
.. code-block:: bash
piston set default_author xeroc
This class also deals with edits, votes and reading content.
"""
wallet = None
rpc = None
def __init__(self, *args, **kwargs):
"""
:param bool debug: Enable Debugging
:param wif wif: WIF private key for signing. If provided,
will not load from wallet (optional). Can be
single string, or array of keys.
"""
self.connect(*args, **kwargs)
self.debug = kwargs.get("debug", False)
self.nobroadcast = kwargs.get("nobroadcast", False)
if "wif" in kwargs:
self.wallet = Wallet(self.rpc, wif=kwargs["wif"])
else:
self.wallet = Wallet(self.rpc)
def connect(self, *args, **kwargs):
""" Connect to the Steem network.
:param str node: Node to connect to *(optional)*
:param str rpcuser: RPC user *(optional)*
:param str rpcpassword: RPC password *(optional)*
:param bool nobroadcast: Do **not** broadcast a transaction!
If no node is provided, it will connect to the node of
http://piston.rocks. It is **highly** recommended that you pick your own
node instead. Default settings can be changed with:
.. code-block:: python
piston set node <host>
where ``<host>`` starts with ``ws://`` or ``wss://``.
"""
node = None
rpcuser = None
rpcpassword = None
        # *args arrives as a tuple; make it a list so pop() works
        args = list(args)
        if len(args):
node = args.pop(0)
if len(args):
rpcuser = args.pop(0)
if len(args):
rpcpassword = args.pop(0)
node = kwargs.pop("node", node)
rpcuser = kwargs.pop("rpcuser", rpcuser)
rpcpassword = kwargs.pop("rpcpassword", rpcpassword)
if not node:
if "node" in config:
node = config["node"]
else:
raise ValueError("A Steem node needs to be provided!")
if not rpcuser and "rpcuser" in config:
rpcuser = config["rpcuser"]
if not rpcpassword and "rpcpassword" in config:
rpcpassword = config["rpcpassword"]
self.rpc = SteemNodeRPC(node, rpcuser, rpcpassword, **kwargs)
def executeOp(self, op, wif=None):
""" Execute an operation by signing it with the ``wif`` key and
broadcasting it to the Steem network
            :param Object op: The operation to be signed and broadcast, as
provided by the ``transactions`` class.
:param string wif: The wif key to use for signing a transaction
**TODO**: The full node could, given the operations, give us a
set of public keys that are required for signing, then the
            public keys could be used to identify the wif-keys from the wallet.
"""
# overwrite wif with default wif if available
if not wif:
raise MissingKeyError
ops = [transactions.Operation(op)]
expiration = transactions.formatTimeFromNow(30)
ref_block_num, ref_block_prefix = transactions.getBlockParams(self.rpc)
tx = transactions.Signed_Transaction(
ref_block_num=ref_block_num,
ref_block_prefix=ref_block_prefix,
expiration=expiration,
operations=ops
)
tx = tx.sign([wif])
tx = transactions.JsonObj(tx)
if self.debug:
log.debug(str(tx))
if not self.nobroadcast:
try:
self.rpc.broadcast_transaction(tx, api="network_broadcast")
except:
raise BroadcastingError
else:
log.warning("Not broadcasting anything!")
return tx
def info(self):
""" Returns the global properties
"""
return self.rpc.get_dynamic_global_properties()
def reply(self, identifier, body, title="", author="", meta=None):
""" Reply to an existing post
:param str identifier: Identifier of the post to reply to. Takes the
form ``@author/permlink``
:param str body: Body of the reply
:param str title: Title of the reply post
:param str author: Author of reply (optional) if not provided
``default_user`` will be used, if present, else
a ``ValueError`` will be raised.
:param json meta: JSON meta object that can be attached to the
post. (optional)
"""
return self.post(title,
body,
meta=meta,
author=author,
reply_identifier=identifier)
def edit(self,
identifier,
body,
meta={},
replace=False):
""" Edit an existing post
:param str identifier: Identifier of the post to reply to. Takes the
form ``@author/permlink``
:param str body: Body of the reply
:param json meta: JSON meta object that can be attached to the
post. (optional)
:param bool replace: Instead of calculating a *diff*, replace
the post entirely (defaults to ``False``)
"""
post_author, post_permlink = resolveIdentifier(identifier)
original_post = self.rpc.get_content(post_author, post_permlink)
if replace:
newbody = body
else:
import diff_match_patch
dmp = diff_match_patch.diff_match_patch()
patch = dmp.patch_make(original_post["body"], body)
newbody = dmp.patch_toText(patch)
if not newbody:
log.info("No changes made! Skipping ...")
return
reply_identifier = constructIdentifier(
original_post["parent_author"],
original_post["parent_permlink"]
)
new_meta = {}
if meta:
if original_post["json_metadata"]:
import json
                new_meta = json.loads(original_post["json_metadata"])
                new_meta.update(meta)
else:
new_meta = meta
return self.post(
original_post["title"],
newbody,
reply_identifier=reply_identifier,
author=original_post["author"],
permlink=original_post["permlink"],
meta=new_meta,
)
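    # Note on edit(): unless replace=True, only a diff_match_patch patch of the body
    # is posted, e.g. (hypothetical values)
    #   steem.edit("@some-author/some-permlink", "new body text", replace=False)
    # stores a compact "@@ ..." patch instead of re-uploading the full post body.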
def post(self,
title,
body,
author=None,
permlink=None,
meta={},
reply_identifier=None,
category=""):
""" New post
:param str title: Title of the reply post
:param str body: Body of the reply
:param str author: Author of reply (optional) if not provided
``default_user`` will be used, if present, else
a ``ValueError`` will be raised.
:param json meta: JSON meta object that can be attached to the
post.
:param str reply_identifier: Identifier of the post to reply to. Takes the
form ``@author/permlink``
:param str category: Allows to define a category for new posts.
It is highly recommended to provide a
category as posts end up in ``spam``
otherwise.
"""
if not author and config["default_author"]:
author = config["default_author"]
if not author:
raise ValueError(
"Please define an author. (Try 'piston set default_author'"
)
if reply_identifier and not category:
parent_author, parent_permlink = resolveIdentifier(reply_identifier)
            if not permlink:
permlink = derivePermlink(title, parent_permlink)
elif category and not reply_identifier:
parent_permlink = derivePermlink(category)
parent_author = ""
            if not permlink:
permlink = derivePermlink(title)
elif not category and not reply_identifier:
parent_author = ""
parent_permlink = ""
            if not permlink:
permlink = derivePermlink(title)
else:
raise ValueError(
"You can't provide a category while replying to a post"
)
op = transactions.Comment(
**{"parent_author": parent_author,
"parent_permlink": parent_permlink,
"author": author,
"permlink": permlink,
"title": title,
"body": body,
"json_metadata": meta}
)
wif = self.wallet.getPostingKeyForAccount(author)
return self.executeOp(op, wif)
def vote(self,
identifier,
weight,
voter=None):
""" Vote for a post
:param str identifier: Identifier for the post to upvote Takes
the form ``@author/permlink``
:param float weight: Voting weight. Range: -100.0 - +100.0. May
not be 0.0
:param str voter: Voter to use for voting. (Optional)
If ``voter`` is not defines, the ``default_voter`` will be taken or
a ValueError will be raised
.. code-block:: python
piston set default_voter <account>
"""
if not voter:
if "default_voter" in config:
voter = config["default_voter"]
if not voter:
raise ValueError("You need to provide a voter account")
post_author, post_permlink = resolveIdentifier(identifier)
op = transactions.Vote(
**{"voter": voter,
"author": post_author,
"permlink": post_permlink,
"weight": int(weight * STEEMIT_1_PERCENT)}
)
wif = self.wallet.getPostingKeyForAccount(voter)
return self.executeOp(op, wif)
def create_account(self,
account_name,
json_meta={},
creator=None,
owner_key=None,
active_key=None,
posting_key=None,
memo_key=None,
password=None,
additional_owner_keys=[],
additional_active_keys=[],
additional_posting_keys=[],
additional_owner_accounts=[],
additional_active_accounts=[],
additional_posting_accounts=[],
storekeys=True,
):
""" Create new account in Steem
The brainkey/password can be used to recover all generated keys (see
            `steembase.account` for more details).
By default, this call will use ``default_author`` to
register a new name ``account_name`` with all keys being
derived from a new brain key that will be returned. The
corresponding keys will automatically be installed in the
wallet.
.. note:: Account creations cost a fee that is defined by
the network. If you create an account, you will
need to pay for that fee!
.. warning:: Don't call this method unless you know what
you are doing! Be sure to understand what this
method does and where to find the private keys
for your account.
.. note:: Please note that this imports private keys
(if password is present) into the wallet by
default. However, it **does not import the owner
key** for security reasons. Do NOT expect to be
able to recover it from piston if you lose your
password!
:param str account_name: (**required**) new account name
:param str json_meta: Optional meta data for the account
:param str creator: which account should pay the registration fee
(defaults to ``default_author``)
:param str owner_key: Main owner key
:param str active_key: Main active key
:param str posting_key: Main posting key
:param str memo_key: Main memo_key
:param str password: Alternatively to providing keys, one
can provide a password from which the
keys will be derived
:param array additional_owner_keys: Additional owner public keys
:param array additional_active_keys: Additional active public keys
:param array additional_posting_keys: Additional posting public keys
:param array additional_owner_accounts: Additional owner account names
            :param array additional_active_accounts: Additional active account names
:param array additional_posting_accounts: Additional posting account names
:param bool storekeys: Store new keys in the wallet (default: ``True``)
:raises AccountExistsException: if the account already exists on the blockchain
"""
if not creator and config["default_author"]:
creator = config["default_author"]
if not creator:
raise ValueError(
"Not creator account given. Define it with " +
"creator=x, or set the default_author in piston")
if password and (owner_key or posting_key or active_key or memo_key):
raise ValueError(
"You cannot use 'password' AND provide keys!"
)
account = None
try:
account = self.rpc.get_account(account_name)
except:
pass
if account:
raise AccountExistsException
" Generate new keys from password"
from steembase.account import PasswordKey, PublicKey
if password:
posting_key = PasswordKey(account_name, password, role="posting")
active_key = PasswordKey(account_name, password, role="active")
owner_key = PasswordKey(account_name, password, role="owner")
memo_key = PasswordKey(account_name, password, role="memo")
posting_pubkey = posting_key.get_public_key()
active_pubkey = active_key.get_public_key()
owner_pubkey = owner_key.get_public_key()
memo_pubkey = memo_key.get_public_key()
posting_privkey = posting_key.get_private_key()
active_privkey = active_key.get_private_key()
# owner_privkey = owner_key.get_private_key()
memo_privkey = memo_key.get_private_key()
# store private keys
if storekeys:
# self.wallet.addPrivateKey(owner_privkey)
self.wallet.addPrivateKey(active_privkey)
self.wallet.addPrivateKey(posting_privkey)
self.wallet.addPrivateKey(memo_privkey)
elif (owner_key and posting_key and active_key and memo_key):
posting_pubkey = PublicKey(posting_key, prefix=prefix)
active_pubkey = PublicKey(active_key, prefix=prefix)
owner_pubkey = PublicKey(owner_key, prefix=prefix)
memo_pubkey = PublicKey(memo_key, prefix=prefix)
else:
raise ValueError(
"Call incomplete! Provide either a password or public keys!"
)
        owner = format(owner_pubkey, prefix)
        active = format(active_pubkey, prefix)
        posting = format(posting_pubkey, prefix)
memo = format(memo_pubkey, prefix)
owner_key_authority = [[owner, 1]]
active_key_authority = [[active, 1]]
posting_key_authority = [[posting, 1]]
owner_accounts_authority = []
active_accounts_authority = []
posting_accounts_authority = []
# additional authorities
for k in additional_owner_keys:
owner_key_authority.append([k, 1])
for k in additional_active_keys:
active_key_authority.append([k, 1])
for k in additional_posting_keys:
posting_key_authority.append([k, 1])
for k in additional_owner_accounts:
owner_accounts_authority.append([k, 1])
for k in additional_active_accounts:
active_accounts_authority.append([k, 1])
for k in additional_posting_accounts:
posting_accounts_authority.append([k, 1])
props = self.rpc.get_chain_properties()
fee = props["account_creation_fee"]
s = {'creator': creator,
'fee': fee,
'json_metadata': json_meta,
'memo_key': memo,
'new_account_name': account_name,
'owner': {'account_auths': owner_accounts_authority,
'key_auths': owner_key_authority,
'weight_threshold': 1},
'active': {'account_auths': active_accounts_authority,
'key_auths': active_key_authority,
'weight_threshold': 1},
'posting': {'account_auths': posting_accounts_authority,
'key_auths': posting_key_authority,
'weight_threshold': 1}}
op = transactions.Account_create(**s)
wif = self.wallet.getActiveKeyForAccount(creator)
return self.executeOp(op, wif)
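    # Usage sketch (creating an account costs a fee on a real chain; values illustrative):
    #   steem.create_account("newaccountname",
    #                        creator="existing-account",
    #                        password="a long brain key phrase",
    #                        storekeys=True)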
def transfer(self, to, amount, asset, memo="", account=None):
""" Transfer SBD or STEEM to another account.
:param str to: Recipient
:param float amount: Amount to transfer
:param str asset: Asset to transfer (``SBD`` or ``STEEM``)
:param str memo: (optional) Memo, may begin with `#` for encrypted messaging
:param str account: (optional) the source account for the transfer if not ``default_account``
"""
if not account:
if "default_account" in config:
account = config["default_account"]
if not account:
raise ValueError("You need to provide an account")
assert asset == "SBD" or asset == "STEEM"
if memo and memo[0] == "#":
from steembase import memo as Memo
memo_wif = self.wallet.getMemoKeyForAccount(account)
if not memo_wif:
raise MissingKeyError("Memo key for %s missing!" % account)
to_account = self.rpc.get_account(to)
nonce = str(random.getrandbits(64))
memo = Memo.encode_memo(
PrivateKey(memo_wif),
PublicKey(to_account["memo_key"], prefix=prefix),
nonce,
memo
)
op = transactions.Transfer(
**{"from": account,
"to": to,
"amount": '{:.{prec}f} {asset}'.format(
float(amount),
prec=3,
asset=asset
),
"memo": memo
}
)
wif = self.wallet.getActiveKeyForAccount(account)
return self.executeOp(op, wif)
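    # Usage sketch (identifiers illustrative): memos starting with '#' are encrypted
    # against the recipient's memo key before broadcasting, e.g.
    #   steem.transfer("bob", 1.000, "SBD", memo="#only bob can read this", account="alice")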
def withdraw_vesting(self, amount, account=None):
""" Withdraw VESTS from the vesting account.
:param float amount: number of VESTS to withdraw over a period of 104 weeks
:param str account: (optional) the source account for the transfer if not ``default_account``
"""
if not account:
if "default_account" in config:
account = config["default_account"]
if not account:
raise ValueError("You need to provide an account")
op = transactions.Withdraw_vesting(
**{"account": account,
"vesting_shares": '{:.{prec}f} {asset}'.format(
float(amount),
prec=6,
asset="VESTS"
),
}
)
wif = self.wallet.getActiveKeyForAccount(account)
return self.executeOp(op, wif)
def transfer_to_vesting(self, amount, to=None, account=None):
""" Vest STEEM
:param float amount: number of STEEM to vest
:param str to: (optional) the source account for the transfer if not ``default_account``
:param str account: (optional) the source account for the transfer if not ``default_account``
"""
if not account:
if "default_account" in config:
account = config["default_account"]
if not account:
raise ValueError("You need to provide an account")
if not to:
if "default_account" in config:
to = config["default_account"]
if not to:
raise ValueError("You need to provide a 'to' account")
op = transactions.Transfer_to_vesting(
**{"from": account,
"to": to,
"amount": '{:.{prec}f} {asset}'.format(
float(amount),
prec=3,
asset="STEEM"
),
}
)
wif = self.wallet.getActiveKeyForAccount(account)
return self.executeOp(op, wif)
def get_content(self, identifier):
""" Get the full content of a post.
:param str identifier: Identifier for the post to upvote Takes
the form ``@author/permlink``
"""
post_author, post_permlink = resolveIdentifier(identifier)
return Post(self, self.rpc.get_content(post_author, post_permlink))
def get_recommended(self, user):
""" (obsolete) Get recommended posts for user
"""
log.critical("get_recommended has been removed from the backend.")
return []
def get_blog(self, user):
""" Get blog posts of a user
:param str user: Show recommendations for this author
"""
state = self.rpc.get_state("/@%s/blog" % user)
posts = state["accounts"][user].get("blog", [])
r = []
for p in posts:
post = state["content"]["%s/%s" % (
                user, p  # FIXME, this is an inconsistency in the steem backend
)]
r.append(Post(self, post))
return r
def get_replies(self, author, skipown=True):
""" Get replies for an author
:param str author: Show replies for this author
:param bool skipown: Do not show my own replies
"""
state = self.rpc.get_state("/@%s/recent-replies" % author)
replies = state["accounts"][author].get("recent_replies", [])
discussions = []
for reply in replies:
post = state["content"][reply]
if skipown and post["author"] == author:
continue
discussions.append(Post(self, post))
return discussions
def get_posts(self, limit=10,
sort="hot",
category=None,
start=None):
""" Get multiple posts in an array.
:param int limit: Limit the list of posts by ``limit``
            :param str sort: Sort the list by "trending", "created", "active", "cashout",
                "payout", "votes", "children" or "hot"
:param str category: Only show posts in this category
:param str start: Show posts after this post. Takes an
identifier of the form ``@author/permlink``
"""
discussion_query = {"tag": category,
"limit": limit,
}
if start:
author, permlink = resolveIdentifier(start)
discussion_query["start_author"] = author
discussion_query["start_permlink"] = permlink
if sort not in ["trending", "created", "active", "cashout",
"payout", "votes", "children", "hot"]:
raise Exception("Invalid choice of '--sort'!")
return
func = getattr(self.rpc, "get_discussions_by_%s" % sort)
r = []
for p in func(discussion_query):
r.append(Post(self, p))
return r
def get_comments(self, identifier):
""" Return **first-level** comments of a post.
:param str identifier: Identifier of a post. Takes an
identifier of the form ``@author/permlink``
"""
post_author, post_permlink = resolveIdentifier(identifier)
posts = self.rpc.get_content_replies(post_author, post_permlink)
r = []
for post in posts:
r.append(Post(self, post))
return(r)
def get_categories(self, sort="trending", begin=None, limit=10):
""" List categories
:param str sort: Sort categories by "trending", "best",
"active", or "recent"
:param str begin: Show categories after this
identifier of the form ``@author/permlink``
:param int limit: Limit categories by ``x``
"""
if sort == "trending":
func = self.rpc.get_trending_categories
elif sort == "best":
func = self.rpc.get_best_categories
elif sort == "active":
func = self.rpc.get_active_categories
elif sort == "recent":
func = self.rpc.get_recent_categories
else:
log.error("Invalid choice of '--sort' (%s)!" % sort)
return
return func(begin, limit)
def get_balances(self, account=None):
""" Get the balance of an account
:param str account: (optional) the source account for the transfer if not ``default_account``
"""
if not account:
if "default_account" in config:
account = config["default_account"]
if not account:
raise ValueError("You need to provide an account")
a = self.rpc.get_account(account)
info = self.rpc.get_dynamic_global_properties()
steem_per_mvest = (
float(info["total_vesting_fund_steem"].split(" ")[0]) /
(float(info["total_vesting_shares"].split(" ")[0]) / 1e6)
)
vesting_shares_steem = float(a["vesting_shares"].split(" ")[0]) / 1e6 * steem_per_mvest
return {
"balance": a["balance"],
"vesting_shares" : a["vesting_shares"],
"vesting_shares_steem" : vesting_shares_steem,
"sbd_balance": a["sbd_balance"]
}
def get_account_history(self, account, end=100, limit=100, only_ops=[]):
""" Returns the transaction history of an account
:param str account: account name to get history for
:param int end: sequence number of the last transaction to return
:param int limit: limit number of transactions to return
:param array only_ops: Limit generator by these operations
"""
r = []
for op in self.loop_account_history(account, end, limit, only_ops):
r.append(op)
return r
def loop_account_history(self, account, end=100, limit=100, only_ops=[]):
""" Returns a generator for individual account transactions
:param str account: account name to get history for
:param int end: sequence number of the last transaction to return
:param int limit: limit number of transactions to return
:param array only_ops: Limit generator by these operations
"""
cnt = 0
if end < limit:
limit = end
if limit > 100:
_limit = 100
else:
_limit = limit
while (cnt < limit) and end >= limit:
txs = self.rpc.get_account_history(account, end, _limit)
for i in txs:
if not only_ops or i[1]["op"][0] in only_ops:
cnt += 1
yield i
if cnt > limit:
break
end = txs[0][0] - 1 # new end
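    # Usage sketch (operation names illustrative):
    #   steem.get_account_history("alice", end=1000, limit=50, only_ops=["vote"])
    # yields at most 50 of alice's most recent transactions up to sequence 1000,
    # filtered to vote operations.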
def stream_comments(self, *args, **kwargs):
""" Generator that yields posts when they come in
To be used in a for loop that returns an instance of `Post()`.
"""
for c in self.rpc.stream("comment", *args, **kwargs):
yield Post(self, c)
def interest(self, account):
""" Caluclate interest for an account
:param str account: Account name to get interest for
"""
account = self.rpc.get_account(account)
last_payment = formatTimeString(account["sbd_last_interest_payment"])
next_payment = last_payment + timedelta(days=30)
interest_rate = self.info()["sbd_interest_rate"] / 100 # the result is in percent!
interest_amount = (interest_rate / 100) * int(
int(account["sbd_seconds"]) / (60 * 60 * 24 * 356)
) * 10 ** -3
return {
"interest": interest_amount,
"last_payment" : last_payment,
"next_payment" : next_payment,
"next_payment_duration" : next_payment - datetime.now(),
"interest_rate": interest_rate,
}
def set_withdraw_vesting_route(self, to, percentage=100,
account=None, auto_vest=False):
""" Set up a vesting withdraw route. When vesting shares are
withdrawn, they will be routed to these accounts based on the
specified weights.
:param str to: Recipient of the vesting withdrawal
            :param float percentage: The percent of the withdraw to go
to the 'to' account.
:param str account: (optional) the vesting account
:param bool auto_vest: Set to true if the from account
should receive the VESTS as VESTS, or false if it should
receive them as STEEM. (defaults to ``False``)
"""
if not account:
if "default_account" in config:
account = config["default_account"]
if not account:
raise ValueError("You need to provide an account")
op = transactions.Set_withdraw_vesting_route(
**{"from_account": account,
"to_account": to,
"percent": int(percentage * STEEMIT_1_PERCENT),
"auto_vest": auto_vest
}
)
wif = self.wallet.getActiveKeyForAccount(account)
return self.executeOp(op, wif)
|
the-stack_106_27023 | # This file is executed on every boot (including wake-boot from deepsleep)
#import esp
#esp.osdebug(None)
import gc
#import webrepl
#webrepl.start()
gc.collect()
from machine import Pin, PWM
from neopixel import NeoPixel
from time import sleep
flash = Pin(0, Pin.IN) # D3/FLASH
led = Pin(5, Pin.OUT) # D1
btn = Pin(14, Pin.IN) # D5
strip = NeoPixel(Pin(12, Pin.OUT), 8) # D6
servo = PWM(Pin(15, Pin.OUT), freq=50) # D8
Pin(2, Pin.OUT).value(1) # D4/TXD1
Pin(4, Pin.OUT).value(0) # D2
led.value(0)
BLACK = OFF = 0, 0, 0
WHITE = 10, 10, 10
RED = 10, 0, 0
ORANGE = 10, 5, 0
YELLOW = 10, 10, 0
GREEN = 0, 10, 0
CYAN = 0, 10, 10
BLUE = 0, 0, 10
VIOLET = PURPLE = PINK = 10, 0, 10
GRAY = 5, 5, 5
if btn.value() == 0:
# Self-test
colors = [RED, GREEN, BLUE, ORANGE, YELLOW, CYAN, PINK, GRAY]
led.value(1)
servo.duty(120)
for i, color in enumerate(colors):
strip[i] = color
strip.write()
sleep(0.5)
servo.duty(40)
for i in range(8):
led.value(0)
sleep(0.1)
led.value(1)
sleep(0.1)
colors = colors[1:] + [colors[0]]
for i, color in enumerate(colors):
strip[i] = color
strip.write()
led.value(0)
for i in range(8):
strip[i] = 0, 0, 0
strip.write()
|
the-stack_106_27024 | import os
import pathlib
import re
from collections.abc import Container, Iterable, Mapping, MutableMapping, Sized
from urllib.parse import unquote
import pytest
from yarl import URL
import aiohttp
from aiohttp import hdrs, web
from aiohttp.test_utils import make_mocked_request
from aiohttp.web import HTTPMethodNotAllowed, HTTPNotFound, Response
from aiohttp.web_urldispatcher import (PATH_SEP, AbstractResource,
ResourceRoute, SystemRoute, View,
_default_expect_handler)
def make_request(method, path):
return make_mocked_request(method, path)
def make_handler():
async def handler(request):
return Response(request) # pragma: no cover
return handler
@pytest.fixture
def app(loop):
app = web.Application()
app._set_loop(loop)
return app
@pytest.fixture
def router(app):
return app.router
@pytest.fixture
def fill_routes(router):
def go():
route1 = router.add_route('GET', '/plain', make_handler())
route2 = router.add_route('GET', '/variable/{name}',
make_handler())
resource = router.add_static('/static',
os.path.dirname(aiohttp.__file__))
return [route1, route2] + list(resource)
return go
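# Pattern exercised throughout these tests (sketch, identifiers from the helpers above):
#   router.add_route('GET', '/path/{name}', make_handler())
#   info = await router.resolve(make_request('GET', '/path/value'))
#   assert info['name'] == 'value'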
def test_register_uncommon_http_methods(router):
uncommon_http_methods = {
'PROPFIND',
'PROPPATCH',
'COPY',
'LOCK',
        'UNLOCK',
        'MOVE',
'SUBSCRIBE',
'UNSUBSCRIBE',
'NOTIFY'
}
for method in uncommon_http_methods:
router.add_route(method, '/handler/to/path', make_handler())
async def test_add_route_root(router):
handler = make_handler()
router.add_route('GET', '/', handler)
req = make_request('GET', '/')
info = await router.resolve(req)
assert info is not None
assert 0 == len(info)
assert handler is info.handler
assert info.route.name is None
async def test_add_route_simple(router):
handler = make_handler()
router.add_route('GET', '/handler/to/path', handler)
req = make_request('GET', '/handler/to/path')
info = await router.resolve(req)
assert info is not None
assert 0 == len(info)
assert handler is info.handler
assert info.route.name is None
async def test_add_with_matchdict(router):
handler = make_handler()
router.add_route('GET', '/handler/{to}', handler)
req = make_request('GET', '/handler/tail')
info = await router.resolve(req)
assert info is not None
assert {'to': 'tail'} == info
assert handler is info.handler
assert info.route.name is None
async def test_add_route_with_add_get_shortcut(router):
handler = make_handler()
router.add_get('/handler/to/path', handler)
req = make_request('GET', '/handler/to/path')
info = await router.resolve(req)
assert info is not None
assert 0 == len(info)
assert handler is info.handler
assert info.route.name is None
async def test_add_route_with_add_post_shortcut(router):
handler = make_handler()
router.add_post('/handler/to/path', handler)
req = make_request('POST', '/handler/to/path')
info = await router.resolve(req)
assert info is not None
assert 0 == len(info)
assert handler is info.handler
assert info.route.name is None
async def test_add_route_with_add_put_shortcut(router):
handler = make_handler()
router.add_put('/handler/to/path', handler)
req = make_request('PUT', '/handler/to/path')
info = await router.resolve(req)
assert info is not None
assert 0 == len(info)
assert handler is info.handler
assert info.route.name is None
async def test_add_route_with_add_patch_shortcut(router):
handler = make_handler()
router.add_patch('/handler/to/path', handler)
req = make_request('PATCH', '/handler/to/path')
info = await router.resolve(req)
assert info is not None
assert 0 == len(info)
assert handler is info.handler
assert info.route.name is None
async def test_add_route_with_add_delete_shortcut(router):
handler = make_handler()
router.add_delete('/handler/to/path', handler)
req = make_request('DELETE', '/handler/to/path')
info = await router.resolve(req)
assert info is not None
assert 0 == len(info)
assert handler is info.handler
assert info.route.name is None
async def test_add_route_with_add_head_shortcut(router):
handler = make_handler()
router.add_head('/handler/to/path', handler)
req = make_request('HEAD', '/handler/to/path')
info = await router.resolve(req)
assert info is not None
assert 0 == len(info)
assert handler is info.handler
assert info.route.name is None
async def test_add_with_name(router):
handler = make_handler()
router.add_route('GET', '/handler/to/path', handler,
name='name')
req = make_request('GET', '/handler/to/path')
info = await router.resolve(req)
assert info is not None
assert 'name' == info.route.name
async def test_add_with_tailing_slash(router):
handler = make_handler()
router.add_route('GET', '/handler/to/path/', handler)
req = make_request('GET', '/handler/to/path/')
info = await router.resolve(req)
assert info is not None
assert {} == info
assert handler is info.handler
def test_add_invalid_path(router):
handler = make_handler()
with pytest.raises(ValueError):
router.add_route('GET', '/{/', handler)
def test_add_url_invalid1(router):
handler = make_handler()
with pytest.raises(ValueError):
router.add_route('post', '/post/{id', handler)
def test_add_url_invalid2(router):
handler = make_handler()
with pytest.raises(ValueError):
router.add_route('post', '/post/{id{}}', handler)
def test_add_url_invalid3(router):
handler = make_handler()
with pytest.raises(ValueError):
router.add_route('post', '/post/{id{}', handler)
def test_add_url_invalid4(router):
handler = make_handler()
with pytest.raises(ValueError):
router.add_route('post', '/post/{id"}', handler)
async def test_add_url_escaping(router):
handler = make_handler()
router.add_route('GET', '/+$', handler)
req = make_request('GET', '/+$')
info = await router.resolve(req)
assert info is not None
assert handler is info.handler
async def test_any_method(router):
handler = make_handler()
route = router.add_route(hdrs.METH_ANY, '/', handler)
req = make_request('GET', '/')
info1 = await router.resolve(req)
assert info1 is not None
assert route is info1.route
req = make_request('POST', '/')
info2 = await router.resolve(req)
assert info2 is not None
assert info1.route is info2.route
async def test_match_second_result_in_table(router):
handler1 = make_handler()
handler2 = make_handler()
router.add_route('GET', '/h1', handler1)
router.add_route('POST', '/h2', handler2)
req = make_request('POST', '/h2')
info = await router.resolve(req)
assert info is not None
assert {} == info
assert handler2 is info.handler
async def test_raise_method_not_allowed(router):
handler1 = make_handler()
handler2 = make_handler()
router.add_route('GET', '/', handler1)
router.add_route('POST', '/', handler2)
req = make_request('PUT', '/')
match_info = await router.resolve(req)
assert isinstance(match_info.route, SystemRoute)
assert {} == match_info
with pytest.raises(HTTPMethodNotAllowed) as ctx:
await match_info.handler(req)
exc = ctx.value
assert 'PUT' == exc.method
assert 405 == exc.status
assert {'POST', 'GET'} == exc.allowed_methods
async def test_raise_method_not_found(router):
handler = make_handler()
router.add_route('GET', '/a', handler)
req = make_request('GET', '/b')
match_info = await router.resolve(req)
assert isinstance(match_info.route, SystemRoute)
assert {} == match_info
with pytest.raises(HTTPNotFound) as ctx:
await match_info.handler(req)
exc = ctx.value
assert 404 == exc.status
def test_double_add_url_with_the_same_name(router):
handler1 = make_handler()
handler2 = make_handler()
router.add_route('GET', '/get', handler1, name='name')
regexp = ("Duplicate 'name', already handled by")
with pytest.raises(ValueError) as ctx:
router.add_route('GET', '/get_other', handler2, name='name')
assert re.match(regexp, str(ctx.value))
def test_route_plain(router):
handler = make_handler()
route = router.add_route('GET', '/get', handler, name='name')
route2 = next(iter(router['name']))
url = route2.url_for()
assert '/get' == str(url)
assert route is route2
def test_route_unknown_route_name(router):
with pytest.raises(KeyError):
router['unknown']
def test_route_dynamic(router):
handler = make_handler()
route = router.add_route('GET', '/get/{name}', handler,
name='name')
route2 = next(iter(router['name']))
url = route2.url_for(name='John')
assert '/get/John' == str(url)
assert route is route2
def test_add_static(router):
resource = router.add_static('/st',
os.path.dirname(aiohttp.__file__),
name='static')
assert router['static'] is resource
url = resource.url_for(filename='/dir/a.txt')
assert '/st/dir/a.txt' == str(url)
assert len(resource) == 2
def test_add_static_append_version(router):
resource = router.add_static('/st',
os.path.dirname(__file__),
name='static')
url = resource.url_for(filename='/data.unknown_mime_type',
append_version=True)
expect_url = '/st/data.unknown_mime_type?' \
'v=aUsn8CHEhhszc81d28QmlcBW0KQpfS2F4trgQKhOYd8%3D'
assert expect_url == str(url)
def test_add_static_append_version_set_from_constructor(router):
resource = router.add_static('/st',
os.path.dirname(__file__),
append_version=True,
name='static')
url = resource.url_for(filename='/data.unknown_mime_type')
expect_url = '/st/data.unknown_mime_type?' \
'v=aUsn8CHEhhszc81d28QmlcBW0KQpfS2F4trgQKhOYd8%3D'
assert expect_url == str(url)
def test_add_static_append_version_override_constructor(router):
resource = router.add_static('/st',
os.path.dirname(__file__),
append_version=True,
name='static')
url = resource.url_for(filename='/data.unknown_mime_type',
append_version=False)
expect_url = '/st/data.unknown_mime_type'
assert expect_url == str(url)
def test_add_static_append_version_filename_without_slash(router):
resource = router.add_static('/st',
os.path.dirname(__file__),
name='static')
url = resource.url_for(filename='data.unknown_mime_type',
append_version=True)
expect_url = '/st/data.unknown_mime_type?' \
'v=aUsn8CHEhhszc81d28QmlcBW0KQpfS2F4trgQKhOYd8%3D'
assert expect_url == str(url)
def test_add_static_append_version_non_exists_file(router):
resource = router.add_static('/st',
os.path.dirname(__file__),
name='static')
url = resource.url_for(filename='/non_exists_file', append_version=True)
assert '/st/non_exists_file' == str(url)
def test_add_static_append_version_non_exists_file_without_slash(router):
resource = router.add_static('/st',
os.path.dirname(__file__),
name='static')
url = resource.url_for(filename='non_exists_file', append_version=True)
assert '/st/non_exists_file' == str(url)
def test_add_static_append_version_follow_symlink(router, tmpdir):
"""
    Tests access to a symlink in a static folder with append_version
"""
tmp_dir_path = str(tmpdir)
symlink_path = os.path.join(tmp_dir_path, 'append_version_symlink')
symlink_target_path = os.path.dirname(__file__)
os.symlink(symlink_target_path, symlink_path, True)
# Register global static route:
resource = router.add_static('/st', tmp_dir_path, follow_symlinks=True,
append_version=True)
url = resource.url_for(
filename='/append_version_symlink/data.unknown_mime_type')
expect_url = '/st/append_version_symlink/data.unknown_mime_type?' \
'v=aUsn8CHEhhszc81d28QmlcBW0KQpfS2F4trgQKhOYd8%3D'
assert expect_url == str(url)
def test_add_static_append_version_not_follow_symlink(router, tmpdir):
"""
    Tests access to a symlink in a static folder with append_version
"""
tmp_dir_path = str(tmpdir)
symlink_path = os.path.join(tmp_dir_path, 'append_version_symlink')
symlink_target_path = os.path.dirname(__file__)
os.symlink(symlink_target_path, symlink_path, True)
# Register global static route:
resource = router.add_static('/st', tmp_dir_path, follow_symlinks=False,
append_version=True)
filename = '/append_version_symlink/data.unknown_mime_type'
url = resource.url_for(filename=filename)
assert '/st/append_version_symlink/data.unknown_mime_type' == str(url)
def test_plain_not_match(router):
handler = make_handler()
router.add_route('GET', '/get/path', handler, name='name')
route = router['name']
assert route._match('/another/path') is None
def test_dynamic_not_match(router):
handler = make_handler()
router.add_route('GET', '/get/{name}', handler, name='name')
route = router['name']
assert route._match('/another/path') is None
async def test_static_not_match(router):
router.add_static('/pre', os.path.dirname(aiohttp.__file__),
name='name')
resource = router['name']
ret = await resource.resolve(
make_mocked_request('GET', '/another/path'))
assert (None, set()) == ret
def test_dynamic_with_trailing_slash(router):
handler = make_handler()
router.add_route('GET', '/get/{name}/', handler, name='name')
route = router['name']
assert {'name': 'John'} == route._match('/get/John/')
def test_len(router):
handler = make_handler()
router.add_route('GET', '/get1', handler, name='name1')
router.add_route('GET', '/get2', handler, name='name2')
assert 2 == len(router)
def test_iter(router):
handler = make_handler()
router.add_route('GET', '/get1', handler, name='name1')
router.add_route('GET', '/get2', handler, name='name2')
assert {'name1', 'name2'} == set(iter(router))
def test_contains(router):
handler = make_handler()
router.add_route('GET', '/get1', handler, name='name1')
router.add_route('GET', '/get2', handler, name='name2')
assert 'name1' in router
assert 'name3' not in router
def test_static_repr(router):
router.add_static('/get', os.path.dirname(aiohttp.__file__),
name='name')
assert re.match(r"<StaticResource 'name' /get", repr(router['name']))
def test_static_adds_slash(router):
route = router.add_static('/prefix',
os.path.dirname(aiohttp.__file__))
assert '/prefix' == route._prefix
def test_static_remove_trailing_slash(router):
route = router.add_static('/prefix/',
os.path.dirname(aiohttp.__file__))
assert '/prefix' == route._prefix
async def test_add_route_with_re(router):
handler = make_handler()
router.add_route('GET', r'/handler/{to:\d+}', handler)
req = make_request('GET', '/handler/1234')
info = await router.resolve(req)
assert info is not None
assert {'to': '1234'} == info
router.add_route('GET', r'/handler/{name}.html', handler)
req = make_request('GET', '/handler/test.html')
info = await router.resolve(req)
assert {'name': 'test'} == info
async def test_add_route_with_re_and_slashes(router):
handler = make_handler()
router.add_route('GET', r'/handler/{to:[^/]+/?}', handler)
req = make_request('GET', '/handler/1234/')
info = await router.resolve(req)
assert info is not None
assert {'to': '1234/'} == info
router.add_route('GET', r'/handler/{to:.+}', handler)
req = make_request('GET', '/handler/1234/5/6/7')
info = await router.resolve(req)
assert info is not None
assert {'to': '1234/5/6/7'} == info
async def test_add_route_with_re_not_match(router):
handler = make_handler()
router.add_route('GET', r'/handler/{to:\d+}', handler)
req = make_request('GET', '/handler/tail')
match_info = await router.resolve(req)
assert isinstance(match_info.route, SystemRoute)
assert {} == match_info
with pytest.raises(HTTPNotFound):
await match_info.handler(req)
async def test_add_route_with_re_including_slashes(router):
handler = make_handler()
router.add_route('GET', r'/handler/{to:.+}/tail', handler)
req = make_request('GET', '/handler/re/with/slashes/tail')
info = await router.resolve(req)
assert info is not None
assert {'to': 're/with/slashes'} == info
def test_add_route_with_invalid_re(router):
handler = make_handler()
with pytest.raises(ValueError) as ctx:
router.add_route('GET', r'/handler/{to:+++}', handler)
s = str(ctx.value)
assert s.startswith("Bad pattern '" +
PATH_SEP +
"handler" +
PATH_SEP +
"(?P<to>+++)': nothing to repeat")
assert ctx.value.__cause__ is None
def test_route_dynamic_with_regex_spec(router):
handler = make_handler()
    route = router.add_route('GET', r'/get/{num:^\d+}', handler,
name='name')
url = route.url_for(num='123')
assert '/get/123' == str(url)
def test_route_dynamic_with_regex_spec_and_trailing_slash(router):
handler = make_handler()
    route = router.add_route('GET', r'/get/{num:^\d+}/', handler,
name='name')
url = route.url_for(num='123')
assert '/get/123/' == str(url)
def test_route_dynamic_with_regex(router):
handler = make_handler()
route = router.add_route('GET', r'/{one}/{two:.+}', handler)
url = route.url_for(one=1, two=2)
assert '/1/2' == str(url)
async def test_regular_match_info(router):
handler = make_handler()
router.add_route('GET', '/get/{name}', handler)
req = make_request('GET', '/get/john')
match_info = await router.resolve(req)
assert {'name': 'john'} == match_info
assert re.match("<MatchInfo {'name': 'john'}: .+<Dynamic.+>>",
repr(match_info))
async def test_match_info_with_plus(router):
handler = make_handler()
router.add_route('GET', '/get/{version}', handler)
req = make_request('GET', '/get/1.0+test')
match_info = await router.resolve(req)
assert {'version': '1.0+test'} == match_info
async def test_not_found_repr(router):
req = make_request('POST', '/path/to')
match_info = await router.resolve(req)
assert "<MatchInfoError 404: Not Found>" == repr(match_info)
async def test_not_allowed_repr(router):
handler = make_handler()
router.add_route('GET', '/path/to', handler)
handler2 = make_handler()
router.add_route('POST', '/path/to', handler2)
req = make_request('PUT', '/path/to')
match_info = await router.resolve(req)
assert "<MatchInfoError 405: Method Not Allowed>" == repr(match_info)
def test_default_expect_handler(router):
route = router.add_route('GET', '/', make_handler())
assert route._expect_handler is _default_expect_handler
def test_custom_expect_handler_plain(router):
async def handler(request):
pass
route = router.add_route(
'GET', '/', make_handler(), expect_handler=handler)
assert route._expect_handler is handler
assert isinstance(route, ResourceRoute)
def test_custom_expect_handler_dynamic(router):
async def handler(request):
pass
route = router.add_route(
'GET', '/get/{name}', make_handler(), expect_handler=handler)
assert route._expect_handler is handler
assert isinstance(route, ResourceRoute)
def test_expect_handler_non_coroutine(router):
def handler(request):
pass
with pytest.raises(AssertionError):
router.add_route('GET', '/', make_handler(),
expect_handler=handler)
async def test_dynamic_match_non_ascii(router):
handler = make_handler()
router.add_route('GET', '/{var}', handler)
req = make_request(
'GET',
'/%D1%80%D1%83%D1%81%20%D1%82%D0%B5%D0%BA%D1%81%D1%82')
match_info = await router.resolve(req)
assert {'var': 'рус текст'} == match_info
async def test_dynamic_match_with_static_part(router):
handler = make_handler()
router.add_route('GET', '/{name}.html', handler)
req = make_request('GET', '/file.html')
match_info = await router.resolve(req)
assert {'name': 'file'} == match_info
async def test_dynamic_match_two_part2(router):
handler = make_handler()
router.add_route('GET', '/{name}.{ext}', handler)
req = make_request('GET', '/file.html')
match_info = await router.resolve(req)
assert {'name': 'file', 'ext': 'html'} == match_info
async def test_dynamic_match_unquoted_path(router):
handler = make_handler()
router.add_route('GET', '/{path}/{subpath}', handler)
resource_id = 'my%2Fpath%7Cwith%21some%25strange%24characters'
req = make_request('GET', '/path/{0}'.format(resource_id))
match_info = await router.resolve(req)
assert match_info == {
'path': 'path',
'subpath': unquote(resource_id)
}
def test_add_route_not_started_with_slash(router):
with pytest.raises(ValueError):
handler = make_handler()
router.add_route('GET', 'invalid_path', handler)
def test_add_route_invalid_method(router):
sample_bad_methods = {
'BAD METHOD',
'B@D_METHOD',
'[BAD_METHOD]',
'{BAD_METHOD}',
'(BAD_METHOD)',
'B?D_METHOD',
}
for bad_method in sample_bad_methods:
with pytest.raises(ValueError):
handler = make_handler()
router.add_route(bad_method, '/path', handler)
def test_routes_view_len(router, fill_routes):
fill_routes()
assert 4 == len(router.routes())
def test_routes_view_iter(router, fill_routes):
routes = fill_routes()
assert list(routes) == list(router.routes())
def test_routes_view_contains(router, fill_routes):
routes = fill_routes()
for route in routes:
assert route in router.routes()
def test_routes_abc(router):
assert isinstance(router.routes(), Sized)
assert isinstance(router.routes(), Iterable)
assert isinstance(router.routes(), Container)
def test_named_resources_abc(router):
assert isinstance(router.named_resources(), Mapping)
assert not isinstance(router.named_resources(), MutableMapping)
def test_named_resources(router):
route1 = router.add_route('GET', '/plain', make_handler(),
name='route1')
route2 = router.add_route('GET', '/variable/{name}',
make_handler(), name='route2')
route3 = router.add_static('/static',
os.path.dirname(aiohttp.__file__),
name='route3')
names = {route1.name, route2.name, route3.name}
assert 3 == len(router.named_resources())
for name in names:
assert name in router.named_resources()
assert isinstance(router.named_resources()[name],
AbstractResource)
def test_resource_iter(router):
async def handler(request):
pass
resource = router.add_resource('/path')
r1 = resource.add_route('GET', handler)
r2 = resource.add_route('POST', handler)
assert 2 == len(resource)
assert [r1, r2] == list(resource)
def test_deprecate_bare_generators(router):
resource = router.add_resource('/path')
def gen(request):
yield
with pytest.warns(DeprecationWarning):
resource.add_route('GET', gen)
def test_view_route(router):
resource = router.add_resource('/path')
route = resource.add_route('GET', View)
assert View is route.handler
def test_resource_route_match(router):
async def handler(request):
pass
resource = router.add_resource('/path')
route = resource.add_route('GET', handler)
assert {} == route.resource._match('/path')
def test_error_on_double_route_adding(router):
async def handler(request):
pass
resource = router.add_resource('/path')
resource.add_route('GET', handler)
with pytest.raises(RuntimeError):
resource.add_route('GET', handler)
def test_error_on_adding_route_after_wildcard(router):
async def handler(request):
pass
resource = router.add_resource('/path')
resource.add_route('*', handler)
with pytest.raises(RuntimeError):
resource.add_route('GET', handler)
async def test_http_exception_is_none_when_resolved(router):
handler = make_handler()
router.add_route('GET', '/', handler)
req = make_request('GET', '/')
info = await router.resolve(req)
assert info.http_exception is None
async def test_http_exception_is_not_none_when_not_resolved(router):
handler = make_handler()
router.add_route('GET', '/', handler)
req = make_request('GET', '/abc')
info = await router.resolve(req)
assert info.http_exception.status == 404
async def test_match_info_get_info_plain(router):
handler = make_handler()
router.add_route('GET', '/', handler)
req = make_request('GET', '/')
info = await router.resolve(req)
assert info.get_info() == {'path': '/'}
async def test_match_info_get_info_dynamic(router):
handler = make_handler()
router.add_route('GET', '/{a}', handler)
req = make_request('GET', '/value')
info = await router.resolve(req)
assert info.get_info() == {
'pattern': re.compile(PATH_SEP+'(?P<a>[^{}/]+)'),
'formatter': '/{a}'}
async def test_match_info_get_info_dynamic2(router):
handler = make_handler()
router.add_route('GET', '/{a}/{b}', handler)
req = make_request('GET', '/path/to')
info = await router.resolve(req)
assert info.get_info() == {
'pattern': re.compile(PATH_SEP +
'(?P<a>[^{}/]+)' +
PATH_SEP +
'(?P<b>[^{}/]+)'),
'formatter': '/{a}/{b}'}
def test_static_resource_get_info(router):
directory = pathlib.Path(aiohttp.__file__).parent
resource = router.add_static('/st', directory)
assert resource.get_info() == {'directory': directory,
'prefix': '/st'}
async def test_system_route_get_info(router):
handler = make_handler()
router.add_route('GET', '/', handler)
req = make_request('GET', '/abc')
info = await router.resolve(req)
assert info.get_info()['http_exception'].status == 404
def test_resources_view_len(router):
router.add_resource('/plain')
router.add_resource('/variable/{name}')
assert 2 == len(router.resources())
def test_resources_view_iter(router):
resource1 = router.add_resource('/plain')
resource2 = router.add_resource('/variable/{name}')
resources = [resource1, resource2]
assert list(resources) == list(router.resources())
def test_resources_view_contains(router):
resource1 = router.add_resource('/plain')
resource2 = router.add_resource('/variable/{name}')
resources = [resource1, resource2]
for resource in resources:
assert resource in router.resources()
def test_resources_abc(router):
assert isinstance(router.resources(), Sized)
assert isinstance(router.resources(), Iterable)
assert isinstance(router.resources(), Container)
def test_static_route_user_home(router):
here = pathlib.Path(aiohttp.__file__).parent
home = pathlib.Path(os.path.expanduser('~'))
if not str(here).startswith(str(home)): # pragma: no cover
pytest.skip("aiohttp folder is not placed in user's HOME")
static_dir = '~/' + str(here.relative_to(home))
route = router.add_static('/st', static_dir)
assert here == route.get_info()['directory']
def test_static_route_points_to_file(router):
here = pathlib.Path(aiohttp.__file__).parent / '__init__.py'
with pytest.raises(ValueError):
router.add_static('/st', here)
async def test_404_for_static_resource(router):
resource = router.add_static('/st',
os.path.dirname(aiohttp.__file__))
ret = await resource.resolve(
make_mocked_request('GET', '/unknown/path'))
assert (None, set()) == ret
async def test_405_for_resource_adapter(router):
resource = router.add_static('/st',
os.path.dirname(aiohttp.__file__))
ret = await resource.resolve(
make_mocked_request('POST', '/st/abc.py'))
assert (None, {'HEAD', 'GET'}) == ret
async def test_check_allowed_method_for_found_resource(router):
handler = make_handler()
resource = router.add_resource('/')
resource.add_route('GET', handler)
ret = await resource.resolve(make_mocked_request('GET', '/'))
assert ret[0] is not None
assert {'GET'} == ret[1]
def test_url_for_in_static_resource(router):
resource = router.add_static('/static',
os.path.dirname(aiohttp.__file__))
assert URL('/static/file.txt') == resource.url_for(filename='file.txt')
def test_url_for_in_static_resource_pathlib(router):
resource = router.add_static('/static',
os.path.dirname(aiohttp.__file__))
assert URL('/static/file.txt') == resource.url_for(
filename=pathlib.Path('file.txt'))
def test_url_for_in_resource_route(router):
route = router.add_route('GET', '/get/{name}', make_handler(),
name='name')
assert URL('/get/John') == route.url_for(name='John')
def test_subapp_get_info(app, loop):
subapp = web.Application()
resource = subapp.add_subapp('/pre', subapp)
assert resource.get_info() == {'prefix': '/pre', 'app': subapp}
def test_subapp_url_for(app, loop):
subapp = web.Application()
resource = app.add_subapp('/pre', subapp)
with pytest.raises(RuntimeError):
resource.url_for()
def test_subapp_repr(app, loop):
subapp = web.Application()
resource = app.add_subapp('/pre', subapp)
assert repr(resource).startswith(
'<PrefixedSubAppResource /pre -> <Application')
def test_subapp_len(app, loop):
subapp = web.Application()
subapp.router.add_get('/', make_handler(), allow_head=False)
subapp.router.add_post('/', make_handler())
resource = app.add_subapp('/pre', subapp)
assert len(resource) == 2
def test_subapp_iter(app, loop):
subapp = web.Application()
r1 = subapp.router.add_get('/', make_handler(), allow_head=False)
r2 = subapp.router.add_post('/', make_handler())
resource = app.add_subapp('/pre', subapp)
assert list(resource) == [r1, r2]
def test_invalid_route_name(router):
with pytest.raises(ValueError):
router.add_get('/', make_handler(), name='invalid name')
def test_frozen_router(router):
router.freeze()
with pytest.raises(RuntimeError):
router.add_get('/', make_handler())
def test_frozen_router_subapp(app, loop):
subapp = web.Application()
subapp.freeze()
with pytest.raises(RuntimeError):
app.add_subapp('/', subapp)
def test_frozen_app_on_subapp(app, loop):
app.freeze()
subapp = web.Application()
with pytest.raises(RuntimeError):
app.add_subapp('/', subapp)
def test_set_options_route(router):
resource = router.add_static('/static',
os.path.dirname(aiohttp.__file__))
options = None
for route in resource:
if route.method == 'OPTIONS':
options = route
assert options is None
resource.set_options_route(make_handler())
for route in resource:
if route.method == 'OPTIONS':
options = route
assert options is not None
with pytest.raises(RuntimeError):
resource.set_options_route(make_handler())
def test_dynamic_url_with_name_started_from_undescore(router):
route = router.add_route('GET', '/get/{_name}', make_handler())
assert URL('/get/John') == route.url_for(_name='John')
def test_cannot_add_subapp_with_empty_prefix(app, loop):
subapp = web.Application()
with pytest.raises(ValueError):
app.add_subapp('', subapp)
def test_cannot_add_subapp_with_slash_prefix(app, loop):
subapp = web.Application()
with pytest.raises(ValueError):
app.add_subapp('/', subapp)
async def test_convert_empty_path_to_slash_on_freezing(router):
handler = make_handler()
route = router.add_get('', handler)
resource = route.resource
assert resource.get_info() == {'path': ''}
router.freeze()
assert resource.get_info() == {'path': '/'}
def test_deprecate_non_coroutine(router):
def handler(request):
pass
with pytest.warns(DeprecationWarning):
router.add_route('GET', '/handler', handler)
|
the-stack_106_27025 | import os, warnings, time, tempfile, datetime, pathlib, shutil, subprocess, logging
from tqdm import tqdm
from urllib.request import urlopen
from urllib.parse import urlparse
import cv2
from scipy.ndimage import find_objects, gaussian_filter, generate_binary_structure, label, maximum_filter1d, binary_fill_holes
from scipy.spatial import ConvexHull
from scipy.stats import gmean
import numpy as np
import colorsys
import io
from skimage.morphology import remove_small_holes
from . import metrics
import omnipose
from omnipose.utils import format_labels
class TqdmToLogger(io.StringIO):
"""
Output stream for TQDM which will output to logger module instead of
the StdOut.
"""
logger = None
level = None
buf = ''
def __init__(self,logger,level=None):
super(TqdmToLogger, self).__init__()
self.logger = logger
self.level = level or logging.INFO
def write(self,buf):
self.buf = buf.strip('\r\n\t ')
def flush(self):
self.logger.log(self.level, self.buf)
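# Minimal usage sketch for TqdmToLogger: redirect a tqdm progress bar into a
# logger instead of stderr. The logger name below is just a placeholder.
def _tqdm_to_logger_example():
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger('cellpose.example')
    stream = TqdmToLogger(logger, level=logging.INFO)
    for _ in tqdm(range(5), file=stream, mininterval=0):
        time.sleep(0.01)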
def rgb_to_hsv(arr):
rgb_to_hsv_channels = np.vectorize(colorsys.rgb_to_hsv)
r, g, b = np.rollaxis(arr, axis=-1)
h, s, v = rgb_to_hsv_channels(r, g, b)
hsv = np.stack((h,s,v), axis=-1)
return hsv
def hsv_to_rgb(arr):
hsv_to_rgb_channels = np.vectorize(colorsys.hsv_to_rgb)
h, s, v = np.rollaxis(arr, axis=-1)
r, g, b = hsv_to_rgb_channels(h, s, v)
rgb = np.stack((r,g,b), axis=-1)
return rgb
def download_url_to_file(url, dst, progress=True):
r"""Download object at the given URL to a local path.
Thanks to torch, slightly modified
Args:
url (string): URL of the object to download
dst (string): Full path where object will be saved, e.g. `/tmp/temporary_file`
progress (bool, optional): whether or not to display a progress bar to stderr
Default: True
"""
file_size = None
u = urlopen(url)
meta = u.info()
if hasattr(meta, 'getheaders'):
content_length = meta.getheaders("Content-Length")
else:
content_length = meta.get_all("Content-Length")
if content_length is not None and len(content_length) > 0:
file_size = int(content_length[0])
# We deliberately save it in a temp file and move it after
dst = os.path.expanduser(dst)
dst_dir = os.path.dirname(dst)
f = tempfile.NamedTemporaryFile(delete=False, dir=dst_dir)
try:
with tqdm(total=file_size, disable=not progress,
unit='B', unit_scale=True, unit_divisor=1024) as pbar:
while True:
buffer = u.read(8192)
if len(buffer) == 0:
break
f.write(buffer)
pbar.update(len(buffer))
f.close()
shutil.move(f.name, dst)
finally:
f.close()
if os.path.exists(f.name):
os.remove(f.name)
def distance_to_boundary(masks):
""" get distance to boundary of mask pixels
Parameters
----------------
masks: int, 2D or 3D array
size [Ly x Lx] or [Lz x Ly x Lx], 0=NO masks; 1,2,...=mask labels
Returns
----------------
dist_to_bound: 2D or 3D array
size [Ly x Lx] or [Lz x Ly x Lx]
"""
if masks.ndim > 3 or masks.ndim < 2:
raise ValueError('distance_to_boundary takes 2D or 3D array, not %dD array'%masks.ndim)
dist_to_bound = np.zeros(masks.shape, np.float64)
if masks.ndim==3:
for i in range(masks.shape[0]):
dist_to_bound[i] = distance_to_boundary(masks[i])
return dist_to_bound
else:
slices = find_objects(masks)
for i,si in enumerate(slices):
if si is not None:
sr,sc = si
mask = (masks[sr, sc] == (i+1)).astype(np.uint8)
contours = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
pvc, pvr = np.concatenate(contours[-2], axis=0).squeeze().T
ypix, xpix = np.nonzero(mask)
min_dist = ((ypix[:,np.newaxis] - pvr)**2 +
(xpix[:,np.newaxis] - pvc)**2).min(axis=1)
dist_to_bound[ypix + sr.start, xpix + sc.start] = min_dist
return dist_to_bound
def masks_to_edges(masks, threshold=1.0):
""" get edges of masks as a 0-1 array
Parameters
----------------
masks: int, 2D or 3D array
size [Ly x Lx] or [Lz x Ly x Lx], 0=NO masks; 1,2,...=mask labels
Returns
----------------
edges: 2D or 3D array
size [Ly x Lx] or [Lz x Ly x Lx], True pixels are edge pixels
"""
dist_to_bound = distance_to_boundary(masks)
edges = (dist_to_bound < threshold) * (masks > 0)
return edges
def remove_edge_masks(masks, change_index=True):
""" remove masks with pixels on edge of image
Parameters
----------------
masks: int, 2D or 3D array
size [Ly x Lx] or [Lz x Ly x Lx], 0=NO masks; 1,2,...=mask labels
change_index: bool (optional, default True)
if True, after removing masks change indexing so no missing label numbers
Returns
----------------
outlines: 2D or 3D array
size [Ly x Lx] or [Lz x Ly x Lx], 0=NO masks; 1,2,...=mask labels
"""
slices = find_objects(masks.astype(int))
for i,si in enumerate(slices):
remove = False
if si is not None:
for d,sid in enumerate(si):
if sid.start==0 or sid.stop==masks.shape[d]:
remove=True
break
if remove:
masks[si][masks[si]==i+1] = 0
shape = masks.shape
if change_index:
_,masks = np.unique(masks, return_inverse=True)
masks = np.reshape(masks, shape).astype(np.int32)
return masks
def masks_to_outlines(masks):
""" get outlines of masks as a 0-1 array
Parameters
----------------
masks: int, 2D or 3D array
size [Ly x Lx] or [Lz x Ly x Lx], 0=NO masks; 1,2,...=mask labels
Returns
----------------
outlines: 2D or 3D array
size [Ly x Lx] or [Lz x Ly x Lx], True pixels are outlines
"""
if masks.ndim > 3 or masks.ndim < 2:
raise ValueError('masks_to_outlines takes 2D or 3D array, not %dD array'%masks.ndim)
outlines = np.zeros(masks.shape, bool)
if masks.ndim==3:
for i in range(masks.shape[0]):
outlines[i] = masks_to_outlines(masks[i])
return outlines
else:
slices = find_objects(masks.astype(int))
for i,si in enumerate(slices):
if si is not None:
sr,sc = si
mask = (masks[sr, sc] == (i+1)).astype(np.uint8)
contours = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
pvc, pvr = np.concatenate(contours[-2], axis=0).squeeze().T
vr, vc = pvr + sr.start, pvc + sc.start
outlines[vr, vc] = 1
return outlines
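# Minimal sketch of the mask -> edge/outline helpers above on a tiny synthetic
# label image (toy values, purely illustrative).
def _outline_example():
    toy = np.zeros((8, 8), np.int32)
    toy[2:6, 2:6] = 1                      # one square mask labelled 1
    edges = masks_to_edges(toy)            # boundary pixels inside the mask
    outlines = masks_to_outlines(toy)      # contour pixels from cv2.findContours
    return edges, outlines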
def outlines_list(masks):
""" get outlines of masks as a list to loop over for plotting """
outpix=[]
for n in np.unique(masks)[1:]:
mn = masks==n
if mn.sum() > 0:
contours = cv2.findContours(mn.astype(np.uint8), mode=cv2.RETR_EXTERNAL, method=cv2.CHAIN_APPROX_NONE)
contours = contours[-2]
cmax = np.argmax([c.shape[0] for c in contours])
pix = contours[cmax].astype(int).squeeze()
if len(pix)>4:
outpix.append(pix)
else:
outpix.append(np.zeros((0,2)))
return outpix
def get_perimeter(points):
""" perimeter of points - npoints x ndim """
if points.shape[0]>4:
points = np.append(points, points[:1], axis=0)
return ((np.diff(points, axis=0)**2).sum(axis=1)**0.5).sum()
else:
return 0
def get_mask_compactness(masks):
perimeters = get_mask_perimeters(masks)
#outlines = masks_to_outlines(masks)
#perimeters = np.unique(outlines*masks, return_counts=True)[1][1:]
npoints = np.unique(masks, return_counts=True)[1][1:]
areas = npoints
compactness = 4 * np.pi * areas / perimeters**2
compactness[perimeters==0] = 0
compactness[compactness>1.0] = 1.0
return compactness
def get_mask_perimeters(masks):
""" get perimeters of masks """
perimeters = np.zeros(masks.max())
for n in range(masks.max()):
mn = masks==(n+1)
if mn.sum() > 0:
contours = cv2.findContours(mn.astype(np.uint8), mode=cv2.RETR_EXTERNAL,
method=cv2.CHAIN_APPROX_NONE)[-2]
#cmax = np.argmax([c.shape[0] for c in contours])
#perimeters[n] = get_perimeter(contours[cmax].astype(int).squeeze())
perimeters[n] = np.array([get_perimeter(c.astype(int).squeeze()) for c in contours]).sum()
return perimeters
def circleMask(d0):
""" creates array with indices which are the radius of that x,y point
inputs:
            d0: patch of (-d0, d0+1) over which the radius is computed
outputs:
rs: array (2*d0+1,2*d0+1) of radii
dx,dy: indices of patch
"""
dx = np.tile(np.arange(-d0[1],d0[1]+1), (2*d0[0]+1,1))
dy = np.tile(np.arange(-d0[0],d0[0]+1), (2*d0[1]+1,1))
dy = dy.transpose()
rs = (dy**2 + dx**2) ** 0.5
return rs, dx, dy
def get_mask_stats(masks_true):
mask_perimeters = get_mask_perimeters(masks_true)
# disk for compactness
rs,dy,dx = circleMask(np.array([100, 100]))
rsort = np.sort(rs.flatten())
# area for solidity
npoints = np.unique(masks_true, return_counts=True)[1][1:]
areas = npoints - mask_perimeters / 2 - 1
compactness = np.zeros(masks_true.max())
convexity = np.zeros(masks_true.max())
solidity = np.zeros(masks_true.max())
convex_perimeters = np.zeros(masks_true.max())
convex_areas = np.zeros(masks_true.max())
for ic in range(masks_true.max()):
points = np.array(np.nonzero(masks_true==(ic+1))).T
if len(points)>15 and mask_perimeters[ic] > 0:
med = np.median(points, axis=0)
# compute compactness of ROI
r2 = ((points - med)**2).sum(axis=1)**0.5
compactness[ic] = (rsort[:r2.size].mean() + 1e-10) / r2.mean()
try:
hull = ConvexHull(points)
convex_perimeters[ic] = hull.area
convex_areas[ic] = hull.volume
except:
convex_perimeters[ic] = 0
convexity[mask_perimeters>0.0] = (convex_perimeters[mask_perimeters>0.0] /
mask_perimeters[mask_perimeters>0.0])
solidity[convex_areas>0.0] = (areas[convex_areas>0.0] /
convex_areas[convex_areas>0.0])
convexity = np.clip(convexity, 0.0, 1.0)
solidity = np.clip(solidity, 0.0, 1.0)
compactness = np.clip(compactness, 0.0, 1.0)
return convexity, solidity, compactness
def get_masks_unet(output, cell_threshold=0, boundary_threshold=0):
""" create masks using cell probability and cell boundary """
cells = (output[...,1] - output[...,0])>cell_threshold
selem = generate_binary_structure(cells.ndim, connectivity=1)
labels, nlabels = label(cells, selem)
if output.shape[-1]>2:
slices = find_objects(labels)
dists = 10000*np.ones(labels.shape, np.float32)
mins = np.zeros(labels.shape, np.int32)
borders = np.logical_and(~(labels>0), output[...,2]>boundary_threshold)
pad = 10
for i,slc in enumerate(slices):
if slc is not None:
slc_pad = tuple([slice(max(0,sli.start-pad), min(labels.shape[j], sli.stop+pad))
for j,sli in enumerate(slc)])
msk = (labels[slc_pad] == (i+1)).astype(np.float32)
msk = 1 - gaussian_filter(msk, 5)
dists[slc_pad] = np.minimum(dists[slc_pad], msk)
mins[slc_pad][dists[slc_pad]==msk] = (i+1)
labels[labels==0] = borders[labels==0] * mins[labels==0]
masks = labels
shape0 = masks.shape
_,masks = np.unique(masks, return_inverse=True)
masks = np.reshape(masks, shape0)
return masks
def stitch3D(masks, stitch_threshold=0.25):
""" stitch 2D masks into 3D volume with stitch_threshold on IOU """
mmax = masks[0].max()
for i in range(len(masks)-1):
iou = metrics._intersection_over_union(masks[i+1], masks[i])[1:,1:]
if iou.size > 0:
iou[iou < stitch_threshold] = 0.0
iou[iou < iou.max(axis=0)] = 0.0
istitch = iou.argmax(axis=1) + 1
ino = np.nonzero(iou.max(axis=1)==0.0)[0]
istitch[ino] = np.arange(mmax+1, mmax+len(ino)+1, 1, int)
mmax += len(ino)
istitch = np.append(np.array(0), istitch)
masks[i+1] = istitch[masks[i+1]]
return masks
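# Illustrative sketch of stitch3D on two synthetic planes: overlapping labels in
# consecutive planes end up sharing one ID across the stack (toy data only).
def _stitch_example():
    plane = np.zeros((10, 10), np.int32)
    plane[3:7, 3:7] = 1
    stack = [plane.copy(), plane.copy()]
    return stitch3D(stack, stitch_threshold=0.25)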
# merged diameter functions
def diameters(masks, omni=False, dist_threshold=1):
if not omni: #original 'equivalent area circle' diameter
_, counts = np.unique(np.int32(masks), return_counts=True)
counts = counts[1:]
md = np.median(counts**0.5)
if np.isnan(md):
md = 0
md /= (np.pi**0.5)/2
return md, counts**0.5
    else: # new distance-field-derived diameter (agrees with circle but more general)
return omnipose.diameters(masks), None
def radius_distribution(masks, bins):
unique, counts = np.unique(masks, return_counts=True)
counts = counts[unique!=0]
nb, _ = np.histogram((counts**0.5)*0.5, bins)
nb = nb.astype(np.float32)
if nb.sum() > 0:
nb = nb / nb.sum()
md = np.median(counts**0.5)*0.5
if np.isnan(md):
md = 0
md /= (np.pi**0.5)/2
return nb, md, (counts**0.5)/2
def size_distribution(masks):
counts = np.unique(masks, return_counts=True)[1][1:]
return np.percentile(counts, 25) / np.percentile(counts, 75)
def process_cells(M0, npix=20):
unq, ic = np.unique(M0, return_counts=True)
for j in range(len(unq)):
if ic[j]<npix:
M0[M0==unq[j]] = 0
return M0
# Edited slightly to only remove small holes (under min_size) to avoid filling in voids formed by cells touching themselves
# (Masks show this, outlines somehow do not. Also need to find a way to split self-contact points).
def fill_holes_and_remove_small_masks(masks, min_size=15, hole_size=3, scale_factor=1):
""" fill holes in masks (2D/3D) and discard masks smaller than min_size (2D)
fill holes in each mask using scipy.ndimage.morphology.binary_fill_holes
Parameters
----------------
masks: int, 2D or 3D array
labelled masks, 0=NO masks; 1,2,...=mask labels,
size [Ly x Lx] or [Lz x Ly x Lx]
min_size: int (optional, default 15)
minimum number of pixels per mask, can turn off with -1
Returns
---------------
masks: int, 2D or 3D array
masks with holes filled and masks smaller than min_size removed,
0=NO masks; 1,2,...=mask labels,
size [Ly x Lx] or [Lz x Ly x Lx]
"""
masks = format_labels(masks) # not sure how this works with 3D... tests pass though
# my slightly altered version below does not work well with 3D (vs test GT) so I need to test
# to see if mine is actually better in general or needs to be toggled; for now, commenting out
# # min_size *= scale_factor
# hole_size *= scale_factor
# if masks.ndim > 3 or masks.ndim < 2:
# raise ValueError('masks_to_outlines takes 2D or 3D array, not %dD array'%masks.ndim)
# slices = find_objects(masks)
# j = 0
# for i,slc in enumerate(slices):
# if slc is not None:
# msk = masks[slc] == (i+1)
# npix = msk.sum()
# if min_size > 0 and npix < min_size:
# masks[slc][msk] = 0
# else:
# hsz = np.count_nonzero(msk)*hole_size/100 #turn hole size into percentage
# #eventually the boundary output should be used to properly exclude real holes vs label gaps
# if msk.ndim==3:
# for k in range(msk.shape[0]):
# padmsk = remove_small_holes(np.pad(msk[k],1,mode='constant'),hsz)
# msk[k] = padmsk[1:-1,1:-1]
# else:
# padmsk = remove_small_holes(np.pad(msk,1,mode='constant'),hsz)
# msk = padmsk[1:-1,1:-1]
# masks[slc][msk] = (j+1)
# j+=1
# return masks
if masks.ndim > 3 or masks.ndim < 2:
raise ValueError('fill_holes_and_remove_small_masks takes 2D or 3D array, not %dD array'%masks.ndim)
slices = find_objects(masks)
j = 0
for i,slc in enumerate(slices):
if slc is not None:
msk = masks[slc] == (i+1)
npix = msk.sum()
if min_size > 0 and npix < min_size:
masks[slc][msk] = 0
else:
if msk.ndim==3:
for k in range(msk.shape[0]):
msk[k] = binary_fill_holes(msk[k])
else:
msk = binary_fill_holes(msk)
masks[slc][msk] = (j+1)
j+=1
return masks
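# Illustrative sketch on synthetic data: fill a one-pixel hole inside a labelled
# mask and drop a mask smaller than min_size.
def _fill_holes_example():
    toy = np.zeros((12, 12), np.int32)
    toy[2:10, 2:10] = 1
    toy[5, 5] = 0          # a hole that should be filled
    toy[0, 0] = 2          # a tiny mask that should be removed
    return fill_holes_and_remove_small_masks(toy, min_size=5)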
|
the-stack_106_27026 | from database.databasehandler import MariaDB_handler
import database.pricesModel as pricemodel
from prices.prices_api import PricesAPI
from datetime import datetime, timedelta
class PriceDB():
def __init__(self, priceDBConfig):
self.priceAPI = PricesAPI()
self.priceDB = MariaDB_handler(**priceDBConfig)
self.priceDB.createTables(pricemodel.Base)
def writePriceToDB(self, time = None):
currentTime = datetime.now()
if not time:
time = currentTime
self.priceAPI.setTime(time)
self.priceAPI.forgePayload()
try:
response = self.priceAPI.getPriceForecast()
except Exception as e:
print(e)
return
print(response)
day = response[0].replace(hour=0)
prices = response[1]
try:
session = self.priceDB.create_session()
for price in prices:
timestamp = day + timedelta(hours=int(price[0])-1)
forecastPrice = session.query(pricemodel.PriceForecast).filter_by(timestamp=timestamp).first()
if forecastPrice:
forecastPrice.price = price[1]
forecastPrice.retrivalTime = currentTime
self.priceDB.commitOrRollback(session)
else:
forecastPrice = pricemodel.PriceForecast(timestamp, currentTime, price[1])
self.priceDB.addElementToDatabase(session, forecastPrice)
finally:
self.priceDB.close_session(session)
if __name__ == "__main__":
configDB = {'user':'myusr', 'password':'myusrpass', 'host':'localhost', 'port':'3306', 'database':'mydb'}
priceDB = PriceDB(configDB)
priceDB.writePriceToDB()
    priceDB.writePriceToDB(datetime.now() + timedelta(days=1)) |
the-stack_106_27034 | class Beam(object):
def __init__(self, opt, tokens, log_probs, state, prev_attn, p_gens, coverage=None, three_grams=[], bi_grams=[]):
""" Args:
tokens: List of integers. The ids of the tokens that form the summary so far.
log_probs: List, same length as tokens, of floats, giving the log probabilities of the tokens so far.
state: Current state of the decoder, a LSTMStateTuple.
attn_dists: List, same length as tokens, of numpy arrays with shape (attn_length). These are the attention distributions so far.
p_gens: List, same length as tokens, of floats, or None if not using pointer-generator model. The values of the generation probability so far.
coverage: Numpy array of shape (attn_length), or None if not using coverage. The current coverage vector.
"""
self.opt = opt
self.tokens = tokens
self.coverage = coverage
self.log_probs = log_probs
self.state = state
self.prev_attn = prev_attn
self.p_gens = p_gens
self.three_grams = three_grams
self.bi_grams = bi_grams
def extend(self, opt, token, log_prob, state, prev_attn, coverage=None, bi_gram=None, three_gram=None, p_gen=None):
if three_gram is None:
return Beam(opt=opt, tokens=self.tokens + [token],
log_probs=self.log_probs + [log_prob],
state=state, prev_attn=self.prev_attn + [prev_attn], p_gens=self.p_gens + [p_gen],
coverage=coverage,
three_grams=self.three_grams, bi_grams=self.bi_grams)
else:
if opt.avoid:
if self.avid_repeatition(3, three_gram):
log_prob -= 10
if self.avid_repeatition(2, bi_gram):
log_prob -= 3
self.three_grams.append(three_gram)
self.bi_grams.append(bi_gram)
new_three_gram = self.three_grams
new_bi_gram = self.bi_grams
return Beam(opt=opt, tokens=self.tokens + [token],
log_probs=self.log_probs + [log_prob],
state=state, prev_attn=self.prev_attn + [prev_attn], p_gens=self.p_gens + [p_gen],
coverage=coverage, three_grams=new_three_gram, bi_grams=new_bi_gram)
@property
def latest_token(self):
return self.tokens[-1]
@property
def latest_attn(self):
return self.prev_attn[-1]
def log_prob(self):
# the log probability of the hypothesis so far is the sum of the log probabilities of the tokens so far
return sum(self.log_probs)
def avg_log_prob(self):
# normalize log probability by number of tokens (otherwise longer sequences always have lower probability)
return self.log_prob() / (len(self.tokens) - 1)
def avid_repeatition(self, ngram, candidate_gram):
        # Return True if the candidate n-gram (bi- or tri-gram) already appeared in this hypothesis
if len(self.tokens) > ngram:
# latest_3 = "%d_%d_%d" % (self.tokens[-3], self.tokens[-2], self.tokens[-1])
if ngram == 3:
if candidate_gram in self.three_grams:
return True
elif ngram == 2:
if candidate_gram in self.bi_grams:
return True
return False
@staticmethod
def sort_hyps(hyps):
"""Return a list of Hypothesis objects, sorted by descending average log probability"""
return sorted(hyps, key=lambda h: h.avg_log_prob(), reverse=True)
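# Illustrative sketch (all values are made up): extend one starting hypothesis
# with two candidate tokens and rank the resulting beams; `opt` only needs an
# `avoid` flag here.
def _beam_example():
    from argparse import Namespace
    opt = Namespace(avoid=False)
    start = Beam(opt, tokens=[0], log_probs=[0.0], state=None,
                 prev_attn=[None], p_gens=[None])
    hyp_a = start.extend(opt, token=7, log_prob=-0.5, state=None,
                         prev_attn=None, p_gen=0.9)
    hyp_b = start.extend(opt, token=9, log_prob=-1.5, state=None,
                         prev_attn=None, p_gen=0.8)
    return Beam.sort_hyps([hyp_a, hyp_b])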
|
the-stack_106_27035 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''zgls.py - Waqas Bhatti ([email protected]) - Jan 2017
Contains the Zechmeister & Kurster (2002) Generalized Lomb-Scargle period-search
algorithm implementation for periodbase.
'''
#############
## LOGGING ##
#############
import logging
from datetime import datetime
from traceback import format_exc
# setup a logger
LOGGER = None
LOGMOD = __name__
DEBUG = False
def set_logger_parent(parent_name):
globals()['LOGGER'] = logging.getLogger('%s.%s' % (parent_name, LOGMOD))
def LOGDEBUG(message):
if LOGGER:
LOGGER.debug(message)
elif DEBUG:
print('[%s - DBUG] %s' % (
datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
message)
)
def LOGINFO(message):
if LOGGER:
LOGGER.info(message)
else:
print('[%s - INFO] %s' % (
datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
message)
)
def LOGERROR(message):
if LOGGER:
LOGGER.error(message)
else:
print('[%s - ERR!] %s' % (
datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
message)
)
def LOGWARNING(message):
if LOGGER:
LOGGER.warning(message)
else:
print('[%s - WRN!] %s' % (
datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
message)
)
def LOGEXCEPTION(message):
if LOGGER:
LOGGER.exception(message)
else:
print(
'[%s - EXC!] %s\nexception was: %s' % (
datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
message, format_exc()
)
)
#############
## IMPORTS ##
#############
from multiprocessing import Pool, cpu_count
import numpy as np
# import these to avoid lookup overhead
from numpy import nan as npnan, sum as npsum, abs as npabs, \
roll as nproll, isfinite as npisfinite, std as npstd, \
sign as npsign, sqrt as npsqrt, median as npmedian, \
array as nparray, percentile as nppercentile, \
polyfit as nppolyfit, var as npvar, max as npmax, min as npmin, \
log10 as nplog10, arange as nparange, pi as MPI, floor as npfloor, \
argsort as npargsort, cos as npcos, sin as npsin, tan as nptan, \
where as npwhere, linspace as nplinspace, \
zeros_like as npzeros_like, full_like as npfull_like, \
arctan as nparctan, nanargmax as npnanargmax, nanargmin as npnanargmin, \
empty as npempty, ceil as npceil, mean as npmean, \
digitize as npdigitize, unique as npunique, \
argmax as npargmax, argmin as npargmin, zeros as npzeros, nanmax as npnanmax
###################
## LOCAL IMPORTS ##
###################
from ..lcmath import phase_magseries, sigclip_magseries, time_bin_magseries, \
phase_bin_magseries
from . import get_frequency_grid
############
## CONFIG ##
############
NCPUS = cpu_count()
######################################################
## PERIODOGRAM VALUE EXPRESSIONS FOR A SINGLE OMEGA ##
######################################################
def generalized_lsp_value(times, mags, errs, omega):
'''Generalized LSP value for a single omega.
P(w) = (1/YY) * (YC*YC/CC + YS*YS/SS)
where: YC, YS, CC, and SS are all calculated at T
and where: tan 2omegaT = 2*CS/(CC - SS)
and where:
Y = sum( w_i*y_i )
C = sum( w_i*cos(wT_i) )
S = sum( w_i*sin(wT_i) )
YY = sum( w_i*y_i*y_i ) - Y*Y
YC = sum( w_i*y_i*cos(wT_i) ) - Y*C
YS = sum( w_i*y_i*sin(wT_i) ) - Y*S
CpC = sum( w_i*cos(w_T_i)*cos(w_T_i) )
CC = CpC - C*C
SS = (1 - CpC) - S*S
CS = sum( w_i*cos(w_T_i)*sin(w_T_i) ) - C*S
'''
one_over_errs2 = 1.0/(errs*errs)
W = npsum(one_over_errs2)
wi = one_over_errs2/W
sin_omegat = npsin(omega*times)
cos_omegat = npcos(omega*times)
sin2_omegat = sin_omegat*sin_omegat
cos2_omegat = cos_omegat*cos_omegat
sincos_omegat = sin_omegat*cos_omegat
# calculate some more sums and terms
Y = npsum( wi*mags )
C = npsum( wi*cos_omegat )
S = npsum( wi*sin_omegat )
YpY = npsum( wi*mags*mags)
YpC = npsum( wi*mags*cos_omegat )
YpS = npsum( wi*mags*sin_omegat )
CpC = npsum( wi*cos2_omegat )
# SpS = npsum( wi*sin2_omegat )
CpS = npsum( wi*sincos_omegat )
# the final terms
YY = YpY - Y*Y
YC = YpC - Y*C
YS = YpS - Y*S
CC = CpC - C*C
SS = 1 - CpC - S*S # use SpS = 1 - CpC
CS = CpS - C*S
# calculate tau
tan_omega_tau_top = 2.0*CS
tan_omega_tau_bottom = CC - SS
tan_omega_tau = tan_omega_tau_top/tan_omega_tau_bottom
tau = nparctan(tan_omega_tau/(2.0*omega))
periodogramvalue = (YC*YC/CC + YS*YS/SS)/YY
return periodogramvalue
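# Illustrative check on a synthetic sinusoid (made-up numbers): evaluated at the
# injected angular frequency, the single-frequency GLS value above should sit
# near its peak.
def _gls_single_omega_example():
    t = nplinspace(0.0, 10.0, 200)
    y = npsin(2.0*MPI*t/2.5)
    e = npfull_like(t, 0.1)
    return generalized_lsp_value(t, y, e, 2.0*MPI/2.5)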
def generalized_lsp_value_notau(times, mags, errs, omega):
'''
This is the simplified version not using tau.
W = sum (1.0/(errs*errs) )
w_i = (1/W)*(1/(errs*errs))
Y = sum( w_i*y_i )
C = sum( w_i*cos(wt_i) )
S = sum( w_i*sin(wt_i) )
YY = sum( w_i*y_i*y_i ) - Y*Y
YC = sum( w_i*y_i*cos(wt_i) ) - Y*C
YS = sum( w_i*y_i*sin(wt_i) ) - Y*S
CpC = sum( w_i*cos(w_t_i)*cos(w_t_i) )
CC = CpC - C*C
SS = (1 - CpC) - S*S
CS = sum( w_i*cos(w_t_i)*sin(w_t_i) ) - C*S
D(omega) = CC*SS - CS*CS
P(omega) = (SS*YC*YC + CC*YS*YS - 2.0*CS*YC*YS)/(YY*D)
'''
one_over_errs2 = 1.0/(errs*errs)
W = npsum(one_over_errs2)
wi = one_over_errs2/W
sin_omegat = npsin(omega*times)
cos_omegat = npcos(omega*times)
sin2_omegat = sin_omegat*sin_omegat
cos2_omegat = cos_omegat*cos_omegat
sincos_omegat = sin_omegat*cos_omegat
# calculate some more sums and terms
Y = npsum( wi*mags )
C = npsum( wi*cos_omegat )
S = npsum( wi*sin_omegat )
YpY = npsum( wi*mags*mags)
YpC = npsum( wi*mags*cos_omegat )
YpS = npsum( wi*mags*sin_omegat )
CpC = npsum( wi*cos2_omegat )
# SpS = npsum( wi*sin2_omegat )
CpS = npsum( wi*sincos_omegat )
# the final terms
YY = YpY - Y*Y
YC = YpC - Y*C
YS = YpS - Y*S
CC = CpC - C*C
SS = 1 - CpC - S*S # use SpS = 1 - CpC
CS = CpS - C*S
# P(omega) = (SS*YC*YC + CC*YS*YS - 2.0*CS*YC*YS)/(YY*D)
# D(omega) = CC*SS - CS*CS
Domega = CC*SS - CS*CS
lspval = (SS*YC*YC + CC*YS*YS - 2.0*CS*YC*YS)/(YY*Domega)
return lspval
def specwindow_lsp_value(times, mags, errs, omega):
'''
This calculates the peak associated with the spectral window function
for times and at the specified omega.
'''
norm_times = times - times.min()
tau = (
(1.0/(2.0*omega)) *
nparctan( npsum(npsin(2.0*omega*norm_times)) /
npsum(npcos(2.0*omega*norm_times)) )
)
lspval_top_cos = (npsum(1.0 * npcos(omega*(norm_times-tau))) *
npsum(1.0 * npcos(omega*(norm_times-tau))))
lspval_bot_cos = npsum( (npcos(omega*(norm_times-tau))) *
(npcos(omega*(norm_times-tau))) )
lspval_top_sin = (npsum(1.0 * npsin(omega*(norm_times-tau))) *
npsum(1.0 * npsin(omega*(norm_times-tau))))
lspval_bot_sin = npsum( (npsin(omega*(norm_times-tau))) *
(npsin(omega*(norm_times-tau))) )
lspval = 0.5 * ( (lspval_top_cos/lspval_bot_cos) +
(lspval_top_sin/lspval_bot_sin) )
return lspval
##############################
## GENERALIZED LOMB-SCARGLE ##
##############################
def glsp_worker(task):
'''This is a worker to wrap the generalized Lomb-Scargle single-frequency
function.
'''
try:
return generalized_lsp_value(*task)
except Exception as e:
return npnan
def glsp_worker_specwindow(task):
'''This is a worker to wrap the generalized Lomb-Scargle single-frequency
function.
'''
try:
return specwindow_lsp_value(*task)
except Exception as e:
return npnan
def glsp_worker_notau(task):
'''This is a worker to wrap the generalized Lomb-Scargle single-freq func.
This version doesn't use tau.
'''
try:
return generalized_lsp_value_notau(*task)
except Exception as e:
return npnan
def pgen_lsp(
times,
mags,
errs,
magsarefluxes=False,
startp=None,
endp=None,
autofreq=True,
nbestpeaks=5,
periodepsilon=0.1, # 0.1
stepsize=1.0e-4,
nworkers=None,
workchunksize=None,
sigclip=10.0,
glspfunc=glsp_worker,
verbose=True
):
'''This calculates the generalized LSP given times, mags, errors.
Uses the algorithm from Zechmeister and Kurster (2009). By default, this
calculates a frequency grid to use automatically, based on the autofrequency
function from astropy.stats.lombscargle. If startp and endp are provided,
will generate a frequency grid based on these instead.
'''
# get rid of nans first and sigclip
stimes, smags, serrs = sigclip_magseries(times,
mags,
errs,
magsarefluxes=magsarefluxes,
sigclip=sigclip)
# get rid of zero errs
nzind = np.nonzero(serrs)
stimes, smags, serrs = stimes[nzind], smags[nzind], serrs[nzind]
# make sure there are enough points to calculate a spectrum
if len(stimes) > 9 and len(smags) > 9 and len(serrs) > 9:
# get the frequencies to use
if startp:
endf = 1.0/startp
else:
# default start period is 0.1 day
endf = 1.0/0.1
if endp:
startf = 1.0/endp
else:
# default end period is length of time series
startf = 1.0/(stimes.max() - stimes.min())
# if we're not using autofreq, then use the provided frequencies
if not autofreq:
omegas = 2*np.pi*np.arange(startf, endf, stepsize)
if verbose:
LOGINFO(
'using %s frequency points, start P = %.3f, end P = %.3f' %
(omegas.size, 1.0/endf, 1.0/startf)
)
else:
# this gets an automatic grid of frequencies to use
freqs = get_frequency_grid(stimes,
minfreq=startf,
maxfreq=endf)
omegas = 2*np.pi*freqs
if verbose:
LOGINFO(
'using autofreq with %s frequency points, '
'start P = %.3f, end P = %.3f' %
(omegas.size, 1.0/freqs.max(), 1.0/freqs.min())
)
# map to parallel workers
if (not nworkers) or (nworkers > NCPUS):
nworkers = NCPUS
if verbose:
LOGINFO('using %s workers...' % nworkers)
pool = Pool(nworkers)
tasks = [(stimes, smags, serrs, x) for x in omegas]
if workchunksize:
lsp = pool.map(glspfunc, tasks, chunksize=workchunksize)
else:
lsp = pool.map(glspfunc, tasks)
pool.close()
pool.join()
del pool
lsp = np.array(lsp)
periods = 2.0*np.pi/omegas
# find the nbestpeaks for the periodogram: 1. sort the lsp array by
# highest value first 2. go down the values until we find five
# values that are separated by at least periodepsilon in period
# make sure to filter out non-finite values of lsp
finitepeakind = npisfinite(lsp)
finlsp = lsp[finitepeakind]
finperiods = periods[finitepeakind]
# make sure that finlsp has finite values before we work on it
try:
bestperiodind = npargmax(finlsp)
except ValueError:
LOGERROR('no finite periodogram values '
'for this mag series, skipping...')
return {'bestperiod':npnan,
'bestlspval':npnan,
'nbestpeaks':nbestpeaks,
'nbestlspvals':None,
'nbestperiods':None,
'lspvals':None,
'omegas':omegas,
'periods':None,
'method':'gls',
'kwargs':{'startp':startp,
'endp':endp,
'stepsize':stepsize,
'autofreq':autofreq,
'periodepsilon':periodepsilon,
'nbestpeaks':nbestpeaks,
'sigclip':sigclip}}
sortedlspind = np.argsort(finlsp)[::-1]
sortedlspperiods = finperiods[sortedlspind]
sortedlspvals = finlsp[sortedlspind]
prevbestlspval = sortedlspvals[0]
# now get the nbestpeaks
nbestperiods, nbestlspvals, peakcount = (
[finperiods[bestperiodind]],
[finlsp[bestperiodind]],
1
)
prevperiod = sortedlspperiods[0]
# find the best nbestpeaks in the lsp and their periods
for period, lspval in zip(sortedlspperiods, sortedlspvals):
if peakcount == nbestpeaks:
break
perioddiff = abs(period - prevperiod)
bestperiodsdiff = [abs(period - x) for x in nbestperiods]
# print('prevperiod = %s, thisperiod = %s, '
# 'perioddiff = %s, peakcount = %s' %
# (prevperiod, period, perioddiff, peakcount))
# this ensures that this period is different from the last
# period and from all the other existing best periods by
# periodepsilon to make sure we jump to an entire different peak
# in the periodogram
if (perioddiff > (periodepsilon*prevperiod) and
all(x > (periodepsilon*prevperiod) for x in bestperiodsdiff)):
nbestperiods.append(period)
nbestlspvals.append(lspval)
peakcount = peakcount + 1
prevperiod = period
return {'bestperiod':finperiods[bestperiodind],
'bestlspval':finlsp[bestperiodind],
'nbestpeaks':nbestpeaks,
'nbestlspvals':nbestlspvals,
'nbestperiods':nbestperiods,
'lspvals':lsp,
'omegas':omegas,
'periods':periods,
'method':'gls',
'kwargs':{'startp':startp,
'endp':endp,
'stepsize':stepsize,
'autofreq':autofreq,
'periodepsilon':periodepsilon,
'nbestpeaks':nbestpeaks,
'sigclip':sigclip}}
else:
LOGERROR('no good detections for these times and mags, skipping...')
return {'bestperiod':npnan,
'bestlspval':npnan,
'nbestpeaks':nbestpeaks,
'nbestlspvals':None,
'nbestperiods':None,
'lspvals':None,
'omegas':None,
'periods':None,
'method':'gls',
'kwargs':{'startp':startp,
'endp':endp,
'stepsize':stepsize,
'autofreq':autofreq,
'periodepsilon':periodepsilon,
'nbestpeaks':nbestpeaks,
'sigclip':sigclip}}
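# Illustrative end-to-end sketch on a synthetic light curve (numbers are made
# up): run the full GLS search over a fixed period range and read the best peak.
def _pgen_lsp_example():
    t = nplinspace(0.0, 30.0, 500)
    y = 10.0 + 0.05*npsin(2.0*MPI*t/3.7)
    e = npfull_like(t, 0.01)
    res = pgen_lsp(t, y, e, magsarefluxes=False, startp=1.0, endp=10.0,
                   nworkers=1, verbose=False)
    return res['bestperiod']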
def specwindow_lsp(
times,
mags,
errs,
magsarefluxes=False,
startp=None,
endp=None,
autofreq=True,
nbestpeaks=5,
periodepsilon=0.1, # 0.1
stepsize=1.0e-4,
nworkers=None,
sigclip=10.0,
glspfunc=glsp_worker_specwindow,
verbose=True
):
'''
This calculates the spectral window function.
'''
# run the LSP using glsp_worker_specwindow as the worker
lspres = pgen_lsp(
times,
mags,
errs,
magsarefluxes=magsarefluxes,
startp=startp,
endp=endp,
autofreq=autofreq,
nbestpeaks=nbestpeaks,
periodepsilon=periodepsilon,
stepsize=stepsize,
nworkers=nworkers,
sigclip=sigclip,
glspfunc=glsp_worker_specwindow,
verbose=verbose
)
# update the resultdict to indicate we're a spectral window function
lspres['method'] = 'win'
if lspres['lspvals'] is not None:
# renormalize the periodogram to between 0 and 1 like the usual GLS.
lspmax = npnanmax(lspres['lspvals'])
if np.isfinite(lspmax):
lspres['lspvals'] = lspres['lspvals']/lspmax
lspres['nbestlspvals'] = [
x/lspmax for x in lspres['nbestlspvals']
]
lspres['bestlspval'] = lspres['bestlspval']/lspmax
return lspres
|
the-stack_106_27037 | # -*- coding: utf-8 -*-
"""
examples.py: copy example files to a specified directory
Source repository: http://github.com/tdda/tdda
License: MIT
Copyright (c) Stochastic Solutions Limited 2016-2017
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import os
import shutil
import sys
def copy_examples(name, destination='.', verbose=True):
"""
Copy example files to a specified directory
"""
path = os.path.join(os.path.dirname(__file__), name)
srcdir = os.path.join(path, 'examples')
if not os.path.isdir(destination):
print('copyexamples: output directory %s does not exist' % destination,
file=sys.stderr)
sys.exit(1)
outdir = os.path.join(destination, '%s_examples' % name)
shutil.rmtree(outdir, ignore_errors=True)
os.mkdir(outdir)
copy(srcdir, outdir)
if verbose:
print('Copied example files for tdda.%s to %s' % (name, outdir))
def copy(srcdir, destination):
"""
Recursive copy
"""
for name in os.listdir(srcdir):
fullname = os.path.join(srcdir, name)
if os.path.isdir(fullname):
if name in ('.cache', '__pycache__'):
continue
outdir = os.path.join(destination, name)
if not os.path.exists(outdir):
os.mkdir(outdir)
copy(fullname, outdir)
elif name.endswith('.pyc'):
continue
else:
binary = 'b' if fullname.endswith('.feather') else ''
with open(fullname, 'r%s' % binary) as fin:
with open(os.path.join(destination, name),
'w%s' % binary) as fout:
fout.write(fin.read())
def copy_main(name, verbose=True):
if len(sys.argv) > 2:
print('USAGE: examples [destination-directory]', file=sys.stderr)
sys.exit(1)
destination = sys.argv[1] if len(sys.argv) == 2 else '.'
copy_examples(name, destination, verbose=verbose)
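# Illustrative usage sketch: the module name below is assumed to be a tdda
# subpackage that ships an examples/ directory.
def _copy_examples_demo():
    copy_examples('referencetest', destination='.', verbose=True)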
|
the-stack_106_27039 | """
Doubly linked list
"""
class Node:
def __init__(self, elem, _next=None):
self.elem = elem
self.next = _next
self.pre = None
class DLinkList:
def __init__(self):
self._head = None
def is_empty(self):
return self._head is None
def length(self):
cur = self._head
count = 0
while cur is not None:
count += 1
cur = cur.next
return count
def travel(self):
cur = self._head
while cur is not None:
print(cur.elem, end='\t')
cur = cur.next
print()
def add(self, elem):
node = Node(elem)
if self.is_empty():
self._head = node
else:
            # point the new node's next at the head node
node.next = self._head
self._head.pre = node
self._head = node
def append(self, elem):
node = Node(elem)
if self.is_empty():
self._head = node
else:
cur = self._head
while cur.next is not None:
cur = cur.next
            # link the new node after the current tail
cur.next = node
node.pre = cur
def search(self, elem):
cur = self._head
while cur is not None:
if cur.elem == elem:
return True
cur = cur.next
return False
def insert(self, elem, pos):
if pos <= 0:
self.add(elem)
elif pos > (self.length() - 1):
self.append(elem)
else:
node = Node(elem)
cur = self._head
count = 0
while count < (pos - 1):
count += 1
cur = cur.next
node.pre = cur
node.next = cur.next
cur.next.pre = node
cur.next = node
def remove(self, elem):
cur = self._head
while cur is not None:
if cur.elem == elem:
                # check whether this is the head node
if cur == self._head:
                    # move the head pointer to the next node
self._head = cur.next
                    # if the next node is not None, reset its pre to None instead of leaving it pointing at the old head
if cur.next:
cur.next.pre = None
else:
                    # not the head node:
                    # link the previous node's next past the current node
cur.pre.next = cur.next
if cur.next:
cur.next.pre = cur.pre
break
else:
cur = cur.next
if __name__ == '__main__':
ll = DLinkList()
ll.add(1)
ll.add(2)
ll.append(3)
ll.insert(2, 4)
ll.insert(4, 5)
ll.insert(0, 6)
print(ll.length())
ll.travel()
print(ll.search(3))
print(ll.search(5))
ll.remove(3)
print(ll.length())
ll.travel()
|
the-stack_106_27040 | """Config Flow using OAuth2.
This module exists of the following parts:
- OAuth2 config flow which supports multiple OAuth2 implementations
- OAuth2 implementation that works with local provided client ID/secret
"""
from __future__ import annotations
from abc import ABC, ABCMeta, abstractmethod
import asyncio
from collections.abc import Awaitable, Callable
import logging
import secrets
import time
from typing import Any, cast
from aiohttp import client, web
import async_timeout
import jwt
import voluptuous as vol
from yarl import URL
from homeassistant import config_entries
from homeassistant.components import http
from homeassistant.core import HomeAssistant, callback
from homeassistant.data_entry_flow import FlowResult
from .aiohttp_client import async_get_clientsession
from .network import NoURLAvailableError
_LOGGER = logging.getLogger(__name__)
DATA_JWT_SECRET = "oauth2_jwt_secret"
DATA_VIEW_REGISTERED = "oauth2_view_reg"
DATA_IMPLEMENTATIONS = "oauth2_impl"
DATA_PROVIDERS = "oauth2_providers"
AUTH_CALLBACK_PATH = "/auth/external/callback"
HEADER_FRONTEND_BASE = "HA-Frontend-Base"
CLOCK_OUT_OF_SYNC_MAX_SEC = 20
class AbstractOAuth2Implementation(ABC):
"""Base class to abstract OAuth2 authentication."""
@property
@abstractmethod
def name(self) -> str:
"""Name of the implementation."""
@property
@abstractmethod
def domain(self) -> str:
"""Domain that is providing the implementation."""
@abstractmethod
async def async_generate_authorize_url(self, flow_id: str) -> str:
"""Generate a url for the user to authorize.
This step is called when a config flow is initialized. It should redirect the
user to the vendor website where they can authorize Home Assistant.
The implementation is responsible to get notified when the user is authorized
and pass this to the specified config flow. Do as little work as possible once
notified. You can do the work inside async_resolve_external_data. This will
give the best UX.
Pass external data in with:
await hass.config_entries.flow.async_configure(
flow_id=flow_id, user_input={'code': 'abcd', 'state': { … }
)
"""
@abstractmethod
async def async_resolve_external_data(self, external_data: Any) -> dict:
"""Resolve external data to tokens.
Turn the data that the implementation passed to the config flow as external
step data into tokens. These tokens will be stored as 'token' in the
config entry data.
"""
async def async_refresh_token(self, token: dict) -> dict:
"""Refresh a token and update expires info."""
new_token = await self._async_refresh_token(token)
# Force int for non-compliant oauth2 providers
new_token["expires_in"] = int(new_token["expires_in"])
new_token["expires_at"] = time.time() + new_token["expires_in"]
return new_token
@abstractmethod
async def _async_refresh_token(self, token: dict) -> dict:
"""Refresh a token."""
class LocalOAuth2Implementation(AbstractOAuth2Implementation):
"""Local OAuth2 implementation."""
def __init__(
self,
hass: HomeAssistant,
domain: str,
client_id: str,
client_secret: str,
authorize_url: str,
token_url: str,
) -> None:
"""Initialize local auth implementation."""
self.hass = hass
self._domain = domain
self.client_id = client_id
self.client_secret = client_secret
self.authorize_url = authorize_url
self.token_url = token_url
@property
def name(self) -> str:
"""Name of the implementation."""
return "Configuration.yaml"
@property
def domain(self) -> str:
"""Domain providing the implementation."""
return self._domain
@property
def redirect_uri(self) -> str:
"""Return the redirect uri."""
if (req := http.current_request.get()) is None:
raise RuntimeError("No current request in context")
if (ha_host := req.headers.get(HEADER_FRONTEND_BASE)) is None:
raise RuntimeError("No header in request")
return f"{ha_host}{AUTH_CALLBACK_PATH}"
@property
def extra_authorize_data(self) -> dict:
"""Extra data that needs to be appended to the authorize url."""
return {}
async def async_generate_authorize_url(self, flow_id: str) -> str:
"""Generate a url for the user to authorize."""
redirect_uri = self.redirect_uri
return str(
URL(self.authorize_url)
.with_query(
{
"response_type": "code",
"client_id": self.client_id,
"redirect_uri": redirect_uri,
"state": _encode_jwt(
self.hass, {"flow_id": flow_id, "redirect_uri": redirect_uri}
),
}
)
.update_query(self.extra_authorize_data)
)
async def async_resolve_external_data(self, external_data: Any) -> dict:
"""Resolve the authorization code to tokens."""
return await self._token_request(
{
"grant_type": "authorization_code",
"code": external_data["code"],
"redirect_uri": external_data["state"]["redirect_uri"],
}
)
async def _async_refresh_token(self, token: dict) -> dict:
"""Refresh tokens."""
new_token = await self._token_request(
{
"grant_type": "refresh_token",
"client_id": self.client_id,
"refresh_token": token["refresh_token"],
}
)
return {**token, **new_token}
async def _token_request(self, data: dict) -> dict:
"""Make a token request."""
session = async_get_clientsession(self.hass)
data["client_id"] = self.client_id
if self.client_secret is not None:
data["client_secret"] = self.client_secret
resp = await session.post(self.token_url, data=data)
if resp.status >= 400 and _LOGGER.isEnabledFor(logging.DEBUG):
body = await resp.text()
_LOGGER.debug(
"Token request failed with status=%s, body=%s",
resp.status,
body,
)
resp.raise_for_status()
return cast(dict, await resp.json())
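# Illustrative sketch (domain, credentials and endpoint URLs are placeholders):
# how an integration could register a local implementation so the config flow
# machinery below can discover it.
def _example_register_local_impl(hass: HomeAssistant) -> None:
    async_register_implementation(
        hass,
        "example_domain",
        LocalOAuth2Implementation(
            hass,
            "example_domain",
            "my-client-id",
            "my-client-secret",
            "https://example.com/oauth2/authorize",
            "https://example.com/oauth2/token",
        ),
    )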
class AbstractOAuth2FlowHandler(config_entries.ConfigFlow, metaclass=ABCMeta):
"""Handle a config flow."""
DOMAIN = ""
VERSION = 1
def __init__(self) -> None:
"""Instantiate config flow."""
if self.DOMAIN == "":
raise TypeError(
f"Can't instantiate class {self.__class__.__name__} without DOMAIN being set"
)
self.external_data: Any = None
self.flow_impl: AbstractOAuth2Implementation = None # type: ignore
@property
@abstractmethod
def logger(self) -> logging.Logger:
"""Return logger."""
@property
def extra_authorize_data(self) -> dict:
"""Extra data that needs to be appended to the authorize url."""
return {}
async def async_step_pick_implementation(
self, user_input: dict | None = None
) -> FlowResult:
"""Handle a flow start."""
implementations = await async_get_implementations(self.hass, self.DOMAIN)
if user_input is not None:
self.flow_impl = implementations[user_input["implementation"]]
return await self.async_step_auth()
if not implementations:
return self.async_abort(reason="missing_configuration")
req = http.current_request.get()
if len(implementations) == 1 and req is not None:
# Pick first implementation if we have only one, but only
# if this is triggered by a user interaction (request).
self.flow_impl = list(implementations.values())[0]
return await self.async_step_auth()
return self.async_show_form(
step_id="pick_implementation",
data_schema=vol.Schema(
{
vol.Required(
"implementation", default=list(implementations)[0]
): vol.In({key: impl.name for key, impl in implementations.items()})
}
),
)
async def async_step_auth(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Create an entry for auth."""
# Flow has been triggered by external data
if user_input:
self.external_data = user_input
return self.async_external_step_done(next_step_id="creation")
try:
async with async_timeout.timeout(10):
url = await self.flow_impl.async_generate_authorize_url(self.flow_id)
except asyncio.TimeoutError:
return self.async_abort(reason="authorize_url_timeout")
except NoURLAvailableError:
return self.async_abort(
reason="no_url_available",
description_placeholders={
"docs_url": "https://www.home-assistant.io/more-info/no-url-available"
},
)
url = str(URL(url).update_query(self.extra_authorize_data))
return self.async_external_step(step_id="auth", url=url)
async def async_step_creation(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Create config entry from external data."""
token = await self.flow_impl.async_resolve_external_data(self.external_data)
# Force int for non-compliant oauth2 providers
try:
token["expires_in"] = int(token["expires_in"])
except ValueError as err:
_LOGGER.warning("Error converting expires_in to int: %s", err)
return self.async_abort(reason="oauth_error")
token["expires_at"] = time.time() + token["expires_in"]
self.logger.info("Successfully authenticated")
return await self.async_oauth_create_entry(
{"auth_implementation": self.flow_impl.domain, "token": token}
)
async def async_oauth_create_entry(self, data: dict) -> FlowResult:
"""Create an entry for the flow.
Ok to override if you want to fetch extra info or even add another step.
"""
return self.async_create_entry(title=self.flow_impl.name, data=data)
async def async_step_user(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Handle a flow start."""
return await self.async_step_pick_implementation(user_input)
@classmethod
def async_register_implementation(
cls, hass: HomeAssistant, local_impl: LocalOAuth2Implementation
) -> None:
"""Register a local implementation."""
async_register_implementation(hass, cls.DOMAIN, local_impl)
@callback
def async_register_implementation(
hass: HomeAssistant, domain: str, implementation: AbstractOAuth2Implementation
) -> None:
"""Register an OAuth2 flow implementation for an integration."""
if isinstance(implementation, LocalOAuth2Implementation) and not hass.data.get(
DATA_VIEW_REGISTERED, False
):
hass.http.register_view(OAuth2AuthorizeCallbackView())
hass.data[DATA_VIEW_REGISTERED] = True
implementations = hass.data.setdefault(DATA_IMPLEMENTATIONS, {})
implementations.setdefault(domain, {})[implementation.domain] = implementation
async def async_get_implementations(
hass: HomeAssistant, domain: str
) -> dict[str, AbstractOAuth2Implementation]:
"""Return OAuth2 implementations for specified domain."""
registered = cast(
dict[str, AbstractOAuth2Implementation],
hass.data.setdefault(DATA_IMPLEMENTATIONS, {}).get(domain, {}),
)
if DATA_PROVIDERS not in hass.data:
return registered
registered = dict(registered)
for provider_domain, get_impl in hass.data[DATA_PROVIDERS].items():
if (implementation := await get_impl(hass, domain)) is not None:
registered[provider_domain] = implementation
return registered
async def async_get_config_entry_implementation(
hass: HomeAssistant, config_entry: config_entries.ConfigEntry
) -> AbstractOAuth2Implementation:
"""Return the implementation for this config entry."""
implementations = await async_get_implementations(hass, config_entry.domain)
implementation = implementations.get(config_entry.data["auth_implementation"])
if implementation is None:
raise ValueError("Implementation not available")
return implementation
@callback
def async_add_implementation_provider(
hass: HomeAssistant,
provider_domain: str,
async_provide_implementation: Callable[
[HomeAssistant, str], Awaitable[AbstractOAuth2Implementation | None]
],
) -> None:
"""Add an implementation provider.
    The provider should return None if it has no implementation for the requested domain.
"""
hass.data.setdefault(DATA_PROVIDERS, {})[
provider_domain
] = async_provide_implementation
class OAuth2AuthorizeCallbackView(http.HomeAssistantView):
"""OAuth2 Authorization Callback View."""
requires_auth = False
url = AUTH_CALLBACK_PATH
name = "auth:external:callback"
async def get(self, request: web.Request) -> web.Response:
"""Receive authorization code."""
# pylint: disable=no-self-use
if "code" not in request.query or "state" not in request.query:
return web.Response(
text=f"Missing code or state parameter in {request.url}"
)
hass = request.app["hass"]
state = _decode_jwt(hass, request.query["state"])
if state is None:
return web.Response(text="Invalid state")
await hass.config_entries.flow.async_configure(
flow_id=state["flow_id"],
user_input={"state": state, "code": request.query["code"]},
)
return web.Response(
headers={"content-type": "text/html"},
text="<script>window.close()</script>",
)
class OAuth2Session:
"""Session to make requests authenticated with OAuth2."""
def __init__(
self,
hass: HomeAssistant,
config_entry: config_entries.ConfigEntry,
implementation: AbstractOAuth2Implementation,
) -> None:
"""Initialize an OAuth2 session."""
self.hass = hass
self.config_entry = config_entry
self.implementation = implementation
@property
def token(self) -> dict:
"""Return the token."""
return cast(dict, self.config_entry.data["token"])
@property
def valid_token(self) -> bool:
"""Return if token is still valid."""
return (
cast(float, self.token["expires_at"])
> time.time() + CLOCK_OUT_OF_SYNC_MAX_SEC
)
async def async_ensure_token_valid(self) -> None:
"""Ensure that the current token is valid."""
if self.valid_token:
return
new_token = await self.implementation.async_refresh_token(self.token)
self.hass.config_entries.async_update_entry(
self.config_entry, data={**self.config_entry.data, "token": new_token}
)
async def async_request(
self, method: str, url: str, **kwargs: Any
) -> client.ClientResponse:
"""Make a request."""
await self.async_ensure_token_valid()
return await async_oauth2_request(
self.hass, self.config_entry.data["token"], method, url, **kwargs
)
async def async_oauth2_request(
hass: HomeAssistant, token: dict, method: str, url: str, **kwargs: Any
) -> client.ClientResponse:
"""Make an OAuth2 authenticated request.
This method will not refresh tokens. Use OAuth2 session for that.
"""
session = async_get_clientsession(hass)
return await session.request(
method,
url,
**kwargs,
headers={
**(kwargs.get("headers") or {}),
"authorization": f"Bearer {token['access_token']}",
},
)
@callback
def _encode_jwt(hass: HomeAssistant, data: dict) -> str:
"""JWT encode data."""
if (secret := hass.data.get(DATA_JWT_SECRET)) is None:
secret = hass.data[DATA_JWT_SECRET] = secrets.token_hex()
return jwt.encode(data, secret, algorithm="HS256")
@callback
def _decode_jwt(hass: HomeAssistant, encoded: str) -> dict | None:
"""JWT encode data."""
secret = cast(str, hass.data.get(DATA_JWT_SECRET))
try:
return jwt.decode(encoded, secret, algorithms=["HS256"])
except jwt.InvalidTokenError:
return None
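# Illustrative sketch (assumption: not part of the upstream module): one way an
# integration's setup code could use the helpers above. The function name and the
# endpoint URL are placeholders chosen for demonstration only.
async def _example_setup_entry(
    hass: HomeAssistant, entry: config_entries.ConfigEntry
) -> bool:
    """Build an authenticated session for a config entry and make one request."""
    implementation = await async_get_config_entry_implementation(hass, entry)
    session = OAuth2Session(hass, entry, implementation)
    # async_request refreshes the access token, if expired, before issuing the call.
    resp = await session.async_request("get", "https://example.invalid/api/v1/me")
    return resp.status == 200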
|
the-stack_106_27041 | from random import choice
from string import ascii_uppercase
from torch.utils.data import DataLoader
from torchvision.transforms import transforms
import os
from configs import global_config, paths_config
import wandb
from training.coaches.multi_id_coach import MultiIDCoach
from training.coaches.single_id_coach import SingleIDCoach
from utils.ImagesDataset import ImagesDataset
def run_PTI(run_name='', use_wandb=False, use_multi_id_training=False):
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = global_config.cuda_visible_devices
if run_name == '':
global_config.run_name = ''.join(choice(ascii_uppercase) for i in range(12))
else:
global_config.run_name = run_name
if use_wandb:
run = wandb.init(project=paths_config.pti_results_keyword, reinit=True, name=global_config.run_name)
global_config.pivotal_training_steps = 1
global_config.training_step = 1
embedding_dir_path = f'{paths_config.embedding_base_dir}/{paths_config.input_data_id}/{paths_config.pti_results_keyword}'
os.makedirs(embedding_dir_path, exist_ok=True)
dataset = ImagesDataset(paths_config.input_data_path, transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]))
dataloader = DataLoader(dataset, batch_size=1, shuffle=False)
if use_multi_id_training:
coach = MultiIDCoach(dataloader, use_wandb)
else:
coach = SingleIDCoach(dataloader, use_wandb)
coach.train()
return global_config.run_name
if __name__ == '__main__':
run_PTI(run_name='', use_wandb=False, use_multi_id_training=False)
|
the-stack_106_27042 | # Copyright 2017, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
from opencensus.ext.mysql import trace
class Test_mysql_trace(unittest.TestCase):
def test_trace_integration(self):
def mock_wrap(func):
return 'mock call'
mock_call = 'mock call'
mock_inspect = mock.Mock()
mock_mysql_module = mock.Mock()
mock_inspect.getmodule.return_value = mock_mysql_module
patch_wrap = mock.patch(
'opencensus.ext.mysql.trace.trace.wrap_conn',
side_effect=mock_wrap)
patch_inspect = mock.patch(
'opencensus.ext.mysql.trace.inspect',
mock_inspect)
with patch_wrap, patch_inspect:
trace.trace_integration()
self.assertEqual(mock_mysql_module.connect, mock_call)
|
the-stack_106_27043 | import os
import sys
import pprint
import traceback
from random import randint
pp = pprint.PrettyPrinter(depth=6)
root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(root + '/python')
# import ccxt # noqa: E402
import ccxt.async_support as ccxt # noqa: E402
import asyncio # noqa: E402
loop = asyncio.get_event_loop()
notRecoverableError = False
nextRecoverableErrorTimeout = None
async def doUnsubscribe(exchange, symbols, params): # noqa: E302
for symbol in symbols:
print('unsubscribe: ' + symbol)
sys.stdout.flush()
await exchange.websocket_unsubscribe('ob', symbol, params)
print('unsubscribed: ' + symbol)
sys.stdout.flush()
async def doSubscribe(exchange, symbols, params): # noqa: E302
global nextRecoverableErrorTimeout
for symbol in symbols:
if notRecoverableError:
return
print('subscribe: ' + symbol)
sys.stdout.flush()
await exchange.websocket_subscribe('ob', symbol, params)
print('subscribed: ' + symbol)
sys.stdout.flush()
# hack to emit websocket error
seconds2wait = randint(5, 10)
print("NEXT PROGRAMATED WEBSOCKET ERROR AFTER " + str(seconds2wait) + " seconds")
sys.stdout.flush()
def raise_recoverable_error(): # noqa: E302
keys = list(exchange.websocketContexts.keys())
keyIndex = randint(0, len(keys) - 1)
contextId = keys[keyIndex]
exchange.websocketContexts[contextId]['conx']['conx'].emit('err', 'recoverable error')
nextRecoverableErrorTimeout = loop.call_later(seconds2wait, raise_recoverable_error)
async def main():
if len(sys.argv) <= 5:
print('python ' + __file__ + ' exchange apikey secret limit symbol ...')
sys.exit(-1)
exchange_id = sys.argv[1]
apiKey = sys.argv[2]
secret = sys.argv[3]
limit = int(sys.argv[4])
symbols = []
for i in range(5, len(sys.argv)):
symbols.append(sys.argv[i])
exchange = getattr(ccxt, exchange_id)({
"apiKey": apiKey,
"secret": secret,
"enableRateLimit": True,
'verbose': False,
'timeout': 5 * 1000
})
@exchange.on('err')
async def websocket_error(err, conxid): # pylint: disable=W0612
global notRecoverableError
global loop
global nextRecoverableErrorTimeout
print(type(err).__name__ + ":" + str(err))
traceback.print_tb(err.__traceback__)
# traceback.print_stack()
sys.stdout.flush()
exchange.websocketClose(conxid)
if isinstance(err, ccxt.NetworkError):
print("waiting 5 seconds ...")
sys.stdout.flush()
await asyncio.sleep(5)
try:
if notRecoverableError:
return
print("subscribing again ...")
sys.stdout.flush()
await doSubscribe(exchange, symbols, {
'limit': limit,
})
except Exception as ex:
print(ex)
sys.stdout.flush()
else:
print("unsubscribing all ...")
notRecoverableError = True
if nextRecoverableErrorTimeout is not None:
nextRecoverableErrorTimeout.cancel()
await doUnsubscribe(exchange, symbols, {
'limit': limit,
})
print("unsubscribed all")
loop.stop()
@exchange.on('ob')
def websocket_ob(symbol, ob): # pylint: disable=W0612
print("ob received from: " + symbol)
sys.stdout.flush()
# pp.pprint(ob)
await exchange.loadMarkets()
def raise_unrecoverable_error():
keys = list(exchange.websocketContexts.keys())
keyIndex = randint(0, len(keys) - 1)
contextId = keys[keyIndex]
exchange.emit('err', ccxt.ExchangeError('not recoverable error'), contextId)
loop.call_later(30, raise_unrecoverable_error)
await doSubscribe(exchange, symbols, {
'limit': limit
})
asyncio.ensure_future(main())
loop.run_forever()
print("after complete")
|
the-stack_106_27046 | from collections import deque
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
def is_symmetric(root):
queue = deque([root])
while queue:
level = []
for _ in range(len(queue)):
node = queue.popleft()
if node:
level.append(node.val)
queue.append(node.left)
queue.append(node.right)
else:
level.append(None)
        if level != level[::-1]:
return False
return True
# Test cases:
root = TreeNode(1)
root.left = TreeNode(2)
root.right = TreeNode(2)
# root.left.left = TreeNode(3)
root.left.right = TreeNode(4)
# root.right.left = TreeNode(4)
root.right.right = TreeNode(4)
print(is_symmetric(root)) |
the-stack_106_27047 | import argparse
import chainer
from chainer import iterators
from chainercv.datasets import ade20k_semantic_segmentation_label_names
from chainercv.datasets import ADE20KSemanticSegmentationDataset
from chainercv.datasets import camvid_label_names
from chainercv.datasets import CamVidDataset
from chainercv.datasets import cityscapes_semantic_segmentation_label_names
from chainercv.datasets import CityscapesSemanticSegmentationDataset
from chainercv.datasets import voc_semantic_segmentation_label_names
from chainercv.datasets import VOCSemanticSegmentationDataset
from chainercv.evaluations import eval_semantic_segmentation
from chainercv.experimental.links import PSPNetResNet101
from chainercv.experimental.links import PSPNetResNet50
from chainercv.links import DeepLabV3plusXception65
from chainercv.links import SegNetBasic
from chainercv.utils import apply_to_iterator
from chainercv.utils import ProgressHook
models = {
'pspnet_resnet50': (PSPNetResNet50, {}, 1),
'pspnet_resnet101': (PSPNetResNet101, {}, 1),
'segnet': (SegNetBasic, {}, 1),
'deeplab_v3plus_xception65': (DeepLabV3plusXception65, {}, 1),
}
def setup(dataset, model, pretrained_model, batchsize, input_size):
dataset_name = dataset
if dataset_name == 'cityscapes':
dataset = CityscapesSemanticSegmentationDataset(
split='val', label_resolution='fine')
label_names = cityscapes_semantic_segmentation_label_names
elif dataset_name == 'ade20k':
dataset = ADE20KSemanticSegmentationDataset(split='val')
label_names = ade20k_semantic_segmentation_label_names
elif dataset_name == 'camvid':
dataset = CamVidDataset(split='test')
label_names = camvid_label_names
elif dataset_name == 'voc':
dataset = VOCSemanticSegmentationDataset(split='val')
label_names = voc_semantic_segmentation_label_names
def eval_(out_values, rest_values):
pred_labels, = out_values
gt_labels, = rest_values
result = eval_semantic_segmentation(pred_labels, gt_labels)
for iu, label_name in zip(result['iou'], label_names):
print('{:>23} : {:.4f}'.format(label_name, iu))
print('=' * 34)
print('{:>23} : {:.4f}'.format('mean IoU', result['miou']))
print('{:>23} : {:.4f}'.format(
'Class average accuracy', result['mean_class_accuracy']))
print('{:>23} : {:.4f}'.format(
'Global average accuracy', result['pixel_accuracy']))
cls, pretrained_models, default_batchsize = models[model]
if pretrained_model is None:
pretrained_model = pretrained_models.get(dataset_name, dataset_name)
if input_size is None:
input_size = None
else:
input_size = (input_size, input_size)
kwargs = {
'n_class': len(label_names),
'pretrained_model': pretrained_model,
}
if model in ['pspnet_resnet50', 'pspnet_resnet101']:
kwargs.update({'input_size': input_size})
elif model == 'deeplab_v3plus_xception65':
kwargs.update({'min_input_size': input_size})
model = cls(**kwargs)
if batchsize is None:
batchsize = default_batchsize
return dataset, eval_, model, batchsize
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--dataset', choices=('cityscapes', 'ade20k', 'camvid', 'voc'))
parser.add_argument('--model', choices=sorted(models.keys()))
parser.add_argument('--gpu', type=int, default=-1)
parser.add_argument('--pretrained-model')
parser.add_argument('--batchsize', type=int)
parser.add_argument('--input-size', type=int, default=None)
args = parser.parse_args()
dataset, eval_, model, batchsize = setup(
args.dataset, args.model, args.pretrained_model,
args.batchsize, args.input_size)
if args.gpu >= 0:
chainer.cuda.get_device_from_id(args.gpu).use()
model.to_gpu()
iterator = iterators.SerialIterator(
dataset, batchsize, repeat=False, shuffle=False)
in_values, out_values, rest_values = apply_to_iterator(
model.predict, iterator, hook=ProgressHook(len(dataset)))
# Delete an iterator of images to save memory usage.
del in_values
eval_(out_values, rest_values)
if __name__ == '__main__':
main()
|
the-stack_106_27050 | # =============================================================================
# TASK PARAMETER DEFINITION (should appear on GUI) init trial objects values
# =============================================================================
# SOUND, AMBIENT SENSOR, AND VIDEO RECORDINGS
RECORD_SOUND = True
RECORD_AMBIENT_SENSOR_DATA = True
RECORD_VIDEO = True
OPEN_CAMERA_VIEW = True # if RECORD_VIDEO == True OPEN_CAMERA_VIEW is ignored
# REWARDS
AUTOMATIC_CALIBRATION = True # Whether to look for a calibration session and a function to define the valve opening time # noqa
CALIBRATION_VALUE = 0.067 # calibration value for 3ul of target reward amount (ignored if automatic ON) # noqa
REWARD_AMOUNT = 1.5 # (µl) Amount of reward to be delivered upon correct choice each trial (overwritten if adaptive ON) # noqa
REWARD_TYPE = "Water 10% Sucrose" # Water, Water 10% Sucrose, Water 15% Sucrose, Water 2% Citric Acid (Guo et al.. PLoS One 2014) # noqa
# TASK
NTRIALS = 2000 # Number of trials for the current session
USE_AUTOMATIC_STOPPING_CRITERIONS = (
    True # Whether to check the automatic stopping criteria or not # noqa
)
USE_VISUAL_STIMULUS = True # Run the visual stim in bonsai
BONSAI_EDITOR = False # Whether to open the visual stim Bonsai editor or not
REPEAT_ON_ERROR = False
# STATE TIMERS
QUIESCENCE_THRESHOLDS = [-2, 2] # degree
QUIESCENT_PERIOD = 0.2 # + x, where x~exp(0.35), t ∈ 0.2 <= R <= 0.5
INTERACTIVE_DELAY = 0.0 # (s) how long after stim onset the CL starts
RESPONSE_WINDOW = 60 # Time to move the wheel after go tone (seconds)
ITI_CORRECT = 1 # how long the stim should stay visible after CORRECT choice
ITI_ERROR = 2 # how long the stim should stay visible after ERROR choice
# VISUAL STIM
STIM_FREQ = 0.10 # Probably constant - NOT IN USE
STIM_ANGLE = 0.0 # Vertical orientation of Gabor patch - NOT IN USE
STIM_SIGMA = 7.0 # (azimuth_degree) Size of Gabor patch
STIM_GAIN = 4.0 # (azimuth_degree/mm) Gain of the RE to stimulus movement
SYNC_SQUARE_X = 1.33
SYNC_SQUARE_Y = -1.03
# BLOCKS
BLOCK_INIT_5050 = False
BLOCK_PROBABILITY_SET = [0.2, 0.8]
BLOCK_LEN_FACTOR = 60
BLOCK_LEN_MIN = 20
BLOCK_LEN_MAX = 100
# LASER
LASER_HALF_HALF = False
LASER_HALF_TRIALS = 5
LASER_FIRST_HALF_ON = False
LASER_BLOCK_LEN_FACTOR = 60
LASER_BLOCK_LEN_MIN = 20
LASER_BLOCK_LEN_MAX = 100
LASER_PROB_0_STIM = 0.75 # Probability of laser stimulation during stimulated blocks in 0% contrast trials
LASER_PROB_0_NOSTIM = 0.25 # Probability of laser stimulation during non-stimulated blocks in 0% contrast trials
LASER_TRANSITION_MIN = 10 # Number of trials before and after block switch in which no laser block switch may occur
# POSITIONS
STIM_POSITIONS = [-35, 35] # All possible positions for this session (deg)
# CONTRASTS
CONTRAST_SET = [1.0, 0.25, 0.125, 0.0625, 0.0] # Full contrast set
CONTRAST_SET_PROBABILITY_TYPE = "biased" # 'biased' or 'uniform'; when set to 'biased', P(0% contrast) is multiplied by CONTRAST_PROB_0
CONTRAST_PROB_0 = 1.5
# SOUNDS
SOFT_SOUND = "xonar" # Use software sound 'xonar', 'sysdefault' or None for BpodSoundCard # noqa
SOUND_BOARD_BPOD_PORT = "Serial3" # (on Bpod) - Ignored if using SOFT_SOUND
WHITE_NOISE_DURATION = 0.5 # Length of noise burst
WHITE_NOISE_AMPLITUDE = 0.05
GO_TONE_DURATION = 0.1 # Length of tone
GO_TONE_FREQUENCY = 5000 # 5KHz
GO_TONE_AMPLITUDE = 0.0272 # [0->1] 0.0272 for 70dB SPL Xonar
# POOP COUNT LOGGING
POOP_COUNT = False # Whether to ask for a poop count at the end of the session
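# Illustrative sketch (assumption: not consumed by the task framework): one reading of
# the QUIESCENT_PERIOD comment above, drawing 0.2 + x with x ~ exp(mean 0.35) and
# rejection-sampling so the total stays within 0.2 <= t <= 0.5 seconds.
def _example_draw_quiescent_period():
    import random
    while True:
        t = QUIESCENT_PERIOD + random.expovariate(1 / 0.35)
        if t <= 0.5:
            return t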
|
the-stack_106_27053 | """
Invoke entrypoint, import here all the tasks we want to make available
"""
import os
from invoke import Collection
from . import agent, android, benchmarks, customaction, docker, dogstatsd, pylauncher, cluster_agent, systray, release
from .go import fmt, lint, vet, cyclo, ineffassign, misspell, deps, reset
from .test import test, integration_tests, version, lint_teamassignment, lint_releasenote, lint_filenames, e2e_tests
from .build_tags import audit_tag_impact
# the root namespace
ns = Collection()
# add single tasks to the root
ns.add_task(fmt)
ns.add_task(lint)
ns.add_task(vet)
ns.add_task(cyclo)
ns.add_task(ineffassign)
ns.add_task(misspell)
ns.add_task(test)
ns.add_task(integration_tests)
ns.add_task(deps)
ns.add_task(reset)
ns.add_task(version)
ns.add_task(lint_teamassignment)
ns.add_task(lint_releasenote)
ns.add_task(lint_filenames)
ns.add_task(audit_tag_impact)
ns.add_task(e2e_tests)
# add namespaced tasks to the root
ns.add_collection(agent)
ns.add_collection(android)
ns.add_collection(cluster_agent)
ns.add_collection(customaction)
ns.add_collection(benchmarks, name="bench")
ns.add_collection(docker)
ns.add_collection(dogstatsd)
ns.add_collection(pylauncher)
ns.add_collection(systray)
ns.add_collection(release)
ns.configure({
'run': {
# workaround waiting for a fix being merged on Invoke,
# see https://github.com/pyinvoke/invoke/pull/407
'shell': os.environ.get('COMSPEC', os.environ.get('SHELL')),
# this should stay, set the encoding explicitly so invoke doesn't
# freak out if a command outputs unicode chars.
'encoding': 'utf-8',
}
})
|
the-stack_106_27055 | import numpy as np
import cvxpy as cp
import networkx as nx
from scipy import linalg
def greedy_independent_set(graph):
"""
:param graph: (nx.classes.graph.Graph) An undirected graph with no
self-loops or multiple edges. The graph can either be weighted or
unweighted, although the problem only differentiates between zero and
non-zero weight edges.
:return: (set) The independent set the algorithm outputs, represented as
a set of vertices.
"""
independent = set()
for vertex in graph.nodes:
if not any(graph.has_edge(vertex, element) for element in independent):
independent.add(vertex)
return independent
def crude_sdp_independent_set(graph):
"""
:param graph: (nx.classes.graph.Graph) An undirected graph with no
self-loops or multiple edges. The graph can either be weighted or
unweighted, although the problem only differentiates between zero and
non-zero weight edges.
:return: (set) The independent set the algorithm outputs, represented as
a set of vertices.
"""
solution = _solve_vector_program(graph)
labels = list(graph.nodes)
candidates = _get_vector_clusters(labels, solution, 1.0)
best = max(candidates, key=lambda cluster: len(cluster))
return best
def _solve_vector_program(graph):
"""
:param graph: (nx.classes.graph.Graph) An undirected graph with no
self-loops or multiple edges. The graph can either be weighted or
unweighted, although the problem only differentiates between zero and
non-zero weight edges.
:return: (np.ndarray) A matrix whose columns represents the vectors assigned
to each vertex to maximize the crude semi-definite program (C-SDP)
objective.
"""
size = len(graph)
products = cp.Variable((size, size), PSD=True)
objective_matrix = size * np.eye(size) - np.ones((size, size))
objective = cp.Minimize(cp.trace(objective_matrix @ products))
adjacency = nx.linalg.adjacency_matrix(graph)
adjacency = adjacency.toarray()
constraints = [
cp.diag(products) == 1,
products >= 0,
cp.multiply(products, adjacency) == 0
]
problem = cp.Problem(objective, constraints)
problem.solve()
assert problem.status == 'optimal'
eigenvalues, eigenvectors = np.linalg.eigh(products.value)
eigenvalues = np.maximum(eigenvalues, 0)
diagonal_root = np.diag(np.sqrt(eigenvalues))
assignment = diagonal_root @ eigenvectors.T
return assignment
def _get_vector_clusters(labels, vectors, threshold):
"""
:param labels: (list) A list of labels.
:param vectors: (np.ndarray) A matrix whose columns are the vectors
corresponding to each label. Therefore, the label LABELS[i] references
the vector VECTORS[:,i]; both lengths must be exactly the same, so
len(VECTORS.T) == len(LABELS).
:param threshold: (float | int) The closeness threshold.
:return: (list) Return a list of sets. For each vector, this list includes a
set which contains the labels of all vectors within a THRESHOLD-ball
of the original. The list will contain exactly len(LABELS) entries,
in the same order as LABELS.
"""
total = len(labels)
clusters = []
for current in range(total):
output = set()
for other in range(total):
if np.linalg.norm(vectors[:,current] - vectors[:,other]) <= threshold:
output.add(labels[other])
clusters.append(output)
return clusters
def planted_spectral_algorithm(graph):
"""
:param graph: (nx.classes.graph.Graph) An undirected graph with no
self-loops or multiple edges. The graph can either be weighted or
unweighted, although the problem only differentiates between zero and
non-zero weight edges.
:return: (set) The independent set the algorithm outputs, represented as
a set of vertices.
"""
size = len(graph)
labels = list(graph.nodes)
adjacency = nx.linalg.adjacency_matrix(graph)
adjacency = adjacency.toarray()
co_adjacency = 1 - adjacency
ones_matrix = np.ones((size, size))
normalized = co_adjacency - 0.5 * ones_matrix
_, eigenvector = linalg.eigh(normalized, eigvals=(size - 1, size - 1))
indices = list(range(size))
indices.sort(key=lambda num: eigenvector[num], reverse=True)
output = set()
for index in indices:
vertex = labels[index]
if not any(graph.has_edge(vertex, element) for element in output):
output.add(vertex)
return output
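# Illustrative usage sketch (assumption: run as a script; the random graph parameters
# are arbitrary and only meant as a quick sanity check of the heuristics above).
if __name__ == '__main__':
    demo_graph = nx.erdos_renyi_graph(20, 0.3, seed=0)
    print('greedy:', greedy_independent_set(demo_graph))
    print('spectral:', planted_spectral_algorithm(demo_graph))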
|
the-stack_106_27056 | import locale
import logging
import os
import wx
from flask_restful import Resource
from orangeshare import Config
class GetDataFrame(wx.Frame):
def __init__(self, parent, id, title, orange_share, newer_version):
"""
:param newer_version: The new available version of orangeshare
:param orange_share: The orangeshare instance
"""
wx.Frame.__init__(self, parent, id, title, style=wx.DEFAULT_FRAME_STYLE ^ wx.RESIZE_BORDER)
self.orange_share = orange_share
self.newer_version = newer_version
ico = wx.Icon(os.path.join(os.path.dirname(__file__), os.pardir, "logo/white.ico"), wx.BITMAP_TYPE_ICO)
self.SetIcon(ico)
text = wx.StaticText(self, label="Version {} of Orange Share is available.".format(self.newer_version), size=(400, -1), pos=(0, 0), style=wx.ALIGN_CENTER)
text.SetFont(wx.Font(-1, wx.DEFAULT, wx.NORMAL, wx.BOLD, 0, ""))
self.checkbox = wx.CheckBox(self, label="don't show again")
show_button = wx.Button(self, -1, size=(200, -1), label='Show Me')
show_button.Bind(wx.EVT_BUTTON, self.on_show)
ignore_button = wx.Button(self, -1, size=(200, -1), label='Ignore')
ignore_button.Bind(wx.EVT_BUTTON, self.on_ignore)
# layout
grid = wx.GridBagSizer(15, 0)
grid.Add(text, pos=(0, 0), span=(1, 2), flag=wx.CENTER)
grid.Add(self.checkbox, pos=(1, 0), span=(1, 2), flag=wx.CENTER)
grid.Add(show_button, pos=(2, 0), span=(1, 1), flag=wx.EXPAND)
grid.Add(ignore_button, pos=(2, 1), span=(1, 1), flag=wx.EXPAND)
self.SetSizerAndFit(grid)
self.Layout()
self.Centre()
self.Show()
def on_ignore(self, e):
self.save_ignore()
self.Close(True)
def on_show(self, e):
self.orange_share.open_ui("/update")
self.Close(True)
def save_ignore(self):
config = Config.get_config()
if self.checkbox.GetValue():
config.config.set("UPDATE", "ignore", self.newer_version)
else:
config.config.set("UPDATE", "ignore", "")
config.save()
class UpdatePopup(Resource):
"""
Opens a dialog telling the user about a new update
"""
def __init__(self, orange_share: 'Orangeshare', newer_version):
"""
Creates the dialog
:param orange_share: the orangeshare instance
:param newer_version: the new available version
"""
logging.info("showing update popup")
app = wx.App()
# app.locale = wx.Locale(wx.Locale.GetSystemLanguage())
locale.setlocale(locale.LC_ALL, 'C')
frame = GetDataFrame(None, -1, "Update Available", orange_share=orange_share, newer_version=newer_version)
frame.Show()
app.MainLoop() |
the-stack_106_27058 | from abc import ABCMeta, abstractmethod
from contextlib import suppress
from pocs.base import PanBase
from pocs.camera.camera import AbstractCamera
from pocs.utils import error
from pocs.utils.library import load_library
from pocs.utils.logger import get_root_logger
class AbstractSDKDriver(PanBase, metaclass=ABCMeta):
def __init__(self, name, library_path=None, **kwargs):
"""Base class for all camera SDK interfaces.
On construction loads the shared object/dynamically linked version of the camera SDK
library, which must be already installed.
The name and location of the shared library can be manually specified with the library_path
argument, otherwise the ctypes.util.find_library function will be used to try to locate it.
Args:
name (str): name of the library (without 'lib' prefix or any suffixes, e.g. 'fli').
            library_path (str, optional): path to the library e.g. '/usr/local/lib/libASICamera2.so'
        Raises:
            pocs.utils.error.NotFound: raised if library_path not given & find_library fails to
                locate the library.
            OSError: raised if the ctypes.CDLL loader cannot load the library.
"""
super().__init__(**kwargs)
self._CDLL = load_library(name=name, path=library_path, logger=self.logger)
self._version = self.get_SDK_version()
self.logger.debug("{} driver ({}) initialised.".format(name, self._version))
# Properties
@property
def version(self):
return self._version
# Methods
@abstractmethod
def get_SDK_version(self):
""" Get the version of the SDK """
raise NotImplementedError
@abstractmethod
def get_cameras(self):
"""Convenience function to get a dictionary of all currently connected camera UIDs
and their corresponding device nodes/handles/camera IDs.
"""
raise NotImplementedError
class AbstractSDKCamera(AbstractCamera):
_driver = None
_cameras = {}
_assigned_cameras = set()
def __init__(self,
name='Generic SDK camera',
driver=AbstractSDKDriver,
library_path=None,
filter_type=None,
set_point=None,
*args, **kwargs):
# Would usually use self.logger but that won't exist until after calling super().__init__(),
# and don't want to do that until after the serial number and port have both been determined
# in order to avoid log entries with misleading values. To enable logging during the device
# scanning phase use get_root_logger() instead.
logger = get_root_logger()
# The SDK cameras don't generally have a 'port', they are identified by a serial_number,
# which is some form of unique ID readable via the camera SDK.
kwargs['port'] = None
serial_number = kwargs.get('serial_number')
if not serial_number:
msg = "Must specify serial_number for {}.".format(name)
logger.error(msg)
raise ValueError(msg)
# Get class of current object in a way that works in derived classes
my_class = type(self)
if my_class._driver is None:
# Initialise the driver if it hasn't already been done
my_class._driver = driver(library_path=library_path)
logger.debug("Looking for {} with UID '{}'.".format(name, serial_number))
if not my_class._cameras:
# No cached camera details, need to probe for connected cameras
# This will raise a PanError if there are no cameras.
my_class._cameras = my_class._driver.get_cameras()
logger.debug("Connected {}s: {}".format(name, my_class._cameras))
if serial_number in my_class._cameras:
logger.debug("Found {} with UID '{}' at {}.".format(
name, serial_number, my_class._cameras[serial_number]))
else:
raise error.PanError("Could not find {} with UID '{}'.".format(
name, serial_number))
if serial_number in my_class._assigned_cameras:
raise error.PanError("{} with UID '{}' already in use.".format(
name, serial_number))
my_class._assigned_cameras.add(serial_number)
super().__init__(name, *args, **kwargs)
self._address = my_class._cameras[self.uid]
self.connect()
if not self.is_connected:
raise error.PanError("Could not connect to {}.".format(self))
if filter_type:
# connect() will have set this based on camera info, but that doesn't know about filters
# upstream of the CCD. Can be set manually here, or handled by a filterwheel attribute.
self._filter_type = filter_type
if set_point is not None:
self.ccd_set_point = set_point
self.ccd_cooling_enabled = True
def __del__(self):
""" Attempt some clean up """
with suppress(AttributeError):
uid = self.uid
type(self)._assigned_cameras.discard(uid)
self.logger.debug('Removed {} from assigned cameras list'.format(uid))
# Properties
@property
def properties(self):
""" A collection of camera properties as read from the camera """
return self._info
# Methods
def __str__(self):
# SDK cameras don't have a port so just include the serial number in the string
# representation.
s = "{} ({})".format(self.name, self.uid)
if self.focuser:
s += ' with {}'.format(self.focuser.name)
if self.filterwheel:
s += ' & {}'.format(self.filterwheel.name)
elif self.filterwheel:
s += ' with {}'.format(self.filterwheel.name)
return s
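# Illustrative sketch (assumption: not part of POCS itself): the minimal surface a
# concrete driver must implement. Constructing it would still require the named shared
# library to be installed, since AbstractSDKDriver.__init__ loads it via ctypes.
class ExampleSDKDriver(AbstractSDKDriver):
    def get_SDK_version(self):
        # A real driver would query the vendor SDK here.
        return "0.0.1"

    def get_cameras(self):
        # Map camera UIDs to device handles/camera IDs, as AbstractSDKCamera expects.
        return {"SN0001": 0}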
|
the-stack_106_27062 | def front_and_back_search(lst, item):
'''
args:
        lst: an array of integers sorted in ascending order
        item: data to be found
    return:
        True if the item is found, otherwise False
    '''
    rear = 0
    front = len(lst) - 1
    while rear <= front:
        if item == lst[rear] or item == lst[front]:
            return True  # item found
        if item > lst[rear]:
            rear = rear + 1
        elif item < lst[front]:
            front = front - 1
        else:
            # neither pointer can move; only possible if lst is not sorted
            return False
    return False
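# Illustrative usage sketch (assumption: the input list is sorted in ascending order,
# which is what the front-and-back pointer movement above relies on).
if __name__ == '__main__':
    data = [1, 4, 7, 9, 12, 15]
    print(front_and_back_search(data, 9))   # True
    print(front_and_back_search(data, 5))   # False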
|
the-stack_106_27063 | import os.path
import tempfile
import logging
from binascii import unhexlify
from urllib.request import urlopen
from torba.client.errors import InsufficientFundsError
from lbry.testcase import CommandTestCase
from lbry.wallet.transaction import Transaction
log = logging.getLogger(__name__)
class ClaimTestCase(CommandTestCase):
files_directory = os.path.join(os.path.dirname(__file__), 'files')
video_file_url = 'http://commondatastorage.googleapis.com/gtv-videos-bucket/sample/ForBiggerEscapes.mp4'
video_file_name = os.path.join(files_directory, 'ForBiggerEscapes.mp4')
def setUp(self):
if not os.path.exists(self.video_file_name):
if not os.path.exists(self.files_directory):
os.mkdir(self.files_directory)
            log.info(f'downloading test video from {self.video_file_url}')
with urlopen(self.video_file_url) as response, \
open(self.video_file_name, 'wb') as video_file:
video_file.write(response.read())
async def image_stream_create(self, name='blank-image', bid='1.0', confirm=True):
with tempfile.NamedTemporaryFile(suffix='.png') as file:
file.write(unhexlify(
b'89504e470d0a1a0a0000000d49484452000000050000000708020000004fc'
b'510b9000000097048597300000b1300000b1301009a9c1800000015494441'
b'5408d763fcffff3f031260624005d4e603004c45030b5286e9ea000000004'
b'9454e44ae426082'
))
file.flush()
tx = await self.out(
self.daemon.jsonrpc_stream_create(
name, bid, file_path=file.name
)
)
if confirm:
await self.on_transaction_dict(tx)
await self.generate(1)
await self.on_transaction_dict(tx)
return tx
async def video_stream_create(self, name='chrome', bid='1.0', confirm=True):
tx = await self.out(
self.daemon.jsonrpc_stream_create(
name, bid, file_path=self.video_file_name
)
)
if confirm:
await self.on_transaction_dict(tx)
await self.generate(1)
await self.on_transaction_dict(tx)
return tx
class ClaimSearchCommand(ClaimTestCase):
async def create_channel(self):
self.channel = await self.channel_create('@abc', '1.0')
self.channel_id = self.get_claim_id(self.channel)
async def create_lots_of_streams(self):
tx = await self.daemon.jsonrpc_account_fund(None, None, '0.001', outputs=100, broadcast=True)
await self.confirm_tx(tx.id)
# 4 claims per block, 3 blocks. Sorted by height (descending) then claim name (ascending).
self.streams = []
for j in range(3):
same_height_claims = []
for k in range(3):
claim_tx = await self.stream_create(
f'c{j}-{k}', '0.000001', channel_id=self.channel_id, confirm=False)
same_height_claims.append(claim_tx['outputs'][0]['name'])
await self.on_transaction_dict(claim_tx)
claim_tx = await self.stream_create(
f'c{j}-4', '0.000001', channel_id=self.channel_id, confirm=True)
same_height_claims.append(claim_tx['outputs'][0]['name'])
self.streams = same_height_claims + self.streams
async def assertFindsClaim(self, claim, **kwargs):
await self.assertFindsClaims([claim], **kwargs)
async def assertFindsClaims(self, claims, **kwargs):
kwargs.setdefault('order_by', ['height', '^name'])
results = await self.claim_search(**kwargs)
self.assertEqual(len(claims), len(results))
for claim, result in zip(claims, results):
self.assertEqual(
(claim['txid'], self.get_claim_id(claim)),
(result['txid'], result['claim_id'])
)
async def test_basic_claim_search(self):
await self.create_channel()
channel_txo = self.channel['outputs'][0]
channel2 = await self.channel_create('@abc', '0.1', allow_duplicate_name=True)
channel_txo2 = channel2['outputs'][0]
channel_id2 = channel_txo2['claim_id']
# finding a channel
await self.assertFindsClaims([channel2, self.channel], name='@abc')
await self.assertFindsClaim(self.channel, name='@abc', is_controlling=True)
await self.assertFindsClaim(self.channel, claim_id=self.channel_id)
await self.assertFindsClaim(self.channel, txid=self.channel['txid'], nout=0)
await self.assertFindsClaim(channel2, claim_id=channel_id2)
await self.assertFindsClaim(channel2, txid=channel2['txid'], nout=0)
await self.assertFindsClaim(
channel2, public_key_id=channel_txo2['value']['public_key_id'])
await self.assertFindsClaim(
self.channel, public_key_id=channel_txo['value']['public_key_id'])
signed = await self.stream_create('on-channel-claim', '0.001', channel_id=self.channel_id)
signed2 = await self.stream_create('on-channel-claim', '0.0001', channel_id=channel_id2,
allow_duplicate_name=True)
unsigned = await self.stream_create('unsigned', '0.0001')
# finding claims with and without a channel
await self.assertFindsClaims([signed2, signed], name='on-channel-claim')
await self.assertFindsClaims([signed2, signed], channel_ids=[self.channel_id, channel_id2])
await self.assertFindsClaim(signed, name='on-channel-claim', channel_ids=[self.channel_id])
await self.assertFindsClaim(signed2, name='on-channel-claim', channel_ids=[channel_id2])
await self.assertFindsClaim(unsigned, name='unsigned')
await self.assertFindsClaim(unsigned, txid=unsigned['txid'], nout=0)
await self.assertFindsClaim(unsigned, claim_id=self.get_claim_id(unsigned))
two = await self.stream_create('on-channel-claim-2', '0.0001', channel_id=self.channel_id)
three = await self.stream_create('on-channel-claim-3', '0.0001', channel_id=self.channel_id)
# three streams in channel, zero streams in abandoned channel
claims = [three, two, signed]
await self.assertFindsClaims(claims, channel_ids=[self.channel_id])
await self.assertFindsClaims(claims, channel=f"@abc#{self.channel_id}")
await self.assertFindsClaims([three, two, signed2, signed], channel_ids=[channel_id2, self.channel_id])
await self.channel_abandon(claim_id=self.channel_id)
await self.assertFindsClaims([], channel=f"@abc#{self.channel_id}", valid_channel_signature=True)
await self.assertFindsClaims([], channel_ids=[self.channel_id], valid_channel_signature=True)
await self.assertFindsClaims([signed2], channel_ids=[channel_id2], valid_channel_signature=True)
# pass `invalid_channel_signature=False` to catch a bug in argument processing
await self.assertFindsClaims([signed2], channel_ids=[channel_id2, self.channel_id],
valid_channel_signature=True, invalid_channel_signature=False)
# invalid signature still returns channel_id
self.ledger._tx_cache.clear()
invalid_claims = await self.claim_search(invalid_channel_signature=True, has_channel_signature=True)
self.assertEqual(3, len(invalid_claims))
self.assertTrue(all([not c['is_channel_signature_valid'] for c in invalid_claims]))
self.assertEqual({'channel_id': self.channel_id}, invalid_claims[0]['signing_channel'])
valid_claims = await self.claim_search(valid_channel_signature=True, has_channel_signature=True)
self.assertEqual(1, len(valid_claims))
self.assertTrue(all([c['is_channel_signature_valid'] for c in valid_claims]))
self.assertEqual('@abc', valid_claims[0]['signing_channel']['name'])
# abandoned stream won't show up for streams in channel search
await self.stream_abandon(txid=signed2['txid'], nout=0)
await self.assertFindsClaims([], channel_ids=[channel_id2])
async def test_pagination(self):
await self.create_channel()
await self.create_lots_of_streams()
page = await self.claim_search(page_size=20, channel='@abc', order_by=['height', '^name'])
page_claim_ids = [item['name'] for item in page]
self.assertEqual(page_claim_ids, self.streams)
page = await self.claim_search(page_size=6, channel='@abc', order_by=['height', '^name'])
page_claim_ids = [item['name'] for item in page]
self.assertEqual(page_claim_ids, self.streams[:6])
page = await self.claim_search(page=2, page_size=6, channel='@abc', order_by=['height', '^name'])
page_claim_ids = [item['name'] for item in page]
self.assertEqual(page_claim_ids, self.streams[6:])
out_of_bounds = await self.claim_search(page=2, page_size=20, channel='@abc')
self.assertEqual(out_of_bounds, [])
results = await self.daemon.jsonrpc_claim_search()
self.assertEqual(results['total_pages'], 2)
self.assertEqual(results['total_items'], 13)
results = await self.daemon.jsonrpc_claim_search(no_totals=True)
self.assertNotIn('total_pages', results)
self.assertNotIn('total_items', results)
async def test_tag_search(self):
claim1 = await self.stream_create('claim1', tags=['aBc'])
claim2 = await self.stream_create('claim2', tags=['#abc', 'def'])
claim3 = await self.stream_create('claim3', tags=['abc', 'ghi', 'jkl'])
claim4 = await self.stream_create('claim4', tags=['abc\t', 'ghi', 'mno'])
claim5 = await self.stream_create('claim5', tags=['pqr'])
# any_tags
await self.assertFindsClaims([claim5, claim4, claim3, claim2, claim1], any_tags=['\tabc', 'pqr'])
await self.assertFindsClaims([claim4, claim3, claim2, claim1], any_tags=['abc'])
await self.assertFindsClaims([claim4, claim3, claim2, claim1], any_tags=['abc', 'ghi'])
await self.assertFindsClaims([claim4, claim3], any_tags=['ghi'])
await self.assertFindsClaims([claim4, claim3], any_tags=['ghi', 'xyz'])
await self.assertFindsClaims([], any_tags=['xyz'])
# all_tags
await self.assertFindsClaims([], all_tags=['abc', 'pqr'])
await self.assertFindsClaims([claim4, claim3, claim2, claim1], all_tags=['ABC'])
await self.assertFindsClaims([claim4, claim3], all_tags=['abc', 'ghi'])
await self.assertFindsClaims([claim4, claim3], all_tags=['ghi'])
await self.assertFindsClaims([], all_tags=['ghi', 'xyz'])
await self.assertFindsClaims([], all_tags=['xyz'])
# not_tags
await self.assertFindsClaims([], not_tags=['abc', 'pqr'])
await self.assertFindsClaims([claim5], not_tags=['abC'])
await self.assertFindsClaims([claim5], not_tags=['abc', 'ghi'])
await self.assertFindsClaims([claim5, claim2, claim1], not_tags=['ghi'])
await self.assertFindsClaims([claim5, claim2, claim1], not_tags=['ghi', 'xyz'])
await self.assertFindsClaims([claim5, claim4, claim3, claim2, claim1], not_tags=['xyz'])
# combinations
await self.assertFindsClaims([claim3], all_tags=['abc', 'ghi'], not_tags=['mno'])
await self.assertFindsClaims([claim3], all_tags=['abc', 'ghi'], any_tags=['jkl'], not_tags=['mno'])
await self.assertFindsClaims([claim4, claim3, claim2], all_tags=['abc'], any_tags=['def', 'ghi'])
async def test_order_by(self):
height = self.ledger.network.remote_height
claims = [await self.stream_create(f'claim{i}') for i in range(5)]
await self.assertFindsClaims(claims, order_by=["^height"])
await self.assertFindsClaims(list(reversed(claims)), order_by=["height"])
await self.assertFindsClaims([claims[0]], height=height+1)
await self.assertFindsClaims([claims[4]], height=height+5)
await self.assertFindsClaims(claims[:1], height=f'<{height+2}', order_by=["^height"])
await self.assertFindsClaims(claims[:2], height=f'<={height+2}', order_by=["^height"])
await self.assertFindsClaims(claims[2:], height=f'>{height+2}', order_by=["^height"])
await self.assertFindsClaims(claims[1:], height=f'>={height+2}', order_by=["^height"])
await self.assertFindsClaims(claims, order_by=["^name"])
async def test_search_by_fee(self):
claim1 = await self.stream_create('claim1', fee_amount='1.0', fee_currency='lbc')
claim2 = await self.stream_create('claim2', fee_amount='0.9', fee_currency='lbc')
claim3 = await self.stream_create('claim3', fee_amount='0.5', fee_currency='lbc')
claim4 = await self.stream_create('claim4', fee_amount='0.1', fee_currency='lbc')
claim5 = await self.stream_create('claim5', fee_amount='1.0', fee_currency='usd')
await self.assertFindsClaims([claim5, claim4, claim3, claim2, claim1], fee_amount='>0')
await self.assertFindsClaims([claim4, claim3, claim2, claim1], fee_currency='lbc')
await self.assertFindsClaims([claim3, claim2, claim1], fee_amount='>0.1', fee_currency='lbc')
await self.assertFindsClaims([claim4, claim3, claim2], fee_amount='<1.0', fee_currency='lbc')
await self.assertFindsClaims([claim3], fee_amount='0.5', fee_currency='lbc')
await self.assertFindsClaims([claim5], fee_currency='usd')
await self.assertFindsClaims([], fee_currency='foo')
async def test_search_by_channel(self):
match = self.assertFindsClaims
chan1_id = self.get_claim_id(await self.channel_create('@chan1'))
chan2_id = self.get_claim_id(await self.channel_create('@chan2'))
chan3_id = self.get_claim_id(await self.channel_create('@chan3'))
claim1 = await self.stream_create('claim1')
claim2 = await self.stream_create('claim2', channel_id=chan1_id)
claim3 = await self.stream_create('claim3', channel_id=chan1_id)
claim4 = await self.stream_create('claim4', channel_id=chan2_id)
claim5 = await self.stream_create('claim5', channel_id=chan2_id)
claim6 = await self.stream_create('claim6', channel_id=chan3_id)
await self.channel_abandon(chan3_id)
# {has/valid/invalid}_channel_signature
await match([claim6, claim5, claim4, claim3, claim2], has_channel_signature=True)
await match([claim5, claim4, claim3, claim2, claim1], valid_channel_signature=True, claim_type='stream')
await match([claim6, claim1], invalid_channel_signature=True, claim_type='stream')
await match([claim5, claim4, claim3, claim2], has_channel_signature=True, valid_channel_signature=True)
await match([claim6], has_channel_signature=True, invalid_channel_signature=True)
# not_channel_ids
await match([claim6, claim5, claim4, claim3, claim2, claim1], not_channel_ids=['abc123'], claim_type='stream')
await match([claim5, claim4, claim3, claim2, claim1], not_channel_ids=[chan3_id], claim_type='stream')
await match([claim6, claim5, claim4, claim1], not_channel_ids=[chan1_id], claim_type='stream')
await match([claim6, claim3, claim2, claim1], not_channel_ids=[chan2_id], claim_type='stream')
await match([claim6, claim1], not_channel_ids=[chan1_id, chan2_id], claim_type='stream')
# not_channel_ids + valid_channel_signature
await match([claim5, claim4, claim3, claim2, claim1],
not_channel_ids=['abc123'], valid_channel_signature=True, claim_type='stream')
await match([claim5, claim4, claim1],
not_channel_ids=[chan1_id], valid_channel_signature=True, claim_type='stream')
await match([claim3, claim2, claim1],
not_channel_ids=[chan2_id], valid_channel_signature=True, claim_type='stream')
await match([claim1], not_channel_ids=[chan1_id, chan2_id], valid_channel_signature=True, claim_type='stream')
# not_channel_ids + has_channel_signature
await match([claim6, claim5, claim4, claim3, claim2], not_channel_ids=['abc123'], has_channel_signature=True)
await match([claim6, claim5, claim4], not_channel_ids=[chan1_id], has_channel_signature=True)
await match([claim6, claim3, claim2], not_channel_ids=[chan2_id], has_channel_signature=True)
await match([claim6], not_channel_ids=[chan1_id, chan2_id], has_channel_signature=True)
# not_channel_ids + has_channel_signature + valid_channel_signature
await match([claim5, claim4, claim3, claim2],
not_channel_ids=['abc123'], has_channel_signature=True, valid_channel_signature=True)
await match([claim5, claim4],
not_channel_ids=[chan1_id], has_channel_signature=True, valid_channel_signature=True)
await match([claim3, claim2],
not_channel_ids=[chan2_id], has_channel_signature=True, valid_channel_signature=True)
await match([], not_channel_ids=[chan1_id, chan2_id], has_channel_signature=True, valid_channel_signature=True)
async def test_claim_type_and_media_type_search(self):
# create an invalid/unknown claim
address = await self.account.receiving.get_or_create_usable_address()
tx = await Transaction.claim_create(
'unknown', b'{"sources":{"lbry_sd_hash":""}}', 1, address, [self.account], self.account)
await tx.sign([self.account])
await self.broadcast(tx)
await self.confirm_tx(tx.id)
octet = await self.stream_create()
video = await self.video_stream_create()
image = await self.image_stream_create()
channel = await self.channel_create()
unknown = self.sout(tx)
# claim_type
await self.assertFindsClaims([image, video, octet, unknown], claim_type='stream')
await self.assertFindsClaims([channel], claim_type='channel')
# stream_type
await self.assertFindsClaims([octet, unknown], stream_types=['binary'])
await self.assertFindsClaims([video], stream_types=['video'])
await self.assertFindsClaims([image], stream_types=['image'])
await self.assertFindsClaims([image, video], stream_types=['video', 'image'])
# stream_type
await self.assertFindsClaims([octet, unknown], media_types=['application/octet-stream'])
await self.assertFindsClaims([video], media_types=['video/mp4'])
await self.assertFindsClaims([image], media_types=['image/png'])
await self.assertFindsClaims([image, video], media_types=['video/mp4', 'image/png'])
class ChannelCommands(CommandTestCase):
async def test_create_channel_names(self):
# claim new name
await self.channel_create('@foo')
self.assertEqual(len(await self.daemon.jsonrpc_channel_list()), 1)
await self.assertBalance(self.account, '8.991893')
# fail to claim duplicate
with self.assertRaisesRegex(Exception, "You already have a channel under the name '@foo'."):
await self.channel_create('@foo')
# fail to claim invalid name
with self.assertRaisesRegex(Exception, "Channel names must start with '@' symbol."):
await self.channel_create('foo')
# nothing's changed after failed attempts
self.assertEqual(len(await self.daemon.jsonrpc_channel_list()), 1)
await self.assertBalance(self.account, '8.991893')
# succeed overriding duplicate restriction
await self.channel_create('@foo', allow_duplicate_name=True)
self.assertEqual(len(await self.daemon.jsonrpc_channel_list()), 2)
await self.assertBalance(self.account, '7.983786')
async def test_channel_bids(self):
# enough funds
tx = await self.channel_create('@foo', '5.0')
claim_id = self.get_claim_id(tx)
self.assertEqual(len(await self.daemon.jsonrpc_channel_list()), 1)
await self.assertBalance(self.account, '4.991893')
# bid preserved on update
tx = await self.channel_update(claim_id)
self.assertEqual(tx['outputs'][0]['amount'], '5.0')
# bid changed on update
tx = await self.channel_update(claim_id, bid='4.0')
self.assertEqual(tx['outputs'][0]['amount'], '4.0')
await self.assertBalance(self.account, '5.991447')
# not enough funds
with self.assertRaisesRegex(
InsufficientFundsError, "Not enough funds to cover this transaction."):
await self.channel_create('@foo2', '9.0')
self.assertEqual(len(await self.daemon.jsonrpc_channel_list()), 1)
await self.assertBalance(self.account, '5.991447')
# spend exactly amount available, no change
tx = await self.channel_create('@foo3', '5.981266')
await self.assertBalance(self.account, '0.0')
self.assertEqual(len(tx['outputs']), 1) # no change
self.assertEqual(len(await self.daemon.jsonrpc_channel_list()), 2)
async def test_setting_channel_fields(self):
values = {
'title': "Cool Channel",
'description': "Best channel on LBRY.",
'thumbnail_url': "https://co.ol/thumbnail.png",
'tags': ["cool", "awesome"],
'languages': ["en-US"],
'locations': ['US::Manchester'],
'email': "[email protected]",
'website_url': "https://co.ol",
'cover_url': "https://co.ol/cover.png",
'featured': ['cafe']
}
fixed_values = values.copy()
fixed_values['thumbnail'] = {'url': fixed_values.pop('thumbnail_url')}
fixed_values['locations'] = [{'country': 'US', 'city': 'Manchester'}]
fixed_values['cover'] = {'url': fixed_values.pop('cover_url')}
# create new channel with all fields set
tx = await self.out(self.channel_create('@bigchannel', **values))
channel = tx['outputs'][0]['value']
self.assertEqual(channel, {
'public_key': channel['public_key'],
'public_key_id': channel['public_key_id'],
**fixed_values
})
# create channel with nothing set
tx = await self.out(self.channel_create('@lightchannel'))
channel = tx['outputs'][0]['value']
self.assertEqual(
channel, {'public_key': channel['public_key'], 'public_key_id': channel['public_key_id']})
# create channel with just a featured claim
tx = await self.out(self.channel_create('@featurechannel', featured='beef'))
txo = tx['outputs'][0]
claim_id, channel = txo['claim_id'], txo['value']
fixed_values['public_key'] = channel['public_key']
fixed_values['public_key_id'] = channel['public_key_id']
self.assertEqual(channel, {
'public_key': fixed_values['public_key'],
'public_key_id': fixed_values['public_key_id'],
'featured': ['beef']
})
# update channel "@featurechannel" setting all fields
tx = await self.out(self.channel_update(claim_id, **values))
channel = tx['outputs'][0]['value']
fixed_values['featured'].insert(0, 'beef') # existing featured claim
self.assertEqual(channel, fixed_values)
# clearing and settings featured content
tx = await self.out(self.channel_update(claim_id, featured='beefcafe', clear_featured=True))
channel = tx['outputs'][0]['value']
fixed_values['featured'] = ['beefcafe']
self.assertEqual(channel, fixed_values)
# reset signing key
tx = await self.out(self.channel_update(claim_id, new_signing_key=True))
channel = tx['outputs'][0]['value']
self.assertNotEqual(channel['public_key'], fixed_values['public_key'])
# replace mode (clears everything except public_key)
tx = await self.out(self.channel_update(claim_id, replace=True, title='foo', email='[email protected]'))
self.assertEqual(tx['outputs'][0]['value'], {
'public_key': channel['public_key'],
'public_key_id': channel['public_key_id'],
'title': 'foo', 'email': '[email protected]'}
)
# move channel to another account
new_account = await self.out(self.daemon.jsonrpc_account_create('second account'))
account2_id, account2 = new_account['id'], self.wallet.get_account_or_error(new_account['id'])
# before moving
self.assertEqual(len(await self.daemon.jsonrpc_channel_list()), 3)
self.assertEqual(len(await self.daemon.jsonrpc_channel_list(account_id=account2_id)), 0)
other_address = await account2.receiving.get_or_create_usable_address()
tx = await self.out(self.channel_update(claim_id, claim_address=other_address))
# after moving
self.assertEqual(len(await self.daemon.jsonrpc_channel_list()), 3)
self.assertEqual(len(await self.daemon.jsonrpc_channel_list(account_id=self.account.id)), 2)
self.assertEqual(len(await self.daemon.jsonrpc_channel_list(account_id=account2_id)), 1)
async def test_channel_export_import_before_sending_channel(self):
# export
tx = await self.channel_create('@foo', '1.0')
claim_id = self.get_claim_id(tx)
channel_private_key = (await self.account.get_channels())[0].private_key
exported_data = await self.out(self.daemon.jsonrpc_channel_export(claim_id))
# import
daemon2 = await self.add_daemon()
self.assertEqual(0, len(await daemon2.jsonrpc_channel_list()))
await daemon2.jsonrpc_channel_import(exported_data)
channels = await daemon2.jsonrpc_channel_list()
self.assertEqual(1, len(channels))
self.assertEqual(channel_private_key.to_string(), channels[0].private_key.to_string())
# second wallet can't update until channel is sent to it
with self.assertRaisesRegex(AssertionError, 'Cannot find private key for signing output.'):
await daemon2.jsonrpc_channel_update(claim_id, bid='0.5')
# now send the channel as well
await self.channel_update(claim_id, claim_address=await daemon2.jsonrpc_address_unused())
# second wallet should be able to update now
await daemon2.jsonrpc_channel_update(claim_id, bid='0.5')
async def test_channel_update_across_accounts(self):
account2 = await self.daemon.jsonrpc_account_create('second account')
channel = await self.out(self.channel_create('@spam', '1.0', account_id=account2.id))
# channel not in account1
with self.assertRaisesRegex(Exception, "Can't find the channel"):
await self.channel_update(self.get_claim_id(channel), bid='2.0', account_id=self.account.id)
# channel is in account2
await self.channel_update(self.get_claim_id(channel), bid='2.0', account_id=account2.id)
result = await self.out(self.daemon.jsonrpc_channel_list())
self.assertEqual(result[0]['amount'], '2.0')
# check all accounts for channel
await self.channel_update(self.get_claim_id(channel), bid='3.0')
result = await self.out(self.daemon.jsonrpc_channel_list())
self.assertEqual(result[0]['amount'], '3.0')
await self.channel_abandon(self.get_claim_id(channel))
class StreamCommands(ClaimTestCase):
async def test_create_stream_names(self):
# claim new name
await self.stream_create('foo')
self.assertEqual(len(await self.daemon.jsonrpc_claim_list()), 1)
await self.assertBalance(self.account, '8.993893')
# fail to claim duplicate
with self.assertRaisesRegex(
Exception, "You already have a stream claim published under the name 'foo'."):
await self.stream_create('foo')
# fail claim starting with @
with self.assertRaisesRegex(
Exception, "Stream names cannot start with '@' symbol."):
await self.stream_create('@foo')
self.assertEqual(len(await self.daemon.jsonrpc_claim_list()), 1)
await self.assertBalance(self.account, '8.993893')
# succeed overriding duplicate restriction
await self.stream_create('foo', allow_duplicate_name=True)
self.assertEqual(len(await self.daemon.jsonrpc_claim_list()), 2)
await self.assertBalance(self.account, '7.987786')
async def test_stream_bids(self):
# enough funds
tx = await self.stream_create('foo', '2.0')
claim_id = self.get_claim_id(tx)
self.assertEqual(len(await self.daemon.jsonrpc_claim_list()), 1)
await self.assertBalance(self.account, '7.993893')
# bid preserved on update
tx = await self.stream_update(claim_id)
self.assertEqual(tx['outputs'][0]['amount'], '2.0')
# bid changed on update
tx = await self.stream_update(claim_id, bid='3.0')
self.assertEqual(tx['outputs'][0]['amount'], '3.0')
await self.assertBalance(self.account, '6.993319')
# not enough funds
with self.assertRaisesRegex(
InsufficientFundsError, "Not enough funds to cover this transaction."):
await self.stream_create('foo2', '9.0')
self.assertEqual(len(await self.daemon.jsonrpc_claim_list()), 1)
await self.assertBalance(self.account, '6.993319')
# spend exactly the amount available, no change
tx = await self.stream_create('foo3', '6.98523')
await self.assertBalance(self.account, '0.0')
self.assertEqual(len(tx['outputs']), 1) # no change
self.assertEqual(len(await self.daemon.jsonrpc_claim_list()), 2)
async def test_stream_update_and_abandon_across_accounts(self):
account2 = await self.daemon.jsonrpc_account_create('second account')
stream = await self.out(self.stream_create('spam', '1.0', account_id=account2.id))
# stream not in account1
with self.assertRaisesRegex(Exception, "Can't find the stream"):
await self.stream_update(self.get_claim_id(stream), bid='2.0', account_id=self.account.id)
# stream is in account2
await self.stream_update(self.get_claim_id(stream), bid='2.0', account_id=account2.id)
result = await self.out(self.daemon.jsonrpc_stream_list())
self.assertEqual(result[0]['amount'], '2.0')
# check all accounts for stream
await self.stream_update(self.get_claim_id(stream), bid='3.0')
result = await self.out(self.daemon.jsonrpc_stream_list())
self.assertEqual(result[0]['amount'], '3.0')
await self.stream_abandon(self.get_claim_id(stream))
async def test_publishing_checks_all_accounts_for_channel(self):
account1_id, account1 = self.account.id, self.account
new_account = await self.out(self.daemon.jsonrpc_account_create('second account'))
account2_id, account2 = new_account['id'], self.wallet.get_account_or_error(new_account['id'])
await self.out(self.channel_create('@spam', '1.0'))
self.assertEqual('8.989893', (await self.daemon.jsonrpc_account_balance())['available'])
result = await self.out(self.daemon.jsonrpc_account_send(
'5.0', await self.daemon.jsonrpc_address_unused(account2_id)
))
await self.confirm_tx(result['txid'])
self.assertEqual('3.989769', (await self.daemon.jsonrpc_account_balance())['available'])
self.assertEqual('5.0', (await self.daemon.jsonrpc_account_balance(account2_id))['available'])
baz_tx = await self.out(self.channel_create('@baz', '1.0', account_id=account2_id))
baz_id = self.get_claim_id(baz_tx)
channels = await self.out(self.daemon.jsonrpc_channel_list(account1_id))
self.assertEqual(len(channels), 1)
self.assertEqual(channels[0]['name'], '@spam')
self.assertEqual(channels, await self.out(self.daemon.jsonrpc_channel_list(account1_id)))
channels = await self.out(self.daemon.jsonrpc_channel_list(account2_id))
self.assertEqual(len(channels), 1)
self.assertEqual(channels[0]['name'], '@baz')
channels = await self.out(self.daemon.jsonrpc_channel_list())
self.assertEqual(len(channels), 2)
self.assertEqual(channels[0]['name'], '@baz')
self.assertEqual(channels[1]['name'], '@spam')
# defaults to using all accounts to look up the channel
await self.stream_create('hovercraft1', '0.1', channel_id=baz_id)
self.assertEqual((await self.claim_search(name='hovercraft1'))[0]['signing_channel']['name'], '@baz')
# lookup by channel_name in all accounts
await self.stream_create('hovercraft2', '0.1', channel_name='@baz')
self.assertEqual((await self.claim_search(name='hovercraft2'))[0]['signing_channel']['name'], '@baz')
# uses only the specific accounts which contain the channel
await self.stream_create('hovercraft3', '0.1', channel_id=baz_id, channel_account_id=[account2_id])
self.assertEqual((await self.claim_search(name='hovercraft3'))[0]['signing_channel']['name'], '@baz')
# lookup by channel_name in specific account
await self.stream_create('hovercraft4', '0.1', channel_name='@baz', channel_account_id=[account2_id])
self.assertEqual((await self.claim_search(name='hovercraft4'))[0]['signing_channel']['name'], '@baz')
# fails when specifying account which does not contain channel
with self.assertRaisesRegex(ValueError, "Couldn't find channel with channel_id"):
await self.stream_create(
'hovercraft5', '0.1', channel_id=baz_id, channel_account_id=[account1_id]
)
# same failure when looking up by channel_name
with self.assertRaisesRegex(ValueError, "Couldn't find channel with channel_name '@baz'"):
await self.stream_create(
'hovercraft5', '0.1', channel_name='@baz', channel_account_id=[account1_id]
)
# signing with channel works even if channel and certificate are in different accounts
await self.channel_update(
baz_id, account_id=account2_id,
claim_address=await self.daemon.jsonrpc_address_unused(account1_id)
)
await self.stream_create(
'hovercraft5', '0.1', channel_id=baz_id
)
async def test_preview_works_with_signed_streams(self):
await self.out(self.channel_create('@spam', '1.0'))
signed = await self.out(self.stream_create('bar', '1.0', channel_name='@spam', preview=True, confirm=False))
self.assertTrue(signed['outputs'][0]['is_channel_signature_valid'])
async def test_publish_updates_file_list(self):
tx = await self.out(self.stream_create(title='created'))
txo = tx['outputs'][0]
claim_id, expected = txo['claim_id'], txo['value']
files = self.sout(self.daemon.jsonrpc_file_list())
self.assertEqual(1, len(files))
self.assertEqual(tx['txid'], files[0]['txid'])
self.assertEqual(expected, files[0]['metadata'])
# update with metadata-only changes
tx = await self.out(self.stream_update(claim_id, title='update 1'))
files = self.sout(self.daemon.jsonrpc_file_list())
expected['title'] = 'update 1'
self.assertEqual(1, len(files))
self.assertEqual(tx['txid'], files[0]['txid'])
self.assertEqual(expected, files[0]['metadata'])
# update with new data
tx = await self.out(self.stream_update(claim_id, title='update 2', data=b'updated data'))
expected = tx['outputs'][0]['value']
files = self.sout(self.daemon.jsonrpc_file_list())
self.assertEqual(1, len(files))
self.assertEqual(tx['txid'], files[0]['txid'])
self.assertEqual(expected, files[0]['metadata'])
async def test_setting_stream_fields(self):
values = {
'title': "Cool Content",
'description': "Best content on LBRY.",
'thumbnail_url': "https://co.ol/thumbnail.png",
'tags': ["cool", "awesome"],
'languages': ["en"],
'locations': ['US:NH:Manchester:03101:42.990605:-71.460989'],
'author': "Jules Verne",
'license': 'Public Domain',
'license_url': "https://co.ol/license",
'release_time': 123456,
'fee_currency': 'usd',
'fee_amount': '2.99',
'fee_address': 'mmCsWAiXMUVecFQ3fVzUwvpT9XFMXno2Ca',
}
fixed_values = values.copy()
fixed_values['locations'] = [{
'country': 'US',
'state': 'NH',
'city': 'Manchester',
'code': '03101',
'latitude': '42.990605',
'longitude': '-71.460989'
}]
fixed_values['thumbnail'] = {'url': fixed_values.pop('thumbnail_url')}
fixed_values['release_time'] = str(values['release_time'])
fixed_values['stream_type'] = 'binary'
fixed_values['source'] = {
'hash': '56bf5dbae43f77a63d075b0f2ae9c7c3e3098db93779c7f9840da0f4db9c2f8c8454f4edd1373e2b64ee2e68350d916e',
'media_type': 'application/octet-stream',
'size': '3'
}
fixed_values['fee'] = {
'address': fixed_values.pop('fee_address'),
'amount': fixed_values.pop('fee_amount'),
'currency': fixed_values.pop('fee_currency').upper()
}
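# Note (added for clarity): the flat inputs above (fee_amount/fee_currency/
# fee_address, thumbnail_url, the colon-separated location string) are
# normalized by the daemon into nested claim structures, so fixed_values is
# reshaped here to mirror the expected output of the stream_create call below.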
# create new stream with all fields set
tx = await self.out(self.stream_create('big', **values))
stream = tx['outputs'][0]['value']
fixed_values['source']['name'] = stream['source']['name']
fixed_values['source']['sd_hash'] = stream['source']['sd_hash']
self.assertEqual(stream, fixed_values)
# create stream with nothing set
tx = await self.out(self.stream_create('light'))
stream = tx['outputs'][0]['value']
self.assertEqual(
stream, {
'stream_type': 'binary',
'source': {
'size': '3',
'media_type': 'application/octet-stream',
'name': stream['source']['name'],
'hash': '56bf5dbae43f77a63d075b0f2ae9c7c3e3098db93779c7f9840da0f4db9c2f8c8454f4edd1373e2b64ee2e68350d916e',
'sd_hash': stream['source']['sd_hash']
},
}
)
# create stream with just some tags, langs and locations
tx = await self.out(self.stream_create('updated', tags='blah', languages='uk', locations='UA::Kyiv'))
txo = tx['outputs'][0]
claim_id, stream = txo['claim_id'], txo['value']
fixed_values['source']['name'] = stream['source']['name']
fixed_values['source']['sd_hash'] = stream['source']['sd_hash']
self.assertEqual(
stream, {
'stream_type': 'binary',
'source': {
'size': '3',
'media_type': 'application/octet-stream',
'name': fixed_values['source']['name'],
'hash': '56bf5dbae43f77a63d075b0f2ae9c7c3e3098db93779c7f9840da0f4db9c2f8c8454f4edd1373e2b64ee2e68350d916e',
'sd_hash': fixed_values['source']['sd_hash'],
},
'tags': ['blah'],
'languages': ['uk'],
'locations': [{'country': 'UA', 'city': 'Kyiv'}]
}
)
# update stream setting all fields, 'source' doesn't change
tx = await self.out(self.stream_update(claim_id, **values))
stream = tx['outputs'][0]['value']
fixed_values['tags'].insert(0, 'blah') # existing tag
fixed_values['languages'].insert(0, 'uk') # existing language
fixed_values['locations'].insert(0, {'country': 'UA', 'city': 'Kyiv'}) # existing location
self.assertEqual(stream, fixed_values)
# clearing and setting tags, languages and locations
tx = await self.out(self.stream_update(
claim_id, tags='single', clear_tags=True,
languages='pt', clear_languages=True,
locations='BR', clear_locations=True,
))
txo = tx['outputs'][0]
fixed_values['tags'] = ['single']
fixed_values['languages'] = ['pt']
fixed_values['locations'] = [{'country': 'BR'}]
self.assertEqual(txo['value'], fixed_values)
# modifying hash/size/name
fixed_values['source']['name'] = 'changed_name'
fixed_values['source']['hash'] = 'cafebeef'
fixed_values['source']['size'] = '42'
tx = await self.out(self.stream_update(
claim_id, file_name='changed_name', file_hash='cafebeef', file_size=42
))
self.assertEqual(tx['outputs'][0]['value'], fixed_values)
# stream_update re-signs with the same channel
channel_id = self.get_claim_id(await self.channel_create('@chan'))
tx = await self.stream_update(claim_id, channel_id=channel_id)
self.assertEqual(tx['outputs'][0]['signing_channel']['name'], '@chan')
tx = await self.stream_update(claim_id, title='channel re-signs')
self.assertEqual(tx['outputs'][0]['value']['title'], 'channel re-signs')
self.assertEqual(tx['outputs'][0]['signing_channel']['name'], '@chan')
# send claim to someone else
new_account = await self.out(self.daemon.jsonrpc_account_create('second account'))
account2_id, account2 = new_account['id'], self.wallet.get_account_or_error(new_account['id'])
# before sending
self.assertEqual(len(await self.daemon.jsonrpc_claim_list()), 4)
self.assertEqual(len(await self.daemon.jsonrpc_claim_list(account_id=self.account.id)), 4)
self.assertEqual(len(await self.daemon.jsonrpc_claim_list(account_id=account2_id)), 0)
other_address = await account2.receiving.get_or_create_usable_address()
tx = await self.out(self.stream_update(claim_id, claim_address=other_address))
# after sending
self.assertEqual(len(await self.daemon.jsonrpc_claim_list()), 4)
self.assertEqual(len(await self.daemon.jsonrpc_claim_list(account_id=self.account.id)), 3)
self.assertEqual(len(await self.daemon.jsonrpc_claim_list(account_id=account2_id)), 1)
async def test_setting_fee_fields(self):
tx = await self.out(self.stream_create('paid-stream'))
txo = tx['outputs'][0]
claim_id, stream = txo['claim_id'], txo['value']
fee_address = 'mmCsWAiXMUVecFQ3fVzUwvpT9XFMXno2Ca'
self.assertNotIn('fee', stream)
# --replace=false
# validation
with self.assertRaisesRegex(Exception, 'please specify a fee currency'):
await self.stream_update(claim_id, fee_amount='0.1')
with self.assertRaisesRegex(Exception, 'unknown currency provided: foo'):
await self.stream_update(claim_id, fee_amount='0.1', fee_currency="foo")
with self.assertRaisesRegex(Exception, 'please specify a fee amount'):
await self.stream_update(claim_id, fee_currency='usd')
with self.assertRaisesRegex(Exception, 'please specify a fee amount'):
await self.stream_update(claim_id, fee_address=fee_address)
# set just amount and currency with default address
tx = await self.stream_update(
claim_id, fee_amount='0.99', fee_currency='lbc'
)
self.assertEqual(
tx['outputs'][0]['value']['fee'],
{'amount': '0.99', 'currency': 'LBC', 'address': txo['address']}
)
# set all fee fields
tx = await self.stream_update(
claim_id, fee_amount='0.1', fee_currency='usd', fee_address=fee_address
)
self.assertEqual(
tx['outputs'][0]['value']['fee'],
{'amount': '0.1', 'currency': 'USD', 'address': fee_address}
)
# change just address
tx = await self.stream_update(claim_id, fee_address=txo['address'])
self.assertEqual(
tx['outputs'][0]['value']['fee'],
{'amount': '0.1', 'currency': 'USD', 'address': txo['address']}
)
# change just amount (does not reset fee_address)
tx = await self.stream_update(claim_id, fee_amount='0.2')
self.assertEqual(
tx['outputs'][0]['value']['fee'],
{'amount': '0.2', 'currency': 'USD', 'address': txo['address']}
)
# changing currency without an amount is never allowed, even if previous amount exists
with self.assertRaises(Exception, msg='In order to set a fee currency, please specify a fee amount'):
await self.stream_update(claim_id, fee_currency='usd')
# clearing fee
tx = await self.out(self.stream_update(claim_id, clear_fee=True))
self.assertNotIn('fee', tx['outputs'][0]['value'])
# --replace=true
# set just amount and currency with default address
tx = await self.stream_update(
claim_id, fee_amount='0.99', fee_currency='lbc', replace=True
)
self.assertEqual(
tx['outputs'][0]['value']['fee'],
{'amount': '0.99', 'currency': 'LBC', 'address': txo['address']}
)
# set all fee fields
tx = await self.stream_update(
claim_id, fee_amount='0.1', fee_currency='usd', fee_address=fee_address, replace=True
)
self.assertEqual(
tx['outputs'][0]['value']['fee'],
{'amount': '0.1', 'currency': 'USD', 'address': fee_address}
)
# validation
with self.assertRaisesRegex(Exception, 'please specify a fee currency'):
await self.stream_update(claim_id, fee_amount='0.1', replace=True)
with self.assertRaisesRegex(Exception, 'unknown currency provided: foo'):
await self.stream_update(claim_id, fee_amount='0.1', fee_currency="foo", replace=True)
with self.assertRaisesRegex(Exception, 'please specify a fee amount'):
await self.stream_update(claim_id, fee_currency='usd', replace=True)
with self.assertRaisesRegex(Exception, 'please specify a fee amount'):
await self.stream_update(claim_id, fee_address=fee_address, replace=True)
async def test_automatic_type_and_metadata_detection_for_image(self):
txo = (await self.image_stream_create())['outputs'][0]
self.assertEqual(
txo['value'], {
'source': {
'size': '99',
'name': txo['value']['source']['name'],
'media_type': 'image/png',
'hash': '6c7df435d412c603390f593ef658c199817c7830ba3f16b7eadd8f99fa50e85dbd0d2b3dc61eadc33fe096e3872d1545',
'sd_hash': txo['value']['source']['sd_hash'],
},
'stream_type': 'image',
'image': {
'width': 5,
'height': 7
}
}
)
async def test_automatic_type_and_metadata_detection_for_video(self):
txo = (await self.video_stream_create())['outputs'][0]
self.assertEqual(
txo['value'], {
'source': {
'size': '2299653',
'name': 'ForBiggerEscapes.mp4',
'media_type': 'video/mp4',
'hash': '5f6811c83c1616df06f10bf5309ca61edb5ff949a9c1212ce784602d837bfdfc1c3db1e0580ef7bd1dadde41d8acf315',
'sd_hash': txo['value']['source']['sd_hash'],
},
'stream_type': 'video',
'video': {
'width': 1280,
'height': 720,
'duration': 15
}
}
)
async def test_overriding_automatic_metadata_detection(self):
tx = await self.out(
self.daemon.jsonrpc_stream_create(
'chrome', '1.0', file_path=self.video_file_name, width=99, height=88, duration=9
)
)
txo = tx['outputs'][0]
self.assertEqual(
txo['value'], {
'source': {
'size': '2299653',
'name': 'ForBiggerEscapes.mp4',
'media_type': 'video/mp4',
'hash': '5f6811c83c1616df06f10bf5309ca61edb5ff949a9c1212ce784602d837bfdfc1c3db1e0580ef7bd1dadde41d8acf315',
'sd_hash': txo['value']['source']['sd_hash'],
},
'stream_type': 'video',
'video': {
'width': 99,
'height': 88,
'duration': 9
}
}
)
async def test_replace_mode_preserves_source_and_type(self):
expected = {
'tags': ['blah'],
'languages': ['uk'],
'locations': [{'country': 'UA', 'city': 'Kyiv'}],
'source': {
'size': '2299653',
'name': 'ForBiggerEscapes.mp4',
'media_type': 'video/mp4',
'hash': '5f6811c83c1616df06f10bf5309ca61edb5ff949a9c1212ce784602d837bfdfc1c3db1e0580ef7bd1dadde41d8acf315',
},
'stream_type': 'video',
'video': {
'width': 1280,
'height': 720,
'duration': 15
}
}
channel = await self.channel_create('@chan')
tx = await self.out(self.daemon.jsonrpc_stream_create(
'chrome', '1.0', file_path=self.video_file_name,
tags='blah', languages='uk', locations='UA::Kyiv',
channel_id=self.get_claim_id(channel)
))
await self.on_transaction_dict(tx)
txo = tx['outputs'][0]
expected['source']['sd_hash'] = txo['value']['source']['sd_hash']
self.assertEqual(txo['value'], expected)
self.assertEqual(txo['signing_channel']['name'], '@chan')
tx = await self.out(self.daemon.jsonrpc_stream_update(
txo['claim_id'], title='new title', replace=True
))
txo = tx['outputs'][0]
expected['title'] = 'new title'
del expected['tags']
del expected['languages']
del expected['locations']
self.assertEqual(txo['value'], expected)
self.assertNotIn('signing_channel', txo)
async def test_create_update_and_abandon_stream(self):
await self.assertBalance(self.account, '10.0')
tx = await self.stream_create(bid='2.5') # creates new claim
claim_id = self.get_claim_id(tx)
txs = await self.out(self.daemon.jsonrpc_transaction_list())
self.assertEqual(len(txs[0]['claim_info']), 1)
self.assertEqual(txs[0]['confirmations'], 1)
self.assertEqual(txs[0]['claim_info'][0]['balance_delta'], '-2.5')
self.assertEqual(txs[0]['claim_info'][0]['claim_id'], claim_id)
self.assertEqual(txs[0]['value'], '0.0')
self.assertEqual(txs[0]['fee'], '-0.020107')
await self.assertBalance(self.account, '7.479893')
self.assertEqual(1, len(self.daemon.jsonrpc_file_list()))
await self.daemon.jsonrpc_file_delete(delete_all=True)
self.assertEqual(0, len(self.daemon.jsonrpc_file_list()))
await self.stream_update(claim_id, bid='1.0') # updates previous claim
txs = await self.out(self.daemon.jsonrpc_transaction_list())
self.assertEqual(len(txs[0]['update_info']), 1)
self.assertEqual(txs[0]['update_info'][0]['balance_delta'], '1.5')
self.assertEqual(txs[0]['update_info'][0]['claim_id'], claim_id)
self.assertEqual(txs[0]['value'], '0.0')
self.assertEqual(txs[0]['fee'], '-0.0002165')
await self.assertBalance(self.account, '8.9796765')
await self.stream_abandon(claim_id)
txs = await self.out(self.daemon.jsonrpc_transaction_list())
self.assertEqual(len(txs[0]['abandon_info']), 1)
self.assertEqual(txs[0]['abandon_info'][0]['balance_delta'], '1.0')
self.assertEqual(txs[0]['abandon_info'][0]['claim_id'], claim_id)
self.assertEqual(txs[0]['value'], '0.0')
self.assertEqual(txs[0]['fee'], '-0.000107')
await self.assertBalance(self.account, '9.9795695')
async def test_abandoning_stream_at_loss(self):
await self.assertBalance(self.account, '10.0')
tx = await self.stream_create(bid='0.0001')
await self.assertBalance(self.account, '9.979793')
await self.stream_abandon(self.get_claim_id(tx))
await self.assertBalance(self.account, '9.97968399')
async def test_publish(self):
# errors on missing arguments to create a stream
with self.assertRaisesRegex(Exception, "'bid' is a required argument for new publishes."):
await self.daemon.jsonrpc_publish('foo')
with self.assertRaisesRegex(Exception, "'file_path' is a required argument for new publishes."):
await self.daemon.jsonrpc_publish('foo', bid='1.0')
# successfully create stream
with tempfile.NamedTemporaryFile() as file:
file.write(b'hi')
file.flush()
tx1 = await self.publish('foo', bid='1.0', file_path=file.name)
self.assertEqual(1, len(self.daemon.jsonrpc_file_list()))
# doesn't error on missing arguments when doing an update stream
tx2 = await self.publish('foo', tags='updated')
self.assertEqual(1, len(self.daemon.jsonrpc_file_list()))
self.assertEqual(self.get_claim_id(tx1), self.get_claim_id(tx2))
# update conflict with two claims of the same name
tx3 = await self.stream_create('foo', allow_duplicate_name=True)
with self.assertRaisesRegex(Exception, "There are 2 claims for 'foo'"):
await self.daemon.jsonrpc_publish('foo')
self.assertEqual(2, len(self.daemon.jsonrpc_file_list()))
# abandon duplicate stream
await self.stream_abandon(self.get_claim_id(tx3))
# publish to a channel
await self.channel_create('@abc')
tx3 = await self.publish('foo', channel_name='@abc')
self.assertEqual(2, len(self.daemon.jsonrpc_file_list()))
r = await self.resolve('lbry://@abc/foo')
self.assertEqual(
r['lbry://@abc/foo']['claim_id'],
self.get_claim_id(tx3)
)
# publishing again clears channel
tx4 = await self.publish('foo', languages='uk-UA')
self.assertEqual(2, len(self.daemon.jsonrpc_file_list()))
r = await self.resolve('lbry://foo')
claim = r['lbry://foo']
self.assertEqual(claim['txid'], tx4['outputs'][0]['txid'])
self.assertNotIn('signing_channel', claim)
self.assertEqual(claim['value']['languages'], ['uk-UA'])
class SupportCommands(CommandTestCase):
async def test_regular_supports_and_tip_supports(self):
wallet2 = await self.daemon.jsonrpc_wallet_create('wallet2', create_account=True)
account2 = wallet2.accounts[0]
# send account2 5 LBC out of the 10 LBC in account1
result = await self.out(self.daemon.jsonrpc_account_send(
'5.0', await self.daemon.jsonrpc_address_unused(wallet_id='wallet2')
))
await self.on_transaction_dict(result)
# account1 and account2 balances:
await self.assertBalance(self.account, '4.999876')
await self.assertBalance(account2, '5.0')
# create the claim we'll be tipping and supporting
claim_id = self.get_claim_id(await self.stream_create())
# account1 and account2 balances:
await self.assertBalance(self.account, '3.979769')
await self.assertBalance(account2, '5.0')
# send a tip to the claim using account2
tip = await self.out(
self.daemon.jsonrpc_support_create(
claim_id, '1.0', True, account2.id, 'wallet2', funding_account_ids=[account2.id])
)
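# (added note: the positional True above is the 'tip' flag - contrast with the
#  plain support created further down, where it is passed as False)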
await self.confirm_tx(tip['txid'])
# tips don't affect balance, so account1's balance is the same but account2's balance went down
await self.assertBalance(self.account, '3.979769')
await self.assertBalance(account2, '3.9998585')
# verify that the incoming tip is marked correctly as is_tip=True in account1
txs = await self.out(self.daemon.jsonrpc_transaction_list(self.account.id))
self.assertEqual(len(txs[0]['support_info']), 1)
self.assertEqual(txs[0]['support_info'][0]['balance_delta'], '1.0')
self.assertEqual(txs[0]['support_info'][0]['claim_id'], claim_id)
self.assertEqual(txs[0]['support_info'][0]['is_tip'], True)
self.assertEqual(txs[0]['value'], '1.0')
self.assertEqual(txs[0]['fee'], '0.0')
# verify that the outgoing tip is marked correctly as is_tip=True in account2
txs2 = await self.out(
self.daemon.jsonrpc_transaction_list(wallet_id='wallet2', account_id=account2.id)
)
self.assertEqual(len(txs2[0]['support_info']), 1)
self.assertEqual(txs2[0]['support_info'][0]['balance_delta'], '-1.0')
self.assertEqual(txs2[0]['support_info'][0]['claim_id'], claim_id)
self.assertEqual(txs2[0]['support_info'][0]['is_tip'], True)
self.assertEqual(txs2[0]['value'], '-1.0')
self.assertEqual(txs2[0]['fee'], '-0.0001415')
# send a support to the claim using account2
support = await self.out(
self.daemon.jsonrpc_support_create(
claim_id, '2.0', False, account2.id, 'wallet2', funding_account_ids=[account2.id])
)
await self.confirm_tx(support['txid'])
# account2 balance went down ~2
await self.assertBalance(self.account, '3.979769')
await self.assertBalance(account2, '1.999717')
# verify that the outgoing support is marked correctly as is_tip=False in account2
txs2 = await self.out(self.daemon.jsonrpc_transaction_list(wallet_id='wallet2'))
self.assertEqual(len(txs2[0]['support_info']), 1)
self.assertEqual(txs2[0]['support_info'][0]['balance_delta'], '-2.0')
self.assertEqual(txs2[0]['support_info'][0]['claim_id'], claim_id)
self.assertEqual(txs2[0]['support_info'][0]['is_tip'], False)
self.assertEqual(txs2[0]['value'], '0.0')
self.assertEqual(txs2[0]['fee'], '-0.0001415')
|
the-stack_106_27065 | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from pytorch_a2c_ppo_acktr.utils import AddBias, init, init_normc_
"""
Modify standard PyTorch distributions so they are compatible with this code.
"""
FixedCategorical = torch.distributions.Categorical
old_sample = FixedCategorical.sample
FixedCategorical.sample = lambda self: old_sample(self).unsqueeze(-1)
log_prob_cat = FixedCategorical.log_prob
FixedCategorical.log_probs = lambda self, actions: log_prob_cat(self, actions.squeeze(-1)).unsqueeze(-1)
FixedCategorical.mode = lambda self: self.probs.argmax(dim=1, keepdim=True)
FixedNormal = torch.distributions.Normal
log_prob_normal = FixedNormal.log_prob
FixedNormal.log_probs = lambda self, actions: log_prob_normal(self, actions).sum(-1, keepdim=True)
entropy = FixedNormal.entropy
FixedNormal.entropy = lambda self: entropy(self).sum(-1)
FixedNormal.mode = lambda self: self.mean
class Categorical(nn.Module):
def __init__(self, num_inputs, num_outputs):
super(Categorical, self).__init__()
init_ = lambda m: init(m,
nn.init.orthogonal_,
lambda x: nn.init.constant_(x, 0),
gain=0.01)
self.linear = init_(nn.Linear(num_inputs, num_outputs))
def forward(self, x):
x = self.linear(x)
return FixedCategorical(logits=x)
class DiagGaussian(nn.Module):
def __init__(self, num_inputs, num_outputs, normalized=False):
super(DiagGaussian, self).__init__()
self.normalized = normalized
init_ = lambda m: init(m,
init_normc_,
lambda x: nn.init.constant_(x, 0))
self.fc_mean = init_(nn.Linear(num_inputs, num_outputs))
self.logstd = AddBias(torch.zeros(num_outputs))
def forward(self, x):
action_mean = self.fc_mean(x)
# An ugly hack for my KFAC implementation.
zeros = torch.zeros(action_mean.size())
if x.is_cuda:
zeros = zeros.cuda()
action_logstd = self.logstd(zeros)
return FixedNormal(action_mean, action_logstd.exp())
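# Illustrative usage sketch (added; not part of the original module). The
# hidden size, batch size and action count below are assumptions chosen only
# for the example:
#
#   head = Categorical(num_inputs=64, num_outputs=6)  # 6 discrete actions
#   features = torch.randn(8, 64)                     # batch of 8 feature vectors
#   dist = head(features)                             # FixedCategorical
#   actions = dist.sample()                           # shape (8, 1) thanks to the patch
#   log_probs = dist.log_probs(actions)               # shape (8, 1)
#   greedy = dist.mode()                              # argmax actions, shape (8, 1)
#
# The monkey-patching at the top of this file is what makes sample(),
# log_probs() and mode() return column vectors, so actor-critic code can
# concatenate them with value predictions without extra reshaping.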
|
the-stack_106_27066 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.addons.base_status.base_stage import base_stage
import binascii
from openerp.addons.crm import crm
from openerp.osv import fields, osv
import time
from openerp import tools
from openerp.tools.translate import _
from openerp.tools import html2plaintext
CRM_CLAIM_PENDING_STATES = (
crm.AVAILABLE_STATES[2][0], # Cancelled
crm.AVAILABLE_STATES[3][0], # Done
crm.AVAILABLE_STATES[4][0], # Pending
)
class crm_claim_stage(osv.osv):
""" Model for claim stages. This models the main stages of a claim
management flow. Main CRM objects (leads, opportunities, project
issues, ...) will now use only stages, instead of state and stages.
Stages are for example used to display the kanban view of records.
"""
_name = "crm.claim.stage"
_description = "Claim stages"
_rec_name = 'name'
_order = "sequence"
_columns = {
'name': fields.char('Stage Name', size=64, required=True, translate=True),
'sequence': fields.integer('Sequence', help="Used to order stages. Lower is better."),
'section_ids':fields.many2many('crm.case.section', 'section_claim_stage_rel', 'stage_id', 'section_id', string='Sections',
help="Link between stages and sales teams. When set, this limitate the current stage to the selected sales teams."),
'state': fields.selection(crm.AVAILABLE_STATES, 'Status', required=True, help="The related status for the stage. The status of your document will automatically change according to the selected stage. For example, if a stage is related to the status 'Close', when your document reaches this stage, it will automatically have the 'closed' status."),
'case_refused': fields.boolean('Refused stage',
help='Refused stages are specific stages used for refused (done) claims.'),
'case_default': fields.boolean('Common to All Teams',
help="If you check this field, this stage will be proposed by default on each sales team. It will not assign this stage to existing teams."),
'fold': fields.boolean('Hide in Views when Empty',
help="This stage is not visible, for example in status bar or kanban view, when there are no records in that stage to display."),
}
_defaults = {
'sequence': lambda *args: 1,
'state': 'draft',
'fold': False,
'case_refused': False,
}
class crm_claim(base_stage, osv.osv):
""" Crm claim
"""
_name = "crm.claim"
_description = "Claim"
_order = "priority,date desc"
_inherit = ['mail.thread']
_columns = {
'id': fields.integer('ID', readonly=True),
'name': fields.char('Claim Subject', size=128, required=True),
'active': fields.boolean('Active'),
'action_next': fields.char('Next Action', size=200),
'date_action_next': fields.datetime('Next Action Date'),
'description': fields.text('Description'),
'resolution': fields.text('Resolution'),
'create_date': fields.datetime('Creation Date' , readonly=True),
'write_date': fields.datetime('Update Date' , readonly=True),
'date_deadline': fields.date('Deadline'),
'date_closed': fields.datetime('Closed', readonly=True),
'date': fields.datetime('Claim Date', select=True),
'ref' : fields.reference('Reference', selection=crm._links_get, size=128),
'categ_id': fields.many2one('crm.case.categ', 'Category', \
domain="[('section_id','=',section_id),\
('object_id.model', '=', 'crm.claim')]"),
'priority': fields.selection(crm.AVAILABLE_PRIORITIES, 'Priority'),
'type_action': fields.selection([('correction','Corrective Action'),('prevention','Preventive Action')], 'Action Type'),
'user_id': fields.many2one('res.users', 'Responsible'),
'user_fault': fields.char('Trouble Responsible', size=64),
'section_id': fields.many2one('crm.case.section', 'Sales Team', \
select=True, help="Responsible sales team."\
" Define Responsible user and Email account for"\
" mail gateway."),
'company_id': fields.many2one('res.company', 'Company'),
'partner_id': fields.many2one('res.partner', 'Partner'),
'email_cc': fields.text('Watchers Emails', size=252, help="These email addresses will be added to the CC field of all inbound and outbound emails for this record before being sent. Separate multiple email addresses with a comma"),
'email_from': fields.char('Email', size=128, help="Destination email for email gateway."),
'partner_phone': fields.char('Phone', size=32),
'stage_id': fields.many2one ('crm.claim.stage', 'Stage', track_visibility='onchange',
domain="['&',('fold', '=', False),'|', ('section_ids', '=', section_id), ('case_default', '=', True)]"),
'cause': fields.text('Root Cause'),
'state': fields.related('stage_id', 'state', type="selection", store=True,
selection=crm.AVAILABLE_STATES, string="Status", readonly=True,
help='The status is set to \'Draft\', when a case is created.\
If the case is in progress the status is set to \'Open\'.\
When the case is over, the status is set to \'Done\'.\
If the case needs to be reviewed then the status is \
set to \'Pending\'.'),
}
_defaults = {
'user_id': lambda s, cr, uid, c: s._get_default_user(cr, uid, c),
'partner_id': lambda s, cr, uid, c: s._get_default_partner(cr, uid, c),
'email_from': lambda s, cr, uid, c: s._get_default_email(cr, uid, c),
'section_id': lambda s, cr, uid, c: s._get_default_section_id(cr, uid, c),
'date': fields.datetime.now,
'company_id': lambda s, cr, uid, c: s.pool.get('res.company')._company_default_get(cr, uid, 'crm.case', context=c),
'priority': lambda *a: crm.AVAILABLE_PRIORITIES[2][0],
'active': lambda *a: 1,
'stage_id':lambda s, cr, uid, c: s._get_default_stage_id(cr, uid, c)
}
def stage_find(self, cr, uid, cases, section_id, domain=[], order='sequence', context=None):
""" Override of the base.stage method
Parameter of the stage search taken from the lead:
- section_id: if set, stages must belong to this section or
be a default case
"""
if isinstance(cases, (int, long)):
cases = self.browse(cr, uid, cases, context=context)
# collect all section_ids
section_ids = []
if section_id:
section_ids.append(section_id)
for claim in cases:
if claim.section_id:
section_ids.append(claim.section_id.id)
# OR all section_ids and OR with case_default
search_domain = []
if section_ids:
search_domain += [('|')] * len(section_ids)
for section_id in section_ids:
search_domain.append(('section_ids', '=', section_id))
search_domain.append(('case_default', '=', True))
# AND with the domain in parameter
search_domain += list(domain)
# perform search, return the first found
stage_ids = self.pool.get('crm.claim.stage').search(cr, uid, search_domain, order=order, context=context)
if stage_ids:
return stage_ids[0]
return False
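# Illustration of the domain built above (added comment; the values are
# examples only): with section_ids == [3, 5] and domain == [('state', '=', 'done')],
# the search performed is equivalent to
#   ['|', '|',
#    ('section_ids', '=', 3),
#    ('section_ids', '=', 5),
#    ('case_default', '=', True),
#    ('state', '=', 'done')]
# i.e. stages linked to any of the collected teams, or common to all teams,
# further restricted by the caller-supplied domain.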
def case_refuse(self, cr, uid, ids, context=None):
""" Mark the case as refused: state=done and case_refused=True """
for lead in self.browse(cr, uid, ids):
stage_id = self.stage_find(cr, uid, [lead], lead.section_id.id or False, ['&', ('state', '=', 'done'), ('case_refused', '=', True)], context=context)
if stage_id:
self.case_set(cr, uid, [lead.id], values_to_update={}, new_stage_id=stage_id, context=context)
return True
def onchange_partner_id(self, cr, uid, ids, part, email=False):
"""This function returns value of partner address based on partner
:param part: Partner's id
:param email: ignored
"""
if not part:
return {'value': {'email_from': False,
'partner_phone': False
}
}
address = self.pool.get('res.partner').browse(cr, uid, part)
return {'value': {'email_from': address.email, 'partner_phone': address.phone}}
# -------------------------------------------------------
# Mail gateway
# -------------------------------------------------------
def message_new(self, cr, uid, msg, custom_values=None, context=None):
""" Overrides mail_thread message_new that is called by the mailgateway
through message_process.
This override updates the document according to the email.
"""
if custom_values is None:
custom_values = {}
desc = html2plaintext(msg.get('body')) if msg.get('body') else ''
defaults = {
'name': msg.get('subject') or _("No Subject"),
'description': desc,
'email_from': msg.get('from'),
'email_cc': msg.get('cc'),
'partner_id': msg.get('author_id', False),
}
if msg.get('priority'):
defaults['priority'] = msg.get('priority')
defaults.update(custom_values)
return super(crm_claim, self).message_new(cr, uid, msg, custom_values=defaults, context=context)
class res_partner(osv.osv):
_inherit = 'res.partner'
_columns = {
'claims_ids': fields.one2many('crm.claim', 'partner_id', 'Claims'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
the-stack_106_27067 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
import functools
import itertools
import operator
from heat.common import exception
from heat.engine import function
from heat.engine import properties
__all__ = ['ResourceDefinition']
# Field names that can be passed to Template.get_section_name() in order to
# determine the appropriate name for a particular template format.
FIELDS = (
TYPE, PROPERTIES, METADATA, DELETION_POLICY, UPDATE_POLICY,
DEPENDS_ON, DESCRIPTION, EXTERNAL_ID,
) = (
'Type', 'Properties', 'Metadata', 'DeletionPolicy', 'UpdatePolicy',
'DependsOn', 'Description', 'external_id',
)
class ResourceDefinition(object):
"""A definition of a resource, independent of any template format."""
class Diff(object):
"""A diff between two versions of the same resource definition."""
def __init__(self, old_defn, new_defn):
if not (isinstance(old_defn, ResourceDefinition) and
isinstance(new_defn, ResourceDefinition)):
raise TypeError
self.old_defn = old_defn
self.new_defn = new_defn
def properties_changed(self):
"""Return True if the resource properties have changed."""
return self.old_defn._properties != self.new_defn._properties
def metadata_changed(self):
"""Return True if the resource metadata has changed."""
return self.old_defn._metadata != self.new_defn._metadata
def update_policy_changed(self):
"""Return True if the resource update policy has changed."""
return self.old_defn._update_policy != self.new_defn._update_policy
def __bool__(self):
"""Return True if anything has changed."""
return (self.properties_changed() or
self.metadata_changed() or
self.update_policy_changed())
__nonzero__ = __bool__
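# Usage sketch (added; the variable names are hypothetical): a Diff is
# normally obtained through ResourceDefinition.__sub__, e.g.
#   diff = new_defn - old_defn        # ResourceDefinition.Diff
#   if diff:                          # True if anything changed
#       if diff.properties_changed():
#           pass                      # handle a property update here
# which lets update logic distinguish property, metadata and update-policy
# changes without comparing raw template snippets.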
DELETION_POLICIES = (
DELETE, RETAIN, SNAPSHOT,
) = (
'Delete', 'Retain', 'Snapshot',
)
def __init__(self, name, resource_type, properties=None, metadata=None,
depends=None, deletion_policy=None, update_policy=None,
description=None, external_id=None, condition=None):
"""Initialise with the parsed definition of a resource.
Any intrinsic functions present in any of the sections should have been
parsed into Function objects before constructing the definition.
:param name: The name of the resource (for use in error messages)
:param resource_type: The resource type
:param properties: A dictionary of supplied property values
:param metadata: The supplied metadata
:param depends: A list of resource names on which this resource depends
:param deletion_policy: The deletion policy for the resource
:param update_policy: A dictionary of supplied update policies
:param description: A string describing the resource
:param external_id: A uuid of an external resource
:param condition: A condition name associated with the resource
"""
self.name = name
self.resource_type = resource_type
self.description = description or ''
self._properties = properties
self._metadata = metadata
self._depends = depends
self._deletion_policy = deletion_policy
self._update_policy = update_policy
self._external_id = external_id
self._condition = condition
self._hash = hash(self.resource_type)
self._rendering = None
self._dep_names = None
self._all_dep_attrs = None
assert isinstance(self.description, str)
if properties is not None:
assert isinstance(properties, (collections.abc.Mapping,
function.Function))
self._hash ^= _hash_data(properties)
if metadata is not None:
assert isinstance(metadata, (collections.abc.Mapping,
function.Function))
self._hash ^= _hash_data(metadata)
if depends is not None:
assert isinstance(depends, (collections.abc.Sequence,
function.Function))
assert not isinstance(depends, str)
self._hash ^= _hash_data(depends)
if deletion_policy is not None:
assert deletion_policy in self.DELETION_POLICIES
self._hash ^= _hash_data(deletion_policy)
if update_policy is not None:
assert isinstance(update_policy, (collections.abc.Mapping,
function.Function))
self._hash ^= _hash_data(update_policy)
if external_id is not None:
assert isinstance(external_id, (str,
function.Function))
self._hash ^= _hash_data(external_id)
self._deletion_policy = self.RETAIN
if condition is not None:
assert isinstance(condition, (str, bool,
function.Function))
self._hash ^= _hash_data(condition)
self.set_translation_rules()
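# Example construction (added sketch; the resource type and property values
# are made up for illustration):
#   defn = ResourceDefinition(
#       'web_server', 'OS::Nova::Server',
#       properties={'flavor': 'm1.small', 'image': 'cirros'},
#       metadata={'role': 'web'},
#       depends=['app_network'],
#       deletion_policy=ResourceDefinition.RETAIN)
# Any intrinsic functions inside properties/metadata are expected to have
# been parsed into Function objects (e.g. via the template) beforehand.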
def freeze(self, **overrides):
"""Return a frozen resource definition, with all functions resolved.
This return a new resource definition with fixed data (containing no
intrinsic functions). Named arguments passed to this method override
the values passed as arguments to the constructor.
"""
if getattr(self, '_frozen', False) and not overrides:
return self
def arg_item(attr_name):
name = attr_name.lstrip('_')
if name in overrides:
value = overrides[name]
if not value and getattr(self, attr_name) is None:
value = None
else:
value = function.resolve(getattr(self, attr_name))
return name, value
args = ('name', 'resource_type', '_properties', '_metadata',
'_depends', '_deletion_policy', '_update_policy',
'description', '_external_id', '_condition')
defn = type(self)(**dict(arg_item(a) for a in args))
defn._frozen = True
return defn
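# Hedged usage sketch (added): freezing resolves every intrinsic function,
# and keyword overrides replace individual fields, e.g.
#   frozen = defn.freeze(metadata={'updated': True})
# returns a definition whose sections contain plain data, with the metadata
# replaced by the supplied dict; calling freeze() again on the result with no
# overrides returns the same object unchanged.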
def reparse(self, stack, template):
"""Reinterpret the resource definition in the context of a new stack.
This returns a new resource definition, with all of the functions
parsed in the context of the specified stack and template.
Any conditions are *not* included - it is assumed that the resource is
only being interpreted in contexts where it should be enabled.
"""
assert not getattr(self, '_frozen', False
), "Cannot re-parse a frozen definition"
def reparse_snippet(snippet):
return template.parse(stack, copy.deepcopy(snippet))
return type(self)(
self.name, self.resource_type,
properties=reparse_snippet(self._properties),
metadata=reparse_snippet(self._metadata),
depends=reparse_snippet(self._depends),
deletion_policy=reparse_snippet(self._deletion_policy),
update_policy=reparse_snippet(self._update_policy),
external_id=reparse_snippet(self._external_id),
condition=None)
def validate(self):
"""Validate intrinsic functions that appear in the definition."""
function.validate(self._properties, PROPERTIES)
function.validate(self._metadata, METADATA)
function.validate(self._depends, DEPENDS_ON)
function.validate(self._deletion_policy, DELETION_POLICY)
function.validate(self._update_policy, UPDATE_POLICY)
function.validate(self._external_id, EXTERNAL_ID)
def dep_attrs(self, resource_name, load_all=False):
"""Iterate over attributes of a given resource that this references.
Return an iterator over the attributes of the resource named
resource_name that are referenced in this definition's properties and
metadata fields.
"""
if self._all_dep_attrs is None and load_all:
attr_map = collections.defaultdict(set)
atts = itertools.chain(function.all_dep_attrs(self._properties),
function.all_dep_attrs(self._metadata))
for res_name, att_name in atts:
attr_map[res_name].add(att_name)
self._all_dep_attrs = attr_map
if self._all_dep_attrs is not None:
return self._all_dep_attrs[resource_name]
return itertools.chain(function.dep_attrs(self._properties,
resource_name),
function.dep_attrs(self._metadata,
resource_name))
def required_resource_names(self):
"""Return a set of names of all resources on which this depends.
Note that this is done entirely in isolation from the rest of the
template, so the resource names returned may refer to resources that
don't actually exist, or would have strict_dependency=False. Use the
dependencies() method to get validated dependencies.
"""
if self._dep_names is None:
explicit_depends = [] if self._depends is None else self._depends
def path(section):
return '.'.join([self.name, section])
prop_deps = function.dependencies(self._properties,
path(PROPERTIES))
metadata_deps = function.dependencies(self._metadata,
path(METADATA))
implicit_depends = map(lambda rp: rp.name,
itertools.chain(prop_deps,
metadata_deps))
# (ricolin) External resource should not depend on any other
# resources. This operation is not allowed for now.
if self.external_id():
if explicit_depends:
raise exception.InvalidExternalResourceDependency(
external_id=self.external_id(),
resource_type=self.resource_type
)
self._dep_names = set()
else:
self._dep_names = set(itertools.chain(explicit_depends,
implicit_depends))
return self._dep_names
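# Illustration (added; values are hypothetical): for a definition whose
# properties contain a parsed get_resource reference to 'net' and which was
# created with depends=['db'], this method returns {'net', 'db'} - the union
# of implicit references found in properties/metadata and the explicit
# depends list - without checking that those resources actually exist.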
def dependencies(self, stack):
"""Return the Resource objects in given stack on which this depends."""
def get_resource(res_name):
if res_name not in stack:
if res_name in stack.defn.all_rsrc_names():
# The resource is conditionally defined, allow dependencies
# on it
return
raise exception.InvalidTemplateReference(resource=res_name,
key=self.name)
res = stack[res_name]
if getattr(res, 'strict_dependency', True):
return res
return filter(None, map(get_resource, self.required_resource_names()))
def set_translation_rules(self, rules=None, client_resolve=True):
"""Helper method to update properties with translation rules."""
self._rules = rules or []
self._client_resolve = client_resolve
def properties(self, schema, context=None):
"""Return a Properties object representing the resource properties.
The Properties object is constructed from the given schema, and may
require a context to validate constraints.
"""
props = properties.Properties(schema, self._properties or {},
function.resolve, context=context,
section=PROPERTIES,
rsrc_description=self.description)
props.update_translation(self._rules, self._client_resolve)
return props
def deletion_policy(self):
"""Return the deletion policy for the resource.
The policy will be one of those listed in DELETION_POLICIES.
"""
return function.resolve(self._deletion_policy) or self.DELETE
def update_policy(self, schema, context=None):
"""Return a Properties object representing the resource update policy.
The Properties object is constructed from the given schema, and may
require a context to validate constraints.
"""
props = properties.Properties(schema, self._update_policy or {},
function.resolve, context=context,
section=UPDATE_POLICY)
props.update_translation(self._rules, self._client_resolve)
return props
def metadata(self):
"""Return the resource metadata."""
return function.resolve(self._metadata) or {}
def external_id(self):
"""Return the external resource id."""
return function.resolve(self._external_id)
def condition(self):
"""Return the name of the conditional inclusion rule, if any.
Returns None if the resource is included unconditionally.
"""
return function.resolve(self._condition)
def render_hot(self):
"""Return a HOT snippet for the resource definition."""
if self._rendering is None:
attrs = {
'type': 'resource_type',
'properties': '_properties',
'metadata': '_metadata',
'deletion_policy': '_deletion_policy',
'update_policy': '_update_policy',
'depends_on': '_depends',
'external_id': '_external_id',
'condition': '_condition'
}
def rawattrs():
"""Get an attribute with function objects stripped out."""
for key, attr in attrs.items():
value = getattr(self, attr)
if value is not None:
yield key, copy.deepcopy(value)
self._rendering = dict(rawattrs())
return self._rendering
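# Example output (added sketch, matching the hypothetical definition used in
# an earlier comment): render_hot() would return roughly
#   {'type': 'OS::Nova::Server',
#    'properties': {'flavor': 'm1.small', 'image': 'cirros'},
#    'metadata': {'role': 'web'},
#    'depends_on': ['app_network'],
#    'deletion_policy': 'Retain'}
# with absent sections omitted and any Function objects deep-copied rather
# than resolved.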
def __sub__(self, previous):
"""Calculate the difference between this definition and a previous one.
Return a Diff object that can be used to establish differences between
this definition and a previous definition of the same resource.
"""
if not isinstance(previous, ResourceDefinition):
return NotImplemented
return self.Diff(previous, self)
def __eq__(self, other):
"""Compare this resource definition for equality with another.
Two resource definitions are considered to be equal if they can be
generated from the same template snippet. The name of the resource is
ignored, as are the actual values that any included functions resolve
to.
"""
if not isinstance(other, ResourceDefinition):
return NotImplemented
return self.render_hot() == other.render_hot()
def __ne__(self, other):
"""Compare this resource definition for inequality with another.
See __eq__() for the definition of equality.
"""
equal = self.__eq__(other)
if equal is NotImplemented:
return NotImplemented
return not equal
def __hash__(self):
"""Return a hash value for this resource definition.
Resource definitions that compare equal will have the same hash. (In
particular, the resource name is *not* taken into account.) See
the __eq__() method for the definition of equality.
"""
return self._hash
def __repr__(self):
"""Return a string representation of the resource definition."""
def arg_repr(arg_name):
return '='.join([arg_name, repr(getattr(self, '_%s' % arg_name))])
args = ('properties', 'metadata', 'depends',
'deletion_policy', 'update_policy', 'condition')
data = {
'classname': type(self).__name__,
'name': repr(self.name),
'type': repr(self.resource_type),
'args': ', '.join(arg_repr(n) for n in args)
}
return '%(classname)s(%(name)s, %(type)s, %(args)s)' % data
def _hash_data(data):
"""Return a stable hash value for an arbitrary parsed-JSON data snippet."""
if isinstance(data, function.Function):
data = copy.deepcopy(data)
if not isinstance(data, str):
if isinstance(data, collections.abc.Sequence):
return hash(tuple(_hash_data(d) for d in data))
if isinstance(data, collections.abc.Mapping):
item_hashes = (hash(k) ^ _hash_data(v) for k, v in data.items())
return functools.reduce(operator.xor, item_hashes, 0)
return hash(data)
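# Added example of the stable hashing above: sequences hash as tuples of
# member hashes and mappings as the XOR of per-item hashes, so for instance
#   _hash_data({'a': 1, 'b': 2}) == _hash_data({'b': 2, 'a': 1})
# holds for equivalent parsed-JSON snippets, and the insertion order of
# mapping keys does not affect the result.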
|
the-stack_106_27068 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
from kuryr.lib._i18n import _
from kuryr.lib import config as lib_config
from oslo_config import cfg
from oslo_log import log as logging
from kuryr_rancher import version
LOG = logging.getLogger(__name__)
kuryr_rancher_opts = [
cfg.StrOpt('pybasedir',
help=_('Directory where Kuryr-rancher python module is '
'installed.'),
default=os.path.abspath(
os.path.join(os.path.dirname(__file__),
'../../'))),
]
rancher_opts = [
cfg.BoolOpt('port_debug',
help=_('Enable port debug to force kuryr port names to be '
'set to their corresponding pod names.'),
default=False),
cfg.StrOpt('pod_project_driver',
help=_("The driver to determine OpenStack "
"project for pod ports"),
default='default'),
cfg.StrOpt('service_project_driver',
help=_("The driver to determine OpenStack "
"project for services"),
default='default'),
cfg.StrOpt('pod_subnets_driver',
help=_("The driver to determine Neutron "
"subnets for pod ports"),
default='default'),
cfg.StrOpt('service_subnets_driver',
help=_("The driver to determine Neutron "
"subnets for services"),
default='default'),
cfg.StrOpt('pod_security_groups_driver',
help=_("The driver to determine Neutron "
"security groups for pods"),
default='default'),
cfg.StrOpt('service_security_groups_driver',
help=_("The driver to determine Neutron "
"security groups for services"),
default='default'),
cfg.StrOpt('pod_vif_driver',
help=_("The driver that provides VIFs for Rancher Container."),
default='nested-macvlan'),
cfg.StrOpt('endpoints_lbaas_driver',
help=_("The driver that provides LoadBalancers for "
"Kubernetes Endpoints"),
default='lbaasv2'),
cfg.StrOpt('vif_pool_driver',
help=_("The driver that manages VIFs pools for "
"Rancher Container."),
default='noop'),
]
neutron_defaults = [
cfg.StrOpt('project',
help=_("Default OpenStack project ID for "
"Kubernetes resources"),
default='id_of_project_for_rancher'),
cfg.StrOpt('pod_subnet',
help=_("Default Neutron subnet ID for Rancher Container"),
default='id_of_subnet_for_rancher'),
cfg.ListOpt('pod_security_groups',
help=_("Default Neutron security groups' IDs "
"for Rancher Container"),
default='id_of_security_groups_for_rancher'),
cfg.StrOpt('ovs_bridge',
help=_("Default OpenVSwitch integration bridge"),
sample_default="br-int",
default='br-int'),
cfg.StrOpt('service_subnet',
help=_("Default Neutron subnet ID for Rancher services"),
default='id_of_subnet_for_rancher_services'),
cfg.StrOpt('external_svc_subnet',
help=_("Default external subnet for Rancher services")),
]
CONF = cfg.CONF
CONF.register_opts(kuryr_rancher_opts)
CONF.register_opts(rancher_opts, group='rancher')
CONF.register_opts(neutron_defaults, group='neutron_defaults')
CONF.register_opts(lib_config.core_opts)
CONF.register_opts(lib_config.binding_opts, 'binding')
lib_config.register_neutron_opts(CONF)
logging.register_options(CONF)
def init(args, **kwargs):
version_rancher = version.version_info.version_string()
CONF(args=args, project='kuryr-rancher', version=version_rancher, **kwargs)
def setup_logging():
logging.setup(CONF, 'kuryr-rancher')
logging.set_defaults(default_log_levels=logging.get_default_log_levels())
version_rancher = version.version_info.version_string()
LOG.info("Logging enabled!")
LOG.info("%(prog)s version %(version)s",
{'prog': sys.argv[0], 'version': version_rancher})
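# Minimal usage sketch (added; the entry point below is an assumption, not
# part of this module):
#   import sys
#   from kuryr_rancher import config
#   def main():
#       config.init(sys.argv[1:])
#       config.setup_logging()
#       # ... start the service, reading options such as
#       # config.CONF.rancher.pod_vif_driver or
#       # config.CONF.neutron_defaults.pod_subnet ...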
|
the-stack_106_27069 | # Copyright (c) 2011 The Chromium Embedded Framework Authors. All rights
# reserved. Use of this source code is governed by a BSD-style license that
# can be found in the LICENSE file.
from __future__ import absolute_import
from cef_version import VersionFormatter
from date_util import *
from file_util import *
from optparse import OptionParser
import git_util as git
import os
import sys
# cannot be loaded as a module
if __name__ != "__main__":
sys.stderr.write('This file cannot be loaded as a module!')
sys.exit()
# parse command-line options
disc = """
This utility creates the version header file.
"""
parser = OptionParser(description=disc)
parser.add_option(
'--header',
dest='header',
metavar='FILE',
help='output version header file [required]')
parser.add_option(
'-q',
'--quiet',
action='store_true',
dest='quiet',
default=False,
help='do not output detailed status information')
(options, args) = parser.parse_args()
# the header option is required
if options.header is None:
parser.print_help(sys.stdout)
sys.exit()
def write_version_header(header):
""" Creates the header file for the current revision and Chrome version information
if the information has changed or if the file doesn't already exist. """
if not git.is_checkout('.'):
raise Exception('Not a valid checkout')
if path_exists(header):
oldcontents = read_file(header)
else:
oldcontents = ''
year = get_year()
formatter = VersionFormatter()
commit_hash = formatter.get_cef_commit_components()['HASH']
commit_number = formatter.get_cef_commit_components()['NUMBER']
version = formatter.get_version_string()
version_parts = formatter.get_version_parts()
chrome = formatter.get_chrome_version_components()
version_defines = '#define CEF_VERSION "%s"\n' % version
for key in ('MAJOR', 'MINOR', 'PATCH'):
version_defines += '#define CEF_VERSION_%s %d\n' % (key, version_parts[key])
newcontents = '// Copyright (c) '+year+' Marshall A. Greenblatt. All rights reserved.\n'+\
'//\n'+\
'// Redistribution and use in source and binary forms, with or without\n'+\
'// modification, are permitted provided that the following conditions are\n'+\
'// met:\n'+\
'//\n'+\
'// * Redistributions of source code must retain the above copyright\n'+\
'// notice, this list of conditions and the following disclaimer.\n'+\
'// * Redistributions in binary form must reproduce the above\n'+\
'// copyright notice, this list of conditions and the following disclaimer\n'+\
'// in the documentation and/or other materials provided with the\n'+\
'// distribution.\n'+\
'// * Neither the name of Google Inc. nor the name Chromium Embedded\n'+\
'// Framework nor the names of its contributors may be used to endorse\n'+\
'// or promote products derived from this software without specific prior\n'+\
'// written permission.\n'+\
'//\n'+\
'// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n'+\
'// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n'+\
'// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n'+\
'// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n'+\
'// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n'+\
'// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n'+\
'// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n'+\
'// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n'+\
'// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n'+\
'// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n'+\
'// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n'+\
'//\n'+\
'// ---------------------------------------------------------------------------\n'+\
'//\n'+\
'// This file is generated by the make_version_header.py tool.\n'+\
'//\n\n'+\
'#ifndef CEF_INCLUDE_CEF_VERSION_H_\n'+\
'#define CEF_INCLUDE_CEF_VERSION_H_\n\n'+\
version_defines+\
'#define CEF_COMMIT_NUMBER ' + commit_number + '\n'+\
'#define CEF_COMMIT_HASH "' + commit_hash + '"\n'+\
'#define COPYRIGHT_YEAR ' + year + '\n\n'+\
'#define CHROME_VERSION_MAJOR ' + chrome['MAJOR'] + '\n'+\
'#define CHROME_VERSION_MINOR ' + chrome['MINOR'] + '\n'+\
'#define CHROME_VERSION_BUILD ' + chrome['BUILD'] + '\n'+\
'#define CHROME_VERSION_PATCH ' + chrome['PATCH'] + '\n\n'+\
'#define DO_MAKE_STRING(p) #p\n'+\
'#define MAKE_STRING(p) DO_MAKE_STRING(p)\n\n'+\
'#ifndef APSTUDIO_HIDDEN_SYMBOLS\n\n'\
'#include "include/internal/cef_export.h"\n\n'+\
'#ifdef __cplusplus\n'+\
'extern "C" {\n'+\
'#endif\n\n'+\
'// Returns CEF version information for the libcef library. The |entry|\n'+\
'// parameter describes which version component will be returned:\n'+\
'// 0 - CEF_VERSION_MAJOR\n'+\
'// 1 - CEF_VERSION_MINOR\n'+\
'// 2 - CEF_VERSION_PATCH\n'+\
'// 3 - CEF_COMMIT_NUMBER\n'+\
'// 4 - CHROME_VERSION_MAJOR\n'+\
'// 5 - CHROME_VERSION_MINOR\n'+\
'// 6 - CHROME_VERSION_BUILD\n'+\
'// 7 - CHROME_VERSION_PATCH\n'+\
'///\n'+\
'CEF_EXPORT int cef_version_info(int entry);\n\n'+\
'#ifdef __cplusplus\n'+\
'}\n'+\
'#endif\n\n'+\
'#endif // APSTUDIO_HIDDEN_SYMBOLS\n\n'+\
'#endif // CEF_INCLUDE_CEF_VERSION_H_\n'
if newcontents != oldcontents:
write_file(header, newcontents)
return True
return False
written = write_version_header(options.header)
if not options.quiet:
if written:
sys.stdout.write('File ' + options.header + ' updated.\n')
else:
sys.stdout.write('File ' + options.header + ' is already up to date.\n')
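# Example invocation (the header path is illustrative, inferred from the
# include guard written above rather than taken from the build scripts):
#
#   python make_version_header.py --header include/cef_version.h
#
# The header is rewritten only when the generated contents differ from the
# existing file, so repeated runs are cheap no-ops.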
|
the-stack_106_27071 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
import pytest
from parameterized import parameterized
from airflow.exceptions import AirflowSensorTimeout
from airflow.models import DagBag
from airflow.models.dag import DAG
from airflow.sensors.weekday import DayOfWeekSensor
from airflow.utils.timezone import datetime
from airflow.utils.weekday import WeekDay
from tests.test_utils import db
DEFAULT_DATE = datetime(2018, 12, 10)
WEEKDAY_DATE = datetime(2018, 12, 20)
WEEKEND_DATE = datetime(2018, 12, 22)
TEST_DAG_ID = 'weekday_sensor_dag'
DEV_NULL = '/dev/null'
class TestDayOfWeekSensor(unittest.TestCase):
@staticmethod
def clean_db():
db.clear_db_runs()
db.clear_db_task_fail()
def setUp(self):
self.clean_db()
self.dagbag = DagBag(dag_folder=DEV_NULL, include_examples=True)
self.args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
dag = DAG(TEST_DAG_ID, default_args=self.args)
self.dag = dag
def tearDown(self):
self.clean_db()
@parameterized.expand(
[
("with-string", "Thursday"),
("with-enum", WeekDay.THURSDAY),
("with-enum-set", {WeekDay.THURSDAY}),
("with-enum-list", [WeekDay.THURSDAY]),
("with-enum-dict", {WeekDay.THURSDAY: "some_value"}),
("with-enum-set-2-items", {WeekDay.THURSDAY, WeekDay.FRIDAY}),
("with-enum-list-2-items", [WeekDay.THURSDAY, WeekDay.FRIDAY]),
("with-enum-dict-2-items", {WeekDay.THURSDAY: "some_value", WeekDay.FRIDAY: "some_value_2"}),
("with-string-set", {"Thursday"}),
("with-string-set-2-items", {"Thursday", "Friday"}),
("with-set-mix-types", {"Thursday", WeekDay.FRIDAY}),
("with-list-mix-types", ["Thursday", WeekDay.FRIDAY]),
("with-dict-mix-types", {"Thursday": "some_value", WeekDay.FRIDAY: "some_value_2"}),
]
)
def test_weekday_sensor_true(self, _, week_day):
op = DayOfWeekSensor(
task_id='weekday_sensor_check_true', week_day=week_day, use_task_execution_day=True, dag=self.dag
)
op.run(start_date=WEEKDAY_DATE, end_date=WEEKDAY_DATE, ignore_ti_state=True)
assert op.week_day == week_day
def test_weekday_sensor_false(self):
op = DayOfWeekSensor(
task_id='weekday_sensor_check_false',
poke_interval=1,
timeout=2,
week_day='Tuesday',
use_task_execution_day=True,
dag=self.dag,
)
with pytest.raises(AirflowSensorTimeout):
op.run(start_date=WEEKDAY_DATE, end_date=WEEKDAY_DATE, ignore_ti_state=True)
def test_invalid_weekday_number(self):
invalid_week_day = 'Thsday'
with pytest.raises(AttributeError, match=f'Invalid Week Day passed: "{invalid_week_day}"'):
DayOfWeekSensor(
task_id='weekday_sensor_invalid_weekday_num',
week_day=invalid_week_day,
use_task_execution_day=True,
dag=self.dag,
)
def test_weekday_sensor_with_invalid_type(self):
invalid_week_day = 5
with pytest.raises(
TypeError,
match=f"Unsupported Type for week_day parameter: {type(invalid_week_day)}."
"Input should be iterable type:"
"str, set, list, dict or Weekday enum type",
):
DayOfWeekSensor(
task_id='weekday_sensor_check_true',
week_day=invalid_week_day,
use_task_execution_day=True,
dag=self.dag,
)
def test_weekday_sensor_timeout_with_set(self):
op = DayOfWeekSensor(
task_id='weekday_sensor_check_false',
poke_interval=1,
timeout=2,
week_day={WeekDay.MONDAY, WeekDay.TUESDAY},
use_task_execution_day=True,
dag=self.dag,
)
with pytest.raises(AirflowSensorTimeout):
op.run(start_date=WEEKDAY_DATE, end_date=WEEKDAY_DATE, ignore_ti_state=True)
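# Usage sketch outside the test suite (dag id and argument values are
# illustrative): a DayOfWeekSensor gating downstream tasks so that they only
# proceed when the task execution day is a Thursday or Friday.
#
#   with DAG('weekday_gate_example', default_args={'start_date': DEFAULT_DATE}) as dag:
#       wait_for_end_of_week = DayOfWeekSensor(
#           task_id='wait_for_end_of_week',
#           week_day={WeekDay.THURSDAY, WeekDay.FRIDAY},
#           use_task_execution_day=True,
#       )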
|
the-stack_106_27072 | import os
from shapely.geometry import Point
from geospark.core.enums import FileDataSplitter, GridType, IndexType
from geospark.core.geom.envelope import Envelope
from tests.tools import tests_path
input_location = os.path.join(tests_path, "resources/crs-test-point.csv")
offset = 0
splitter = FileDataSplitter.CSV
grid_type = GridType.RTREE
index_type = IndexType.RTREE
num_partitions = 11
distance = 0.01
input_location_query_polygon = os.path.join(tests_path, "resources/crs-test-polygon.csv")
loop_times = 5
query_envelope = Envelope(30.01, 40.01, -90.01, -80.01)
query_point = Point(34.01, -84.01)
top_k = 100 |
the-stack_106_27075 | import math
#Coding Exercise 2:
#1.
x=50
print (x+50, 2*x-10)
#2
# 30+*6 => SyntaxError: "+*" is not a valid operator sequence
print(6**6, 6^6, 6+6+6+6+6+6) # 6**6 is exponentiation; 6^6 is bitwise XOR (6.__xor__(6) == 0)
#3
print("Hello World", "Hello World : 10")
#4
pv=int(input("Enter the present value of the loan, the annual interest rate (as a decimal), and the time (in months) the loan will be paid out, respectively: "))
inter=float(input())
time=int(input())
fv=pv
for x in range(time):
fv = fv+fv*(inter/12)
print(f"The total future value if no payments made and interest adds up after alloted time ({time}): " + "{:.2f}".format(fv))
print(f"At minimum, you will need to have a PMT of {math.ceil(fv/103)}")
|
the-stack_106_27076 | """Login classes and functions for Simple-Salesforce
Heavily Modified from RestForce 1.0.0
"""
DEFAULT_CLIENT_ID_PREFIX = 'RestForce'
import time
import xml
import warnings
import xmltodict
from datetime import datetime, timedelta
from html import escape
from json.decoder import JSONDecodeError
import requests
from authlib.jose import jwt
from .api import DEFAULT_API_VERSION
from .exceptions import SalesforceAuthenticationFailed
from .util import getUniqueElementValueFromXmlString
# pylint: disable=invalid-name,too-many-arguments,too-many-locals
def SalesforceLogin(
username=None,
password=None,
security_token=None,
organizationId=None,
sf_version=DEFAULT_API_VERSION,
proxies=None,
session=None,
client_id=None,
domain=None,
consumer_key=None,
privatekey_file=None,
privatekey=None,
):
"""Return a tuple of `(session_id, sf_instance)` where `session_id` is the
session ID to use for authentication to Salesforce and `sf_instance` is
the domain of the instance of Salesforce to use for the session.
Arguments:
* username -- the Salesforce username to use for authentication
* password -- the password for the username
* security_token -- the security token for the username
* organizationId -- the ID of your organization
            NOTE: security_token and organizationId are mutually exclusive
* sf_version -- the version of the Salesforce API to use, for example
"27.0"
* proxies -- the optional map of scheme to proxy server
* session -- Custom requests session, created in calling code. This
                 enables the use of requests Session features not otherwise
exposed by simple_salesforce.
* client_id -- the ID of this client
    * domain -- The domain to use for connecting to Salesforce. Use
common domains, such as 'login' or 'test', or
Salesforce My domain. If not used, will default to
'login'.
* consumer_key -- the consumer key generated for the user
* privatekey_file -- the path to the private key file used
for signing the JWT token.
* privatekey -- the private key to use
for signing the JWT token.
"""
if domain is None:
domain = 'login'
soap_url = 'https://{domain}.salesforce.com/services/Soap/u/{sf_version}'
if client_id:
client_id = "{prefix}/{app_name}".format(
prefix=DEFAULT_CLIENT_ID_PREFIX,
app_name=client_id)
else:
client_id = DEFAULT_CLIENT_ID_PREFIX
soap_url = soap_url.format(domain=domain,
sf_version=sf_version)
# pylint: disable=E0012,deprecated-method
username = escape(username) if username else None
password = escape(password) if password else None
# Check if token authentication is used
if security_token is not None:
# Security Token Soap request body
login_soap_request_body = """<?xml version="1.0" encoding="utf-8" ?>
<env:Envelope
xmlns:xsd="http://www.w3.org/2001/XMLSchema"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:env="http://schemas.xmlsoap.org/soap/envelope/"
xmlns:urn="urn:partner.soap.sforce.com">
<env:Header>
<urn:CallOptions>
<urn:client>{client_id}</urn:client>
<urn:defaultNamespace>sf</urn:defaultNamespace>
</urn:CallOptions>
</env:Header>
<env:Body>
<n1:login xmlns:n1="urn:partner.soap.sforce.com">
<n1:username>{username}</n1:username>
<n1:password>{password}{token}</n1:password>
</n1:login>
</env:Body>
</env:Envelope>""".format(
username=username, password=password, token=security_token,
client_id=client_id)
# Check if IP Filtering is used in conjunction with organizationId
elif organizationId is not None:
# IP Filtering Login Soap request body
login_soap_request_body = """<?xml version="1.0" encoding="utf-8" ?>
<soapenv:Envelope
xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/"
xmlns:urn="urn:partner.soap.sforce.com">
<soapenv:Header>
<urn:CallOptions>
<urn:client>{client_id}</urn:client>
<urn:defaultNamespace>sf</urn:defaultNamespace>
</urn:CallOptions>
<urn:LoginScopeHeader>
<urn:organizationId>{organizationId}</urn:organizationId>
</urn:LoginScopeHeader>
</soapenv:Header>
<soapenv:Body>
<urn:login>
<urn:username>{username}</urn:username>
<urn:password>{password}</urn:password>
</urn:login>
</soapenv:Body>
</soapenv:Envelope>""".format(
username=username, password=password, organizationId=organizationId,
client_id=client_id)
elif username is not None and password is not None:
# IP Filtering for non self-service users
login_soap_request_body = """<?xml version="1.0" encoding="utf-8" ?>
<soapenv:Envelope
xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/"
xmlns:urn="urn:partner.soap.sforce.com">
<soapenv:Header>
<urn:CallOptions>
<urn:client>{client_id}</urn:client>
<urn:defaultNamespace>sf</urn:defaultNamespace>
</urn:CallOptions>
</soapenv:Header>
<soapenv:Body>
<urn:login>
<urn:username>{username}</urn:username>
<urn:password>{password}</urn:password>
</urn:login>
</soapenv:Body>
</soapenv:Envelope>""".format(
username=username, password=password, client_id=client_id)
elif username is not None and \
consumer_key is not None and \
(privatekey_file is not None or privatekey is not None):
header = {'alg': 'RS256'}
expiration = datetime.utcnow() + timedelta(minutes=3)
payload = {
'iss': consumer_key,
'sub': username,
'aud': 'https://{domain}.salesforce.com'.format(domain=domain),
'exp': '{exp:.0f}'.format(
exp=time.mktime(expiration.timetuple()) +
expiration.microsecond / 1e6
)
}
if privatekey_file is not None:
with open(privatekey_file, 'rb') as key_file:
key = key_file.read()
else:
key = privatekey
assertion = jwt.encode(header, payload, key)
login_token_request_data = {
'grant_type': 'urn:ietf:params:oauth:grant-type:jwt-bearer',
'assertion': assertion
}
return token_login(
'https://{domain}.salesforce.com/services/oauth2/token'.format(
domain=domain),
login_token_request_data, domain, consumer_key,
None, proxies, session)
else:
except_code = 'INVALID AUTH'
except_msg = (
'You must submit either a security token or organizationId for '
'authentication'
)
raise SalesforceAuthenticationFailed(except_code, except_msg)
login_soap_request_headers = {
'content-type': 'text/xml',
'charset': 'UTF-8',
'SOAPAction': 'login'
}
return soap_login(soap_url, login_soap_request_body,
login_soap_request_headers, proxies, session)
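# Usage sketch (credential values are placeholders): the username/password/
# security-token flow is routed through soap_login() below and yields the
# session id, the instance domain, and the session duration in seconds.
#
#   session_id, sf_instance, session_seconds = SalesforceLogin(
#       username='jdoe@example.com',
#       password='password',
#       security_token='token',
#       domain='test',
#   )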
def soap_login(soap_url, request_body, headers, proxies, session=None):
"""Process SOAP specific login workflow."""
response = (session or requests).post(
soap_url, request_body, headers=headers, proxies=proxies)
if response.status_code != 200:
except_code = getUniqueElementValueFromXmlString(
response.content, 'sf:exceptionCode')
except_msg = getUniqueElementValueFromXmlString(
response.content, 'sf:exceptionMessage')
raise SalesforceAuthenticationFailed(except_code, except_msg)
parsed_content = xmltodict.parse(response.content)
content = parsed_content['soapenv:Envelope']['soapenv:Body']['loginResponse']['result']
session_id = content['sessionId']
server_url = content['serverUrl']
session_duration = content['userInfo']['sessionSecondsValid']
sf_instance = (server_url
.replace('http://', '')
.replace('https://', '')
.split('/')[0]
.replace('-api', ''))
return session_id, sf_instance, int(session_duration)
def token_login(token_url, token_data, domain, consumer_key,
headers, proxies, session=None):
"""Process OAuth 2.0 JWT Bearer Token Flow."""
response = (session or requests).post(
token_url, token_data, headers=headers, proxies=proxies)
try:
json_response = response.json()
except JSONDecodeError as json_decode_error:
raise SalesforceAuthenticationFailed(
response.status_code, response.text
) from json_decode_error
if response.status_code != 200:
except_code = json_response.get('error')
except_msg = json_response.get('error_description')
if except_msg == "user hasn't approved this consumer":
auth_url = 'https://{domain}.salesforce.com/services/oauth2/' \
'authorize?response_type=code&client_id=' \
'{consumer_key}&redirect_uri=<approved URI>'.format(
domain=domain,
consumer_key=consumer_key
)
warnings.warn("""
If your connected app policy is set to "All users may
self-authorize", you may need to authorize this
application first. Browse to
%s
in order to Allow Access. Check first to ensure you have a valid
<approved URI>.""" % auth_url)
raise SalesforceAuthenticationFailed(except_code, except_msg)
access_token = json_response.get('access_token')
instance_url = json_response.get('instance_url')
sf_instance = instance_url.replace(
'http://', '').replace(
'https://', '')
return access_token, sf_instance
|
the-stack_106_27077 | import unittest
class StackEntry:
def __init__(self, element: int, min_value: 'StackEntry'):
self.element = element
self.min_value = min_value
def __repr__(self):
        return 'StackEntry({0})'.format(self.element)
class MinStack:
"""
Design a stack that supports push, pop, top, and retrieving the minimum element in constant time.
push(x) -- Push element x onto stack.
pop() -- Removes the element on top of the stack.
top() -- Get the top element.
getMin() -- Retrieve the minimum element in the stack.
Example:
MinStack minStack = new MinStack();
minStack.push(-2);
minStack.push(0);
minStack.push(-3);
minStack.getMin(); --> Returns -3.
minStack.pop();
minStack.top(); --> Returns 0.
minStack.getMin(); --> Returns -2.
"""
def __init__(self):
"""
initialize your data structure here.
"""
self.stack = []
self.min_value = StackEntry(float('inf'), None)
def push(self, x: int) -> None:
if x <= self.min_value.element:
entry = StackEntry(x, self.min_value)
self.stack.append(entry)
self.min_value = entry
else:
self.stack.append(StackEntry(x, None))
def pop(self) -> None:
if len(self.stack) > 1:
last_element = self.stack.pop()
if last_element.element == self.min_value.element:
self.min_value = last_element.min_value
else:
last_element = self.stack.pop()
self.min_value = StackEntry(float('inf'), None)
def top(self) -> int:
return self.stack[-1].element
def getMin(self) -> int:
return int(self.min_value.element)
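# Usage sketch, mirroring the Java-style example from the docstring above in
# plain Python:
#
#   min_stack = MinStack()
#   min_stack.push(-2)
#   min_stack.push(0)
#   min_stack.push(-3)
#   min_stack.getMin()   # -> -3
#   min_stack.pop()
#   min_stack.top()      # -> 0
#   min_stack.getMin()   # -> -2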
class MinStackTest(unittest.TestCase):
    def test_case_1(self):
        min_stack = MinStack()
        min_stack.push(1)
        min_stack.push(3)
        min_stack.push(2)
        min_stack.push(-1)
        min_stack.push(6)
        min_stack.push(5)
        min_stack.push(-4)
        min_stack.push(3)
        self.assertEqual(min_stack.top(), 3)
        self.assertEqual(min_stack.getMin(), -4)
        min_stack.pop()  # removes 3
        min_stack.pop()  # removes -4; the minimum falls back to -1
        self.assertEqual(min_stack.getMin(), -1)
        min_stack.pop()  # removes 5
        min_stack.pop()  # removes 6
        min_stack.pop()  # removes -1; the minimum falls back to 1
        self.assertEqual(min_stack.getMin(), 1)
        min_stack.pop()  # removes 2
        min_stack.pop()  # removes 3
        self.assertEqual(min_stack.top(), 1)
        self.assertEqual(min_stack.getMin(), 1)
|
the-stack_106_27078 | from PyQt5.QtCore import Qt, pyqtSlot
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtWidgets import QLabel
class CamLabel(QLabel):
def __init__(self, parent):
super().__init__(parent)
self._image = None
self.setMinimumWidth(480)
self.setMinimumHeight(360)
def image(self):
return self._image
def setImage(self, image):
self._image = image
pixmap = QPixmap.fromImage(self._image).scaled(
self.width(), self.height(),
Qt.KeepAspectRatio,
Qt.SmoothTransformation
)
self.setPixmap(pixmap)
@pyqtSlot(QImage)
def receiveGrabSlot(self, image):
self.setImage(image)
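# Usage sketch (the grabber object and its signal name are assumptions, not
# part of this module): any object emitting QImage frames from a capture
# thread can drive the label through receiveGrabSlot.
#
#   label = CamLabel(parent=None)
#   grabber.frameReady.connect(label.receiveGrabSlot)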
|
the-stack_106_27080 | # (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from setuptools import setup
from codecs import open # To use a consistent encoding
from os import path
HERE = path.abspath(path.dirname(__file__))
# Get version info
ABOUT = {}
with open(path.join(HERE, "datadog_checks", "disk", "__about__.py")) as f:
exec(f.read(), ABOUT)
# Get the long description from the README file
with open(path.join(HERE, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
# Parse requirements
def get_requirements(fpath):
with open(path.join(HERE, fpath), encoding='utf-8') as f:
return f.readlines()
setup(
name='datadog-disk',
version=ABOUT["__version__"],
description='The Disk check',
long_description=long_description,
keywords='datadog agent disk check',
# The project's main homepage.
url='https://github.com/DataDog/integrations-core',
# Author details
author='Datadog',
author_email='[email protected]',
# License
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: System :: Monitoring',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
],
# The package we're going to ship
packages=['datadog_checks.disk'],
# Run-time dependencies
install_requires=get_requirements('requirements.in')+[
'datadog_checks_base',
],
# Development dependencies, run with:
# $ pip install -e .[dev]
extras_require={
'dev': [
'check-manifest',
],
},
# Testing setup and dependencies
setup_requires=['pytest-runner',],
tests_require=get_requirements(path.join('tests', 'requirements.txt')),
# Extra files to ship with the wheel package
package_data={'datadog_checks.disk': ['conf.yaml.default']},
include_package_data=True,
)
|
the-stack_106_27085 | import os, subprocess, gzip
from sift import Sift
className = "LoweSift"
class LoweSift(Sift):
win32Executable = "sift-lowe/siftWin32.exe"
linuxExecutable = "sift-lowe/sift"
def __init__(self, distrDir):
Sift.__init__(self, distrDir)
def extract(self, photo, photoInfo):
photoFile = open("%s.jpg.pgm" % photo, "rb")
siftTextFile = open("%s.key" % photo, "w")
subprocess.call(self.executable, **dict(stdin=photoFile, stdout=siftTextFile))
photoFile.close()
siftTextFile.close()
# gzip SIFT file and remove it
siftTextFile = open("%s.key" % photo, "r")
siftGzipFile = gzip.open("%s.key.gz" % photo, "wb")
siftGzipFile.writelines(siftTextFile)
siftGzipFile.close()
siftTextFile.close()
os.remove("%s.key" % photo) |
the-stack_106_27086 | # SPDX-License-Identifier: MIT
#
# Copyright (c) 2021 The Anvil Extras project team members listed at
# https://github.com/anvilistas/anvil-extras/graphs/contributors
#
# This software is published at https://github.com/anvilistas/anvil-extras
import anvil
from anvil.js.window import jQuery as _S
from ._anvil_designer import PageBreakTemplate
__version__ = "2.0.1"
class PageBreak(PageBreakTemplate):
def __init__(self, margin_top=0, border="1px solid grey", **properties):
dom_node = _S(anvil.js.get_dom_node(self))
self.margin_node = dom_node.find(".margin-element")
self.break_container = dom_node.find(".break-container")
self.margin_top = margin_top
self.border = border
self.init_components(**properties)
@property
def margin_top(self):
return self._margin_top
@margin_top.setter
def margin_top(self, value):
self.margin_node.css("margin-top", value)
self._margin_top = value
@property
def border(self):
return self._border
@border.setter
def border(self, value):
self.break_container.css("border", value)
self._border = value
|
the-stack_106_27087 | # -*- coding: utf-8 -*-
# imageio is distributed under the terms of the (new) BSD License.
""" Storage of image data in tiff format.
"""
import datetime
from .. import formats
from ..core import Format
import numpy as np
_tifffile = None # Defer loading to lib() function.
def load_lib():
global _tifffile
try:
import tifffile as _tifffile
except ImportError:
from . import _tifffile
return _tifffile
TIFF_FORMATS = (".tif", ".tiff", ".stk", ".lsm")
WRITE_METADATA_KEYS = (
"photometric",
"planarconfig",
"resolution",
"description",
"compress",
"volume",
"writeshape",
"extratags",
"datetime",
)
READ_METADATA_KEYS = (
"planar_configuration",
"is_fluoview",
"is_nih",
"is_contig",
"is_micromanager",
"is_ome",
"is_lsm" "is_palette",
"is_reduced",
"is_rgb",
"is_sgi",
"is_shaped",
"is_stk",
"is_tiled",
"is_mdgel" "resolution_unit",
"compression",
"is_mediacy",
"orientation",
"description",
"description1",
"is_imagej",
"software",
)
class TiffFormat(Format):
""" Provides support for a wide range of Tiff images.
Images that contain multiple pages can be read using ``imageio.mimread()``
to read the individual pages, or ``imageio.volread()`` to obtain a
single (higher dimensional) array.
Parameters for reading
----------------------
offset : int
Optional start position of embedded file. By default this is
the current file position.
size : int
Optional size of embedded file. By default this is the number
of bytes from the 'offset' to the end of the file.
multifile : bool
If True (default), series may include pages from multiple files.
Currently applies to OME-TIFF only.
multifile_close : bool
If True (default), keep the handles of other files in multifile
series closed. This is inefficient when few files refer to
many pages. If False, the C runtime may run out of resources.
Parameters for saving
---------------------
bigtiff : bool
If True, the BigTIFF format is used.
byteorder : {'<', '>'}
The endianness of the data in the file.
By default this is the system's native byte order.
software : str
Name of the software used to create the image.
Saved with the first page only.
Metadata for reading
--------------------
planar_configuration : {'contig', 'planar'}
        Specifies whether samples are stored contiguously or in separate planes.
By default this setting is inferred from the data shape.
'contig': last dimension contains samples.
'planar': third last dimension contains samples.
resolution_unit : (float, float) or ((int, int), (int, int))
X and Y resolution in dots per inch as float or rational numbers.
compression : int
Values from 0 to 9 indicating the level of zlib compression.
If 0, data is uncompressed.
orientation : {'top_left', 'bottom_right', ...}
        Orientation of the image array.
is_rgb : bool
True if page contains a RGB image.
is_contig : bool
True if page contains a contiguous image.
is_tiled : bool
True if page contains tiled image.
is_palette : bool
True if page contains a palette-colored image and not OME or STK.
is_reduced : bool
True if page is a reduced image of another image.
is_shaped : bool
True if page contains shape in image_description tag.
is_fluoview : bool
True if page contains FluoView MM_STAMP tag.
is_nih : bool
True if page contains NIH image header.
is_micromanager : bool
True if page contains Micro-Manager metadata.
is_ome : bool
True if page contains OME-XML in image_description tag.
is_sgi : bool
True if page contains SGI image and tile depth tags.
is_stk : bool
True if page contains UIC2Tag tag.
is_mdgel : bool
True if page contains md_file_tag tag.
is_mediacy : bool
True if page contains Media Cybernetics Id tag.
is_stk : bool
True if page contains UIC2Tag tag.
is_lsm : bool
True if page contains LSM CZ_LSM_INFO tag.
description : str
Image description
description1 : str
Additional description
is_imagej : None or str
ImageJ metadata
software : str
Software used to create the TIFF file
datetime : datetime.datetime
Creation date and time
Metadata for writing
--------------------
photometric : {'minisblack', 'miniswhite', 'rgb'}
The color space of the image data.
By default this setting is inferred from the data shape.
planarconfig : {'contig', 'planar'}
        Specifies whether samples are stored contiguously or in separate planes.
By default this setting is inferred from the data shape.
'contig': last dimension contains samples.
'planar': third last dimension contains samples.
resolution : (float, float) or ((int, int), (int, int))
X and Y resolution in dots per inch as float or rational numbers.
description : str
The subject of the image. Saved with the first page only.
compress : int
Values from 0 to 9 controlling the level of zlib compression.
If 0, data are written uncompressed (default).
volume : bool
If True, volume data are stored in one tile (if applicable) using
the SGI image_depth and tile_depth tags.
Image width and depth must be multiple of 16.
Few software can read this format, e.g. MeVisLab.
writeshape : bool
If True, write the data shape to the image_description tag
if necessary and no other description is given.
extratags: sequence of tuples
Additional tags as [(code, dtype, count, value, writeonce)].
code : int
The TIFF tag Id.
dtype : str
Data type of items in 'value' in Python struct format.
One of B, s, H, I, 2I, b, h, i, f, d, Q, or q.
count : int
Number of data values. Not used for string values.
value : sequence
'Count' values compatible with 'dtype'.
writeonce : bool
If True, the tag is written to the first page only.
"""
def _can_read(self, request):
# We support any kind of image data
return request.extension in self.extensions
def _can_write(self, request):
# We support any kind of image data
return request.extension in self.extensions
# -- reader
class Reader(Format.Reader):
def _open(self, **kwargs):
if not _tifffile:
load_lib()
# Allow loading from http; tiffile uses seek, so download first
if self.request.filename.startswith(("http://", "https://")):
self._f = f = open(self.request.get_local_filename(), "rb")
else:
self._f = None
f = self.request.get_file()
self._tf = _tifffile.TiffFile(f, **kwargs)
# metadata is the same for all images
self._meta = {}
def _close(self):
self._tf.close()
if self._f is not None:
self._f.close()
def _get_length(self):
if self.request.mode[1] in "vV":
return 1 # or can there be pages in pages or something?
else:
return len(self._tf.pages)
def _get_data(self, index):
if self.request.mode[1] in "vV":
# Read data as single 3D (+ color channels) array
if index != 0:
raise IndexError('Tiff support no more than 1 "volume" per file')
im = self._tf.asarray() # request as singleton image
meta = self._meta
else:
# Read as 2D image
if index < 0 or index >= self._get_length():
raise IndexError("Index out of range while reading from tiff file")
im = self._tf.pages[index].asarray()
meta = self._meta or self._get_meta_data(index)
# Return array and empty meta data
return im, meta
def _get_meta_data(self, index):
page = self._tf.pages[index or 0]
for key in READ_METADATA_KEYS:
try:
self._meta[key] = getattr(page, key)
except Exception:
pass
# tifffile <= 0.12.1 use datetime, newer use DateTime
for key in ("datetime", "DateTime"):
try:
self._meta["datetime"] = datetime.datetime.strptime(
page.tags[key].value, "%Y:%m:%d %H:%M:%S"
)
break
except Exception:
pass
return self._meta
# -- writer
class Writer(Format.Writer):
def _open(self, bigtiff=None, byteorder=None, software=None):
if not _tifffile:
load_lib()
try:
self._tf = _tifffile.TiffWriter(
self.request.get_file(), bigtiff, byteorder, software=software
)
self._software = None
except TypeError:
# In tifffile >= 0.15, the `software` arg is passed to
# TiffWriter.save
self._tf = _tifffile.TiffWriter(
self.request.get_file(), bigtiff, byteorder
)
self._software = software
self._meta = {}
def _close(self):
self._tf.close()
def _append_data(self, im, meta):
if meta:
self.set_meta_data(meta)
# No need to check self.request.mode; tiffile figures out whether
# this is a single page, or all page data at once.
if self._software is None:
self._tf.save(np.asanyarray(im), **self._meta)
else:
# tifffile >= 0.15
self._tf.save(np.asanyarray(im), software=self._software, **self._meta)
def set_meta_data(self, meta):
self._meta = {}
for (key, value) in meta.items():
if key in WRITE_METADATA_KEYS:
self._meta[key] = value
# Register
format = TiffFormat("tiff", "TIFF format", TIFF_FORMATS, "iIvV")
formats.add_format(format)
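# Usage sketch (file name is illustrative): once registered, the format is
# reachable through the standard imageio entry points mentioned in the class
# docstring.
#
#   import imageio
#
#   page = imageio.imread('stack.tif')      # first page as a 2D array
#   pages = imageio.mimread('stack.tif')    # list of individual pages
#   volume = imageio.volread('stack.tif')   # single higher-dimensional array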
|