the-stack_106_27798
import os
from setuptools import find_packages, setup
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-polls',
version='0.1',
packages=find_packages(),
include_package_data=True,
license='MIT License',
description='A simple Django app to construct Web-based polls.',
long_description=README,
url='https://www.example.com',
author='Valentin Ivanov',
author_email='[email protected]',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: 2.2',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
the-stack_106_27799
"""Tests for our `watches just_indices_stats` subcommand."""
import json
from subprocess import PIPE, Popen as popen
from secure_support import TestSecureSupport
class TestJustIndicesStats(TestSecureSupport):
def test_returns_index_per_line(self):
cmd = self.appendSecurityCommands(['watches', 'just_indices_stats', '-l', '--level=indices', '--timestamp'])
output = popen(cmd, stdout=PIPE).communicate()[0].decode('ascii')
self.assertTrue(len(output) > 0)
lines = 0
for line in output.splitlines():
lines += 1
o = json.loads(line)
self.assertTrue('index' in o)
self.assertTrue('timestamp' in o)
self.assertTrue('total' in o)
self.assertTrue('primaries' in o)
self.assertTrue(lines > 0)
def test_returns_index_per_line_just__all(self):
cmd = self.appendSecurityCommands(['watches', 'just_indices_stats', '-l'])
output = popen(cmd, stdout=PIPE).communicate()[0].decode('ascii')
self.assertTrue(len(output) > 0)
lines = 0
for line in output.splitlines():
lines += 1
o = json.loads(line)
self.assertTrue('index' in o)
self.assertTrue(o['index'] == '_all')
# Without specifying --level=indices we get only _all index stats
self.assertTrue(lines == 1)
the-stack_106_27800
import os
import pprint
import warnings
os.environ['TF_CPP_MIN_LOG_LEVEL']='3'
warnings.filterwarnings("ignore",category=FutureWarning)
from model import Model
import numpy as np
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
flags = tf.app.flags
flags.DEFINE_string("arch", "FSRCNN", "Model name [FSRCNN]")
flags.DEFINE_boolean("fast", False, "Use the fast model (FSRCNN-s) [False]")
flags.DEFINE_integer("epoch", 10, "Number of epochs [10]")
flags.DEFINE_integer("batch_size", 32, "The size of batch images [32]")
flags.DEFINE_float("learning_rate", 1e-4, "The learning rate of the adam optimizer [1e-4]")
flags.DEFINE_integer("scale", 2, "The size of scale factor for preprocessing input image [2]")
flags.DEFINE_integer("radius", 1, "Max radius of the deconvolution input tensor [1]")
flags.DEFINE_string("checkpoint_dir", "checkpoint", "Name of checkpoint directory [checkpoint]")
flags.DEFINE_string("output_dir", "result", "Name of test output directory [result]")
flags.DEFINE_string("data_dir", "Train", "Name of data directory to train on [FastTrain]")
flags.DEFINE_boolean("train", True, "True for training, false for testing [True]")
flags.DEFINE_boolean("distort", False, "Distort some images with JPEG compression artifacts after downscaling [False]")
flags.DEFINE_boolean("params", False, "Save weight and bias parameters [False]")
FLAGS = flags.FLAGS
pp = pprint.PrettyPrinter()
def main(_):
#pp.pprint(flags.FLAGS.__flags)
if FLAGS.fast:
FLAGS.checkpoint_dir = 'fast_{}'.format(FLAGS.checkpoint_dir)
if not os.path.exists(FLAGS.checkpoint_dir):
os.makedirs(FLAGS.checkpoint_dir)
if not os.path.exists(FLAGS.output_dir):
os.makedirs(FLAGS.output_dir)
with tf.Session() as sess:
model = Model(sess, config=FLAGS)
model.run()
if __name__ == '__main__':
tf.app.run()
the-stack_106_27801
# -*- coding: utf-8 -*-
"""
dp for Tornado
MVC Web Application Framework with Tornado
http://github.com/why2pac/dp-tornado
Copyright (c) 2015, why2pac <[email protected]>
"""
import tornado.web
import tornado.ioloop
import tornado.httpserver
import time
import os
import multiprocessing
import importlib
from dp_tornado.engine.engine import EngineSingleton as dpEngineSingleton
from dp_tornado.engine.bootstrap import Bootstrap as EngineBootstrap
from dp_tornado.engine.scheduler import Scheduler
from dp_tornado.engine.testing import Testing
from dp_tornado.engine.plugin.static import Compressor
from dp_tornado.engine.plugin.static import StaticURL
from dp_tornado.engine.plugin.pagination import Pagination
from dp_tornado.engine.plugin import ui_methods
from dp_tornado.version import __version_info__
engine = dpEngineSingleton()
class RestfulApplication(tornado.web.Application):
def __init__(self, handlers, kwargs):
self.startup_at = int(round(time.time() * 1000))
super(RestfulApplication, self).__init__(handlers, **kwargs)
class Bootstrap(object):
def run(self, **kwargs):
cli = kwargs['as_cli'] if 'as_cli' in kwargs and kwargs['as_cli'] else False
dryrun = True if cli and cli.args.dryrun == 'yes' else False
custom_scheduler = kwargs['scheduler'] if 'scheduler' in kwargs else None
custom_service = kwargs['service'] if 'service' in kwargs else None
if cli and cli.args.ini:
custom_config_file = cli.args.ini
else:
custom_config_file = kwargs['config_file'] if 'config_file' in kwargs else 'config.ini'
application_path = kwargs['application_path'] if 'application_path' in kwargs else None
engine_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'engine')
engine_static_path = os.path.join(engine_path, 'static')
os.environ['DP_APPLICATION_PATH'] = application_path
os.environ['DP_APPLICATION_INI'] = custom_config_file
settings = EngineBootstrap.init_ini(
application_path=application_path, ini_file=custom_config_file, cli=cli)
engine.logger.sys_log('---------------------------------')
engine.logger.sys_log('dp for Python v%s' % '.'.join([str(e) for e in __version_info__]))
engine.logger.sys_log('---------------------------------')
services_raw = [
(r"/dp/scheduler/(.*)", 'dp_tornado.engine.scheduler_handler.SchedulerHandler'),
(r"/dp/identifier", 'dp_tornado.engine.dp_identifier_handler.DpIdentifierHandler'),
(r"/dp/(.*)", 'dp_tornado.engine.static_handler.StaticHandler', {'path': engine_static_path}),
(r"/", None),
(r"/(.*)", None),
]
if custom_service:
services_raw = custom_service + services_raw
services = []
default_handler = None
for service in services_raw:
if len(service) < 2:
raise Exception('The specified service is invalid.')
if service[1] is not None:
s = str.split(service[1], '.')
class_name = s.pop()
module_path = '.'.join(s)
handler_module = importlib.import_module(module_path)
handler = getattr(handler_module, class_name)
else:
if default_handler is None:
handler_module = importlib.import_module('dp_tornado.engine.default_handler')
default_handler = getattr(handler_module, 'DefaultHandler')
module_path = 'controller'
handler = default_handler
services.append((service[0], handler, dict(prefix=module_path) if len(service) < 3 else service[2]))
# Clear combined files
Compressor.clear(settings['combined_static_path'])
num_processed = engine.ini.server.num_processes if engine.ini.server.num_processes \
else multiprocessing.cpu_count()
deploy_mode = 'Production' if not engine.ini.server.debug else 'Debugging'
identify_mode = engine.ini.app.mode
engine.logger.sys_log('Server Mode : %s (%s)' % (deploy_mode, identify_mode))
engine.logger.sys_log('Server time : %s' % time.strftime('%Y.%m.%d %H:%M:%S'))
engine.logger.sys_log('Server Port : %s' % engine.ini.server.port)
engine.logger.sys_log('Processors : %s' % num_processed)
engine.logger.sys_log('CPU Count : %d' % multiprocessing.cpu_count())
engine.logger.sys_log('---------------------------------')
if not Testing('', application_path, doctest=True).traverse() and engine.ini.server.get('assert'):
return
application = RestfulApplication(services, settings)
service = tornado.httpserver.HTTPServer(
application,
xheaders=True,
max_body_size=engine.ini.server.max_body_size)
try:
service.bind(engine.ini.server.port, '')
except Exception as e:
engine.logger.error('Failed to bind service. (port %s)' % engine.ini.server.port)
engine.logger.error(e)
return False
if custom_scheduler:
scheduler = Scheduler(custom_scheduler)
scheduler.start()
else:
scheduler = None
service.start(engine.ini.server.num_processes)
import random
application.identifier = random.randint(100000, 999999)
engine.logger.start_handler()
try:
instance = tornado.ioloop.IOLoop.instance()
instance.__setattr__('startup_at', getattr(application, 'startup_at'))
if scheduler:
scheduler.prepare()
if not dryrun:
instance.start()
except KeyboardInterrupt:
pass
if scheduler:
scheduler.interrupt()
engine.logger.interrupt()
the-stack_106_27802
from collections import OrderedDict
from guillotina import routes
from guillotina._settings import app_settings
from guillotina.configure import component
from guillotina.configure.behaviors import BehaviorAdapterFactory
from guillotina.configure.behaviors import BehaviorRegistration
from guillotina.exceptions import ConfigurationError
from guillotina.exceptions import ServiceConfigurationError
from guillotina.gtypes import ConfigurationType
from guillotina.gtypes import ResolvableType
from guillotina.interfaces import DEFAULT_ADD_PERMISSION
from guillotina.interfaces import IBehavior
from guillotina.interfaces import IBehaviorSchemaAwareFactory
from guillotina.interfaces import IDefaultLayer
from guillotina.interfaces import IJSONToValue
from guillotina.interfaces import ILanguage
from guillotina.interfaces import IPermission
from guillotina.interfaces import IRenderer
from guillotina.interfaces import IRequest
from guillotina.interfaces import IResource
from guillotina.interfaces import IResourceFactory
from guillotina.interfaces import IRole
from guillotina.interfaces import IValueToJson
from guillotina.interfaces import IView
from guillotina.security.permission import Permission
from guillotina.utils import get_caller_module
from guillotina.utils import get_module_dotted_name
from guillotina.utils import resolve_dotted_name
from guillotina.utils import resolve_module_path
from pprint import pformat
from typing import Any
from typing import Dict
from typing import Optional
from typing import Tuple
from zope.interface import classImplements
from zope.interface import Interface
import asyncio
import inspect
import logging
_registered_configurations: ConfigurationType = []
# stored as tuple of (type, configuration) so we get keep it in the order
# it is registered even if you mix types of registrations
_registered_configuration_handlers = {}
logger = logging.getLogger('guillotina')
def get_configurations(module_name, type_=None):
results = []
for reg_type, registration in _registered_configurations:
if type_ is not None and reg_type != type_:
continue
config = registration['config']
module = config.get('module', registration.get('klass'))
if (get_module_dotted_name(
resolve_dotted_name(module)) + '.').startswith(module_name + '.'):
results.append((reg_type, registration))
return results
def register_configuration_handler(type_, handler):
_registered_configuration_handlers[type_] = handler
def register_configuration(klass: ResolvableType, config: Dict[str, Any], type_: str):
value = (type_, {
'klass': klass,
'config': config
})
if value not in _registered_configurations:
# do not register twice
_registered_configurations.append(value)
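# Illustration of what gets stored (MyView and the config dict are placeholders,
# not real registrations):
#   register_configuration(MyView, {'method': 'GET'}, 'service')
# appends ('service', {'klass': MyView, 'config': {'method': 'GET'}}) to
# _registered_configurations, and the membership check above guarantees the
# same tuple is only ever stored once.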
def load_configuration(_context, module_name, _type):
if _type not in _registered_configuration_handlers:
raise Exception('Configuration handler for {} not registered'.format(_type))
for _type, configuration in get_configurations(module_name, _type):
_registered_configuration_handlers[_type](_context, configuration)
def load_all_configurations(_context, module_name):
for type_, configuration in get_configurations(module_name):
try:
_registered_configuration_handlers[type_](_context, configuration)
except TypeError as e:
logger.error('Can not find %s module' % configuration)
raise
def load_service(_context, service):
# prevent circular import
from guillotina.security.utils import protect_view
service_conf = service['config']
factory = resolve_dotted_name(service['klass'])
permission = service_conf.get(
'permission', app_settings.get('default_permission', None))
protect_view(factory, permission)
method = service_conf.get('method', 'GET')
default_layer = resolve_dotted_name(
app_settings.get('default_layer', IDefaultLayer))
layer = service_conf.get('layer', default_layer)
name = service_conf.get('name', '')
content = service_conf.get('context', Interface)
logger.debug('Defining adapter for ' # noqa
'{0:s} {1:s} {2:s} to {3:s} name {4:s}'.format(
content.__identifier__,
app_settings['http_methods'][method].__identifier__,
layer.__identifier__,
str(factory),
name))
if not getattr(factory, '__route__', None):
factory.__route__ = routes.Route(name)
component.adapter(
_context,
factory=(factory,),
provides=app_settings['http_methods'][method],
for_=(content, layer),
name=factory.__route__.view_name
)
api = app_settings['api_definition']
ct_name = content.__identifier__
if ct_name not in api:
api[ct_name] = OrderedDict()
ct_api = api[ct_name]
if name:
if 'endpoints' not in ct_api:
ct_api['endpoints'] = OrderedDict()
if name not in ct_api['endpoints']:
ct_api['endpoints'][name] = OrderedDict()
ct_api['endpoints'][name][method] = OrderedDict(service_conf)
else:
ct_api[method] = OrderedDict(service_conf)
register_configuration_handler('service', load_service) # noqa
def load_contenttype(_context, contenttype):
conf = contenttype['config']
klass = contenttype['klass']
if 'schema' in conf:
classImplements(klass, conf['schema'])
from guillotina.content import ResourceFactory
factory = ResourceFactory(
klass,
title='',
description='',
type_name=conf['type_name'],
schema=resolve_dotted_name(conf.get('schema', Interface)),
behaviors=[resolve_dotted_name(b) for b in conf.get('behaviors', []) or ()],
add_permission=conf.get('add_permission') or DEFAULT_ADD_PERMISSION,
allowed_types=conf.get('allowed_types', None)
)
component.utility(
_context,
provides=IResourceFactory,
component=factory,
name=conf['type_name'],
)
register_configuration_handler('contenttype', load_contenttype) # noqa
def load_behavior(_context, behavior):
conf = behavior['config']
klass = resolve_dotted_name(behavior['klass'])
factory = conf.get('factory') or klass
real_factory = resolve_dotted_name(factory)
schema = resolve_dotted_name(conf['provides'])
classImplements(real_factory, schema)
name = conf.get('name')
name_only = conf.get('name_only', False)
title = conf.get('title', '')
for_ = resolve_dotted_name(conf.get('for_'))
marker = resolve_dotted_name(conf.get('marker'))
if marker is None and real_factory is None:
marker = schema
if marker is not None and real_factory is None and marker is not schema:
raise ConfigurationError(
u"You cannot specify a different 'marker' and 'provides' if "
u"there is no adapter factory for the provided interface."
)
if name_only and name is None:
raise ConfigurationError(
u"If you decide to only register by 'name', a name must be given."
)
# Instantiate the real factory if it's the schema-aware type. We do
# this here so that the for_ interface may take this into account.
if factory is not None and IBehaviorSchemaAwareFactory.providedBy(factory):
factory = factory(schema)
registration = BehaviorRegistration(
title=conf.get('title', ''),
description=conf.get('description', ''),
interface=schema,
marker=marker,
factory=real_factory,
name=name,
for_=for_
)
if not name_only:
# behavior registration by provides interface identifier
component.utility(
_context,
provides=IBehavior,
name=schema.__identifier__,
component=registration
)
if name is not None:
# for convenience we register with a given name
component.utility(
_context,
provides=IBehavior,
name=name,
component=registration
)
if factory is None:
if for_ is not None:
logger.warning(
u"Specifying 'for' in behavior '{0}' if no 'factory' is given "
u"has no effect and is superfluous.".format(title)
)
# w/o factory we're done here
return
if for_ is None:
# Attempt to guess the factory's adapted interface and use it as
# the 'for_'.
# Fallback to '*' (=Interface).
adapts = getattr(factory, '__component_adapts__', None) or [Interface]
if len(adapts) != 1:
raise ConfigurationError(
u"The factory can not be declared as multi-adapter."
)
for_ = adapts[0]
adapter_factory = BehaviorAdapterFactory(registration)
component.adapter(
_context,
factory=(adapter_factory,),
provides=schema,
for_=(for_,)
)
register_configuration_handler('behavior', load_behavior) # noqa
def load_addon(_context, addon):
config = addon['config']
app_settings['available_addons'][config['name']] = {
'title': config['title'],
'handler': addon['klass']
}
register_configuration_handler('addon', load_addon) # noqa
def _component_conf(conf):
if type(conf['for_']) not in (tuple, set, list):
conf['for_'] = (conf['for_'],)
def load_adapter(_context, adapter):
conf = adapter['config']
klass = resolve_dotted_name(adapter['klass'])
factory = conf.pop('factory', None) or klass
_component_conf(conf)
if 'provides' in conf and isinstance(klass, type):
# not sure if this is what we want or not for sure but
# we are automatically applying the provides interface to
# registered class objects
classImplements(klass, conf['provides'])
component.adapter(
_context,
factory=(factory,),
**conf
)
register_configuration_handler('adapter', load_adapter) # noqa
def load_subscriber(_context, subscriber):
conf = subscriber['config']
conf['handler'] = resolve_dotted_name(conf.get('handler') or subscriber['klass'])
_component_conf(conf)
component.subscriber(
_context,
**conf
)
register_configuration_handler('subscriber', load_subscriber) # noqa
def load_utility(_context, _utility):
conf = _utility['config']
if 'factory' in conf:
conf['factory'] = resolve_dotted_name(conf['factory'])
elif 'component' in conf:
conf['component'] = resolve_dotted_name(conf['component'])
else:
# use provided klass
klass = _utility['klass']
if isinstance(klass, type):
# is a class type, use factory setting
conf['factory'] = klass
else:
# not a factory
conf['component'] = klass
component.utility(
_context,
**conf
)
register_configuration_handler('utility', load_utility) # noqa
def load_permission(_context, permission_conf):
permission = Permission(**permission_conf['config'])
component.utility(_context, IPermission, permission,
name=permission_conf['config']['id'])
register_configuration_handler('permission', load_permission) # noqa
def load_role(_context, role):
defineRole_directive(_context, **role['config'])
register_configuration_handler('role', load_role) # noqa
def load_grant(_context, grant):
grant_directive(_context, **grant['config'])
register_configuration_handler('grant', load_grant) # noqa
def load_grant_all(_context, grant_all):
grantAll_directive(_context, **grant_all['config'])
register_configuration_handler('grant_all', load_grant_all) # noqa
def load_json_schema_definition(_context, json_schema):
config = json_schema['config']
app_settings['json_schema_definitions'][config['name']] = config['schema']
register_configuration_handler('json_schema_definition', load_json_schema_definition) # noqa
# serializers are just adapters
register_configuration_handler('value_serializer', load_adapter)
register_configuration_handler('value_deserializer', load_adapter)
# renderers, languages are just adapters
register_configuration_handler('renderer', load_adapter)
register_configuration_handler('language', load_adapter)
class _base_decorator(object): # noqa: N801
configuration_type: Optional[str] = None
def __init__(self, **config):
self.config = config
def __call__(self, klass):
register_configuration(klass, self.config, self.configuration_type)
return klass
class _factory_decorator(_base_decorator): # noqa: N801
"""
behavior that can pass factory to it so it can be used standalone
"""
def __call__(self, klass=None):
if klass is None:
if 'factory' not in self.config:
raise Exception('Must provide factory configuration when defining '
'without a class')
klass = get_caller_module()
return super(_factory_decorator, self).__call__(klass)
def _has_parameters(func, number=2):
sig = inspect.signature(func)
required_params = [p for p in sig.parameters.keys()
if sig.parameters[p].default == inspect.Parameter.empty]
return len(sig.parameters) >= number and not len(required_params) > number
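# Quick illustration of the rule above (the handler name is hypothetical):
#   async def my_service(context, request): ...
# has exactly two parameters, both required, so _has_parameters(my_service)
# is True; a handler with three required parameters would be rejected.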
class service(_base_decorator): # noqa: N801
def __call__(self, func):
self.config['module'] = func
if isinstance(func, type):
if not hasattr(func, '__call__'):
raise ServiceConfigurationError(
f'Service must have async def __call__ method: {func.__call__}\n'
f'{pformat(self.config)}'
)
if not asyncio.iscoroutinefunction(func.__call__):
raise ServiceConfigurationError(
f'Service __call__ method must be async: {func.__call__}\n'
f'{pformat(self.config)}'
)
class _View(func):
__allow_access__ = self.config.get(
'allow_access', getattr(func, '__allow_access__', False))
__route__ = routes.Route(self.config.get('name', ''))
register_configuration(_View, self.config, 'service')
else:
if not _has_parameters(func):
raise ServiceConfigurationError(
f'Service configuration must accept 2 required parameters: {func}\n'
f'{pformat(self.config)}')
if not asyncio.iscoroutinefunction(func):
raise ServiceConfigurationError(
f'Service function must be async: {func}\n'
f'{pformat(self.config)}'
)
# avoid circular imports
from guillotina.api.service import Service
class _View(self.config.get('base', Service)):
__allow_access__ = self.config.get('allow_access', False)
__route__ = routes.Route(self.config.get('name', ''))
view_func = staticmethod(func)
async def __call__(self):
return await func(self.context, self.request)
register_configuration(_View, self.config, 'service')
return func
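# A minimal sketch of how this decorator is usually applied; the interface,
# permission string and endpoint name below are illustrative assumptions:
#   @service(context=IResource, method='GET',
#            permission='guillotina.AccessContent', name='@hello')
#   async def hello(context, request):
#       return {'hello': 'world'}
# The function branch above wraps `hello` in a Service subclass whose
# __call__ simply awaits hello(self.context, self.request).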
class generic_adapter(_base_decorator): # noqa: N801
provides: Interface = None
for_: Optional[Tuple[Interface, ...]] = None
multi = False
def __init__(self, for_=None, **config):
if for_ is not None:
config['for_'] = for_
if 'provides' not in config and self.provides is not None:
config['provides'] = self.provides
if 'for_' not in config and self.for_ is not None:
config['for_'] = self.for_
if not self.multi:
assert type(config['for_']) not in (list, set, tuple)
self.config = config
class value_serializer(generic_adapter): # noqa: N801
configuration_type = 'value_serializer'
provides = IValueToJson
class value_deserializer(generic_adapter): # noqa: N801
configuration_type = 'value_deserializer'
provides = IJSONToValue
class renderer(generic_adapter): # noqa: N801
configuration_type = 'renderer'
provides = IRenderer
for_ = (IView, IRequest)
multi = True
class language(generic_adapter): # noqa: N801
configuration_type = 'language'
provides = ILanguage
for_ = (IResource, IRequest)
multi = True
class contenttype(_base_decorator): # noqa: N801
configuration_type = 'contenttype'
class behavior(_factory_decorator): # noqa: N801
configuration_type = 'behavior'
class addon(_base_decorator): # noqa: N801
configuration_type = 'addon'
class adapter(_factory_decorator): # noqa: N801
configuration_type = 'adapter'
class subscriber(_factory_decorator): # noqa: N801
configuration_type = 'subscriber'
def __call__(self, klass=None):
klass.priority = self.config.pop('priority', 100)
return super().__call__(klass)
class utility(_factory_decorator): # noqa: N801
configuration_type = 'utility'
def permission(id, title, description=''):
register_configuration(
get_caller_module(),
dict(
id=id,
title=title,
description=description),
'permission')
def role(id, title, description='', local=True):
register_configuration(
get_caller_module(),
dict(
id=id,
title=title,
description=description,
local=local),
'role')
def grant(principal=None, role=None, permission=None,
permissions=None):
register_configuration(
get_caller_module(),
dict(
principal=principal,
role=role,
permission=permission,
permissions=permissions),
'grant')
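# Example of registering a grant via this helper; the permission and role
# identifiers are assumptions for illustration only:
#   grant(permission='guillotina.ViewContent', role='guillotina.Member')
# grant_directive() later turns this registration into a
# grant_permission_to_role configuration action.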
def grant_all(principal=None, role=None):
register_configuration(
get_caller_module(),
dict(
principal=principal,
role=role),
'grant_all')
def json_schema_definition(name, schema):
register_configuration(
get_caller_module(),
dict(name=name, schema=schema),
'json_schema_definition')
def grant_directive(
_context, principal=None, role=None, permission=None,
permissions=None):
from guillotina.security.security_code import role_permission_manager as role_perm_mgr
from guillotina.security.security_code import principal_permission_manager as principal_perm_mgr
from guillotina.security.security_code import principal_role_manager as principal_role_mgr
nspecified = (
(principal is not None) +
(role is not None) +
(permission is not None) +
(permissions is not None))
permspecified = (
(permission is not None) +
(permissions is not None))
if nspecified != 2 or permspecified == 2:
raise ConfigurationError(
"Exactly two of the principal, role, and permission resp. "
"permissions attributes must be specified")
if permission:
permissions = [permission]
if principal and role:
_context.action(
discriminator=('grantRoleToPrincipal', role, principal),
callable=principal_role_mgr.assign_role_to_principal,
args=(role, principal),
)
elif principal and permissions:
for permission in permissions:
_context.action(
discriminator=('grantPermissionToPrincipal',
permission,
principal),
callable=principal_perm_mgr.grant_permission_to_principal,
args=(permission, principal),
)
elif role and permissions:
for permission in permissions:
_context.action(
discriminator=('grantPermissionToRole', permission, role),
callable=role_perm_mgr.grant_permission_to_role,
args=(permission, role),
)
def grantAll_directive(_context, principal=None, role=None): # noqa: N802
"""Grant all permissions to a role or principal
"""
from guillotina.security.security_code import role_permission_manager
from guillotina.security.security_code import principal_permission_manager
nspecified = (
(principal is not None) +
(role is not None))
if nspecified != 1:
raise ConfigurationError(
"Exactly one of the principal and role attributes "
"must be specified")
if principal:
_context.action(
discriminator=('grantAllPermissionsToPrincipal',
principal),
callable=principal_permission_manager.grantAllPermissionsToPrincipal,
args=(principal, ),
)
else:
_context.action(
discriminator=('grantAllPermissionsToRole', role),
callable=role_permission_manager.grantAllPermissionsToRole,
args=(role, ),
)
def defineRole_directive(_context, id, title, description='', local=True): # noqa: N802
from guillotina.auth.role import Role
role = Role(id, title, description, local)
component.utility(_context, IRole, role, name=id)
def scan(path):
"""
pyramid's version of scan has a much more advanced resolver that we
can look into supporting eventually...
"""
path = resolve_module_path(path)
__import__(path)
def clear():
_registered_configurations[:] = []
the-stack_106_27803
"""Main battle system implementation."""
from random import randint
# items
from items import antidote
from items import cookie
from items import echo_screen
from items import health_kit
from items import power_potion
from items import pp_restore
# moves
from moves import blast
from moves import blaze
from moves import counter
from moves import disable
from moves import drain
from moves import focus
from moves import glare
from moves import harden
from moves import kick
from moves import mimic
from moves import poison
from moves import sap
from moves import sing
from moves import tackle
from utils import Printer
from actions import Action
MOVES = [
tackle,
poison,
counter,
sing,
harden,
sap,
blast,
focus,
disable,
kick,
blaze,
mimic,
drain,
glare
]
ITEMS = [
cookie,
health_kit,
power_potion,
pp_restore,
antidote,
echo_screen
]
ALL_MOVES_COUNT = len(MOVES)
ALL_ITEMS_COUNT = len(ITEMS)
MOVE_COUNT = 3
MAX_ITEMS = 10
MAX_COST = 500
STAT_POINTS = 100
HP_W, PP_W, STR_W, DEF_W, SPEC_W = 1, 1, 2, 2, 2
def verify(csweekmon, max_cost=MAX_COST, stat_points=STAT_POINTS):
"""Verify that the players have valid initialisation."""
item_cost = [x.COST for x in ITEMS]
stats = csweekmon.stats
health = stats['HP']
ppoints = stats['PP']
strength = stats['Strength']
defense = stats['Defense']
special = stats['Special']
moves = stats['Moves']
items = stats['Items']
effects = stats['Effects']
return (all([x >= 0 for x in [ppoints, strength, defense, special]])
and health > 0
and HP_W * health + PP_W * ppoints + STR_W * strength +
DEF_W * defense + SPEC_W * special <= stat_points
and len(moves) == MOVE_COUNT
and all([i in range(ALL_MOVES_COUNT) for i in moves])
and len(items) <= MAX_ITEMS
and all([i in range(ALL_ITEMS_COUNT) for i in items])
and sum([item_cost[i] for i in items]) <= max_cost
and effects == [])
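# Illustrative stat block that satisfies the checks above (values are made up,
# not taken from any shipped agent):
#   stats = {'HP': 40, 'PP': 20, 'Strength': 10, 'Defense': 5, 'Special': 5,
#            'Moves': [0, 1, 2], 'Items': [1, 4], 'Effects': []}
# weighted cost = 1*40 + 1*20 + 2*10 + 2*5 + 2*5 = 100 <= STAT_POINTS, three
# valid move indices and at most MAX_ITEMS items, so verify() accepts it as
# long as the chosen items stay within MAX_COST.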
def write_stats(turn_number, agent_fst, agent_snd):
"""Displays both players' basic stats on the screen."""
Printer.print_ui()
Printer.print_ui('----- TURN {} ----------------------'.format(turn_number))
Printer.print_ui('[{}] HP: {} PP: {}'.format(agent_fst.name,
agent_fst.stats['HP'], agent_fst.stats['PP']))
Printer.print_ui('[{}] HP: {} PP: {}'.format(agent_snd.name,
agent_snd.stats['HP'], agent_snd.stats['PP']))
Printer.print_ui()
def process_effects(agent_cur):
"""Process all status effects that may affect the current player.
:param agent_cur: agent (AI instance) that needs to have effects inflicted
:return: returns a pair of boolean values (will_die, will_skip), stating whether to KO
the agent or skip the turn, respectively"""
will_die, will_skip = False, False
agent_cur.stats['Defense'] = agent_cur.stats['Base Defense']
if 'Sleep' in agent_cur.stats['Effects']:
if 'Focus' in agent_cur.stats['Effects']:
agent_cur.stats['Effects'].remove('Focus')
if sing.wakeup():
Printer.print_ui(' {} wakes up!'.format(agent_cur.name))
agent_cur.stats['Effects'].remove('Sleep')
Printer.delay_ui(1)
else:
try:
Printer.print_ui(' {} is still asleep. 💤'.format(agent_cur.name))
except UnicodeEncodeError:
Printer.print_ui(' {} is still asleep. *Zzz*'.format(agent_cur.name))
will_skip = True
if 'Poison' in agent_cur.stats['Effects']:
damage = poison.latent(agent_cur)
try:
Printer.print_ui(' {} loses {} HP due to Poison! ☠'.format(agent_cur.name, damage))
except UnicodeEncodeError:
Printer.print_ui(' {} loses {} HP due to Poison!'.format(agent_cur.name, damage))
agent_cur.stats['HP'] -= damage
Printer.delay_ui(1)
if agent_cur.stats['HP'] <= 0:
will_die = True
if 'Disable' in agent_cur.stats['Effects']:
Printer.print_ui(' {} is Disabled.'.format(agent_cur.name))
Printer.delay_ui(1)
# Focus not processed here
return will_die, will_skip
def knock_out(agent_a, agent_b):
"""Handle KO of first agent in line."""
Printer.print_ui(' {} is knocked out!'.format(agent_a.name))
Printer.delay_ui(2)
Printer.print_ui(' {} jumps into battle.'.format(agent_b.name))
agent_b.strategy.set_order_info(True)
return agent_b
def run_battle(agent_fst_a, agent_snd_a, agent_fst_b, agent_snd_b):
"""Have two players fight each other."""
Printer.delay_ui(1)
Printer.print_ui('============================================================')
Printer.print_ui(' {} is walking...'.format(agent_fst_a.name))
Printer.delay_ui(1)
Printer.print_ui(' ...a wild {} appears!'.format(agent_snd_a.name))
turn_number = 0
max_turns = 80
current_player = 2
agent_fst, agent_snd = agent_fst_a, agent_snd_a
# player turns
while turn_number < max_turns:
turn_number += 1
# determine who plays now
current_player = 3 - current_player
if current_player == 1:
agent_cur, agent_oth = agent_fst, agent_snd
else:
agent_cur, agent_oth = agent_snd, agent_fst
# UI
Printer.delay_ui(1)
write_stats(turn_number, agent_fst, agent_snd)
Printer.delay_ui(1)
# status effects logic
will_die, will_continue = process_effects(agent_cur)
if will_die:
current_player = 3 - current_player
if agent_cur == agent_fst_a:
agent_fst = knock_out(agent_fst_a, agent_fst_b)
continue
elif agent_cur == agent_snd_a:
agent_snd = knock_out(agent_snd_a, agent_snd_b)
continue
else:
# agent_cur = agent_oth
break
if will_continue:
continue
# pass status information to current player
agent_cur.give_stats_info(agent_oth.stats)
# player makes decision, unless delayed move
if 'Focus' in agent_cur.stats['Effects']:
focus.finally_perform(agent_cur, agent_oth)
if agent_oth.stats['HP'] <= 0:
if agent_oth == agent_fst_a:
current_player = 3 - current_player
agent_fst = knock_out(agent_fst_a, agent_fst_b)
continue
elif agent_oth == agent_snd_a:
current_player = 3 - current_player
agent_snd = knock_out(agent_snd_a, agent_snd_b)
continue
else:
continue
action, detail = agent_cur.choose_action()
# process the player's decision
if action == Action.PERFORM_MOVE: # use move
if detail < 0 or detail >= MOVE_COUNT:
Printer.print_ui(
' {} tries to perform a move, but stumbles!'.format(agent_cur.name))
else:
move = MOVES[agent_cur.stats['Moves'][detail]]
Printer.print_ui(' {} uses {}.'.format(agent_cur.name, move.NAME))
Printer.delay_ui(1)
if agent_cur.stats['PP'] < move.PP_COST:
Printer.print_ui(' But {} does not have enough PP!'.format(agent_cur.name))
elif 'Disable' in agent_cur.stats['Effects'] and move.CAN_DISABLE:
Printer.print_ui(' But {} is Disabled!'.format(agent_cur.name))
else:
agent_cur.stats['PP'] -= move.PP_COST
agent_cur.stats['Previous move'] = move
move.perform(agent_cur, agent_oth)
if agent_oth.stats['HP'] <= 0:
if agent_oth == agent_fst_a:
agent_fst = knock_out(agent_fst_a, agent_fst_b)
current_player = 3 - current_player
continue
elif agent_oth == agent_snd_a:
agent_snd = knock_out(agent_snd_a, agent_snd_b)
continue
else:
break
elif action == Action.USE_ITEM: # use item
if detail < 0 or detail >= MAX_ITEMS:
Printer.print_ui(' {} tries to use an item, but stumbles!'.format(agent_cur.name))
else:
item_index = agent_cur.stats['Items'][detail]
if item_index == -1:
Printer.print_ui(' {} tries to use an item, but it\'s not ' \
'there!'.format(agent_cur.name))
else:
item = ITEMS[item_index]
if item.NAME[0] in ['A', 'E', 'I', 'O', 'U']:
Printer.print_ui(' {} uses an {}.'.format(agent_cur.name, item.NAME))
else:
Printer.print_ui(' {} uses a {}.'.format(agent_cur.name, item.NAME))
item.use(agent_cur, agent_oth)
agent_cur.stats['Items'][detail] = -1
elif action == Action.BLOCK: # block
Printer.print_ui(' {} blocks.'.format(agent_cur.name))
# temporary increase in Defense
agent_cur.stats['Defense'] += randint(8, 12)
# restore 3 to 5 PP
agent_cur.stats['PP'] = min(agent_cur.stats['Max PP'],
agent_cur.stats['PP'] + randint(3, 5))
else:
Printer.print_ui(' {} stumbles!'.format(agent_cur.name))
Printer.delay_ui(1)
Printer.print_ui()
Printer.print_ui()
Printer.print_ui(' Match over!')
if agent_fst.stats['HP'] > 0 and agent_snd.stats['HP'] > 0:
current_player = 0
Printer.print_ui('============================================================')
# Printer.print_ui()
return current_player
the-stack_106_27804
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import boto3
import datetime
from ecs_crd.canaryReleaseDeployStep import CanaryReleaseDeployStep
from ecs_crd.rollbackChangeRoute53WeightsStep import RollbackChangeRoute53WeightsStep
from ecs_crd.destroyBlueStackStep import DestroyBlueStackStep
from ecs_crd.sendNotificationBySnsStep import SendNotificationBySnsStep
class UpdateCanaryReleaseInfoStep(CanaryReleaseDeployStep):
def __init__(self, infos, logger):
"""initializes a new instance of the class"""
super().__init__(infos,'Update CanaryRelease Info (dynamo db)', logger)
def _on_execute(self):
"""operation containing the processing performed by this step"""
try:
client = boto3.resource('dynamodb', region_name=self.infos.region)
table = client.Table('canary_release')
if self.infos.action == 'deploy':
if self._exist_item(client, table):
self._update_item(table)
else:
self._insert_item(table)
if self.infos.action == 'undeploy':
self._delete_item(table)
return DestroyBlueStackStep(self.infos, self.logger)
except Exception as e:
self.logger.error('UpdateCanaryReleaseInfoStep', exc_info=True)
self.infos.exit_exception = e
self.infos.exit_code = 4
return RollbackChangeRoute53WeightsStep(self.infos, self.logger)
def _exist_item(self, client, table):
"""check if exist item in dynamoDB table"""
try:
response = table.get_item(Key={'id': self.infos.get_hash()})
# the item exists only when the 'Item' key is present and non-empty
return 'Item' in response and bool(response['Item'])
except client.exceptions.ResourceNotFoundException:
return False
except Exception:
raise
def _delete_item(self, table):
"""delete item in dynamo db table """
table.delete_item(Key={'id': self.infos.get_hash()})
def _update_item(self, table):
"""update item in dynamo db table """
table.update_item(
Key={'id': self.infos.get_hash()},
UpdateExpression="set service_version=:v, canary_release=:c, alb_arn=:a, deploy_at=:d, stack_name=:s",
ExpressionAttributeValues={
':v': self.infos.service_version,
':c': self.infos.green_infos.stack['Parameters']['CanaryRelease']['Default'],
':a': self.infos.green_infos.stack['Parameters']['LoadBalancer']['Default'],
':d': str(datetime.datetime.now().replace(microsecond=0).isoformat()),
':s': self.infos.green_infos.stack_name
},
ReturnValues="UPDATED_NEW"
)
def _insert_item(self, table):
"""insert item in dynamo db table """
item = {}
item['id'] = self.infos.get_hash()
item['canary_group'] = self.infos.canary_group
item['service_name'] = self.infos.service_name
item['service_version'] = self.infos.service_version
item['environment'] = self.infos.environment
item['project'] = self.infos.project
item['region'] = self.infos.region
item['canary_release'] = self.infos.green_infos.stack['Parameters']['CanaryRelease']['Default']
item['alb_arn'] = self.infos.green_infos.stack['Parameters']['LoadBalancer']['Default']
item['stack_name'] = self.infos.green_infos.stack_name
item['deploy_at'] = str(datetime.datetime.now().replace(microsecond=0).isoformat())
item['ecs_crd_version'] = self.infos.ecs_crd_version
table.put_item(Item=item)
the-stack_106_27805
from typing import List, Dict, Optional
import logging
from uuid import UUID
from sqlalchemy import select, update, delete
from sqlalchemy.exc import IntegrityError
from hetdesrun.utils import State, Type
from hetdesrun.persistence import SQLAlchemySession
from hetdesrun.persistence.dbservice.nesting import (
delete_own_nestings,
find_all_nested_transformation_revisions,
find_all_nesting_transformation_revisions,
update_nesting,
)
from hetdesrun.persistence.dbservice.exceptions import (
DBBadRequestError,
DBIntegrityError,
DBNotFoundError,
)
from hetdesrun.persistence import Session
from hetdesrun.persistence.models.transformation import TransformationRevision
from hetdesrun.persistence.models.workflow import WorkflowContent
from hetdesrun.persistence.dbmodels import TransformationRevisionDBModel
logger = logging.getLogger(__name__)
def add_tr(
session: SQLAlchemySession, transformation_revision: TransformationRevision
) -> None:
try:
db_model = transformation_revision.to_orm_model()
session.add(db_model)
except IntegrityError as e:
msg = (
f"Integrity Error while trying to store transformation revision "
f"with id {transformation_revision.id}. Error was:\n{str(e)}"
)
logger.error(msg)
raise DBIntegrityError(msg) from e
def store_single_transformation_revision(
transformation_revision: TransformationRevision,
) -> None:
with Session() as session, session.begin():
add_tr(session, transformation_revision)
if transformation_revision.type == Type.WORKFLOW:
assert isinstance(
transformation_revision.content, WorkflowContent
) # hint for mypy
update_nesting(
session, transformation_revision.id, transformation_revision.content
)
# pylint: disable=W0622
def select_tr_by_id(
session: SQLAlchemySession, id: UUID, log_error: bool = True
) -> TransformationRevision:
result = session.execute(
select(TransformationRevisionDBModel).where(
TransformationRevisionDBModel.id == id
)
).scalar_one_or_none()
if result is None:
msg = f"Found no transformation revision in database with id {id}"
if log_error:
logger.error(msg)
raise DBNotFoundError(msg)
return TransformationRevision.from_orm_model(result)
# pylint: disable=W0622
def read_single_transformation_revision(
id: UUID, log_error: bool = True
) -> TransformationRevision:
with Session() as session, session.begin():
return select_tr_by_id(session, id, log_error)
def update_tr(
session: SQLAlchemySession, transformation_revision: TransformationRevision
) -> None:
try:
db_model = transformation_revision.to_orm_model()
session.execute(
update(TransformationRevisionDBModel)
.where(TransformationRevisionDBModel.id == db_model.id)
.values(
revision_group_id=db_model.revision_group_id,
name=db_model.name,
description=db_model.description,
category=db_model.category,
version_tag=db_model.version_tag,
state=db_model.state,
type=db_model.type,
documentation=db_model.documentation,
workflow_content=db_model.workflow_content,
component_code=db_model.component_code,
io_interface=db_model.io_interface,
test_wiring=db_model.test_wiring,
released_timestamp=db_model.released_timestamp,
disabled_timestamp=db_model.disabled_timestamp,
)
)
except IntegrityError as e:
msg = (
f"Integrity Error while trying to update "
f"transformation revision with id {transformation_revision.id}.\n"
f"Error was:\n{str(e)}"
)
logger.error(msg)
raise DBIntegrityError(msg) from e
def pass_on_deprecation(session: SQLAlchemySession, transformation_id: UUID) -> None:
logger.debug(
"pass on deprecation for transformation revision %s", str(transformation_id)
)
sup_nestings = find_all_nesting_transformation_revisions(session, transformation_id)
for nesting in sup_nestings:
transformation_revision = select_tr_by_id(session, nesting.workflow_id)
assert isinstance(
transformation_revision.content, WorkflowContent
) # hint for mypy
for operator in transformation_revision.content.operators:
if operator.id == nesting.via_operator_id:
operator.state = State.DISABLED
update_tr(session, transformation_revision)
def update_or_create_single_transformation_revision(
transformation_revision: TransformationRevision,
) -> TransformationRevision:
with Session() as session, session.begin():
try:
select_tr_by_id(session, transformation_revision.id, log_error=False)
update_tr(session, transformation_revision)
except DBNotFoundError:
add_tr(session, transformation_revision)
if transformation_revision.state == State.DISABLED:
pass_on_deprecation(session, transformation_revision.id)
if transformation_revision.type == Type.WORKFLOW:
assert isinstance(
transformation_revision.content, WorkflowContent
) # hint for mypy
update_nesting(
session, transformation_revision.id, transformation_revision.content
)
return select_tr_by_id(session, transformation_revision.id)
# pylint: disable=W0622
def delete_single_transformation_revision(
id: UUID, type: Optional[Type] = None
) -> None:
with Session() as session, session.begin():
result = select_tr_by_id(session, id)
transformation_revision: TransformationRevision = result
if type is not None and transformation_revision.type != type:
msg = (
f"Transformation revision {id} has type {transformation_revision.type}"
f"Delete request with type {type} will not be executed"
)
logger.error(msg)
raise DBBadRequestError(msg)
if transformation_revision.state != State.DRAFT:
msg = (
f"Transformation revision {id} cannot be deleted "
f"since it is in the state {transformation_revision.state}"
)
logger.error(msg)
raise DBBadRequestError(msg)
delete_own_nestings(session, transformation_revision.id)
session.execute(
delete(TransformationRevisionDBModel).where(
TransformationRevisionDBModel.id == transformation_revision.id
)
)
# pylint: disable=W0622
def select_multiple_transformation_revisions(
category: Optional[str] = None,
revision_group_id: Optional[UUID] = None,
type: Optional[Type] = None,
state: Optional[State] = None,
) -> List[TransformationRevision]:
"""Filterable selection of transformation revisions from db"""
with Session() as session, session.begin():
selection = select(TransformationRevisionDBModel)
if category is not None:
selection = selection.where(
TransformationRevisionDBModel.category == category
)
if revision_group_id is not None:
selection = selection.where(
TransformationRevisionDBModel.revision_group_id == revision_group_id
)
if type is not None:
selection = selection.where(TransformationRevisionDBModel.type == type)
if state is not None:
selection = selection.where(TransformationRevisionDBModel.state == state)
results = session.execute(selection).all()
return [TransformationRevision.from_orm_model(result[0]) for result in results]
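# For example, fetching every released workflow revision (the enum members are
# assumed from hetdesrun.utils):
#   select_multiple_transformation_revisions(type=Type.WORKFLOW, state=State.RELEASED)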
def get_all_nested_transformation_revisions(
transformation_revision: TransformationRevision,
) -> Dict[UUID, TransformationRevision]:
if transformation_revision.type != Type.WORKFLOW:
msg = (
f"cannot get operators of transformation revision {transformation_revision.id}"
f"because its type is not WORKFLOW"
)
logger.error(msg)
raise DBBadRequestError(msg)
with Session() as session, session.begin():
descendants = find_all_nested_transformation_revisions(
session, transformation_revision.id
)
nested_transformation_revisions: Dict[UUID, TransformationRevision] = {}
for descendant in descendants:
nested_transformation_revisions[descendant.operator_id] = select_tr_by_id(
session, descendant.transformation_id
)
return nested_transformation_revisions
the-stack_106_27806
# -*- coding: UTF-8 -*-
# Copyright (c) 2020 Saint Corp. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Archive extraction utilities
"""
import os
import shutil
import subprocess
import tarfile
from pathlib import Path, PurePosixPath
from _common import (USE_REGISTRY, PlatformEnum, ExtractorEnum, get_logger, get_running_platform)
DEFAULT_EXTRACTORS = {
ExtractorEnum.SEVENZIP: USE_REGISTRY,
ExtractorEnum.TAR: 'tar',
ExtractorEnum.WINRAR: USE_REGISTRY,
}
class ExtractionError(BaseException):
"""Exceptions thrown in this module's methods"""
def _find_7z_by_registry():
"""
Return a string to 7-zip's 7z.exe from the Windows Registry.
Raises ExtractionError if it fails.
"""
import winreg #pylint: disable=import-error
sub_key_7zfm = 'SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\App Paths\\7zFM.exe'
try:
with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, sub_key_7zfm) as key_handle:
sevenzipfm_dir = winreg.QueryValueEx(key_handle, 'Path')[0]
except OSError:
get_logger().exception('Unable to locate 7-zip from the Windows Registry')
raise ExtractionError()
sevenzip_path = Path(sevenzipfm_dir, '7z.exe')
if not sevenzip_path.is_file():
get_logger().error('7z.exe not found at path from registry: %s', sevenzip_path)
return sevenzip_path
def _find_winrar_by_registry():
"""
Return a string to WinRAR's WinRAR.exe from the Windows Registry.
Raises ExtractionError if it fails.
"""
import winreg #pylint: disable=import-error
sub_key_winrar = 'SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\App Paths\\WinRAR.exe'
try:
with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, sub_key_winrar) as key_handle:
winrar_dir = winreg.QueryValueEx(key_handle, 'Path')[0]
except OSError:
get_logger().exception('Unable to locate WinRAR from the Windows Registry')
raise ExtractionError()
winrar_path = Path(winrar_dir, 'WinRAR.exe')
if not winrar_path.is_file():
get_logger().error('WinRAR.exe not found at path from registry: %s', winrar_path)
return winrar_path
def _find_extractor_by_cmd(extractor_cmd):
"""Returns a string path to the binary; None if it couldn't be found"""
if not extractor_cmd:
return None
if Path(extractor_cmd).is_file():
return extractor_cmd
return shutil.which(extractor_cmd)
def _process_relative_to(unpack_root, relative_to):
"""
For an extractor that doesn't support an automatic transform, move the extracted
contents from the relative_to/ directory to the unpack_root
If relative_to is None, nothing is done.
"""
if relative_to is None:
return
relative_root = unpack_root / relative_to
if not relative_root.is_dir():
get_logger().error('Could not find relative_to directory in extracted files: %s',
relative_to)
raise ExtractionError()
for src_path in relative_root.iterdir():
dest_path = unpack_root / src_path.name
src_path.rename(dest_path)
relative_root.rmdir()
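# e.g. with relative_to=Path('foo-1.0') (a placeholder name), files extracted
# under output_dir/foo-1.0/ are moved up into output_dir and the now-empty
# foo-1.0 directory is removed; with relative_to=None nothing is touched.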
def _extract_tar_with_7z(binary, archive_path, output_dir, relative_to):
get_logger().debug('Using 7-zip extractor')
if not relative_to is None and (output_dir / relative_to).exists():
get_logger().error('Temporary unpacking directory already exists: %s',
output_dir / relative_to)
raise ExtractionError()
cmd1 = (binary, 'x', str(archive_path), '-so')
cmd2 = (binary, 'x', '-si', '-aoa', '-ttar', '-o{}'.format(str(output_dir)))
get_logger().debug('7z command line: %s | %s', ' '.join(cmd1), ' '.join(cmd2))
proc1 = subprocess.Popen(cmd1, stdout=subprocess.PIPE)
proc2 = subprocess.Popen(cmd2, stdin=proc1.stdout, stdout=subprocess.PIPE)
proc1.stdout.close()
(stdout_data, stderr_data) = proc2.communicate()
if proc2.returncode != 0:
get_logger().error('7z commands returned non-zero status: %s', proc2.returncode)
get_logger().debug('stdout: %s', stdout_data)
get_logger().debug('stderr: %s', stderr_data)
raise ExtractionError()
_process_relative_to(output_dir, relative_to)
def _extract_tar_with_tar(binary, archive_path, output_dir, relative_to):
get_logger().debug('Using BSD or GNU tar extractor')
output_dir.mkdir(exist_ok=True)
cmd = (binary, '-xf', str(archive_path), '-C', str(output_dir))
get_logger().debug('tar command line: %s', ' '.join(cmd))
result = subprocess.run(cmd)
if result.returncode != 0:
get_logger().error('tar command returned %s', result.returncode)
raise ExtractionError()
# for gnu tar, the --transform option could be used. but to keep compatibility with
# bsdtar on macos, we just do this ourselves
_process_relative_to(output_dir, relative_to)
def _extract_tar_with_winrar(binary, archive_path, output_dir, relative_to):
get_logger().debug('Using WinRAR extractor')
output_dir.mkdir(exist_ok=True)
cmd = (binary, 'x', '-o+', str(archive_path), str(output_dir))
get_logger().debug('WinRAR command line: %s', ' '.join(cmd))
result = subprocess.run(cmd)
if result.returncode != 0:
get_logger().error('WinRAR command returned %s', result.returncode)
raise ExtractionError()
_process_relative_to(output_dir, relative_to)
def _extract_tar_with_python(archive_path, output_dir, relative_to):
get_logger().debug('Using pure Python tar extractor')
class NoAppendList(list):
"""Hack to workaround memory issues with large tar files"""
def append(self, obj):
pass
# Simple hack to check if symlinks are supported
try:
os.symlink('', '')
except FileNotFoundError:
# Symlinks probably supported
symlink_supported = True
except OSError:
# Symlinks probably not supported
get_logger().info('System does not support symlinks. Ignoring them.')
symlink_supported = False
except BaseException:
# Unexpected exception
get_logger().exception('Unexpected exception during symlink support check.')
raise ExtractionError()
with tarfile.open(str(archive_path), 'r|%s' % archive_path.suffix[1:]) as tar_file_obj:
tar_file_obj.members = NoAppendList()
for tarinfo in tar_file_obj:
try:
if relative_to is None:
destination = output_dir / PurePosixPath(tarinfo.name)
else:
destination = output_dir / PurePosixPath(tarinfo.name).relative_to(relative_to)
if tarinfo.issym() and not symlink_supported:
# In this situation, TarFile.makelink() will try to create a copy of the
# target. But this fails because TarFile.members is empty
# But if symlinks are not supported, it's safe to assume that symlinks
# aren't needed. The only situation where this happens is on Windows.
continue
if tarinfo.islnk():
# Derived from TarFile.extract()
new_target = output_dir / PurePosixPath(
tarinfo.linkname).relative_to(relative_to)
tarinfo._link_target = new_target.as_posix() # pylint: disable=protected-access
if destination.is_symlink():
destination.unlink()
tar_file_obj._extract_member(tarinfo, str(destination)) # pylint: disable=protected-access
except BaseException:
get_logger().exception('Exception thrown for tar member: %s', tarinfo.name)
raise ExtractionError()
def extract_tar_file(archive_path, output_dir, relative_to, extractors=None):
"""
Extract regular or compressed tar archive into the output directory.
archive_path is the pathlib.Path to the archive to unpack
output_dir is a pathlib.Path to the directory to unpack. It must already exist.
relative_to is a pathlib.Path for directories that should be stripped relative to the
root of the archive, or None if no path components should be stripped.
extractors is a dictionary of PlatformEnum to a command or path to the
extractor binary. Defaults to 'tar' for tar, and '_use_registry' for 7-Zip and WinRAR.
Raises ExtractionError if unexpected issues arise during unpacking.
"""
if extractors is None:
extractors = DEFAULT_EXTRACTORS
current_platform = get_running_platform()
if current_platform == PlatformEnum.WINDOWS:
# Try to use 7-zip first
sevenzip_cmd = extractors.get(ExtractorEnum.SEVENZIP)
if sevenzip_cmd == USE_REGISTRY:
sevenzip_cmd = str(_find_7z_by_registry())
sevenzip_bin = _find_extractor_by_cmd(sevenzip_cmd)
if sevenzip_bin is not None:
_extract_tar_with_7z(sevenzip_bin, archive_path, output_dir, relative_to)
return
# Use WinRAR if 7-zip is not found
winrar_cmd = extractors.get(ExtractorEnum.WINRAR)
if winrar_cmd == USE_REGISTRY:
winrar_cmd = str(_find_winrar_by_registry())
winrar_bin = _find_extractor_by_cmd(winrar_cmd)
if winrar_bin is not None:
_extract_tar_with_winrar(winrar_bin, archive_path, output_dir, relative_to)
return
get_logger().warning(
'Neither 7-zip nor WinRAR were found. Falling back to Python extractor...')
elif current_platform == PlatformEnum.UNIX:
# NOTE: 7-zip isn't an option because it doesn't preserve file permissions
tar_bin = _find_extractor_by_cmd(extractors.get(ExtractorEnum.TAR))
if not tar_bin is None:
_extract_tar_with_tar(tar_bin, archive_path, output_dir, relative_to)
return
else:
# This is not a normal code path, so make it clear.
raise NotImplementedError(current_platform)
# Fallback to Python-based extractor on all platforms
_extract_tar_with_python(archive_path, output_dir, relative_to)
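# Hypothetical usage (paths and archive layout are placeholders): unpack
# foo-1.0.tar.xz into the already-existing build/src directory while stripping
# the leading foo-1.0/ component, using the default extractor lookup:
#   extract_tar_file(Path('downloads/foo-1.0.tar.xz'), Path('build/src'),
#                    Path('foo-1.0'))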
def extract_with_7z(
archive_path,
output_dir,
relative_to, #pylint: disable=too-many-arguments
extractors=None):
"""
Extract archives with 7-zip into the output directory.
Only supports archives with one layer of unpacking, so compressed tar archives don't work.
archive_path is the pathlib.Path to the archive to unpack
output_dir is a pathlib.Path to the directory to unpack. It must already exist.
relative_to is a pathlib.Path for directories that should be stripped relative to the
root of the archive.
extractors is a dictionary of PlatformEnum to a command or path to the
extractor binary. Defaults to 'tar' for tar, and '_use_registry' for 7-Zip.
Raises ExtractionError if unexpected issues arise during unpacking.
"""
# TODO: It would be nice to extend this to support arbitrary standard IO chaining of 7z
# instances, so _extract_tar_with_7z and other future formats could use this.
if extractors is None:
extractors = DEFAULT_EXTRACTORS
sevenzip_cmd = extractors.get(ExtractorEnum.SEVENZIP)
if sevenzip_cmd == USE_REGISTRY:
if not get_running_platform() == PlatformEnum.WINDOWS:
get_logger().error('"%s" for 7-zip is only available on Windows', sevenzip_cmd)
raise ExtractionError()
sevenzip_cmd = str(_find_7z_by_registry())
sevenzip_bin = _find_extractor_by_cmd(sevenzip_cmd)
if not relative_to is None and (output_dir / relative_to).exists():
get_logger().error('Temporary unpacking directory already exists: %s',
output_dir / relative_to)
raise ExtractionError()
cmd = (sevenzip_bin, 'x', str(archive_path), '-aoa', '-o{}'.format(str(output_dir)))
get_logger().debug('7z command line: %s', ' '.join(cmd))
result = subprocess.run(cmd)
if result.returncode != 0:
get_logger().error('7z command returned %s', result.returncode)
raise ExtractionError()
_process_relative_to(output_dir, relative_to)
def extract_with_winrar(
archive_path,
output_dir,
relative_to, #pylint: disable=too-many-arguments
extractors=None):
"""
Extract archives with WinRAR into the output directory.
Only supports archives with one layer of unpacking, so compressed tar archives don't work.
archive_path is the pathlib.Path to the archive to unpack
output_dir is a pathlib.Path to the directory to unpack. It must already exist.
relative_to is a pathlib.Path for directories that should be stripped relative to the
root of the archive.
extractors is a dictionary of PlatformEnum to a command or path to the
extractor binary. Defaults to 'tar' for tar, and '_use_registry' for WinRAR.
Raises ExtractionError if unexpected issues arise during unpacking.
"""
if extractors is None:
extractors = DEFAULT_EXTRACTORS
winrar_cmd = extractors.get(ExtractorEnum.WINRAR)
if winrar_cmd == USE_REGISTRY:
        if get_running_platform() != PlatformEnum.WINDOWS:
get_logger().error('"%s" for WinRAR is only available on Windows', winrar_cmd)
raise ExtractionError()
winrar_cmd = str(_find_winrar_by_registry())
winrar_bin = _find_extractor_by_cmd(winrar_cmd)
    if relative_to is not None and (output_dir / relative_to).exists():
get_logger().error('Temporary unpacking directory already exists: %s',
output_dir / relative_to)
raise ExtractionError()
cmd = (winrar_bin, 'x', '-o+', str(archive_path), str(output_dir))
get_logger().debug('WinRAR command line: %s', ' '.join(cmd))
result = subprocess.run(cmd)
if result.returncode != 0:
get_logger().error('WinRAR command returned %s', result.returncode)
raise ExtractionError()
_process_relative_to(output_dir, relative_to)
|
the-stack_106_27809 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, Awaitable, Optional, TYPE_CHECKING
from azure.core import AsyncPipelineClient
from azure.core.rest import AsyncHttpResponse, HttpRequest
from msrest import Deserializer, Serializer
from ._configuration import AutoRestParameterGroupingTestServiceConfiguration
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Dict
class AutoRestParameterGroupingTestService:
"""Test Infrastructure for AutoRest.
:keyword endpoint: Service URL. Default value is 'http://localhost:3000'.
:paramtype endpoint: str
"""
def __init__(self, *, endpoint: str = "http://localhost:3000", **kwargs: Any) -> None:
self._config = AutoRestParameterGroupingTestServiceConfiguration(**kwargs)
self._client = AsyncPipelineClient(base_url=endpoint, config=self._config, **kwargs)
self._serialize = Serializer()
self._deserialize = Deserializer()
self._serialize.client_side_validation = False
def send_request(self, request: HttpRequest, **kwargs: Any) -> Awaitable[AsyncHttpResponse]:
"""Runs the network request through the client's chained policies.
We have helper methods to create requests specific to this service in `azureparametergroupinglowlevel.rest`.
Use these helper methods to create the request you pass to this method.
>>> from azureparametergroupinglowlevel.rest import parameter_grouping
>>> request = parameter_grouping.build_post_required_request(path, json=json, content=content, custom_header=custom_header, query=query, **kwargs)
<HttpRequest [POST], url: '/parameterGrouping/postRequired/{path}'>
>>> response = await client.send_request(request)
<AsyncHttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.AsyncHttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "AutoRestParameterGroupingTestService":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
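# Illustrative usage sketch (editor's addition, not part of the generated client).
# The request builder mirrors the docstring above; the path/json/query values are made up.
async def _send_request_example():
    from azureparametergroupinglowlevel.rest import parameter_grouping
    async with AutoRestParameterGroupingTestService() as client:
        request = parameter_grouping.build_post_required_request(
            "example", json={"body": 1234}, custom_header=None, query=30)
        response = await client.send_request(request)
        print(response.status_code)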
|
the-stack_106_27810 | import os
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
import time
tfk = tf.keras
tfkl = tf.keras.layers
tfpl = tfp.layers
tfd = tfp.distributions
tfb = tfp.bijectors
from softlearning.models.convnet import convnet_model
from softlearning.models.feedforward import feedforward_model
from softlearning.utils.times import timestamp
CURR_PATH = os.path.dirname(os.path.abspath(__file__))
URDF = {
"locobot": os.path.join(CURR_PATH, 'urdf/locobot_description.urdf'),
"locobot_dual_cam": os.path.join(CURR_PATH, 'urdf/locobot_description_dual_cam.urdf'),
"miniblock": os.path.join(CURR_PATH, 'urdf/miniblock.urdf'),
"greenbox": os.path.join(CURR_PATH, 'urdf/greenbox.urdf'),
"redbox": os.path.join(CURR_PATH, 'urdf/redbox.urdf'),
"largerminiblock": os.path.join(CURR_PATH, 'urdf/largerminiblock.urdf'),
"greenball": os.path.join(CURR_PATH, 'urdf/greenball.urdf'),
"greensquareball": os.path.join(CURR_PATH, 'urdf/greensquareball_v2.urdf'),
"bluesquareball": os.path.join(CURR_PATH, 'urdf/bluesquareball_v2.urdf'),
"yellowsquareball": os.path.join(CURR_PATH, 'urdf/yellowsquareball_v2.urdf'),
"orangesquareball": os.path.join(CURR_PATH, 'urdf/orangesquareball_v2.urdf'),
"whitesquareball": os.path.join(CURR_PATH, 'urdf/whitesquareball_v2.urdf'),
"blacksquareball": os.path.join(CURR_PATH, 'urdf/blacksquareball_v2.urdf'),
"greensquareball_large": os.path.join(CURR_PATH, 'urdf/greensquareball_large.urdf'),
"walls": os.path.join(CURR_PATH, 'urdf/walls.urdf'),
"plane": os.path.join(CURR_PATH, 'urdf/plane.urdf'),
"rectangular_pillar": os.path.join(CURR_PATH, 'urdf/rectangular_pillar.urdf'),
"solid_box": os.path.join(CURR_PATH, 'urdf/solid_box.urdf'),
"walls_2": os.path.join(CURR_PATH, 'urdf/medium_room/walls.urdf'),
"textured_box": os.path.join(CURR_PATH, 'urdf/medium_room/box.urdf'),
"floor": os.path.join(CURR_PATH, 'urdf/simple_texture_room/floor.urdf'),
"wall_single": os.path.join(CURR_PATH, 'urdf/double_room/wall_single.urdf'),
"wall_single_thin": os.path.join(CURR_PATH, 'urdf/double_room/wall_single_thin.urdf'),
"floor_patch": os.path.join(CURR_PATH, 'urdf/double_room/floor_patch.urdf'),
"cylinder_grey": os.path.join(CURR_PATH, 'urdf/double_room/cylinder_grey.urdf'),
"cylinder_black": os.path.join(CURR_PATH, 'urdf/double_room/cylinder_black.urdf'),
"box_grey": os.path.join(CURR_PATH, 'urdf/double_room/box_grey.urdf'),
"box_dark_grey": os.path.join(CURR_PATH, 'urdf/double_room/box_dark_grey.urdf'),
}
TEXTURE = {
"wood": os.path.join(CURR_PATH, 'urdf/medium_room/wood2.png'),
"floor": os.path.join(CURR_PATH, 'urdf/simple_texture_room/floor.png'),
"floor2": os.path.join(CURR_PATH, 'urdf/simple_texture_room/floor2.png'),
"testfloor": os.path.join(CURR_PATH, 'urdf/simple_texture_room/testfloor.png'),
"bluerugs": os.path.join(CURR_PATH, 'urdf/simple_texture_room/bluerugs.png'),
"wall": os.path.join(CURR_PATH, 'urdf/medium_room/wall1.png'),
"marble": os.path.join(CURR_PATH, 'urdf/medium_room/marble.png'),
"crate": os.path.join(CURR_PATH, 'urdf/medium_room/crate.png'),
"navy": os.path.join(CURR_PATH, 'urdf/medium_room/navy_cloth.png'),
"red": os.path.join(CURR_PATH, 'urdf/medium_room/red_cloth.png'),
"floor_carpet_1": os.path.join(CURR_PATH, 'urdf/double_room/floor_carpet_1.png'),
"floor_carpet_2": os.path.join(CURR_PATH, 'urdf/double_room/floor_carpet_2.png'),
"floor_carpet_3": os.path.join(CURR_PATH, 'urdf/double_room/floor_carpet_3.png'),
"floor_carpet_4": os.path.join(CURR_PATH, 'urdf/double_room/floor_carpet_4.png'),
"floor_carpet_5": os.path.join(CURR_PATH, 'urdf/double_room/floor_carpet_5.png'),
"floor_carpet_6": os.path.join(CURR_PATH, 'urdf/double_room/floor_carpet_6.png'),
"floor_carpet_7": os.path.join(CURR_PATH, 'urdf/double_room/floor_carpet_7.png'),
"floor_marble_1": os.path.join(CURR_PATH, 'urdf/double_room/floor_marble_1.png'),
"floor_marble_2": os.path.join(CURR_PATH, 'urdf/double_room/floor_marble_2.png'),
"floor_marble_3": os.path.join(CURR_PATH, 'urdf/double_room/floor_marble_3.png'),
"floor_wood_1": os.path.join(CURR_PATH, 'urdf/double_room/floor_wood_1.png'),
"floor_wood_2": os.path.join(CURR_PATH, 'urdf/double_room/floor_wood_2.png'),
"floor_wood_3": os.path.join(CURR_PATH, 'urdf/double_room/floor_wood_3.png'),
}
def is_in_rect(x, y, min_x, min_y, max_x, max_y):
return min_x < x < max_x and min_y < y < max_y
def is_in_circle(x, y, center_x, center_y, radius):
return (x - center_x) ** 2 + (y - center_y) ** 2 < radius ** 2
def dprint(*args, **kwargs):
# print(timestamp(), *args, **kwargs)
return
class Discretizer:
def __init__(self, sizes, mins, maxs):
self._sizes = np.array(sizes)
self._mins = np.array(mins)
self._maxs = np.array(maxs)
self._step_sizes = (self._maxs - self._mins) / self._sizes
@property
def dimensions(self):
return self._sizes
def discretize(self, action):
centered = action - self._mins
indices = np.floor_divide(centered, self._step_sizes)
        clipped = np.clip(indices, 0, self._sizes - 1)  # valid bin indices run from 0 to size - 1
return clipped
def undiscretize(self, action):
return action * self._step_sizes + self._mins + self._step_sizes * 0.5
def flatten(self, action):
return np.ravel_multi_index(action, self._sizes, order='C')
def unflatten(self, index):
return np.array(np.unravel_index(index, self._sizes, order='C')).squeeze()
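# Illustrative usage sketch (editor's addition): round-tripping a continuous 2-D
# action through the Discretizer above. The bin counts and bounds are made up.
def _discretizer_example():
    disc = Discretizer(sizes=[15, 31], mins=[-1.0, -1.0], maxs=[1.0, 1.0])
    action = np.array([0.3, -0.5])
    idx = disc.discretize(action).astype(np.int64)      # per-dimension bin indices
    flat = disc.flatten(idx)                            # single index in [0, 15 * 31)
    restored = disc.undiscretize(disc.unflatten(flat))  # bin centers approximating the action
    return flat, restored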
def build_image_discrete_policy(
image_size=100,
discrete_hidden_layers=(512, 512),
discrete_dimension=15 * 31
):
obs_in = tfk.Input((image_size, image_size, 3))
conv_out = convnet_model(
conv_filters=(64, 64, 64),
conv_kernel_sizes=(3, 3, 3),
conv_strides=(2, 2, 2),
activation="relu",
)(obs_in)
logits_out = feedforward_model(
discrete_hidden_layers,
[discrete_dimension],
activation='relu',
output_activation='linear',
)(conv_out)
logits_model = tfk.Model(obs_in, logits_out)
def deterministic_model(obs):
logits = logits_model(obs)
inds = tf.argmax(logits, axis=-1)
return inds
return logits_model, deterministic_model
def build_discrete_Q_model(
image_size=100,
discrete_hidden_layers=(512, 512),
discrete_dimension=15 * 31
):
obs_in = tfk.Input((image_size, image_size, 3))
conv_out = convnet_model(
conv_filters=(64, 64, 64),
conv_kernel_sizes=(3, 3, 3),
conv_strides=(2, 2, 2),
activation="relu",
downsampling_type="conv",
# kernel_regularizer=tfk.regularizers.l2(l=0.1),
)(obs_in)
logits_out = feedforward_model(
discrete_hidden_layers,
[discrete_dimension],
activation='relu',
output_activation='linear',
# kernel_regularizer=tfk.regularizers.l2(l=0.1),
)(conv_out)
logits_model = tfk.Model(obs_in, logits_out)
return logits_model
def create_train_discrete_Q_sigmoid(logits_model, optimizer, discrete_dimension):
@tf.function(experimental_relax_shapes=True)
def train(data):
observations = data['observations']
rewards = tf.cast(data['rewards'], tf.float32)
actions_discrete = data['actions']
actions_onehot = tf.one_hot(actions_discrete[:, 0], depth=discrete_dimension)
with tf.GradientTape() as tape:
logits = logits_model(observations)
taken_logits = tf.reduce_sum(logits * actions_onehot, axis=-1, keepdims=True)
losses = tf.nn.sigmoid_cross_entropy_with_logits(labels=rewards, logits=taken_logits)
loss = tf.nn.compute_average_loss(losses)
grads = tape.gradient(loss, logits_model.trainable_variables)
optimizer.apply_gradients(zip(grads, logits_model.trainable_variables))
return loss
return train
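# Illustrative training-step sketch (editor's addition). The batch below is random
# placeholder data; a real caller would draw it from the ReplayBuffer defined further down.
def _train_discrete_Q_example():
    logits_model = build_discrete_Q_model(image_size=100, discrete_dimension=15 * 31)
    optimizer = tfk.optimizers.Adam(learning_rate=3e-4)
    train_step = create_train_discrete_Q_sigmoid(logits_model, optimizer, 15 * 31)
    batch = {
        'observations': tf.random.uniform((8, 100, 100, 3)),
        'actions': tf.zeros((8, 1), dtype=tf.int32),
        'rewards': tf.ones((8, 1), dtype=tf.float32),
    }
    return train_step(batch)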
GRASP_MODEL = {
"alpha10min_6Q_stat_stat": os.path.join(CURR_PATH, 'grasp_models/alpha10min_6Q_stat_stat'),
"sock_8500": os.path.join(CURR_PATH, 'grasp_models/sock_8500'),
"n500": os.path.join(CURR_PATH, 'grasp_models/n500'),
"n1000": os.path.join(CURR_PATH, 'grasp_models/n1000'),
"n1250": os.path.join(CURR_PATH, 'grasp_models/n1250'),
"n1500": os.path.join(CURR_PATH, 'grasp_models/n1500'),
"n2000": os.path.join(CURR_PATH, 'grasp_models/n2000'),
"n4000": os.path.join(CURR_PATH, 'grasp_models/n4000'),
}
GRASP_DATA = {
"grasp_s500_f500": os.path.join(CURR_PATH, 'grasp_data/grasp_s500_f500.npy'),
"real_s1000_f1000": os.path.join(CURR_PATH, 'grasp_data/real_s1000_f1000.npy'),
"n500": os.path.join(CURR_PATH, 'grasp_data/n500.npy'),
"n1000": os.path.join(CURR_PATH, 'grasp_data/n1000.npy'),
"n1250": os.path.join(CURR_PATH, 'grasp_data/n1250.npy'),
"n1500": os.path.join(CURR_PATH, 'grasp_data/n1500.npy'),
"n2000": os.path.join(CURR_PATH, 'grasp_data/n2000.npy'),
"n4000": os.path.join(CURR_PATH, 'grasp_data/n4000.npy'),
}
class ReplayBuffer:
""" Poor man's replay buffer. """
def __init__(self, size, observation_shape, action_dim, observation_dtype=np.uint8, action_dtype=np.int32):
self._size = size
self._observations = np.zeros((size,) + observation_shape, dtype=observation_dtype)
self._actions = np.zeros((size, action_dim), dtype=action_dtype)
self._rewards = np.zeros((size, 1), dtype=np.float32)
self._num = 0
self._index = 0
@property
def num_samples(self):
return self._num
def store_sample(self, observation, action, reward):
self._observations[self._index] = observation
self._actions[self._index] = action
self._rewards[self._index] = reward
self._num = min(self._num + 1, self._size)
self._index = (self._index + 1) % self._size
def get_all_samples(self):
data = {
'observations': self._observations[:self._num],
'actions': self._actions[:self._num],
'rewards': self._rewards[:self._num],
}
return data
def get_all_samples_in_batch(self, batch_size):
datas = []
for i in range(0, (self._num // batch_size) * batch_size, batch_size):
data = {
'observations': self._observations[i:i+batch_size],
'actions': self._actions[i:i+batch_size],
'rewards': self._rewards[i:i+batch_size],
}
datas.append(data)
if self._num % batch_size != 0:
datas.append(self.sample_batch(batch_size))
return datas
def get_all_samples_in_batch_random(self, batch_size):
inds = np.concatenate([np.arange(self._num), np.arange((batch_size - self._num % batch_size) % batch_size)])
np.random.shuffle(inds)
observations = self._observations[inds]
actions = self._actions[inds]
rewards = self._rewards[inds]
datas = []
for i in range(0, self._num, batch_size):
data = {
'observations': observations[i:i+batch_size],
'actions': actions[i:i+batch_size],
'rewards': rewards[i:i+batch_size],
}
datas.append(data)
return datas
def get_all_success_in_batch_random(self, batch_size):
successes = (self._rewards == 1)[:, 0]
observations = self._observations[successes]
actions = self._actions[successes]
rewards = self._rewards[successes]
num_success = len(observations)
inds = np.concatenate([np.arange(num_success), np.arange((batch_size - num_success % batch_size) % batch_size)])
np.random.shuffle(inds)
observations = observations[inds]
actions = actions[inds]
rewards = rewards[inds]
datas = []
for i in range(0, num_success, batch_size):
data = {
'observations': observations[i:i+batch_size],
'actions': actions[i:i+batch_size],
'rewards': rewards[i:i+batch_size],
}
datas.append(data)
return datas
def sample_batch(self, batch_size):
inds = np.random.randint(0, self._num, size=(batch_size,))
data = {
'observations': self._observations[inds],
'actions': self._actions[inds],
'rewards': self._rewards[inds],
}
return data
def save(self, folder_path, file_name='replaybuffer'):
os.makedirs(folder_path, exist_ok=True)
np.save(os.path.join(folder_path, file_name), self.get_all_samples())
def load(self, path):
data = np.load(path, allow_pickle=True)[()]
self._num = min(data['observations'].shape[0], self._size)
self._index = self._num % self._size
self._observations[:self._num] = data['observations'][:self._num]
self._actions[:self._num] = data['actions'][:self._num]
self._rewards[:self._num] = data['rewards'][:self._num]
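# Illustrative usage sketch (editor's addition): storing a few fake grasp samples
# and drawing a training batch. Shapes mirror the 100x100 RGB observations used above.
def _replay_buffer_example():
    buffer = ReplayBuffer(size=1000, observation_shape=(100, 100, 3), action_dim=1)
    for _ in range(10):
        obs = np.random.randint(0, 256, size=(100, 100, 3), dtype=np.uint8)
        action = np.random.randint(0, 15 * 31)
        reward = float(np.random.rand() < 0.5)
        buffer.store_sample(obs, action, reward)
    return buffer.sample_batch(batch_size=4)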
class Timer:
""" A timer... """
def __init__(self):
self.total_elapsed_seconds = 0.0
self.started = False
def start(self):
if not self.started:
self.start_time = time.perf_counter()
self.started = True
def end(self):
if self.started:
elapsed = time.perf_counter() - self.start_time
self.total_elapsed_seconds += elapsed
self.started = False
@property
def total_elapsed_time(self):
        return self.total_elapsed_seconds
|
the-stack_106_27811 | import collections
def calcEquation(self, equations, values, queries):
# Step 1: Build the graph
graph = collections.defaultdict(dict)
    # defaultdict: a missing key returns an empty dict instead of raising a KeyError, unlike a normal dict
for (num, denom), product in zip(equations, values):
# zip(equations, values) = [([a, b], 2.0), ([b, c], 3.0)]
graph[num][denom] = product
        # graph[x][y] means x points to y, e.g. {'a': {'b': 2.0}} stores the value of a / b.
        # Store the forward edge (num -> denom) with the quotient, and the reverse edge with its reciprocal.
graph[denom][num] = 1.0 / product
def dfs(numerator, denominator, visited):
if numerator not in graph or denominator not in graph:
return -1.0
if denominator in graph[numerator]:
return graph[numerator][denominator]
        # Recall that the dict is set up so the numerator points to the denominator; if the
        # denominator is a key under the numerator we can return that value directly.
        # e.g. {'a': {'b': 2.0}}: for the query a/b we ask "is b under a?" - it is, so we return 2.0.
for i in graph[numerator]:
if i not in visited:
visited.add(i)
temp = dfs(i, denominator, visited)
if temp == -1:
continue
else:
return graph[numerator][i] * temp
return -1
result_list = []
for numerator, denominator in queries:
result_list.append(dfs(numerator, denominator, set()))
# Pass the numerator and denominator of the query division to dfs where we will find the product.
    return result_list
|
the-stack_106_27812 | from machinetranslation import translator
from flask import Flask, render_template, request
import json
import machinetranslation
app = Flask("Web Translator")
@app.route("/englishToFrench")
def englishToFrench():
textToTranslate = request.args.get('textToTranslate')
translatedText = machinetranslation.translator.english_to_french(textToTranslate)
return translatedText
@app.route("/frenchToEnglish")
def frenchToEnglish():
textToTranslate = request.args.get('textToTranslate')
translatedText = machinetranslation.translator.french_to_english(textToTranslate)
return translatedText
@app.route("/")
def renderIndexPage():
return render_template('index.html')
if __name__ == "__main__":
app.run(host="0.0.0.0", port=8080)
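# Illustrative client calls (editor's addition), assuming the server above is
# running locally on port 8080:
#
#   curl "http://localhost:8080/englishToFrench?textToTranslate=Hello"
#   curl "http://localhost:8080/frenchToEnglish?textToTranslate=Bonjour"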
|
the-stack_106_27813 | class ControllerView:
def __init__(self):
pass
def final_result(self, name, sum_result):
if sum_result > 21:
print(f"O Player {name} perdeu a partida!")
elif sum_result == 21:
print(f"O Player {name} venceu a partida!")
else:
print(f"O Player {name} continua no jogo!")
def remove_winning_players(self, list_result_players):
list_winning_players = []
for player in list_result_players:
if player[1] == 21:
list_winning_players.append(player)
return list_winning_players
def remove_losing_players(self, list_result_players):
list_losing_players = []
for player in list_result_players:
if player[1] > 21:
list_losing_players.append(player)
return list_losing_players
def remove_players_finally(self, list_players, list_losing_player, list_winning_players):
for player_losing in list_losing_player:
for player in list_players:
if player_losing[0] == player.name:
list_players.remove(player)
for player_winning in list_winning_players:
for player in list_players:
if player_winning[0] == player.name:
list_players.remove(player)
return list_players
def check_last_player(self, list_players, list_winning_players):
if len(list_players) == 1:
for player in list_players:
list_winning_players.append([player.name, player.sum_result])
list_players.remove(player)
|
the-stack_106_27814 | """
Some codes from https://github.com/Newmu/dcgan_code
"""
from __future__ import division
import math
import json
import random
import pprint
import scipy.misc
import numpy as np
from time import gmtime, strftime
import tensorflow as tf
import tensorflow.contrib.slim as slim
pp = pprint.PrettyPrinter()
def get_stddev(x, k_h, k_w): return 1/math.sqrt(k_w*k_h*x.get_shape()[-1])
def show_all_variables():
model_vars = tf.trainable_variables()
slim.model_analyzer.analyze_vars(model_vars, print_info=True)
def get_image(image_path, input_height, input_width,
resize_height=64, resize_width=64,
crop=True, grayscale=False):
image = imread(image_path, grayscale)
return transform(image, input_height, input_width,
resize_height, resize_width, crop)
def save_images(images, size, image_path):
return imsave(inverse_transform(images), size, image_path)
def imread(path, grayscale=False):
if (grayscale):
return scipy.misc.imread(path, flatten=True).astype(np.float)
else:
return scipy.misc.imread(path).astype(np.float)
def merge_images(images, size):
return inverse_transform(images)
def merge(images, size):
h, w = images.shape[1], images.shape[2]
if (images.shape[3] in (3, 4)):
c = images.shape[3]
img = np.zeros((h * size[0], w * size[1], c))
for idx, image in enumerate(images):
i = idx % size[1]
j = idx // size[1]
img[j * h:j * h + h, i * w:i * w + w, :] = image
return img
elif images.shape[3] == 1:
img = np.zeros((h * size[0], w * size[1]))
for idx, image in enumerate(images):
i = idx % size[1]
j = idx // size[1]
img[j * h:j * h + h, i * w:i * w + w] = image[:, :, 0]
return img
else:
raise ValueError('in merge(images,size) images parameter '
'must have dimensions: HxW or HxWx3 or HxWx4')
def imsave(images, size, path):
image = np.squeeze(merge(images, size))
return scipy.misc.imsave(path, image)
def center_crop(x, crop_h, crop_w,
resize_h=64, resize_w=64):
if crop_w is None:
crop_w = crop_h
h, w = x.shape[:2]
j = int(round((h - crop_h)/2.))
i = int(round((w - crop_w)/2.))
return scipy.misc.imresize(
x[j:j+crop_h, i:i+crop_w], [resize_h, resize_w])
def transform(image, input_height, input_width,
resize_height=64, resize_width=64, crop=True):
if crop:
cropped_image = center_crop(
image, input_height, input_width,
resize_height, resize_width)
else:
cropped_image = scipy.misc.imresize(
image, [resize_height, resize_width])
return np.array(cropped_image)/127.5 - 1.
def inverse_transform(images):
return (images+1.)/2.
def to_json(output_path, *layers):
with open(output_path, "w") as layer_f:
lines = ""
for w, b, bn in layers:
layer_idx = w.name.split('/')[0].split('h')[1]
B = b.eval()
if "lin/" in w.name:
W = w.eval()
depth = W.shape[1]
else:
W = np.rollaxis(w.eval(), 2, 0)
depth = W.shape[0]
biases = {"sy": 1, "sx": 1, "depth": depth,
"w": ['%.2f' % elem for elem in list(B)]}
            if bn is not None:
gamma = bn.gamma.eval()
beta = bn.beta.eval()
gamma = {"sy": 1, "sx": 1, "depth": depth, "w": [
'%.2f' % elem for elem in list(gamma)]}
beta = {"sy": 1, "sx": 1, "depth": depth, "w": [
'%.2f' % elem for elem in list(beta)]}
else:
gamma = {"sy": 1, "sx": 1, "depth": 0, "w": []}
beta = {"sy": 1, "sx": 1, "depth": 0, "w": []}
if "lin/" in w.name:
fs = []
for w in W.T:
fs.append({"sy": 1, "sx": 1, "depth": W.shape[0], "w": [
'%.2f' % elem for elem in list(w)]})
lines += """
var layer_%s = {
"layer_type": "fc",
"sy": 1, "sx": 1,
"out_sx": 1, "out_sy": 1,
"stride": 1, "pad": 0,
"out_depth": %s, "in_depth": %s,
"biases": %s,
"gamma": %s,
"beta": %s,
"filters": %s
};""" % (layer_idx.split('_')[0], W.shape[1], W.shape[0], biases, gamma, beta, fs)
else:
fs = []
for w_ in W:
fs.append({"sy": 5, "sx": 5, "depth": W.shape[3], "w": [
'%.2f' % elem for elem in list(w_.flatten())]})
lines += """
var layer_%s = {
"layer_type": "deconv",
"sy": 5, "sx": 5,
"out_sx": %s, "out_sy": %s,
"stride": 2, "pad": 1,
"out_depth": %s, "in_depth": %s,
"biases": %s,
"gamma": %s,
"beta": %s,
"filters": %s
};""" % (layer_idx, 2**(int(layer_idx)+2), 2**(int(layer_idx)+2),
W.shape[0], W.shape[3], biases, gamma, beta, fs)
layer_f.write(" ".join(lines.replace("'", "").split()))
def make_gif(images, fname, duration=2, true_image=False):
import moviepy.editor as mpy
def make_frame(t):
try:
x = images[int(len(images)/duration*t)]
except:
x = images[-1]
if true_image:
return x.astype(np.uint8)
else:
return ((x+1)/2*255).astype(np.uint8)
clip = mpy.VideoClip(make_frame, duration=duration)
clip.write_gif(fname, fps=len(images) / duration)
def visualize(sess, dcgan, config, option):
image_frame_dim = int(math.ceil(config.batch_size**.5))
if option == 0:
z_sample = np.random.uniform(-0.5, 0.5,
size=(config.batch_size, dcgan.z_dim))
samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
save_images(samples, [image_frame_dim, image_frame_dim],
'./samples/test_%s.png' % strftime("%Y-%m-%d-%H-%M-%S", gmtime()))
elif option == 1:
values = np.arange(0, 1, 1./config.batch_size)
for idx in range(dcgan.z_dim):
print(" [*] %d" % idx)
z_sample = np.random.uniform(-1, 1,
size=(config.batch_size, dcgan.z_dim))
for kdx, z in enumerate(z_sample):
z[idx] = values[kdx]
samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
save_images(samples, [image_frame_dim, image_frame_dim],
'./samples/test_arange_%s.png' % (idx))
elif option == 2:
values = np.arange(0, 1, 1./config.batch_size)
for idx in [random.randint(0, dcgan.z_dim - 1) for _ in range(dcgan.z_dim)]:
print(" [*] %d" % idx)
z = np.random.uniform(-0.2, 0.2, size=(dcgan.z_dim))
z_sample = np.tile(z, (config.batch_size, 1))
#z_sample = np.zeros([config.batch_size, dcgan.z_dim])
for kdx, z in enumerate(z_sample):
z[idx] = values[kdx]
samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
try:
make_gif(samples, './samples/test_gif_%s.gif' % (idx))
except:
save_images(samples, [image_frame_dim, image_frame_dim],
'./samples/test_%s.png' % strftime("%Y-%m-%d-%H-%M-%S", gmtime()))
elif option == 3:
values = np.arange(0, 1, 1./config.batch_size)
for idx in range(dcgan.z_dim):
print(" [*] %d" % idx)
z_sample = np.zeros([config.batch_size, dcgan.z_dim])
for kdx, z in enumerate(z_sample):
z[idx] = values[kdx]
samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
make_gif(samples, './samples/test_gif_%s.gif' % (idx))
elif option == 4:
image_set = []
values = np.arange(0, 1, 1./config.batch_size)
for idx in range(dcgan.z_dim):
print(" [*] %d" % idx)
z_sample = np.zeros([config.batch_size, dcgan.z_dim])
for kdx, z in enumerate(z_sample):
z[idx] = values[kdx]
image_set.append(
sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample}))
make_gif(image_set[-1], './samples/test_gif_%s.gif' % (idx))
new_image_set = [merge(np.array([images[idx] for images in image_set]), [10, 10])
                         for idx in list(range(64)) + list(range(63, -1, -1))]
make_gif(new_image_set, './samples/test_gif_merged.gif', duration=8)
def image_manifold_size(num_images):
manifold_h = int(np.floor(np.sqrt(num_images)))
manifold_w = int(np.ceil(np.sqrt(num_images)))
assert manifold_h * manifold_w == num_images
return manifold_h, manifold_w
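# Illustrative usage sketch (editor's addition): tiling a batch of samples into a
# single PNG. Random noise stands in for generator output in [-1, 1], and the
# ./samples directory is assumed to exist.
def _save_samples_example():
    samples = np.random.uniform(-1., 1., size=(64, 32, 32, 3))
    save_images(samples, image_manifold_size(samples.shape[0]), './samples/demo.png')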
|
the-stack_106_27816 | # -*- coding: utf-8 -*-
"""
werkzeug.wsgi
~~~~~~~~~~~~~
This module implements WSGI related helpers.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import io
try:
import httplib
except ImportError:
from http import client as httplib
import mimetypes
import os
import posixpath
import re
import socket
from datetime import datetime
from functools import partial, update_wrapper
from itertools import chain
from time import mktime, time
from zlib import adler32
from werkzeug._compat import BytesIO, PY2, implements_iterator, iteritems, \
make_literal_wrapper, string_types, text_type, to_bytes, to_unicode, \
try_coerce_native, wsgi_get_bytes
from werkzeug._internal import _encode_idna
from werkzeug.filesystem import get_filesystem_encoding
from werkzeug.http import http_date, is_resource_modified, \
is_hop_by_hop_header
from werkzeug.urls import uri_to_iri, url_join, url_parse, url_quote
from werkzeug.datastructures import EnvironHeaders
def responder(f):
"""Marks a function as responder. Decorate a function with it and it
will automatically call the return value as WSGI application.
Example::
@responder
def application(environ, start_response):
return Response('Hello World!')
"""
return update_wrapper(lambda *a: f(*a)(*a[-2:]), f)
def get_current_url(environ, root_only=False, strip_querystring=False,
host_only=False, trusted_hosts=None):
"""A handy helper function that recreates the full URL as IRI for the
current request or parts of it. Here's an example:
>>> from werkzeug.test import create_environ
>>> env = create_environ("/?param=foo", "http://localhost/script")
>>> get_current_url(env)
'http://localhost/script/?param=foo'
>>> get_current_url(env, root_only=True)
'http://localhost/script/'
>>> get_current_url(env, host_only=True)
'http://localhost/'
>>> get_current_url(env, strip_querystring=True)
'http://localhost/script/'
    Optionally, this verifies that the host is in a list of trusted hosts.
If the host is not in there it will raise a
:exc:`~werkzeug.exceptions.SecurityError`.
Note that the string returned might contain unicode characters as the
    representation is an IRI, not a URI. If you need an ASCII-only
representation you can use the :func:`~werkzeug.urls.iri_to_uri`
function:
>>> from werkzeug.urls import iri_to_uri
>>> iri_to_uri(get_current_url(env))
'http://localhost/script/?param=foo'
:param environ: the WSGI environment to get the current URL from.
:param root_only: set `True` if you only want the root URL.
:param strip_querystring: set to `True` if you don't want the querystring.
:param host_only: set to `True` if the host URL should be returned.
:param trusted_hosts: a list of trusted hosts, see :func:`host_is_trusted`
for more information.
"""
tmp = [environ['wsgi.url_scheme'], '://', get_host(environ, trusted_hosts)]
cat = tmp.append
if host_only:
return uri_to_iri(''.join(tmp) + '/')
cat(url_quote(wsgi_get_bytes(environ.get('SCRIPT_NAME', ''))).rstrip('/'))
cat('/')
if not root_only:
cat(url_quote(wsgi_get_bytes(environ.get('PATH_INFO', '')).lstrip(b'/')))
if not strip_querystring:
qs = get_query_string(environ)
if qs:
cat('?' + qs)
return uri_to_iri(''.join(tmp))
def host_is_trusted(hostname, trusted_list):
"""Checks if a host is trusted against a list. This also takes care
of port normalization.
.. versionadded:: 0.9
:param hostname: the hostname to check
:param trusted_list: a list of hostnames to check against. If a
hostname starts with a dot it will match against
all subdomains as well.
"""
if not hostname:
return False
if isinstance(trusted_list, string_types):
trusted_list = [trusted_list]
def _normalize(hostname):
if ':' in hostname:
hostname = hostname.rsplit(':', 1)[0]
return _encode_idna(hostname)
try:
hostname = _normalize(hostname)
except UnicodeError:
return False
for ref in trusted_list:
if ref.startswith('.'):
ref = ref[1:]
suffix_match = True
else:
suffix_match = False
try:
ref = _normalize(ref)
except UnicodeError:
return False
if ref == hostname:
return True
if suffix_match and hostname.endswith(b'.' + ref):
return True
return False
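# Editor's note, an illustrative check (not part of the original module): a leading
# dot trusts all subdomains, and default ports are normalized away before comparing.
#
#     host_is_trusted('example.com:80', ['.example.com'])  # -> True
#     host_is_trusted('evil.com', ['.example.com'])         # -> False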
def get_host(environ, trusted_hosts=None):
"""Return the host for the given WSGI environment. This first checks
the ``Host`` header. If it's not present, then ``SERVER_NAME`` and
``SERVER_PORT`` are used. The host will only contain the port if it
is different than the standard port for the protocol.
Optionally, verify that the host is trusted using
:func:`host_is_trusted` and raise a
:exc:`~werkzeug.exceptions.SecurityError` if it is not.
:param environ: The WSGI environment to get the host from.
:param trusted_hosts: A list of trusted hosts.
:return: Host, with port if necessary.
:raise ~werkzeug.exceptions.SecurityError: If the host is not
trusted.
"""
if 'HTTP_HOST' in environ:
rv = environ['HTTP_HOST']
if environ['wsgi.url_scheme'] == 'http' and rv.endswith(':80'):
rv = rv[:-3]
elif environ['wsgi.url_scheme'] == 'https' and rv.endswith(':443'):
rv = rv[:-4]
else:
rv = environ['SERVER_NAME']
if (environ['wsgi.url_scheme'], environ['SERVER_PORT']) not \
in (('https', '443'), ('http', '80')):
rv += ':' + environ['SERVER_PORT']
if trusted_hosts is not None:
if not host_is_trusted(rv, trusted_hosts):
from werkzeug.exceptions import SecurityError
raise SecurityError('Host "%s" is not trusted' % rv)
return rv
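# Editor's note, an illustrative result (not part of the original module): with a
# minimal environ the Host header wins and the scheme's default port is dropped.
#
#     environ = {'wsgi.url_scheme': 'http', 'HTTP_HOST': 'example.com:80'}
#     get_host(environ)  # -> 'example.com'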
def get_content_length(environ):
"""Returns the content length from the WSGI environment as
integer. If it's not available or chunked transfer encoding is used,
``None`` is returned.
.. versionadded:: 0.9
:param environ: the WSGI environ to fetch the content length from.
"""
if environ.get('HTTP_TRANSFER_ENCODING', '') == 'chunked':
return None
content_length = environ.get('CONTENT_LENGTH')
if content_length is not None:
try:
return max(0, int(content_length))
except (ValueError, TypeError):
pass
def get_input_stream(environ, safe_fallback=True):
"""Returns the input stream from the WSGI environment and wraps it
in the most sensible way possible. The stream returned is not the
raw WSGI stream in most cases but one that is safe to read from
without taking into account the content length.
If content length is not set, the stream will be empty for safety reasons.
If the WSGI server supports chunked or infinite streams, it should set
the ``wsgi.input_terminated`` value in the WSGI environ to indicate that.
.. versionadded:: 0.9
:param environ: the WSGI environ to fetch the stream from.
:param safe_fallback: use an empty stream as a safe fallback when the
content length is not set. Disabling this allows infinite streams,
which can be a denial-of-service risk.
"""
stream = environ['wsgi.input']
content_length = get_content_length(environ)
# A wsgi extension that tells us if the input is terminated. In
# that case we return the stream unchanged as we know we can safely
# read it until the end.
if environ.get('wsgi.input_terminated'):
return stream
# If the request doesn't specify a content length, returning the stream is
# potentially dangerous because it could be infinite, malicious or not. If
# safe_fallback is true, return an empty stream instead for safety.
if content_length is None:
return safe_fallback and BytesIO() or stream
# Otherwise limit the stream to the content length
return LimitedStream(stream, content_length)
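# Illustrative usage sketch (editor's addition): reading a request body safely
# inside a WSGI application, whatever the Content-Length situation is.
def _read_body(environ):
    stream = get_input_stream(environ)
    return stream.read()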
def get_query_string(environ):
"""Returns the `QUERY_STRING` from the WSGI environment. This also takes
care about the WSGI decoding dance on Python 3 environments as a
native string. The string returned will be restricted to ASCII
characters.
.. versionadded:: 0.9
:param environ: the WSGI environment object to get the query string from.
"""
qs = wsgi_get_bytes(environ.get('QUERY_STRING', ''))
# QUERY_STRING really should be ascii safe but some browsers
# will send us some unicode stuff (I am looking at you IE).
# In that case we want to urllib quote it badly.
return try_coerce_native(url_quote(qs, safe=':&%=+$!*\'(),'))
def get_path_info(environ, charset='utf-8', errors='replace'):
"""Returns the `PATH_INFO` from the WSGI environment and properly
decodes it. This also takes care about the WSGI decoding dance
on Python 3 environments. if the `charset` is set to `None` a
bytestring is returned.
.. versionadded:: 0.9
:param environ: the WSGI environment object to get the path from.
:param charset: the charset for the path info, or `None` if no
decoding should be performed.
:param errors: the decoding error handling.
"""
path = wsgi_get_bytes(environ.get('PATH_INFO', ''))
return to_unicode(path, charset, errors, allow_none_charset=True)
def get_script_name(environ, charset='utf-8', errors='replace'):
"""Returns the `SCRIPT_NAME` from the WSGI environment and properly
decodes it. This also takes care about the WSGI decoding dance
on Python 3 environments. if the `charset` is set to `None` a
bytestring is returned.
.. versionadded:: 0.9
:param environ: the WSGI environment object to get the path from.
:param charset: the charset for the path, or `None` if no
decoding should be performed.
:param errors: the decoding error handling.
"""
path = wsgi_get_bytes(environ.get('SCRIPT_NAME', ''))
return to_unicode(path, charset, errors, allow_none_charset=True)
def pop_path_info(environ, charset='utf-8', errors='replace'):
"""Removes and returns the next segment of `PATH_INFO`, pushing it onto
`SCRIPT_NAME`. Returns `None` if there is nothing left on `PATH_INFO`.
If the `charset` is set to `None` a bytestring is returned.
If there are empty segments (``'/foo//bar``) these are ignored but
properly pushed to the `SCRIPT_NAME`:
>>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'}
>>> pop_path_info(env)
'a'
>>> env['SCRIPT_NAME']
'/foo/a'
>>> pop_path_info(env)
'b'
>>> env['SCRIPT_NAME']
'/foo/a/b'
.. versionadded:: 0.5
.. versionchanged:: 0.9
The path is now decoded and a charset and encoding
parameter can be provided.
:param environ: the WSGI environment that is modified.
"""
path = environ.get('PATH_INFO')
if not path:
return None
script_name = environ.get('SCRIPT_NAME', '')
# shift multiple leading slashes over
old_path = path
path = path.lstrip('/')
if path != old_path:
script_name += '/' * (len(old_path) - len(path))
if '/' not in path:
environ['PATH_INFO'] = ''
environ['SCRIPT_NAME'] = script_name + path
rv = wsgi_get_bytes(path)
else:
segment, path = path.split('/', 1)
environ['PATH_INFO'] = '/' + path
environ['SCRIPT_NAME'] = script_name + segment
rv = wsgi_get_bytes(segment)
return to_unicode(rv, charset, errors, allow_none_charset=True)
def peek_path_info(environ, charset='utf-8', errors='replace'):
"""Returns the next segment on the `PATH_INFO` or `None` if there
is none. Works like :func:`pop_path_info` without modifying the
environment:
>>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'}
>>> peek_path_info(env)
'a'
>>> peek_path_info(env)
'a'
If the `charset` is set to `None` a bytestring is returned.
.. versionadded:: 0.5
.. versionchanged:: 0.9
The path is now decoded and a charset and encoding
parameter can be provided.
:param environ: the WSGI environment that is checked.
"""
segments = environ.get('PATH_INFO', '').lstrip('/').split('/', 1)
if segments:
return to_unicode(wsgi_get_bytes(segments[0]),
charset, errors, allow_none_charset=True)
def extract_path_info(environ_or_baseurl, path_or_url, charset='utf-8',
errors='replace', collapse_http_schemes=True):
"""Extracts the path info from the given URL (or WSGI environment) and
path. The path info returned is a unicode string, not a bytestring
suitable for a WSGI environment. The URLs might also be IRIs.
If the path info could not be determined, `None` is returned.
Some examples:
>>> extract_path_info('http://example.com/app', '/app/hello')
u'/hello'
>>> extract_path_info('http://example.com/app',
... 'https://example.com/app/hello')
u'/hello'
>>> extract_path_info('http://example.com/app',
... 'https://example.com/app/hello',
... collapse_http_schemes=False) is None
True
Instead of providing a base URL you can also pass a WSGI environment.
.. versionadded:: 0.6
:param environ_or_baseurl: a WSGI environment dict, a base URL or
base IRI. This is the root of the
application.
:param path_or_url: an absolute path from the server root, a
relative path (in which case it's the path info)
or a full URL. Also accepts IRIs and unicode
parameters.
:param charset: the charset for byte data in URLs
:param errors: the error handling on decode
:param collapse_http_schemes: if set to `False` the algorithm does
not assume that http and https on the
same server point to the same
resource.
"""
def _normalize_netloc(scheme, netloc):
parts = netloc.split(u'@', 1)[-1].split(u':', 1)
if len(parts) == 2:
netloc, port = parts
if (scheme == u'http' and port == u'80') or \
(scheme == u'https' and port == u'443'):
port = None
else:
netloc = parts[0]
port = None
if port is not None:
netloc += u':' + port
return netloc
# make sure whatever we are working on is a IRI and parse it
path = uri_to_iri(path_or_url, charset, errors)
if isinstance(environ_or_baseurl, dict):
environ_or_baseurl = get_current_url(environ_or_baseurl,
root_only=True)
base_iri = uri_to_iri(environ_or_baseurl, charset, errors)
base_scheme, base_netloc, base_path = url_parse(base_iri)[:3]
cur_scheme, cur_netloc, cur_path, = \
url_parse(url_join(base_iri, path))[:3]
# normalize the network location
base_netloc = _normalize_netloc(base_scheme, base_netloc)
cur_netloc = _normalize_netloc(cur_scheme, cur_netloc)
# is that IRI even on a known HTTP scheme?
if collapse_http_schemes:
for scheme in base_scheme, cur_scheme:
if scheme not in (u'http', u'https'):
return None
else:
if not (base_scheme in (u'http', u'https') and
base_scheme == cur_scheme):
return None
# are the netlocs compatible?
if base_netloc != cur_netloc:
return None
# are we below the application path?
base_path = base_path.rstrip(u'/')
if not cur_path.startswith(base_path):
return None
return u'/' + cur_path[len(base_path):].lstrip(u'/')
class ProxyMiddleware(object):
"""This middleware routes some requests to the provided WSGI app and
proxies some requests to an external server. This is not something that
can generally be done on the WSGI layer and some HTTP requests will not
tunnel through correctly (for instance websocket requests cannot be
proxied through WSGI). As a result this is only really useful for some
basic requests that can be forwarded.
Example configuration::
app = ProxyMiddleware(app, {
'/static/': {
'target': 'http://127.0.0.1:5001/',
}
})
For each host options can be specified. The following options are
supported:
``target``:
the target URL to dispatch to
``remove_prefix``:
if set to `True` the prefix is chopped off the URL before
dispatching it to the server.
``host``:
When set to ``'<auto>'`` which is the default the host header is
automatically rewritten to the URL of the target. If set to `None`
then the host header is unmodified from the client request. Any
other value overwrites the host header with that value.
``headers``:
An optional dictionary of headers that should be sent with the
request to the target host.
``ssl_context``:
In case this is an HTTPS target host then an SSL context can be
provided here (:class:`ssl.SSLContext`). This can be used for instance
to disable SSL verification.
In this case everything below ``'/static/'`` is proxied to the server on
port 5001. The host header is automatically rewritten and so are request
URLs (eg: the leading `/static/` prefix here gets chopped off).
.. versionadded:: 0.14
"""
def __init__(self, app, targets, chunk_size=2 << 13, timeout=10):
def _set_defaults(opts):
opts.setdefault('remove_prefix', False)
opts.setdefault('host', '<auto>')
opts.setdefault('headers', {})
opts.setdefault('ssl_context', None)
return opts
self.app = app
self.targets = dict(('/%s/' % k.strip('/'), _set_defaults(v))
for k, v in iteritems(targets))
self.chunk_size = chunk_size
self.timeout = timeout
def proxy_to(self, opts, path, prefix):
target = url_parse(opts['target'])
def application(environ, start_response):
headers = list(EnvironHeaders(environ).items())
headers[:] = [(k, v) for k, v in headers
if not is_hop_by_hop_header(k) and
k.lower() not in ('content-length', 'host')]
headers.append(('Connection', 'close'))
if opts['host'] == '<auto>':
headers.append(('Host', target.ascii_host))
elif opts['host'] is None:
headers.append(('Host', environ['HTTP_HOST']))
else:
headers.append(('Host', opts['host']))
headers.extend(opts['headers'].items())
remote_path = path
if opts['remove_prefix']:
remote_path = '%s/%s' % (
target.path.rstrip('/'),
remote_path[len(prefix):].lstrip('/')
)
content_length = environ.get('CONTENT_LENGTH')
chunked = False
if content_length not in ('', None):
headers.append(('Content-Length', content_length))
elif content_length is not None:
headers.append(('Transfer-Encoding', 'chunked'))
chunked = True
try:
if target.scheme == 'http':
con = httplib.HTTPConnection(
target.ascii_host, target.port or 80,
timeout=self.timeout)
elif target.scheme == 'https':
con = httplib.HTTPSConnection(
target.ascii_host, target.port or 443,
timeout=self.timeout,
context=opts['ssl_context'])
con.connect()
remote_url = url_quote(remote_path)
querystring = environ['QUERY_STRING']
if querystring:
remote_url = remote_url + '?' + querystring
con.putrequest(environ['REQUEST_METHOD'], remote_url,
skip_host=True)
for k, v in headers:
if k.lower() == 'connection':
v = 'close'
con.putheader(k, v)
con.endheaders()
stream = get_input_stream(environ)
while 1:
data = stream.read(self.chunk_size)
if not data:
break
if chunked:
con.send(b'%x\r\n%s\r\n' % (len(data), data))
else:
con.send(data)
resp = con.getresponse()
except socket.error:
from werkzeug.exceptions import BadGateway
return BadGateway()(environ, start_response)
start_response('%d %s' % (resp.status, resp.reason),
[(k.title(), v) for k, v in resp.getheaders()
if not is_hop_by_hop_header(k)])
def read():
while 1:
try:
data = resp.read(self.chunk_size)
except socket.error:
break
if not data:
break
yield data
return read()
return application
def __call__(self, environ, start_response):
path = environ['PATH_INFO']
app = self.app
for prefix, opts in iteritems(self.targets):
if path.startswith(prefix):
app = self.proxy_to(opts, path, prefix)
break
return app(environ, start_response)
class SharedDataMiddleware(object):
"""A WSGI middleware that provides static content for development
environments or simple server setups. Usage is quite simple::
import os
from werkzeug.wsgi import SharedDataMiddleware
app = SharedDataMiddleware(app, {
'/shared': os.path.join(os.path.dirname(__file__), 'shared')
})
The contents of the folder ``./shared`` will now be available on
``http://example.com/shared/``. This is pretty useful during development
because a standalone media server is not required. One can also mount
files on the root folder and still continue to use the application because
the shared data middleware forwards all unhandled requests to the
application, even if the requests are below one of the shared folders.
If `pkg_resources` is available you can also tell the middleware to serve
files from package data::
app = SharedDataMiddleware(app, {
'/shared': ('myapplication', 'shared_files')
})
This will then serve the ``shared_files`` folder in the `myapplication`
Python package.
The optional `disallow` parameter can be a list of :func:`~fnmatch.fnmatch`
rules for files that are not accessible from the web. If `cache` is set to
`False` no caching headers are sent.
    Currently the middleware does not support non-ASCII filenames. If the
encoding on the file system happens to be the encoding of the URI it may
work but this could also be by accident. We strongly suggest using ASCII
only file names for static files.
The middleware will guess the mimetype using the Python `mimetype`
module. If it's unable to figure out the charset it will fall back
to `fallback_mimetype`.
.. versionchanged:: 0.5
The cache timeout is configurable now.
.. versionadded:: 0.6
The `fallback_mimetype` parameter was added.
:param app: the application to wrap. If you don't want to wrap an
application you can pass it :exc:`NotFound`.
:param exports: a list or dict of exported files and folders.
:param disallow: a list of :func:`~fnmatch.fnmatch` rules.
:param fallback_mimetype: the fallback mimetype for unknown files.
:param cache: enable or disable caching headers.
:param cache_timeout: the cache timeout in seconds for the headers.
"""
def __init__(self, app, exports, disallow=None, cache=True,
cache_timeout=60 * 60 * 12, fallback_mimetype='text/plain'):
self.app = app
self.exports = []
self.cache = cache
self.cache_timeout = cache_timeout
if hasattr(exports, 'items'):
exports = iteritems(exports)
for key, value in exports:
if isinstance(value, tuple):
loader = self.get_package_loader(*value)
elif isinstance(value, string_types):
if os.path.isfile(value):
loader = self.get_file_loader(value)
else:
loader = self.get_directory_loader(value)
else:
raise TypeError('unknown def %r' % value)
self.exports.append((key, loader))
if disallow is not None:
from fnmatch import fnmatch
self.is_allowed = lambda x: not fnmatch(x, disallow)
self.fallback_mimetype = fallback_mimetype
def is_allowed(self, filename):
"""Subclasses can override this method to disallow the access to
certain files. However by providing `disallow` in the constructor
this method is overwritten.
"""
return True
def _opener(self, filename):
return lambda: (
open(filename, 'rb'),
datetime.utcfromtimestamp(os.path.getmtime(filename)),
int(os.path.getsize(filename))
)
def get_file_loader(self, filename):
return lambda x: (os.path.basename(filename), self._opener(filename))
def get_package_loader(self, package, package_path):
from pkg_resources import DefaultProvider, ResourceManager, \
get_provider
loadtime = datetime.utcnow()
provider = get_provider(package)
manager = ResourceManager()
filesystem_bound = isinstance(provider, DefaultProvider)
def loader(path):
if path is None:
return None, None
path = posixpath.join(package_path, path)
if not provider.has_resource(path):
return None, None
basename = posixpath.basename(path)
if filesystem_bound:
return basename, self._opener(
provider.get_resource_filename(manager, path))
s = provider.get_resource_string(manager, path)
return basename, lambda: (
BytesIO(s),
loadtime,
len(s)
)
return loader
def get_directory_loader(self, directory):
def loader(path):
if path is not None:
path = os.path.join(directory, path)
else:
path = directory
if os.path.isfile(path):
return os.path.basename(path), self._opener(path)
return None, None
return loader
def generate_etag(self, mtime, file_size, real_filename):
if not isinstance(real_filename, bytes):
real_filename = real_filename.encode(get_filesystem_encoding())
return 'wzsdm-%d-%s-%s' % (
mktime(mtime.timetuple()),
file_size,
adler32(real_filename) & 0xffffffff
)
def __call__(self, environ, start_response):
cleaned_path = get_path_info(environ)
if PY2:
cleaned_path = cleaned_path.encode(get_filesystem_encoding())
# sanitize the path for non unix systems
cleaned_path = cleaned_path.strip('/')
for sep in os.sep, os.altsep:
if sep and sep != '/':
cleaned_path = cleaned_path.replace(sep, '/')
path = '/' + '/'.join(x for x in cleaned_path.split('/')
if x and x != '..')
file_loader = None
for search_path, loader in self.exports:
if search_path == path:
real_filename, file_loader = loader(None)
if file_loader is not None:
break
if not search_path.endswith('/'):
search_path += '/'
if path.startswith(search_path):
real_filename, file_loader = loader(path[len(search_path):])
if file_loader is not None:
break
if file_loader is None or not self.is_allowed(real_filename):
return self.app(environ, start_response)
guessed_type = mimetypes.guess_type(real_filename)
mime_type = guessed_type[0] or self.fallback_mimetype
f, mtime, file_size = file_loader()
headers = [('Date', http_date())]
if self.cache:
timeout = self.cache_timeout
etag = self.generate_etag(mtime, file_size, real_filename)
headers += [
('Etag', '"%s"' % etag),
('Cache-Control', 'max-age=%d, public' % timeout)
]
if not is_resource_modified(environ, etag, last_modified=mtime):
f.close()
start_response('304 Not Modified', headers)
return []
headers.append(('Expires', http_date(time() + timeout)))
else:
headers.append(('Cache-Control', 'public'))
headers.extend((
('Content-Type', mime_type),
('Content-Length', str(file_size)),
('Last-Modified', http_date(mtime))
))
start_response('200 OK', headers)
return wrap_file(environ, f)
class DispatcherMiddleware(object):
"""Allows one to mount middlewares or applications in a WSGI application.
This is useful if you want to combine multiple WSGI applications::
app = DispatcherMiddleware(app, {
'/app2': app2,
'/app3': app3
})
"""
def __init__(self, app, mounts=None):
self.app = app
self.mounts = mounts or {}
def __call__(self, environ, start_response):
script = environ.get('PATH_INFO', '')
path_info = ''
while '/' in script:
if script in self.mounts:
app = self.mounts[script]
break
script, last_item = script.rsplit('/', 1)
path_info = '/%s%s' % (last_item, path_info)
else:
app = self.mounts.get(script, self.app)
original_script_name = environ.get('SCRIPT_NAME', '')
environ['SCRIPT_NAME'] = original_script_name + script
environ['PATH_INFO'] = path_info
return app(environ, start_response)
@implements_iterator
class ClosingIterator(object):
"""The WSGI specification requires that all middlewares and gateways
respect the `close` callback of the iterable returned by the application.
Because it is useful to add another close action to a returned iterable
and adding a custom iterable is a boring task this class can be used for
that::
return ClosingIterator(app(environ, start_response), [cleanup_session,
cleanup_locals])
If there is just one close function it can be passed instead of the list.
A closing iterator is not needed if the application uses response objects
and finishes the processing if the response is started::
try:
return response(environ, start_response)
finally:
cleanup_session()
cleanup_locals()
"""
def __init__(self, iterable, callbacks=None):
iterator = iter(iterable)
self._next = partial(next, iterator)
if callbacks is None:
callbacks = []
elif callable(callbacks):
callbacks = [callbacks]
else:
callbacks = list(callbacks)
iterable_close = getattr(iterable, 'close', None)
if iterable_close:
callbacks.insert(0, iterable_close)
self._callbacks = callbacks
def __iter__(self):
return self
def __next__(self):
return self._next()
def close(self):
for callback in self._callbacks:
callback()
def wrap_file(environ, file, buffer_size=8192):
"""Wraps a file. This uses the WSGI server's file wrapper if available
or otherwise the generic :class:`FileWrapper`.
.. versionadded:: 0.5
If the file wrapper from the WSGI server is used it's important to not
iterate over it from inside the application but to pass it through
unchanged. If you want to pass out a file wrapper inside a response
object you have to set :attr:`~BaseResponse.direct_passthrough` to `True`.
More information about file wrappers are available in :pep:`333`.
:param file: a :class:`file`-like object with a :meth:`~file.read` method.
:param buffer_size: number of bytes for one iteration.
"""
return environ.get('wsgi.file_wrapper', FileWrapper)(file, buffer_size)
@implements_iterator
class FileWrapper(object):
"""This class can be used to convert a :class:`file`-like object into
an iterable. It yields `buffer_size` blocks until the file is fully
read.
You should not use this class directly but rather use the
:func:`wrap_file` function that uses the WSGI server's file wrapper
support if it's available.
.. versionadded:: 0.5
If you're using this object together with a :class:`BaseResponse` you have
to use the `direct_passthrough` mode.
:param file: a :class:`file`-like object with a :meth:`~file.read` method.
:param buffer_size: number of bytes for one iteration.
"""
def __init__(self, file, buffer_size=8192):
self.file = file
self.buffer_size = buffer_size
def close(self):
if hasattr(self.file, 'close'):
self.file.close()
def seekable(self):
if hasattr(self.file, 'seekable'):
return self.file.seekable()
if hasattr(self.file, 'seek'):
return True
return False
def seek(self, *args):
if hasattr(self.file, 'seek'):
self.file.seek(*args)
def tell(self):
if hasattr(self.file, 'tell'):
return self.file.tell()
return None
def __iter__(self):
return self
def __next__(self):
data = self.file.read(self.buffer_size)
if data:
return data
raise StopIteration()
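# Illustrative WSGI application sketch (editor's addition): streaming a file back
# through the server's file wrapper when one is available. The path is made up;
# the wrapper's close() will close the file handle for us.
def _file_app(environ, start_response):
    f = open('/tmp/example.bin', 'rb')
    start_response('200 OK', [('Content-Type', 'application/octet-stream')])
    return wrap_file(environ, f)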
@implements_iterator
class _RangeWrapper(object):
# private for now, but should we make it public in the future ?
"""This class can be used to convert an iterable object into
an iterable that will only yield a piece of the underlying content.
It yields blocks until the underlying stream range is fully read.
The yielded blocks will have a size that can't exceed the original
iterator defined block size, but that can be smaller.
If you're using this object together with a :class:`BaseResponse` you have
to use the `direct_passthrough` mode.
:param iterable: an iterable object with a :meth:`__next__` method.
:param start_byte: byte from which read will start.
:param byte_range: how many bytes to read.
"""
def __init__(self, iterable, start_byte=0, byte_range=None):
self.iterable = iter(iterable)
self.byte_range = byte_range
self.start_byte = start_byte
self.end_byte = None
if byte_range is not None:
self.end_byte = self.start_byte + self.byte_range
self.read_length = 0
self.seekable = hasattr(iterable, 'seekable') and iterable.seekable()
self.end_reached = False
def __iter__(self):
return self
def _next_chunk(self):
try:
chunk = next(self.iterable)
self.read_length += len(chunk)
return chunk
except StopIteration:
self.end_reached = True
raise
def _first_iteration(self):
chunk = None
if self.seekable:
self.iterable.seek(self.start_byte)
self.read_length = self.iterable.tell()
contextual_read_length = self.read_length
else:
while self.read_length <= self.start_byte:
chunk = self._next_chunk()
if chunk is not None:
chunk = chunk[self.start_byte - self.read_length:]
contextual_read_length = self.start_byte
return chunk, contextual_read_length
def _next(self):
if self.end_reached:
raise StopIteration()
chunk = None
contextual_read_length = self.read_length
if self.read_length == 0:
chunk, contextual_read_length = self._first_iteration()
if chunk is None:
chunk = self._next_chunk()
if self.end_byte is not None and self.read_length >= self.end_byte:
self.end_reached = True
return chunk[:self.end_byte - contextual_read_length]
return chunk
def __next__(self):
chunk = self._next()
if chunk:
return chunk
self.end_reached = True
raise StopIteration()
def close(self):
if hasattr(self.iterable, 'close'):
self.iterable.close()
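# Illustrative sketch (editorial addition): serving a byte range of an
# existing iterable of byte chunks. The input data is a made-up example.
#
#     app_iter = [b'hello ', b'world']
#     partial = _RangeWrapper(app_iter, start_byte=3, byte_range=5)
#     b''.join(partial)   # -> b'lo wo'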
def _make_chunk_iter(stream, limit, buffer_size):
"""Helper for the line and chunk iter functions."""
if isinstance(stream, (bytes, bytearray, text_type)):
raise TypeError('Passed a string or byte object instead of '
'true iterator or stream.')
if not hasattr(stream, 'read'):
for item in stream:
if item:
yield item
return
if not isinstance(stream, LimitedStream) and limit is not None:
stream = LimitedStream(stream, limit)
_read = stream.read
while 1:
item = _read(buffer_size)
if not item:
break
yield item
def make_line_iter(stream, limit=None, buffer_size=10 * 1024,
cap_at_buffer=False):
"""Safely iterates line-based over an input stream. If the input stream
is not a :class:`LimitedStream` the `limit` parameter is mandatory.
    This uses the stream's :meth:`~file.read` method internally, as opposed
to the :meth:`~file.readline` method that is unsafe and can only be used
in violation of the WSGI specification. The same problem applies to the
`__iter__` function of the input stream which calls :meth:`~file.readline`
without arguments.
If you need line-by-line processing it's strongly recommended to iterate
over the input stream using this helper function.
.. versionchanged:: 0.8
This function now ensures that the limit was reached.
.. versionadded:: 0.9
added support for iterators as input stream.
.. versionadded:: 0.11.10
added support for the `cap_at_buffer` parameter.
    :param stream: the stream or iterable to iterate over.
:param limit: the limit in bytes for the stream. (Usually
content length. Not necessary if the `stream`
                  is a :class:`LimitedStream`.)
:param buffer_size: The optional buffer size.
    :param cap_at_buffer: if this is set, chunks are split if they are longer
                          than the buffer size. Internally this is implemented
                          so that the buffer size might be exhausted by a factor
                          of two, however.
"""
_iter = _make_chunk_iter(stream, limit, buffer_size)
first_item = next(_iter, '')
if not first_item:
return
s = make_literal_wrapper(first_item)
empty = s('')
cr = s('\r')
lf = s('\n')
crlf = s('\r\n')
_iter = chain((first_item,), _iter)
def _iter_basic_lines():
_join = empty.join
buffer = []
while 1:
new_data = next(_iter, '')
if not new_data:
break
new_buf = []
buf_size = 0
for item in chain(buffer, new_data.splitlines(True)):
new_buf.append(item)
buf_size += len(item)
if item and item[-1:] in crlf:
yield _join(new_buf)
new_buf = []
elif cap_at_buffer and buf_size >= buffer_size:
rv = _join(new_buf)
while len(rv) >= buffer_size:
yield rv[:buffer_size]
rv = rv[buffer_size:]
new_buf = [rv]
buffer = new_buf
if buffer:
yield _join(buffer)
# This hackery is necessary to merge 'foo\r' and '\n' into one item
# of 'foo\r\n' if we were unlucky and we hit a chunk boundary.
previous = empty
for item in _iter_basic_lines():
if item == lf and previous[-1:] == cr:
previous += item
item = empty
if previous:
yield previous
previous = item
if previous:
yield previous
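# Illustrative sketch (editorial addition): safe line-by-line reading of a
# WSGI request body. ``environ`` and ``handle_line`` are hypothetical names.
#
#     stream = environ['wsgi.input']
#     limit = int(environ.get('CONTENT_LENGTH') or 0)
#     for line in make_line_iter(stream, limit=limit):
#         handle_line(line)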
def make_chunk_iter(stream, separator, limit=None, buffer_size=10 * 1024,
cap_at_buffer=False):
"""Works like :func:`make_line_iter` but accepts a separator
which divides chunks. If you want newline based processing
you should use :func:`make_line_iter` instead as it
supports arbitrary newline markers.
.. versionadded:: 0.8
.. versionadded:: 0.9
added support for iterators as input stream.
.. versionadded:: 0.11.10
added support for the `cap_at_buffer` parameter.
    :param stream: the stream or iterable to iterate over.
:param separator: the separator that divides chunks.
:param limit: the limit in bytes for the stream. (Usually
content length. Not necessary if the `stream`
is otherwise already limited).
:param buffer_size: The optional buffer size.
    :param cap_at_buffer: if this is set, chunks are split if they are longer
                          than the buffer size. Internally this is implemented
                          so that the buffer size might be exhausted by a factor
                          of two, however.
"""
_iter = _make_chunk_iter(stream, limit, buffer_size)
first_item = next(_iter, '')
if not first_item:
return
_iter = chain((first_item,), _iter)
if isinstance(first_item, text_type):
separator = to_unicode(separator)
_split = re.compile(r'(%s)' % re.escape(separator)).split
_join = u''.join
else:
separator = to_bytes(separator)
_split = re.compile(b'(' + re.escape(separator) + b')').split
_join = b''.join
buffer = []
while 1:
new_data = next(_iter, '')
if not new_data:
break
chunks = _split(new_data)
new_buf = []
buf_size = 0
for item in chain(buffer, chunks):
if item == separator:
yield _join(new_buf)
new_buf = []
buf_size = 0
else:
buf_size += len(item)
new_buf.append(item)
if cap_at_buffer and buf_size >= buffer_size:
rv = _join(new_buf)
while len(rv) >= buffer_size:
yield rv[:buffer_size]
rv = rv[buffer_size:]
new_buf = [rv]
buf_size = len(rv)
buffer = new_buf
if buffer:
yield _join(buffer)
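# Illustrative sketch (editorial addition): splitting an iterable of byte
# chunks on a separator. The input data is a made-up example.
#
#     data = [b'foo;bar;', b'baz']
#     list(make_chunk_iter(data, separator=b';'))   # -> [b'foo', b'bar', b'baz']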
@implements_iterator
class LimitedStream(io.IOBase):
"""Wraps a stream so that it doesn't read more than n bytes. If the
stream is exhausted and the caller tries to get more bytes from it
:func:`on_exhausted` is called which by default returns an empty
string. The return value of that function is forwarded
to the reader function. So if it returns an empty string
:meth:`read` will return an empty string as well.
The limit however must never be higher than what the stream can
output. Otherwise :meth:`readlines` will try to read past the
limit.
.. admonition:: Note on WSGI compliance
calls to :meth:`readline` and :meth:`readlines` are not
       WSGI compliant because they pass a size argument to the
readline methods. Unfortunately the WSGI PEP is not safely
implementable without a size argument to :meth:`readline`
because there is no EOF marker in the stream. As a result
of that the use of :meth:`readline` is discouraged.
For the same reason iterating over the :class:`LimitedStream`
is not portable. It internally calls :meth:`readline`.
We strongly suggest using :meth:`read` only or using the
:func:`make_line_iter` which safely iterates line-based
over a WSGI input stream.
:param stream: the stream to wrap.
:param limit: the limit for the stream, must not be longer than
                  what the stream can provide if the stream does not
end with `EOF` (like `wsgi.input`)
"""
def __init__(self, stream, limit):
self._read = stream.read
self._readline = stream.readline
self._pos = 0
self.limit = limit
def __iter__(self):
return self
@property
def is_exhausted(self):
"""If the stream is exhausted this attribute is `True`."""
return self._pos >= self.limit
def on_exhausted(self):
"""This is called when the stream tries to read past the limit.
The return value of this function is returned from the reading
function.
"""
# Read null bytes from the stream so that we get the
# correct end of stream marker.
return self._read(0)
def on_disconnect(self):
"""What should happen if a disconnect is detected? The return
value of this function is returned from read functions in case
the client went away. By default a
:exc:`~werkzeug.exceptions.ClientDisconnected` exception is raised.
"""
from werkzeug.exceptions import ClientDisconnected
raise ClientDisconnected()
def exhaust(self, chunk_size=1024 * 64):
"""Exhaust the stream. This consumes all the data left until the
limit is reached.
:param chunk_size: the size for a chunk. It will read the chunk
until the stream is exhausted and throw away
the results.
"""
to_read = self.limit - self._pos
chunk = chunk_size
while to_read > 0:
chunk = min(to_read, chunk)
self.read(chunk)
to_read -= chunk
def read(self, size=None):
"""Read `size` bytes or if size is not provided everything is read.
:param size: the number of bytes read.
"""
if self._pos >= self.limit:
return self.on_exhausted()
        if size is None or size == -1: # -1 is for consistency with file objects
size = self.limit
to_read = min(self.limit - self._pos, size)
try:
read = self._read(to_read)
except (IOError, ValueError):
return self.on_disconnect()
if to_read and len(read) != to_read:
return self.on_disconnect()
self._pos += len(read)
return read
def readline(self, size=None):
"""Reads one line from the stream."""
if self._pos >= self.limit:
return self.on_exhausted()
if size is None:
size = self.limit - self._pos
else:
size = min(size, self.limit - self._pos)
try:
line = self._readline(size)
except (ValueError, IOError):
return self.on_disconnect()
if size and not line:
return self.on_disconnect()
self._pos += len(line)
return line
def readlines(self, size=None):
"""Reads a file into a list of strings. It calls :meth:`readline`
until the file is read to the end. It does support the optional
        `size` argument if the underlying stream supports it for
`readline`.
"""
last_pos = self._pos
result = []
if size is not None:
end = min(self.limit, last_pos + size)
else:
end = self.limit
while 1:
if size is not None:
size -= last_pos - self._pos
if self._pos >= end:
break
result.append(self.readline(size))
if size is not None:
last_pos = self._pos
return result
def tell(self):
"""Returns the position of the stream.
.. versionadded:: 0.9
"""
return self._pos
def __next__(self):
line = self.readline()
if not line:
raise StopIteration()
return line
def readable(self):
return True
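# Illustrative sketch (editorial addition): capping reads from ``wsgi.input``
# at the declared content length. ``environ`` is a hypothetical WSGI environ.
#
#     limit = int(environ.get('CONTENT_LENGTH') or 0)
#     stream = LimitedStream(environ['wsgi.input'], limit)
#     body = stream.read()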
|
the-stack_106_27817 | from struct import pack, unpack
import hashlib
import sys
import traceback
from typing import Optional, Tuple
from electrum_zcash import constants
from electrum_zcash import ecc
from electrum_zcash import bip32
from electrum_zcash.crypto import hash_160
from electrum_zcash.bitcoin import (int_to_hex, var_int, b58_address_to_hash160,
hash160_to_b58_address, is_b58_address)
from electrum_zcash.bip32 import BIP32Node, convert_bip32_intpath_to_strpath
from electrum_zcash.i18n import _
from electrum_zcash.keystore import Hardware_KeyStore
from electrum_zcash.transaction import Transaction, PartialTransaction, PartialTxInput, PartialTxOutput
from electrum_zcash.wallet import Standard_Wallet
from electrum_zcash.util import bfh, bh2u, versiontuple, UserFacingException
from electrum_zcash.base_wizard import ScriptTypeNotSupported
from electrum_zcash.logging import get_logger
from electrum_zcash.plugin import runs_in_hwd_thread, Device
from ..hw_wallet import HW_PluginBase, HardwareClientBase
from ..hw_wallet.plugin import is_any_tx_output_on_change_branch, validate_op_return_output, LibraryFoundButUnusable
_logger = get_logger(__name__)
def setAlternateCoinVersions(self, regular, p2sh):
apdu = [self.BTCHIP_CLA, 0x14, 0x00, 0x00, 0x02, regular, p2sh]
self.dongle.exchange(bytearray(apdu))
try:
import hid
from btchip.btchipComm import HIDDongleHIDAPI, DongleWait
from btchip.btchip import btchip
from btchip.btchipUtils import compress_public_key,format_transaction, get_regular_input_script, get_p2sh_input_script
from btchip.btchipFirmwareWizard import checkFirmware, updateFirmware
from btchip.btchipException import BTChipException
from .btchip_zcash import btchip_zcash, zcashTransaction
btchip.setAlternateCoinVersions = setAlternateCoinVersions
BTCHIP = True
BTCHIP_DEBUG = False
except ImportError as e:
if not (isinstance(e, ModuleNotFoundError) and e.name == 'btchip'):
_logger.exception('error importing ledger plugin deps')
BTCHIP = False
    btchip = object # to test without btchip modules (see btchip_zcash class)
MSG_NEEDS_FW_UPDATE_GENERIC = _('Firmware version too old. Please update at') + \
' https://www.ledgerwallet.com'
MSG_NEEDS_FW_UPDATE_OVERWINTER = (_('Firmware version too old for '
'Overwinter/Sapling support. '
'Please update at') +
' https://www.ledgerwallet.com')
MULTI_OUTPUT_SUPPORT = '1.1.4'
ALTERNATIVE_COIN_VERSION = '1.0.1'
OVERWINTER_SUPPORT = '1.3.3'
def test_pin_unlocked(func):
"""Function decorator to test the Ledger for being unlocked, and if not,
raise a human-readable exception.
"""
def catch_exception(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except BTChipException as e:
if e.sw == 0x6982:
raise UserFacingException(_('Your Ledger is locked. Please unlock it.'))
else:
raise
return catch_exception
class Ledger_Client(HardwareClientBase):
def __init__(self, hidDevice, *, product_key: Tuple[int, int],
plugin: HW_PluginBase):
HardwareClientBase.__init__(self, plugin=plugin)
self.dongleObject = btchip_zcash(hidDevice)
self.preflightDone = False
self._product_key = product_key
self._soft_device_id = None
def is_pairable(self):
return True
@runs_in_hwd_thread
def close(self):
self.dongleObject.dongle.close()
def is_initialized(self):
return True
@runs_in_hwd_thread
def get_soft_device_id(self):
if self._soft_device_id is None:
# modern ledger can provide xpub without user interaction
# (hw1 would prompt for PIN)
if not self.is_hw1():
self._soft_device_id = self.request_root_fingerprint_from_device()
return self._soft_device_id
def is_hw1(self) -> bool:
return self._product_key[0] == 0x2581
def device_model_name(self):
return LedgerPlugin.device_name_from_product_key(self._product_key)
@runs_in_hwd_thread
def has_usable_connection_with_device(self):
try:
self.dongleObject.getFirmwareVersion()
except BaseException:
return False
return True
@runs_in_hwd_thread
@test_pin_unlocked
def get_xpub(self, bip32_path, xtype):
self.checkDevice()
# bip32_path is of the form 44'/133'/1'
# S-L-O-W - we don't handle the fingerprint directly, so compute
# it manually from the previous node
# This only happens once so it's bearable
#self.get_client() # prompt for the PIN before displaying the dialog if necessary
#self.handler.show_message("Computing master public key")
bip32_path = bip32.normalize_bip32_derivation(bip32_path)
bip32_intpath = bip32.convert_bip32_path_to_list_of_uint32(bip32_path)
bip32_path = bip32_path[2:] # cut off "m/"
if len(bip32_intpath) >= 1:
prevPath = bip32.convert_bip32_intpath_to_strpath(bip32_intpath[:-1])[2:]
nodeData = self.dongleObject.getWalletPublicKey(prevPath)
publicKey = compress_public_key(nodeData['publicKey'])
fingerprint_bytes = hash_160(publicKey)[0:4]
childnum_bytes = bip32_intpath[-1].to_bytes(length=4, byteorder="big")
else:
fingerprint_bytes = bytes(4)
childnum_bytes = bytes(4)
nodeData = self.dongleObject.getWalletPublicKey(bip32_path)
publicKey = compress_public_key(nodeData['publicKey'])
depth = len(bip32_intpath)
return BIP32Node(xtype=xtype,
eckey=ecc.ECPubkey(bytes(publicKey)),
chaincode=nodeData['chainCode'],
depth=depth,
fingerprint=fingerprint_bytes,
child_number=childnum_bytes).to_xpub()
def has_detached_pin_support(self, client):
try:
client.getVerifyPinRemainingAttempts()
return True
except BTChipException as e:
if e.sw == 0x6d00:
return False
raise e
def is_pin_validated(self, client):
try:
# Invalid SET OPERATION MODE to verify the PIN status
client.dongle.exchange(bytearray([0xe0, 0x26, 0x00, 0x00, 0x01, 0xAB]))
except BTChipException as e:
if (e.sw == 0x6982):
return False
if (e.sw == 0x6A80):
return True
raise e
def supports_multi_output(self):
return self.multiOutputSupported
def supports_overwinter(self):
return self.overwinterSupported
@runs_in_hwd_thread
def perform_hw1_preflight(self):
try:
firmwareInfo = self.dongleObject.getFirmwareVersion()
firmware = firmwareInfo['version']
self.multiOutputSupported = versiontuple(firmware) >= versiontuple(MULTI_OUTPUT_SUPPORT)
self.overwinterSupported = versiontuple(firmware) >= versiontuple(OVERWINTER_SUPPORT)
self.canAlternateCoinVersions = (versiontuple(firmware) >= versiontuple(ALTERNATIVE_COIN_VERSION)
and firmwareInfo['specialVersion'] >= 0x20)
if not checkFirmware(firmwareInfo):
self.close()
raise UserFacingException(MSG_NEEDS_FW_UPDATE_GENERIC)
try:
self.dongleObject.getOperationMode()
except BTChipException as e:
if (e.sw == 0x6985):
self.close()
self.handler.get_setup()
# Acquire the new client on the next run
else:
raise e
if self.has_detached_pin_support(self.dongleObject) and not self.is_pin_validated(self.dongleObject):
assert self.handler, "no handler for client"
remaining_attempts = self.dongleObject.getVerifyPinRemainingAttempts()
if remaining_attempts != 1:
msg = "Enter your Ledger PIN - remaining attempts : " + str(remaining_attempts)
else:
msg = "Enter your Ledger PIN - WARNING : LAST ATTEMPT. If the PIN is not correct, the dongle will be wiped."
confirmed, p, pin = self.password_dialog(msg)
if not confirmed:
raise UserFacingException('Aborted by user - please unplug the dongle and plug it again before retrying')
pin = pin.encode()
self.dongleObject.verifyPin(pin)
if self.canAlternateCoinVersions:
self.dongleObject.setAlternateCoinVersions(constants.net.ADDRTYPE_P2PKH,
constants.net.ADDRTYPE_P2SH)
except BTChipException as e:
if (e.sw == 0x6faa):
raise UserFacingException("Dongle is temporarily locked - please unplug it and replug it again")
if ((e.sw & 0xFFF0) == 0x63c0):
raise UserFacingException("Invalid PIN - please unplug the dongle and plug it again before retrying")
if e.sw == 0x6f00 and e.message == 'Invalid channel':
# based on docs 0x6f00 might be a more general error, hence we also compare message to be sure
raise UserFacingException("Invalid channel.\n"
"Please make sure that 'Browser support' is disabled on your device.")
raise e
@runs_in_hwd_thread
def checkDevice(self):
if not self.preflightDone:
try:
self.perform_hw1_preflight()
except BTChipException as e:
if (e.sw == 0x6d00 or e.sw == 0x6700):
raise UserFacingException(_("Device not in Zcash mode")) from e
raise e
self.preflightDone = True
def password_dialog(self, msg=None):
response = self.handler.get_word(msg)
if response is None:
return False, None, None
return True, response, response
class Ledger_KeyStore(Hardware_KeyStore):
hw_type = 'ledger'
device = 'Ledger'
plugin: 'LedgerPlugin'
def __init__(self, d):
Hardware_KeyStore.__init__(self, d)
# Errors and other user interaction is done through the wallet's
# handler. The handler is per-window and preserved across
# device reconnects
self.force_watching_only = False
self.signing = False
self.cfg = d.get('cfg', {'mode': 0})
def dump(self):
obj = Hardware_KeyStore.dump(self)
obj['cfg'] = self.cfg
return obj
def get_client(self):
return self.plugin.get_client(self).dongleObject
def get_client_electrum(self) -> Optional[Ledger_Client]:
return self.plugin.get_client(self)
def give_error(self, message, clear_client = False):
_logger.info(message)
if not self.signing:
self.handler.show_error(message)
else:
self.signing = False
if clear_client:
self.client = None
raise UserFacingException(message)
def set_and_unset_signing(func):
"""Function decorator to set and unset self.signing."""
def wrapper(self, *args, **kwargs):
try:
self.signing = True
return func(self, *args, **kwargs)
finally:
self.signing = False
return wrapper
def decrypt_message(self, pubkey, message, password):
raise UserFacingException(_('Encryption and decryption are currently not supported for {}').format(self.device))
@runs_in_hwd_thread
@test_pin_unlocked
@set_and_unset_signing
def sign_message(self, sequence, message, password):
message = message.encode('utf8')
message_hash = hashlib.sha256(message).hexdigest().upper()
# prompt for the PIN before displaying the dialog if necessary
client_ledger = self.get_client()
client_electrum = self.get_client_electrum()
address_path = self.get_derivation_prefix()[2:] + "/%d/%d"%sequence
self.handler.show_message("Signing message ...\r\nMessage hash: "+message_hash)
try:
info = client_ledger.signMessagePrepare(address_path, message)
pin = ""
if info['confirmationNeeded']:
# do the authenticate dialog and get pin:
pin = self.handler.get_auth(info, client=client_electrum)
if not pin:
raise UserWarning(_('Cancelled by user'))
pin = str(pin).encode()
signature = client_ledger.signMessageSign(pin)
except BTChipException as e:
if e.sw == 0x6a80:
self.give_error("Unfortunately, this message cannot be signed by the Ledger wallet. Only alphanumerical messages shorter than 140 characters are supported. Please remove any extra characters (tab, carriage return) and retry.")
elif e.sw == 0x6985: # cancelled by user
return b''
elif e.sw == 0x6982:
raise # pin lock. decorator will catch it
else:
self.give_error(e, True)
except UserWarning:
self.handler.show_error(_('Cancelled by user'))
return b''
except Exception as e:
self.give_error(e, True)
finally:
self.handler.finished()
# Parse the ASN.1 signature
rLength = signature[3]
r = signature[4 : 4 + rLength]
sLength = signature[4 + rLength + 1]
s = signature[4 + rLength + 2:]
if rLength == 33:
r = r[1:]
if sLength == 33:
s = s[1:]
# And convert it
# Pad r and s points with 0x00 bytes when the point is small to get valid signature.
r_padded = bytes([0x00]) * (32 - len(r)) + r
s_padded = bytes([0x00]) * (32 - len(s)) + s
return bytes([27 + 4 + (signature[0] & 0x01)]) + r_padded + s_padded
@runs_in_hwd_thread
@test_pin_unlocked
@set_and_unset_signing
def sign_transaction(self, tx, password):
if tx.is_complete():
return
inputs = []
inputsPaths = []
chipInputs = []
redeemScripts = []
changePath = ""
output = None
p2shTransaction = False
pin = ""
client_ledger = self.get_client() # prompt for the PIN before displaying the dialog if necessary
client_electrum = self.get_client_electrum()
assert client_electrum
if tx.overwintered:
if not client_electrum.supports_overwinter():
self.give_error(MSG_NEEDS_FW_UPDATE_OVERWINTER)
# Fetch inputs of the transaction to sign
for txin in tx.inputs():
if txin.is_coinbase_input():
self.give_error("Coinbase not supported") # should never happen
if txin.script_type in ['p2sh']:
p2shTransaction = True
my_pubkey, full_path = self.find_my_pubkey_in_txinout(txin)
if not full_path:
self.give_error("No matching pubkey for sign_transaction") # should never happen
full_path = convert_bip32_intpath_to_strpath(full_path)[2:]
redeemScript = Transaction.get_preimage_script(txin)
txin_prev_tx = txin.utxo
if txin_prev_tx is None:
raise UserFacingException(_('Missing previous tx for legacy input.'))
txin_prev_tx_raw = txin_prev_tx.serialize() if txin_prev_tx else None
inputs.append([txin_prev_tx_raw,
txin.prevout.out_idx,
redeemScript,
txin.prevout.txid.hex(),
my_pubkey,
txin.nsequence,
txin.value_sats()])
inputsPaths.append(full_path)
# Sanity check
if p2shTransaction:
for txin in tx.inputs():
if txin.script_type != 'p2sh':
self.give_error("P2SH / regular input mixed in same transaction not supported") # should never happen
txOutput = var_int(len(tx.outputs()))
for o in tx.outputs():
txOutput += int_to_hex(o.value, 8)
script = o.scriptpubkey.hex()
txOutput += var_int(len(script)//2)
txOutput += script
txOutput = bfh(txOutput)
if not client_electrum.supports_multi_output():
if len(tx.outputs()) > 2:
self.give_error("Transaction with more than 2 outputs not supported")
for txout in tx.outputs():
if client_electrum.is_hw1() and txout.address and not is_b58_address(txout.address):
self.give_error(_("This {} device can only send to base58 addresses.").format(self.device))
if not txout.address:
if client_electrum.is_hw1():
self.give_error(_("Only address outputs are supported by {}").format(self.device))
# note: max_size based on https://github.com/LedgerHQ/ledger-app-btc/commit/3a78dee9c0484821df58975803e40d58fbfc2c38#diff-c61ccd96a6d8b54d48f54a3bc4dfa7e2R26
validate_op_return_output(txout, max_size=190)
# Output "change" detection
# - only one output and one change is authorized (for hw.1 and nano)
# - at most one output can bypass confirmation (~change) (for all)
if not p2shTransaction:
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for txout in tx.outputs():
if txout.is_mine and len(tx.outputs()) > 1 \
and not has_change:
# prioritise hiding outputs on the 'change' branch from user
# because no more than one change address allowed
if txout.is_change == any_output_on_change_branch:
my_pubkey, changePath = self.find_my_pubkey_in_txinout(txout)
assert changePath
changePath = convert_bip32_intpath_to_strpath(changePath)[2:]
has_change = True
else:
output = txout.address
else:
output = txout.address
if not self.get_client_electrum().canAlternateCoinVersions:
v, h = b58_address_to_hash160(output)
if v == constants.net.ADDRTYPE_P2PKH:
output = hash160_to_b58_address(h, 0)
self.handler.show_message(_("Confirm Transaction on your Ledger device..."))
try:
# Get trusted inputs from the original transactions
for utxo in inputs:
sequence = int_to_hex(utxo[5], 4)
if tx.overwintered:
txtmp = zcashTransaction(bfh(utxo[0]))
tmp = bfh(utxo[3])[::-1]
tmp += bfh(int_to_hex(utxo[1], 4))
tmp += txtmp.outputs[utxo[1]].amount
chipInputs.append({'value' : tmp, 'sequence' : sequence})
redeemScripts.append(bfh(utxo[2]))
elif (not p2shTransaction) or client_electrum.supports_multi_output():
txtmp = zcashTransaction(bfh(utxo[0]))
trustedInput = client_ledger.getTrustedInput(txtmp, utxo[1])
trustedInput['sequence'] = sequence
chipInputs.append(trustedInput)
if p2shTransaction:
redeemScripts.append(bfh(utxo[2]))
else:
redeemScripts.append(txtmp.outputs[utxo[1]].script)
else:
tmp = bfh(utxo[3])[::-1]
tmp += bfh(int_to_hex(utxo[1], 4))
chipInputs.append({'value' : tmp, 'sequence' : sequence})
redeemScripts.append(bfh(utxo[2]))
# Sign all inputs
firstTransaction = True
inputIndex = 0
rawTx = tx.serialize_to_network()
client_ledger.enableAlternate2fa(False)
if tx.overwintered:
self.get_client().startUntrustedTransaction(True, inputIndex, chipInputs,
redeemScripts[inputIndex],
version=tx.version,
overwintered=tx.overwintered)
# we don't set meaningful outputAddress, amount and fees
# as we only care about the alternateEncoding==True branch
if tx.overwintered:
inputSignature = client_ledger.untrustedHashSign('',
pin, lockTime=tx.locktime,
overwintered=tx.overwintered)
outputData = client_ledger.finalizeInput(b'', 0, 0, changePath, bfh(rawTx))
outputData['outputData'] = txOutput
if outputData['confirmationNeeded']:
outputData['address'] = output
self.handler.finished()
# do the authenticate dialog and get pin:
pin = self.handler.get_auth(outputData, client=client_electrum)
if not pin:
raise UserWarning()
self.handler.show_message(_("Confirmed. Signing Transaction..."))
while inputIndex < len(inputs):
singleInput = [chipInputs[inputIndex]]
client_ledger.startUntrustedTransaction(False, 0, singleInput,
redeemScripts[inputIndex],
version=tx.version,
overwintered=tx.overwintered)
inputSignature = client_ledger.untrustedHashSign(inputsPaths[inputIndex],
pin, lockTime=tx.locktime,
overwintered=tx.overwintered)
inputSignature[0] = 0x30 # force for 1.4.9+
my_pubkey = inputs[inputIndex][4]
tx.add_signature_to_txin(txin_idx=inputIndex,
signing_pubkey=my_pubkey.hex(),
sig=inputSignature.hex())
inputIndex = inputIndex + 1
else:
while inputIndex < len(inputs):
client_ledger.startUntrustedTransaction(firstTransaction, inputIndex,
chipInputs, redeemScripts[inputIndex], version=tx.version)
# we don't set meaningful outputAddress, amount and fees
# as we only care about the alternateEncoding==True branch
outputData = client_ledger.finalizeInput(b'', 0, 0, changePath, bfh(rawTx))
outputData['outputData'] = txOutput
if outputData['confirmationNeeded']:
outputData['address'] = output
self.handler.finished()
# do the authenticate dialog and get pin:
pin = self.handler.get_auth(outputData, client=client_electrum)
if not pin:
raise UserWarning()
self.handler.show_message(_("Confirmed. Signing Transaction..."))
else:
# Sign input with the provided PIN
inputSignature = client_ledger.untrustedHashSign(inputsPaths[inputIndex], pin, lockTime=tx.locktime)
inputSignature[0] = 0x30 # force for 1.4.9+
my_pubkey = inputs[inputIndex][4]
tx.add_signature_to_txin(txin_idx=inputIndex,
signing_pubkey=my_pubkey.hex(),
sig=inputSignature.hex())
inputIndex = inputIndex + 1
firstTransaction = False
except UserWarning:
self.handler.show_error(_('Cancelled by user'))
return
except BTChipException as e:
if e.sw in (0x6985, 0x6d00): # cancelled by user
return
elif e.sw == 0x6982:
raise # pin lock. decorator will catch it
else:
self.logger.exception('')
self.give_error(e, True)
except BaseException as e:
self.logger.exception('')
self.give_error(e, True)
finally:
self.handler.finished()
@runs_in_hwd_thread
@test_pin_unlocked
@set_and_unset_signing
def show_address(self, sequence, txin_type):
client = self.get_client()
address_path = self.get_derivation_prefix()[2:] + "/%d/%d"%sequence
self.handler.show_message(_("Showing address ..."))
try:
client.getWalletPublicKey(address_path, showOnScreen=True)
except BTChipException as e:
if e.sw == 0x6985: # cancelled by user
pass
elif e.sw == 0x6982:
raise # pin lock. decorator will catch it
elif e.sw == 0x6b00: # hw.1 raises this
self.handler.show_error('{}\n{}\n{}'.format(
_('Error showing address') + ':',
e,
_('Your device might not have support for this functionality.')))
else:
self.logger.exception('')
self.handler.show_error(e)
except BaseException as e:
self.logger.exception('')
self.handler.show_error(e)
finally:
self.handler.finished()
class LedgerPlugin(HW_PluginBase):
keystore_class = Ledger_KeyStore
minimum_library = (0, 1, 32)
client = None
DEVICE_IDS = [
(0x2581, 0x1807), # HW.1 legacy btchip
(0x2581, 0x2b7c), # HW.1 transitional production
(0x2581, 0x3b7c), # HW.1 ledger production
(0x2581, 0x4b7c), # HW.1 ledger test
(0x2c97, 0x0000), # Blue
(0x2c97, 0x0001), # Nano-S
(0x2c97, 0x0004), # Nano-X
(0x2c97, 0x0005), # RFU
(0x2c97, 0x0006), # RFU
(0x2c97, 0x0007), # RFU
(0x2c97, 0x0008), # RFU
(0x2c97, 0x0009), # RFU
(0x2c97, 0x000a) # RFU
]
VENDOR_IDS = (0x2c97,)
LEDGER_MODEL_IDS = {
0x10: "Ledger Nano S",
0x40: "Ledger Nano X",
}
SUPPORTED_XTYPES = ('standard', )
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
self.libraries_available = self.check_libraries_available()
if not self.libraries_available:
return
# to support legacy devices and legacy firmwares
self.device_manager().register_devices(self.DEVICE_IDS, plugin=self)
# to support modern firmware
self.device_manager().register_vendor_ids(self.VENDOR_IDS, plugin=self)
def get_library_version(self):
try:
import btchip
version = btchip.__version__
except ImportError:
raise
except:
version = "unknown"
if BTCHIP:
return version
else:
raise LibraryFoundButUnusable(library_version=version)
@classmethod
def _recognize_device(cls, product_key) -> Tuple[bool, Optional[str]]:
"""Returns (can_recognize, model_name) tuple."""
# legacy product_keys
if product_key in cls.DEVICE_IDS:
if product_key[0] == 0x2581:
return True, "Ledger HW.1"
if product_key == (0x2c97, 0x0000):
return True, "Ledger Blue"
if product_key == (0x2c97, 0x0001):
return True, "Ledger Nano S"
if product_key == (0x2c97, 0x0004):
return True, "Ledger Nano X"
return True, None
# modern product_keys
if product_key[0] == 0x2c97:
product_id = product_key[1]
model_id = product_id >> 8
if model_id in cls.LEDGER_MODEL_IDS:
model_name = cls.LEDGER_MODEL_IDS[model_id]
return True, model_name
# give up
return False, None
def can_recognize_device(self, device: Device) -> bool:
return self._recognize_device(device.product_key)[0]
@classmethod
def device_name_from_product_key(cls, product_key) -> Optional[str]:
return cls._recognize_device(product_key)[1]
def create_device_from_hid_enumeration(self, d, *, product_key):
device = super().create_device_from_hid_enumeration(d, product_key=product_key)
if not self.can_recognize_device(device):
return None
return device
@runs_in_hwd_thread
def get_btchip_device(self, device):
ledger = False
if device.product_key[0] == 0x2581 and device.product_key[1] == 0x3b7c:
ledger = True
if device.product_key[0] == 0x2581 and device.product_key[1] == 0x4b7c:
ledger = True
if device.product_key[0] == 0x2c97:
if device.interface_number == 0 or device.usage_page == 0xffa0:
ledger = True
else:
return None # non-compatible interface of a Nano S or Blue
dev = hid.device()
dev.open_path(device.path)
dev.set_nonblocking(True)
return HIDDongleHIDAPI(dev, ledger, BTCHIP_DEBUG)
@runs_in_hwd_thread
def create_client(self, device, handler):
if handler:
self.handler = handler
client = self.get_btchip_device(device)
if client is not None:
client = Ledger_Client(client, product_key=device.product_key, plugin=self)
return client
def setup_device(self, device_info, wizard, purpose):
device_id = device_info.device.id_
client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
wizard.run_task_without_blocking_gui(
task=lambda: client.get_xpub("m/0'", 'standard')) # TODO replace by direct derivation once Nano S > 1.1
return client
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
client.checkDevice()
xpub = client.get_xpub(derivation, xtype)
return xpub
@runs_in_hwd_thread
def get_client(self, keystore, force_pair=True, *,
devices=None, allow_user_interaction=True):
# All client interaction should not be in the main GUI thread
client = super().get_client(keystore, force_pair,
devices=devices,
allow_user_interaction=allow_user_interaction)
# returns the client for a given keystore. can use xpub
#if client:
# client.used()
if client is not None:
client.checkDevice()
return client
@runs_in_hwd_thread
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
if type(wallet) is not Standard_Wallet:
keystore.handler.show_error(_('This function is only available for standard wallets when using {}.').format(self.device))
return
sequence = wallet.get_address_index(address)
txin_type = wallet.get_txin_type(address)
keystore.show_address(sequence, txin_type)
|
the-stack_106_27818 | # -*- coding: utf-8 -*-
'''
Functions for working with files
'''
from __future__ import absolute_import
# Import Python libs
import contextlib
import errno
import logging
import os
import re
import shutil
import stat
import subprocess
import tempfile
import time
import urllib
# Import Salt libs
import salt.utils.validate.path
import salt.utils.platform
import salt.utils.stringutils
import salt.modules.selinux
from salt.exceptions import CommandExecutionError, FileLockError, MinionError
from salt.utils.decorators.jinja import jinja_filter
# Import 3rd-party libs
from salt.ext import six
from salt.ext.six.moves import range
try:
import fcntl
HAS_FCNTL = True
except ImportError:
# fcntl is not available on windows
HAS_FCNTL = False
log = logging.getLogger(__name__)
LOCAL_PROTOS = ('', 'file')
REMOTE_PROTOS = ('http', 'https', 'ftp', 'swift', 's3')
VALID_PROTOS = ('salt', 'file') + REMOTE_PROTOS
TEMPFILE_PREFIX = '__salt.tmp.'
HASHES = {
'sha512': 128,
'sha384': 96,
'sha256': 64,
'sha224': 56,
'sha1': 40,
'md5': 32,
}
HASHES_REVMAP = dict([(y, x) for x, y in six.iteritems(HASHES)])
def guess_archive_type(name):
'''
Guess an archive type (tar, zip, or rar) by its file extension
'''
name = name.lower()
for ending in ('tar', 'tar.gz', 'tgz',
'tar.bz2', 'tbz2', 'tbz',
'tar.xz', 'txz',
'tar.lzma', 'tlz'):
if name.endswith('.' + ending):
return 'tar'
for ending in ('zip', 'rar'):
if name.endswith('.' + ending):
return ending
return None
def mkstemp(*args, **kwargs):
'''
Helper function which does exactly what ``tempfile.mkstemp()`` does but
accepts another argument, ``close_fd``, which, by default, is true and closes
the fd before returning the file path. Something commonly done throughout
Salt's code.
'''
if 'prefix' not in kwargs:
kwargs['prefix'] = '__salt.tmp.'
close_fd = kwargs.pop('close_fd', True)
fd_, f_path = tempfile.mkstemp(*args, **kwargs)
if close_fd is False:
return fd_, f_path
os.close(fd_)
del fd_
return f_path
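# Illustrative sketch (editorial addition): creating a temporary file next to
# the eventual destination, writing it, then moving it into place with this
# module's ``rename`` helper. Paths are hypothetical examples.
#
#     tmp_path = mkstemp(prefix='config.', dir='/etc/myapp')
#     with fopen(tmp_path, 'w') as fh_:
#         fh_.write('contents')
#     rename(tmp_path, '/etc/myapp/config')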
def recursive_copy(source, dest):
'''
Recursively copy the source directory to the destination,
    leaving alone any files that the source does not explicitly overwrite.
(identical to cp -r on a unix machine)
'''
for root, _, files in os.walk(source):
path_from_source = root.replace(source, '').lstrip(os.sep)
target_directory = os.path.join(dest, path_from_source)
if not os.path.exists(target_directory):
os.makedirs(target_directory)
for name in files:
file_path_from_source = os.path.join(source, path_from_source, name)
target_path = os.path.join(target_directory, name)
shutil.copyfile(file_path_from_source, target_path)
def copyfile(source, dest, backup_mode='', cachedir=''):
'''
Copy files from a source to a destination in an atomic way, and if
specified cache the file.
'''
if not os.path.isfile(source):
raise IOError(
'[Errno 2] No such file or directory: {0}'.format(source)
)
if not os.path.isdir(os.path.dirname(dest)):
raise IOError(
'[Errno 2] No such file or directory: {0}'.format(dest)
)
bname = os.path.basename(dest)
dname = os.path.dirname(os.path.abspath(dest))
tgt = mkstemp(prefix=bname, dir=dname)
shutil.copyfile(source, tgt)
bkroot = ''
if cachedir:
bkroot = os.path.join(cachedir, 'file_backup')
if backup_mode == 'minion' or backup_mode == 'both' and bkroot:
if os.path.exists(dest):
backup_minion(dest, bkroot)
if backup_mode == 'master' or backup_mode == 'both' and bkroot:
# TODO, backup to master
pass
    # Get current file stats so they can be replicated after the new file is
# moved to the destination path.
fstat = None
if not salt.utils.platform.is_windows():
try:
fstat = os.stat(dest)
except OSError:
pass
shutil.move(tgt, dest)
if fstat is not None:
os.chown(dest, fstat.st_uid, fstat.st_gid)
os.chmod(dest, fstat.st_mode)
# If SELINUX is available run a restorecon on the file
rcon = salt.utils.path.which('restorecon')
if rcon:
policy = False
try:
policy = salt.modules.selinux.getenforce()
except (ImportError, CommandExecutionError):
pass
if policy == 'Enforcing':
with fopen(os.devnull, 'w') as dev_null:
cmd = [rcon, dest]
subprocess.call(cmd, stdout=dev_null, stderr=dev_null)
if os.path.isfile(tgt):
# The temp file failed to move
try:
os.remove(tgt)
except Exception:
pass
def rename(src, dst):
'''
On Windows, os.rename() will fail with a WindowsError exception if a file
exists at the destination path. This function checks for this error and if
found, it deletes the destination path first.
'''
try:
os.rename(src, dst)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
try:
os.remove(dst)
except OSError as exc:
if exc.errno != errno.ENOENT:
raise MinionError(
'Error: Unable to remove {0}: {1}'.format(
dst,
exc.strerror
)
)
os.rename(src, dst)
def process_read_exception(exc, path):
'''
Common code for raising exceptions when reading a file fails
'''
if exc.errno == errno.ENOENT:
raise CommandExecutionError('{0} does not exist'.format(path))
elif exc.errno == errno.EACCES:
raise CommandExecutionError(
'Permission denied reading from {0}'.format(path)
)
else:
raise CommandExecutionError(
'Error {0} encountered reading from {1}: {2}'.format(
exc.errno, path, exc.strerror
)
)
@contextlib.contextmanager
def wait_lock(path, lock_fn=None, timeout=5, sleep=0.1, time_start=None):
'''
Obtain a write lock. If one exists, wait for it to release first
'''
if not isinstance(path, six.string_types):
raise FileLockError('path must be a string')
if lock_fn is None:
lock_fn = path + '.w'
if time_start is None:
time_start = time.time()
obtained_lock = False
def _raise_error(msg, race=False):
'''
Raise a FileLockError
'''
raise FileLockError(msg, time_start=time_start)
try:
if os.path.exists(lock_fn) and not os.path.isfile(lock_fn):
_raise_error(
'lock_fn {0} exists and is not a file'.format(lock_fn)
)
open_flags = os.O_CREAT | os.O_EXCL | os.O_WRONLY
while time.time() - time_start < timeout:
try:
# Use os.open() to obtain filehandle so that we can force an
# exception if the file already exists. Concept found here:
# http://stackoverflow.com/a/10979569
fh_ = os.open(lock_fn, open_flags)
except (IOError, OSError) as exc:
if exc.errno != errno.EEXIST:
_raise_error(
'Error {0} encountered obtaining file lock {1}: {2}'
.format(exc.errno, lock_fn, exc.strerror)
)
log.trace(
'Lock file %s exists, sleeping %f seconds', lock_fn, sleep
)
time.sleep(sleep)
else:
# Write the lock file
with os.fdopen(fh_, 'w'):
pass
# Lock successfully acquired
log.trace('Write lock %s obtained', lock_fn)
obtained_lock = True
# Transfer control back to the code inside the with block
yield
# Exit the loop
break
else:
_raise_error(
'Timeout of {0} seconds exceeded waiting for lock_fn {1} '
'to be released'.format(timeout, lock_fn)
)
except FileLockError:
raise
except Exception as exc:
_raise_error(
'Error encountered obtaining file lock {0}: {1}'.format(
lock_fn,
exc
)
)
finally:
if obtained_lock:
os.remove(lock_fn)
log.trace('Write lock for %s (%s) released', path, lock_fn)
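# Illustrative sketch (editorial addition): serializing writers on a shared
# file with the write-lock context manager. The path is a hypothetical example.
#
#     cache_file = '/var/cache/salt/minion/some_cache'
#     with wait_lock(cache_file, timeout=10):
#         with fopen(cache_file, 'w') as fh_:
#             fh_.write('exclusive update')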
@contextlib.contextmanager
def set_umask(mask):
'''
Temporarily set the umask and restore once the contextmanager exits
'''
if salt.utils.platform.is_windows():
# Don't attempt on Windows
yield
else:
try:
orig_mask = os.umask(mask)
yield
finally:
os.umask(orig_mask)
def fopen(*args, **kwargs):
'''
Wrapper around open() built-in to set CLOEXEC on the fd.
This flag specifies that the file descriptor should be closed when an exec
function is invoked;
When a file descriptor is allocated (as with open or dup), this bit is
initially cleared on the new file descriptor, meaning that descriptor will
survive into the new program after exec.
NB! We still have small race condition between open and fcntl.
'''
binary = None
# ensure 'binary' mode is always used on Windows in Python 2
if ((six.PY2 and salt.utils.platform.is_windows() and 'binary' not in kwargs) or
kwargs.pop('binary', False)):
if len(args) > 1:
args = list(args)
if 'b' not in args[1]:
args[1] = args[1].replace('t', 'b')
if 'b' not in args[1]:
args[1] += 'b'
elif kwargs.get('mode'):
if 'b' not in kwargs['mode']:
kwargs['mode'] = kwargs['mode'].replace('t', 'b')
if 'b' not in kwargs['mode']:
kwargs['mode'] += 'b'
else:
# the default is to read
kwargs['mode'] = 'rb'
elif six.PY3 and 'encoding' not in kwargs:
# In Python 3, if text mode is used and the encoding
# is not specified, set the encoding to 'utf-8'.
binary = False
if len(args) > 1:
args = list(args)
if 'b' in args[1]:
binary = True
if kwargs.get('mode', None):
if 'b' in kwargs['mode']:
binary = True
if not binary:
kwargs['encoding'] = __salt_system_encoding__
if six.PY3 and not binary and not kwargs.get('newline', None):
kwargs['newline'] = ''
f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage
if is_fcntl_available():
# modify the file descriptor on systems with fcntl
# unix and unix-like systems only
try:
FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103
except AttributeError:
FD_CLOEXEC = 1 # pylint: disable=C0103
old_flags = fcntl.fcntl(f_handle.fileno(), fcntl.F_GETFD)
fcntl.fcntl(f_handle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)
return f_handle
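# Illustrative sketch (editorial addition): fopen is used like the open()
# built-in; the descriptor is additionally marked close-on-exec. The path is
# a hypothetical example.
#
#     with fopen('/etc/hosts') as fh_:
#         contents = fh_.read()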
@contextlib.contextmanager
def flopen(*args, **kwargs):
'''
Shortcut for fopen with lock and context manager.
'''
with fopen(*args, **kwargs) as f_handle:
try:
if is_fcntl_available(check_sunos=True):
fcntl.flock(f_handle.fileno(), fcntl.LOCK_SH)
yield f_handle
finally:
if is_fcntl_available(check_sunos=True):
fcntl.flock(f_handle.fileno(), fcntl.LOCK_UN)
@contextlib.contextmanager
def fpopen(*args, **kwargs):
'''
Shortcut for fopen with extra uid, gid, and mode options.
Supported optional Keyword Arguments:
mode
Explicit mode to set. Mode is anything os.chmod would accept
as input for mode. Works only on unix/unix-like systems.
uid
The uid to set, if not set, or it is None or -1 no changes are
made. Same applies if the path is already owned by this uid.
Must be int. Works only on unix/unix-like systems.
gid
The gid to set, if not set, or it is None or -1 no changes are
made. Same applies if the path is already owned by this gid.
Must be int. Works only on unix/unix-like systems.
'''
# Remove uid, gid and mode from kwargs if present
uid = kwargs.pop('uid', -1) # -1 means no change to current uid
gid = kwargs.pop('gid', -1) # -1 means no change to current gid
mode = kwargs.pop('mode', None)
with fopen(*args, **kwargs) as f_handle:
path = args[0]
d_stat = os.stat(path)
if hasattr(os, 'chown'):
# if uid and gid are both -1 then go ahead with
# no changes at all
if (d_stat.st_uid != uid or d_stat.st_gid != gid) and \
[i for i in (uid, gid) if i != -1]:
os.chown(path, uid, gid)
if mode is not None:
mode_part = stat.S_IMODE(d_stat.st_mode)
if mode_part != mode:
os.chmod(path, (d_stat.st_mode ^ mode_part) | mode)
yield f_handle
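# Illustrative sketch (editorial addition): writing a root-owned file with a
# restrictive mode in one step. The path and ownership values are hypothetical.
#
#     with fpopen('/etc/myapp/secret.conf', 'w', uid=0, gid=0, mode=0o600) as fh_:
#         fh_.write('token=example')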
def safe_walk(top, topdown=True, onerror=None, followlinks=True, _seen=None):
'''
A clone of the python os.walk function with some checks for recursive
symlinks. Unlike os.walk this follows symlinks by default.
'''
if _seen is None:
_seen = set()
# We may not have read permission for top, in which case we can't
# get a list of the files the directory contains. os.path.walk
# always suppressed the exception then, rather than blow up for a
# minor reason when (say) a thousand readable directories are still
# left to visit. That logic is copied here.
try:
# Note that listdir and error are globals in this module due
# to earlier import-*.
names = os.listdir(top)
except os.error as err:
if onerror is not None:
onerror(err)
return
if followlinks:
status = os.stat(top)
# st_ino is always 0 on some filesystems (FAT, NTFS); ignore them
if status.st_ino != 0:
node = (status.st_dev, status.st_ino)
if node in _seen:
return
_seen.add(node)
dirs, nondirs = [], []
for name in names:
full_path = os.path.join(top, name)
if os.path.isdir(full_path):
dirs.append(name)
else:
nondirs.append(name)
if topdown:
yield top, dirs, nondirs
for name in dirs:
new_path = os.path.join(top, name)
if followlinks or not os.path.islink(new_path):
for x in safe_walk(new_path, topdown, onerror, followlinks, _seen):
yield x
if not topdown:
yield top, dirs, nondirs
def safe_rm(tgt):
'''
Safely remove a file
'''
try:
os.remove(tgt)
except (IOError, OSError):
pass
def rm_rf(path):
'''
Platform-independent recursive delete. Includes code from
http://stackoverflow.com/a/2656405
'''
def _onerror(func, path, exc_info):
'''
Error handler for `shutil.rmtree`.
If the error is due to an access error (read only file)
it attempts to add write permission and then retries.
If the error is for another reason it re-raises the error.
Usage : `shutil.rmtree(path, onerror=onerror)`
'''
if salt.utils.platform.is_windows() and not os.access(path, os.W_OK):
# Is the error an access error ?
os.chmod(path, stat.S_IWUSR)
func(path)
else:
raise # pylint: disable=E0704
if os.path.islink(path) or not os.path.isdir(path):
os.remove(path)
else:
shutil.rmtree(path, onerror=_onerror)
@jinja_filter('is_empty')
def is_empty(filename):
'''
Is a file empty?
'''
try:
return os.stat(filename).st_size == 0
except OSError:
# Non-existent file or permission denied to the parent dir
return False
def is_fcntl_available(check_sunos=False):
'''
Simple function to check if the ``fcntl`` module is available or not.
If ``check_sunos`` is passed as ``True`` an additional check to see if host is
SunOS is also made. For additional information see: http://goo.gl/159FF8
'''
if check_sunos and salt.utils.platform.is_sunos():
return False
return HAS_FCNTL
def safe_filename_leaf(file_basename):
'''
Input the basename of a file, without the directory tree, and returns a safe name to use
i.e. only the required characters are converted by urllib.quote
If the input is a PY2 String, output a PY2 String. If input is Unicode output Unicode.
    For consistency all platforms are treated the same. Hard coded to utf8 as it's ASCII compatible
windows is \\ / : * ? " < > | posix is /
.. versionadded:: 2017.7.2
:codeauthor: Damon Atkins <https://github.com/damon-atkins>
'''
def _replace(re_obj):
return urllib.quote(re_obj.group(0), safe='')
if not isinstance(file_basename, six.text_type):
# the following string is not prefixed with u
return re.sub('[\\\\:/*?"<>|]',
_replace,
six.text_type(file_basename, 'utf8').encode('ascii', 'backslashreplace'))
# the following string is prefixed with u
return re.sub('[\\\\:/*?"<>|]', _replace, file_basename, flags=re.UNICODE)
def safe_filepath(file_path_name, dir_sep=None):
'''
Input the full path and filename, splits on directory separator and calls safe_filename_leaf for
    each part of the path. dir_sep allows the coder to force the directory separator to a particular character
.. versionadded:: 2017.7.2
:codeauthor: Damon Atkins <https://github.com/damon-atkins>
'''
if not dir_sep:
dir_sep = os.sep
# Normally if file_path_name or dir_sep is Unicode then the output will be Unicode
    # This code ensures the output type is the same as file_path_name
if not isinstance(file_path_name, six.text_type) and isinstance(dir_sep, six.text_type):
dir_sep = dir_sep.encode('ascii') # This should not be executed under PY3
# splitdrive only set drive on windows platform
(drive, path) = os.path.splitdrive(file_path_name)
path = dir_sep.join([safe_filename_leaf(file_section) for file_section in path.rsplit(dir_sep)])
if drive:
path = dir_sep.join([drive, path])
return path
@jinja_filter('is_text_file')
def is_text(fp_, blocksize=512):
'''
Uses heuristics to guess whether the given file is text or binary,
by reading a single block of bytes from the file.
If more than 30% of the chars in the block are non-text, or there
are NUL ('\x00') bytes in the block, assume this is a binary file.
'''
int2byte = (lambda x: bytes((x,))) if six.PY3 else chr
text_characters = (
b''.join(int2byte(i) for i in range(32, 127)) +
b'\n\r\t\f\b')
try:
block = fp_.read(blocksize)
except AttributeError:
# This wasn't an open filehandle, so treat it as a file path and try to
# open the file
try:
with fopen(fp_, 'rb') as fp2_:
block = fp2_.read(blocksize)
except IOError:
# Unable to open file, bail out and return false
return False
if b'\x00' in block:
# Files with null bytes are binary
return False
elif not block:
# An empty file is considered a valid text file
return True
try:
block.decode('utf-8')
return True
except UnicodeDecodeError:
pass
nontext = block.translate(None, text_characters)
return float(len(nontext)) / len(block) <= 0.30
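# Illustrative sketch (editorial addition): the heuristic accepts either an
# open file handle or a path. Paths are hypothetical examples.
#
#     is_text('/etc/hosts')    # -> True for a plain text file
#     is_text('/bin/ls')       # -> False for an ELF binary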
@jinja_filter('is_bin_file')
def is_binary(path):
'''
Detects if the file is a binary, returns bool. Returns True if the file is
a bin, False if the file is not and None if the file is not available.
'''
if not os.path.isfile(path):
return False
try:
with fopen(path, 'rb') as fp_:
try:
data = fp_.read(2048)
if six.PY3:
data = data.decode(__salt_system_encoding__)
return salt.utils.stringutils.is_binary(data)
except UnicodeDecodeError:
return True
except os.error:
return False
def remove(path):
'''
Runs os.remove(path) and suppresses the OSError if the file doesn't exist
'''
try:
os.remove(path)
except OSError as exc:
if exc.errno != errno.ENOENT:
raise
@jinja_filter('list_files')
def list_files(directory):
'''
Return a list of all files found under directory (and its subdirectories)
'''
ret = set()
ret.add(directory)
for root, dirs, files in safe_walk(directory):
for name in files:
ret.add(os.path.join(root, name))
for name in dirs:
ret.add(os.path.join(root, name))
return list(ret)
def st_mode_to_octal(mode):
'''
Convert the st_mode value from a stat(2) call (as returned from os.stat())
to an octal mode.
'''
try:
return oct(mode)[-4:]
except (TypeError, IndexError):
return ''
def normalize_mode(mode):
'''
Return a mode value, normalized to a string and containing a leading zero
if it does not have one.
Allow "keep" as a valid mode (used by file state/module to preserve mode
from the Salt fileserver in file states).
'''
if mode is None:
return None
if not isinstance(mode, six.string_types):
mode = str(mode)
if six.PY3:
mode = mode.replace('0o', '0')
    # Strip any quotes and initial zeroes, then zero-pad it up to 4.
    # This ensures that something like '00644' is normalized to '0644'
return mode.strip('"').strip('\'').lstrip('0').zfill(4)
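# Illustrative examples (editorial addition) of the normalization behaviour:
#
#     normalize_mode(644)       # -> '0644'
#     normalize_mode('0o644')   # -> '0644' (Python 3 octal repr)
#     normalize_mode('"0644"')  # -> '0644' (quotes stripped)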
def human_size_to_bytes(human_size):
'''
Convert human-readable units to bytes
'''
size_exp_map = {'K': 1, 'M': 2, 'G': 3, 'T': 4, 'P': 5}
human_size_str = str(human_size)
match = re.match(r'^(\d+)([KMGTP])?$', human_size_str)
if not match:
raise ValueError(
'Size must be all digits, with an optional unit type '
'(K, M, G, T, or P)'
)
size_num = int(match.group(1))
unit_multiplier = 1024 ** size_exp_map.get(match.group(2), 0)
return size_num * unit_multiplier
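# Illustrative examples (editorial addition):
#
#     human_size_to_bytes('1K')    # -> 1024
#     human_size_to_bytes('10M')   # -> 10485760
#     human_size_to_bytes(2048)    # -> 2048 (no unit suffix, taken as bytes)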
def backup_minion(path, bkroot):
'''
Backup a file on the minion
'''
dname, bname = os.path.split(path)
if salt.utils.platform.is_windows():
src_dir = dname.replace(':', '_')
else:
src_dir = dname[1:]
if not salt.utils.platform.is_windows():
fstat = os.stat(path)
msecs = str(int(time.time() * 1000000))[-6:]
if salt.utils.platform.is_windows():
# ':' is an illegal filesystem path character on Windows
stamp = time.strftime('%a_%b_%d_%H-%M-%S_%Y')
else:
stamp = time.strftime('%a_%b_%d_%H:%M:%S_%Y')
stamp = '{0}{1}_{2}'.format(stamp[:-4], msecs, stamp[-4:])
bkpath = os.path.join(bkroot,
src_dir,
'{0}_{1}'.format(bname, stamp))
if not os.path.isdir(os.path.dirname(bkpath)):
os.makedirs(os.path.dirname(bkpath))
shutil.copyfile(path, bkpath)
if not salt.utils.platform.is_windows():
os.chown(bkpath, fstat.st_uid, fstat.st_gid)
os.chmod(bkpath, fstat.st_mode)
|
the-stack_106_27821 | import distutils.cmd
import os
import subprocess
from setuptools import find_packages, setup
class BaseCommand(distutils.cmd.Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def create_command(text, commands):
"""Creates a custom setup.py command."""
class CustomCommand(BaseCommand):
description = text
def run(self):
for cmd in commands:
subprocess.check_call(cmd)
return CustomCommand
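# Illustrative sketch (editorial addition): a command produced by
# create_command is registered in ``cmdclass`` below and run as
# ``python setup.py <name>``. The command below is a hypothetical example.
#
#     clean_build = create_command(
#         "Removes build artifacts", [["rm", "-rf", "build", "dist"]]
#     )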
with open(
os.path.join(os.path.dirname(__file__), "README.rst"), encoding="utf-8"
) as readme:
README = readme.read().split("h1>\n\n", 2)[1]
setup(
name="django-postgres-extra",
version="1.23a1",
packages=find_packages(),
include_package_data=True,
license="MIT License",
description="Bringing all of PostgreSQL's awesomeness to Django.",
long_description=README,
long_description_content_type="text/x-rst",
url="https://github.com/SectorLabs/django-postgres-extra",
author="Sector Labs",
author_email="[email protected]",
keywords=["django", "postgres", "extra", "hstore", "ltree"],
classifiers=[
"Environment :: Web Environment",
"Framework :: Django",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3.5",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
],
cmdclass={
"lint": create_command(
"Lints the code",
[
["flake8", "setup.py", "psqlextra", "tests"],
["pycodestyle", "setup.py", "psqlextra", "tests"],
],
),
"lint_fix": create_command(
"Lints the code",
[
[
"autoflake",
"--remove-all-unused-imports",
"-i",
"-r",
"setup.py",
"psqlextra",
"tests",
],
["autopep8", "-i", "-r", "setup.py", "psqlextra", "tests"],
],
),
"format": create_command(
"Formats the code", [["black", "setup.py", "psqlextra", "tests"]]
),
"format_verify": create_command(
"Checks if the code is auto-formatted",
[["black", "--check", "setup.py", "psqlextra", "tests"]],
),
"sort_imports": create_command(
"Automatically sorts imports",
[
["isort", "setup.py"],
["isort", "-rc", "psqlextra"],
["isort", "-rc", "tests"],
],
),
"sort_imports_verify": create_command(
"Verifies all imports are properly sorted.",
[
["isort", "-c", "setup.py"],
["isort", "-c", "-rc", "psqlextra"],
["isort", "-c", "-rc", "tests"],
],
),
"fix": create_command(
"Automatically format code and fix linting errors",
[
["python", "setup.py", "format"],
["python", "setup.py", "sort_imports"],
["python", "setup.py", "lint_fix"],
],
),
"verify": create_command(
"Verifies whether the code is auto-formatted and has no linting errors",
            [
                ["python", "setup.py", "format_verify"],
                ["python", "setup.py", "sort_imports_verify"],
                ["python", "setup.py", "lint"],
            ],
),
},
)
|
the-stack_106_27822 |
import random
import numpy
import math
from solution import solution
import time
def HHO(objf, lb, ub, dim, SearchAgents_no, Max_iter):
# dim=30
# SearchAgents_no=50
# lb=-100
# ub=100
# Max_iter=500
# initialize the location and Energy of the rabbit
Rabbit_Location = numpy.zeros(dim)
Rabbit_Energy = float("inf") # change this to -inf for maximization problems
if not isinstance(lb, list):
lb = [lb for _ in range(dim)]
ub = [ub for _ in range(dim)]
lb = numpy.asarray(lb)
ub = numpy.asarray(ub)
# Initialize the locations of Harris' hawks
X = numpy.asarray(
[x * (ub - lb) + lb for x in numpy.random.uniform(0, 1, (SearchAgents_no, dim))]
)
# Initialize convergence
convergence_curve = numpy.zeros(Max_iter)
############################
s = solution()
print('HHO is now tackling "' + objf.__name__ + '"')
timerStart = time.time()
s.startTime = time.strftime("%Y-%m-%d-%H-%M-%S")
############################
t = 0 # Loop counter
# Main loop
while t < Max_iter:
for i in range(0, SearchAgents_no):
# Check boundries
X[i, :] = numpy.clip(X[i, :], lb, ub)
# fitness of locations
fitness = objf(X[i, :])
# Update the location of Rabbit
if fitness < Rabbit_Energy: # Change this to > for maximization problem
Rabbit_Energy = fitness
Rabbit_Location = X[i, :].copy()
        E1 = 2 * (1 - (t / Max_iter)) # factor to show the decreasing energy of rabbit
# Update the location of Harris' hawks
for i in range(0, SearchAgents_no):
E0 = 2 * random.random() - 1 # -1<E0<1
Escaping_Energy = E1 * (
E0
) # escaping energy of rabbit Eq. (3) in the paper
# -------- Exploration phase Eq. (1) in paper -------------------
if abs(Escaping_Energy) >= 1:
                # Harris' hawks perch randomly based on two strategies:
q = random.random()
rand_Hawk_index = math.floor(SearchAgents_no * random.random())
X_rand = X[rand_Hawk_index, :]
if q < 0.5:
# perch based on other family members
X[i, :] = X_rand - random.random() * abs(
X_rand - 2 * random.random() * X[i, :]
)
elif q >= 0.5:
# perch on a random tall tree (random site inside group's home range)
X[i, :] = (Rabbit_Location - X.mean(0)) - random.random() * (
(ub - lb) * random.random() + lb
)
# -------- Exploitation phase -------------------
elif abs(Escaping_Energy) < 1:
# Attacking the rabbit using 4 strategies regarding the behavior of the rabbit
# phase 1: ----- surprise pounce (seven kills) ----------
# surprise pounce (seven kills): multiple, short rapid dives by different hawks
                r = random.random()  # probability of each event
if (
r >= 0.5 and abs(Escaping_Energy) < 0.5
): # Hard besiege Eq. (6) in paper
X[i, :] = (Rabbit_Location) - Escaping_Energy * abs(
Rabbit_Location - X[i, :]
)
if (
r >= 0.5 and abs(Escaping_Energy) >= 0.5
): # Soft besiege Eq. (4) in paper
Jump_strength = 2 * (
1 - random.random()
) # random jump strength of the rabbit
X[i, :] = (Rabbit_Location - X[i, :]) - Escaping_Energy * abs(
Jump_strength * Rabbit_Location - X[i, :]
)
# phase 2: --------performing team rapid dives (leapfrog movements)----------
if (
r < 0.5 and abs(Escaping_Energy) >= 0.5
): # Soft besiege Eq. (10) in paper
# rabbit try to escape by many zigzag deceptive motions
Jump_strength = 2 * (1 - random.random())
X1 = Rabbit_Location - Escaping_Energy * abs(
Jump_strength * Rabbit_Location - X[i, :]
)
X1 = numpy.clip(X1, lb, ub)
if objf(X1) < fitness: # improved move?
X[i, :] = X1.copy()
else: # hawks perform levy-based short rapid dives around the rabbit
X2 = (
Rabbit_Location
- Escaping_Energy
* abs(Jump_strength * Rabbit_Location - X[i, :])
+ numpy.multiply(numpy.random.randn(dim), Levy(dim))
)
X2 = numpy.clip(X2, lb, ub)
if objf(X2) < fitness:
X[i, :] = X2.copy()
if (
r < 0.5 and abs(Escaping_Energy) < 0.5
): # Hard besiege Eq. (11) in paper
Jump_strength = 2 * (1 - random.random())
X1 = Rabbit_Location - Escaping_Energy * abs(
Jump_strength * Rabbit_Location - X.mean(0)
)
X1 = numpy.clip(X1, lb, ub)
if objf(X1) < fitness: # improved move?
X[i, :] = X1.copy()
else: # Perform levy-based short rapid dives around the rabbit
X2 = (
Rabbit_Location
- Escaping_Energy
* abs(Jump_strength * Rabbit_Location - X.mean(0))
+ numpy.multiply(numpy.random.randn(dim), Levy(dim))
)
X2 = numpy.clip(X2, lb, ub)
if objf(X2) < fitness:
X[i, :] = X2.copy()
convergence_curve[t] = Rabbit_Energy
if t % 1 == 0:
print(
[
"At iteration "
+ str(t)
+ " the best fitness is "
+ str(Rabbit_Energy)
]
)
t = t + 1
timerEnd = time.time()
s.endTime = time.strftime("%Y-%m-%d-%H-%M-%S")
s.executionTime = timerEnd - timerStart
s.convergence = convergence_curve
s.optimizer = "HHO"
s.objfname = objf.__name__
s.best = Rabbit_Energy
s.bestIndividual = Rabbit_Location
return s
def Levy(dim):
beta = 1.5
sigma = (
math.gamma(1 + beta)
* math.sin(math.pi * beta / 2)
/ (math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2))
) ** (1 / beta)
u = 0.01 * numpy.random.randn(dim) * sigma
v = numpy.random.randn(dim)
zz = numpy.power(numpy.absolute(v), (1 / beta))
step = numpy.divide(u, zz)
return step
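# Minimal usage sketch (illustrative): the sphere objective and the parameter
# values below are arbitrary demo choices; running this still requires the
# repo's `solution` module imported at the top of this file.
if __name__ == "__main__":
    def sphere(x):
        # simple convex test objective: sum of squares
        return float(numpy.sum(x ** 2))
    result = HHO(sphere, lb=-10, ub=10, dim=5, SearchAgents_no=20, Max_iter=50)
    print("Best fitness:", result.best)
    print("Best solution:", result.bestIndividual)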
|
the-stack_106_27823 | import torch
import torch.nn as nn
import torch.nn.functional as F
from layers import Flatten, Concatenate
from torch.nn.utils import spectral_norm
# https://github.com/heykeetae/Self-Attention-GAN/blob/master/sagan_models.py
def snconv2d(in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True):
return spectral_norm(nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size,
stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias))
class Self_Attn(nn.Module):
""" Self attention Layer"""
def __init__(self, in_channels):
super(Self_Attn, self).__init__()
self.in_channels = in_channels
self.snconv1x1_theta = snconv2d(in_channels=in_channels, out_channels=in_channels//8, kernel_size=1, stride=1, padding=0)
self.snconv1x1_phi = snconv2d(in_channels=in_channels, out_channels=in_channels//8, kernel_size=1, stride=1, padding=0)
self.snconv1x1_g = snconv2d(in_channels=in_channels, out_channels=in_channels//2, kernel_size=1, stride=1, padding=0)
self.snconv1x1_attn = snconv2d(in_channels=in_channels//2, out_channels=in_channels, kernel_size=1, stride=1, padding=0)
self.maxpool = nn.MaxPool2d(2, stride=2, padding=0)
self.softmax = nn.Softmax(dim=-1)
self.sigma = nn.Parameter(torch.zeros(1))
def forward(self, x):
"""
inputs :
x : input feature maps(B X C X W X H)
returns :
out : self attention value + input feature
attention: B X N X N (N is Width*Height)
"""
_, ch, h, w = x.size()
# Theta path
theta = self.snconv1x1_theta(x)
theta = theta.view(-1, ch//8, h*w)
# Phi path
phi = self.snconv1x1_phi(x)
phi = self.maxpool(phi)
phi = phi.view(-1, ch//8, h*w//4)
# Attn map
attn = torch.bmm(theta.permute(0, 2, 1), phi)
attn = self.softmax(attn)
# g path
g = self.snconv1x1_g(x)
g = self.maxpool(g)
g = g.view(-1, ch//2, h*w//4)
# Attn_g
attn_g = torch.bmm(g, attn.permute(0, 2, 1))
attn_g = attn_g.view(-1, ch//2, h, w)
attn_g = self.snconv1x1_attn(attn_g)
# Out
out = x + self.sigma*attn_g
return out
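# Shape sketch (illustrative): Self_Attn preserves its input shape, so a
# (B, C, H, W) feature map comes back unchanged in size; the numbers below are
# arbitrary demo values.
#
#   attn = Self_Attn(in_channels=256)
#   y = attn(torch.randn(1, 256, 32, 32))   # -> torch.Size([1, 256, 32, 32])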
class CompletionNetwork(nn.Module):
def __init__(self):
super(CompletionNetwork, self).__init__()
        # input_shape: (None, 6, img_h, img_w)
self.conv1 = nn.Conv2d(6, 64, kernel_size=5, stride=1, padding=2)
self.bn1 = nn.BatchNorm2d(64)
self.act1 = nn.ReLU()
# input_shape: (None, 64, img_h, img_w)
self.conv2 = nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1)
self.bn2 = nn.BatchNorm2d(128)
self.act2 = nn.ReLU()
# input_shape: (None, 128, img_h//2, img_w//2)
self.conv3 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1)
self.bn3 = nn.BatchNorm2d(128)
self.act3 = nn.ReLU()
# input_shape: (None, 128, img_h//2, img_w//2)
self.conv4 = nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1)
self.bn4 = nn.BatchNorm2d(256)
self.act4 = nn.ReLU()
# input_shape: (None, 256, img_h//4, img_w//4)
self.conv5 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
self.bn5 = nn.BatchNorm2d(256)
self.act5 = nn.ReLU()
# input_shape: (None, 256, img_h//4, img_w//4)
self.conv6 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
self.bn6 = nn.BatchNorm2d(256)
self.act6 = nn.ReLU()
# self
self.self_attn1 = Self_Attn(256)
# input_shape: (None, 256, img_h//4, img_w//4)
self.conv7 = nn.Conv2d(256, 256, kernel_size=3, stride=1, dilation=2, padding=2)
self.bn7 = nn.BatchNorm2d(256)
self.act7 = nn.ReLU()
# input_shape: (None, 256, img_h//4, img_w//4)
self.conv8 = nn.Conv2d(256, 256, kernel_size=3, stride=1, dilation=4, padding=4)
self.bn8 = nn.BatchNorm2d(256)
self.act8 = nn.ReLU()
# self
self.self_attn2 = Self_Attn(256)
# input_shape: (None, 256, img_h//4, img_w//4)
self.conv9 = nn.Conv2d(256, 256, kernel_size=3, stride=1, dilation=8, padding=8)
self.bn9 = nn.BatchNorm2d(256)
self.act9 = nn.ReLU()
# input_shape: (None, 256, img_h//4, img_w//4)
self.conv10 = nn.Conv2d(256, 256, kernel_size=3, stride=1, dilation=16, padding=16)
self.bn10 = nn.BatchNorm2d(256)
self.act10 = nn.ReLU()
# self
self.self_attn3 = Self_Attn(256)
# input_shape: (None, 256, img_h//4, img_w//4)
self.conv11 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
self.bn11 = nn.BatchNorm2d(256)
self.act11 = nn.ReLU()
# input_shape: (None, 256, img_h//4, img_w//4)
self.conv12 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
self.bn12 = nn.BatchNorm2d(256)
self.act12 = nn.ReLU()
# input_shape: (None, 256, img_h//4, img_w//4)
self.deconv13 = nn.ConvTranspose2d(256, 128, kernel_size=4, stride=2, padding=1)
self.bn13 = nn.BatchNorm2d(128)
self.act13 = nn.ReLU()
# input_shape: (None, 128, img_h//2, img_w//2)
self.conv14 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1)
self.bn14 = nn.BatchNorm2d(128)
self.act14 = nn.ReLU()
# input_shape: (None, 128, img_h//2, img_w//2)
self.deconv15 = nn.ConvTranspose2d(128, 64, kernel_size=4, stride=2, padding=1)
self.bn15 = nn.BatchNorm2d(64)
self.act15 = nn.ReLU()
# input_shape: (None, 64, img_h, img_w)
self.conv16 = nn.Conv2d(64, 32, kernel_size=3, stride=1, padding=1)
self.bn16 = nn.BatchNorm2d(32)
self.act16 = nn.ReLU()
# input_shape: (None, 32, img_h, img_w)
self.conv17 = nn.Conv2d(32, 3, kernel_size=3, stride=1, padding=1)
self.act17 = nn.Sigmoid()
        # output_shape: (None, 3, img_h, img_w)
def forward(self, x):
x = self.bn1(self.act1(self.conv1(x)))
x = self.bn2(self.act2(self.conv2(x)))
x = self.bn3(self.act3(self.conv3(x)))
x = self.bn4(self.act4(self.conv4(x)))
x = self.bn5(self.act5(self.conv5(x)))
x = self.bn6(self.act6(self.conv6(x)))
x = self.self_attn1(x)
x = self.bn7(self.act7(self.conv7(x)))
x = self.bn8(self.act8(self.conv8(x)))
x = self.self_attn2(x)
x = self.bn9(self.act9(self.conv9(x)))
x = self.bn10(self.act10(self.conv10(x)))
x = self.self_attn3(x)
x = self.bn11(self.act11(self.conv11(x)))
x = self.bn12(self.act12(self.conv12(x)))
x = self.bn13(self.act13(self.deconv13(x)))
x = self.bn14(self.act14(self.conv14(x)))
x = self.bn15(self.act15(self.deconv15(x)))
x = self.bn16(self.act16(self.conv16(x)))
x = self.act17(self.conv17(x))
return x
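# Usage sketch (illustrative): conv1 above expects a 6-channel input; the spatial
# size is an arbitrary demo choice and should be divisible by 4 so the stride-2
# down/up path restores the original resolution.
#
#   net = CompletionNetwork()
#   out = net(torch.randn(1, 6, 160, 160))   # -> torch.Size([1, 3, 160, 160])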
class LocalDiscriminator(nn.Module):
def __init__(self, input_shape):
super(LocalDiscriminator, self).__init__()
self.input_shape = input_shape
self.output_shape = (1024,)
self.img_c = input_shape[0]
self.img_h = input_shape[1]
self.img_w = input_shape[2]
# input_shape: (None, img_c, img_h, img_w)
self.conv1 = nn.Conv2d(self.img_c, 64, kernel_size=5, stride=2, padding=2)
self.bn1 = nn.BatchNorm2d(64)
self.act1 = nn.ReLU()
# input_shape: (None, 64, img_h//2, img_w//2)
self.conv2 = nn.Conv2d(64, 128, kernel_size=5, stride=2, padding=2)
self.bn2 = nn.BatchNorm2d(128)
self.act2 = nn.ReLU()
# input_shape: (None, 128, img_h//4, img_w//4)
self.conv3 = nn.Conv2d(128, 256, kernel_size=5, stride=2, padding=2)
self.bn3 = nn.BatchNorm2d(256)
self.act3 = nn.ReLU()
# input_shape: (None, 256, img_h//8, img_w//8)
self.conv4 = nn.Conv2d(256, 512, kernel_size=5, stride=2, padding=2)
self.bn4 = nn.BatchNorm2d(512)
self.act4 = nn.ReLU()
# input_shape: (None, 512, img_h//16, img_w//16)
self.conv5 = nn.Conv2d(512, 512, kernel_size=5, stride=2, padding=2)
self.bn5 = nn.BatchNorm2d(512)
self.act5 = nn.ReLU()
# input_shape: (None, 512, img_h//32, img_w//32)
in_features = 512 * (self.img_h//32) * (self.img_w//32)
self.flatten6 = Flatten()
# input_shape: (None, 512 * img_h//32 * img_w//32)
self.linear6 = nn.Linear(in_features, 1024)
self.act6 = nn.ReLU()
# output_shape: (None, 1024)
def forward(self, x):
x = self.bn1(self.act1(self.conv1(x)))
x = self.bn2(self.act2(self.conv2(x)))
x = self.bn3(self.act3(self.conv3(x)))
x = self.bn4(self.act4(self.conv4(x)))
x = self.bn5(self.act5(self.conv5(x)))
x = self.act6(self.linear6(self.flatten6(x)))
return x
class GlobalDiscriminator(nn.Module):
def __init__(self, input_shape, arc='celeba'):
super(GlobalDiscriminator, self).__init__()
self.arc = arc
self.input_shape = input_shape
self.output_shape = (1024,)
self.img_c = input_shape[0]
self.img_h = input_shape[1]
self.img_w = input_shape[2]
# input_shape: (None, img_c, img_h, img_w)
self.conv1 = nn.Conv2d(self.img_c, 64, kernel_size=5, stride=2, padding=2)
self.bn1 = nn.BatchNorm2d(64)
self.act1 = nn.ReLU()
# input_shape: (None, 64, img_h//2, img_w//2)
self.conv2 = nn.Conv2d(64, 128, kernel_size=5, stride=2, padding=2)
self.bn2 = nn.BatchNorm2d(128)
self.act2 = nn.ReLU()
# input_shape: (None, 128, img_h//4, img_w//4)
self.conv3 = nn.Conv2d(128, 256, kernel_size=5, stride=2, padding=2)
self.bn3 = nn.BatchNorm2d(256)
self.act3 = nn.ReLU()
# input_shape: (None, 256, img_h//8, img_w//8)
self.conv4 = nn.Conv2d(256, 512, kernel_size=5, stride=2, padding=2)
self.bn4 = nn.BatchNorm2d(512)
self.act4 = nn.ReLU()
# input_shape: (None, 512, img_h//16, img_w//16)
self.conv5 = nn.Conv2d(512, 512, kernel_size=5, stride=2, padding=2)
self.bn5 = nn.BatchNorm2d(512)
self.act5 = nn.ReLU()
# input_shape: (None, 512, img_h//32, img_w//32)
if arc == 'celeba':
in_features = 512 * (self.img_h//32) * (self.img_w//32)
self.flatten6 = Flatten()
self.linear6 = nn.Linear(in_features, 1024)
self.act6 = nn.ReLU()
elif arc == 'places2':
self.conv6 = nn.Conv2d(512, 512, kernel_size=5, stride=2, padding=2)
self.bn6 = nn.BatchNorm2d(512)
self.act6 = nn.ReLU()
# input_shape (None, 512, img_h//64, img_w//64)
in_features = 512 * (self.img_h//64) * (self.img_w//64)
self.flatten7 = Flatten()
self.linear7 = nn.Linear(in_features, 1024)
self.act7 = nn.ReLU()
else:
raise ValueError('Unsupported architecture \'%s\'.' % self.arc)
# output_shape: (None, 1024)
def forward(self, x):
x = self.bn1(self.act1(self.conv1(x)))
x = self.bn2(self.act2(self.conv2(x)))
x = self.bn3(self.act3(self.conv3(x)))
x = self.bn4(self.act4(self.conv4(x)))
x = self.bn5(self.act5(self.conv5(x)))
if self.arc == 'celeba':
x = self.act6(self.linear6(self.flatten6(x)))
elif self.arc == 'places2':
x = self.bn6(self.act6(self.conv6(x)))
x = self.act7(self.linear7(self.flatten7(x)))
return x
class ContextDiscriminator(nn.Module):
def __init__(self, local_input_shape, global_input_shape, arc='celeba'):
super(ContextDiscriminator, self).__init__()
self.arc = arc
self.input_shape = [local_input_shape, global_input_shape]
self.output_shape = (1,)
self.model_ld = LocalDiscriminator(local_input_shape)
self.model_gd = GlobalDiscriminator(global_input_shape, arc=arc)
# input_shape: [(None, 1024), (None, 1024)]
in_features = self.model_ld.output_shape[-1] + self.model_gd.output_shape[-1]
self.concat1 = Concatenate(dim=-1)
# input_shape: (None, 2048)
self.linear1 = nn.Linear(in_features, 1)
self.act1 = nn.Sigmoid()
# output_shape: (None, 1)
def forward(self, x):
x_ld, x_gd = x
x_ld = self.model_ld(x_ld)
x_gd = self.model_gd(x_gd)
out = self.act1(self.linear1(self.concat1([x_ld, x_gd])))
return out
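# Usage sketch (illustrative): the context discriminator scores a (local patch,
# global image) pair; the shapes below are demo assumptions for the 'celeba' arc
# (both spatial sizes must be divisible by 32).
#
#   cd = ContextDiscriminator(local_input_shape=(3, 96, 96),
#                             global_input_shape=(3, 160, 160), arc='celeba')
#   score = cd((torch.randn(1, 3, 96, 96), torch.randn(1, 3, 160, 160)))  # -> (1, 1)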
class SingleDiscriminator(nn.Module):
def __init__(self, input_shape):
        super(SingleDiscriminator, self).__init__()
self.input_shape = input_shape
self.output_shape = (1024,)
self.img_c = input_shape[0]
self.img_h = input_shape[1]
self.img_w = input_shape[2]
# input_shape: (None, img_c, img_h, img_w)
self.conv1 = nn.Conv2d(self.img_c, 64, kernel_size=5, stride=2, padding=2)
self.bn1 = nn.BatchNorm2d(64)
self.act1 = nn.ReLU()
# input_shape: (None, 64, img_h//2, img_w//2)
self.conv2 = nn.Conv2d(64, 128, kernel_size=5, stride=2, padding=2)
self.bn2 = nn.BatchNorm2d(128)
self.act2 = nn.ReLU()
# input_shape: (None, 128, img_h//4, img_w//4)
self.conv3 = nn.Conv2d(128, 256, kernel_size=5, stride=2, padding=2)
self.bn3 = nn.BatchNorm2d(256)
self.act3 = nn.ReLU()
# input_shape: (None, 256, img_h//8, img_w//8)
self.conv4 = nn.Conv2d(256, 512, kernel_size=5, stride=2, padding=2)
self.bn4 = nn.BatchNorm2d(512)
self.act4 = nn.ReLU()
# input_shape: (None, 512, img_h//16, img_w//16)
self.conv5 = nn.Conv2d(512, 512, kernel_size=5, stride=2, padding=2)
self.bn5 = nn.BatchNorm2d(512)
self.act5 = nn.ReLU()
# input_shape: (None, 512, img_h//32, img_w//32)
in_features = 512 * (self.img_h//32) * (self.img_w//32)
self.flatten6 = Flatten()
# input_shape: (None, 512 * img_h//32 * img_w//32)
self.linear6 = nn.Linear(in_features, 1024)
self.act6 = nn.ReLU()
# output_shape: (None, 1024)
def forward(self, x):
x = self.bn1(self.act1(self.conv1(x)))
x = self.bn2(self.act2(self.conv2(x)))
x = self.bn3(self.act3(self.conv3(x)))
x = self.bn4(self.act4(self.conv4(x)))
x = self.bn5(self.act5(self.conv5(x)))
x = self.act6(self.linear6(self.flatten6(x)))
return x
class GlobalDiscriminator_P(nn.Module):
def __init__(self, input_shape):
super(GlobalDiscriminator_P, self).__init__()
self.input_shape = input_shape
self.output_shape = (1024,)
self.img_c = input_shape[0]
self.img_h = input_shape[1]
self.img_w = input_shape[2]
# input_shape: (None, img_c, img_h, img_w)
self.conv1 = nn.Conv2d(self.img_c, 64, kernel_size=5, stride=2, padding=2)
self.bn1 = nn.BatchNorm2d(64)
self.act1 = nn.ReLU()
# input_shape: (None, 64, img_h//2, img_w//2)
self.conv2 = nn.Conv2d(64, 128, kernel_size=5, stride=2, padding=2)
self.bn2 = nn.BatchNorm2d(128)
self.act2 = nn.ReLU()
# self
self.self_attn1 = Self_Attn(128)
self.self_attn2 = Self_Attn(128)
# input_shape: (None, 128, img_h//4, img_w//4)
self.conv3 = nn.Conv2d(128, 256, kernel_size=5, stride=2, padding=2)
self.bn3 = nn.BatchNorm2d(256)
self.act3 = nn.ReLU()
# self
self.self_attn3 = Self_Attn(256)
# input_shape: (None, 256, img_h//8, img_w//8)
self.conv4 = nn.Conv2d(256, 512, kernel_size=5, stride=2, padding=2)
self.bn4 = nn.BatchNorm2d(512)
self.act4 = nn.ReLU()
# input_shape: (None, 512, img_h//16, img_w//16)
self.conv5 = nn.Conv2d(512, 512, kernel_size=5, stride=2, padding=2)
self.bn5 = nn.BatchNorm2d(512)
self.act5 = nn.ReLU()
# input_shape: (None, 512, img_h//32, img_w//32)
in_features = 512 * (self.img_h//32) * (self.img_w//32)
self.flatten6 = Flatten()
self.linear6 = nn.Linear(in_features, 1024)
self.act6 = nn.ReLU()
self.linear7 = nn.Linear(1024, 1)
self.act7 = nn.Sigmoid()
def forward(self, x):
x = self.bn1(self.act1(self.conv1(x)))
x = self.bn2(self.act2(self.conv2(x)))
x = self.self_attn1(x)
x = self.self_attn2(x)
x = self.bn3(self.act3(self.conv3(x)))
#x = self.self_attn2(x)
x = self.bn4(self.act4(self.conv4(x)))
x = self.bn5(self.act5(self.conv5(x)))
x = self.act6(self.linear6(self.flatten6(x)))
x = self.act7(self.linear7(x))
return x |
the-stack_106_27825 | import pytest
import numpy as np
import transform as tf
@pytest.mark.circles
@pytest.mark.parametrize('myinput, myref',
[(1, np.pi),
(0, 0),
(1j, 0),
(2.1, np.pi * 2.1**2),
# (-5, pytest.raises(ValueError)),
])
def test_area_circ(myinput, myref):
"""Test the area values against a reference for r >= 0."""
print(myinput)
assert tf.area_circ(myinput) == myref
@pytest.mark.circles
def test_values():
"""Make sure value errors are recognized for area_circ."""
with pytest.raises(ValueError):
tf.area_circ(-5)
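# A hedged sketch of how the commented-out parametrize entry above could be
# expressed: pass the pytest.raises context manager itself as a parameter.
@pytest.mark.circles
@pytest.mark.parametrize('myinput, expectation',
                         [(-5, pytest.raises(ValueError)),
                          (-0.1, pytest.raises(ValueError)),
                          ])
def test_area_circ_invalid(myinput, expectation):
    """Negative radii should raise ValueError."""
    with expectation:
        tf.area_circ(myinput)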
|
the-stack_106_27826 | import unittest
import tethys_apps.base.handoff as tethys_handoff
from types import FunctionType
from unittest import mock
from tethys_sdk.testing import TethysTestCase
def test_function(*args):
if args is not None:
arg_list = []
for arg in args:
arg_list.append(arg)
return arg_list
else:
return ''
class TestHandoffManager(unittest.TestCase):
def setUp(self):
self.hm = tethys_handoff.HandoffManager
def tearDown(self):
pass
def test_init(self):
# Mock app
app = mock.MagicMock()
# Mock handoff_handlers
handlers = mock.MagicMock(name='handler_name')
app.handoff_handlers.return_value = handlers
# mock _get_valid_handlers
self.hm._get_valid_handlers = mock.MagicMock(return_value=['valid_handler'])
result = tethys_handoff.HandoffManager(app=app)
# Check result
self.assertEqual(app, result.app)
self.assertEqual(handlers, result.handlers)
self.assertEqual(['valid_handler'], result.valid_handlers)
def test_repr(self):
# Mock app
app = mock.MagicMock()
# Mock handoff_handlers
handlers = mock.MagicMock()
handlers.name = 'test_handler'
app.handoff_handlers.return_value = [handlers]
# mock _get_valid_handlers
self.hm._get_valid_handlers = mock.MagicMock(return_value=['valid_handler'])
result = tethys_handoff.HandoffManager(app=app).__repr__()
check_string = "<Handoff Manager: app={}, handlers=['{}']>".format(app, handlers.name)
self.assertEqual(check_string, result)
def test_get_capabilities(self):
# Mock app
app = mock.MagicMock()
# Mock _get_handoff_manager_for_app
manager = mock.MagicMock(valid_handlers='test_handlers')
self.hm._get_handoff_manager_for_app = mock.MagicMock(return_value=manager)
result = tethys_handoff.HandoffManager(app=app).get_capabilities(app_name='test_app')
# Check Result
self.assertEqual('test_handlers', result)
def test_get_capabilities_external(self):
# Mock app
app = mock.MagicMock()
# Mock _get_handoff_manager_for_app
handler1 = mock.MagicMock()
handler1.internal = False
handler2 = mock.MagicMock()
# Do not write out handler2
handler2.internal = True
manager = mock.MagicMock(valid_handlers=[handler1, handler2])
self.hm._get_handoff_manager_for_app = mock.MagicMock(return_value=manager)
result = tethys_handoff.HandoffManager(app=app).get_capabilities(app_name='test_app', external_only=True)
# Check Result
self.assertEqual([handler1], result)
@mock.patch('tethys_apps.base.handoff.json')
def test_get_capabilities_json(self, mock_json):
# Mock app
app = mock.MagicMock()
# Mock HandoffHandler.__json
handler1 = mock.MagicMock(name='test_name')
manager = mock.MagicMock(valid_handlers=[handler1])
self.hm._get_handoff_manager_for_app = mock.MagicMock(return_value=manager)
tethys_handoff.HandoffManager(app=app).get_capabilities(app_name='test_app', jsonify=True)
# Check Result
rts_call_args = mock_json.dumps.call_args_list
self.assertEqual('test_name', rts_call_args[0][0][0][0]['_mock_name'])
def test_get_handler(self):
app = mock.MagicMock()
# Mock _get_handoff_manager_for_app
handler1 = mock.MagicMock()
handler1.name = 'handler1'
manager = mock.MagicMock(valid_handlers=[handler1])
self.hm._get_handoff_manager_for_app = mock.MagicMock(return_value=manager)
result = tethys_handoff.HandoffManager(app=app).get_handler(handler_name='handler1')
self.assertEqual('handler1', result.name)
@mock.patch('tethys_apps.base.handoff.HttpResponseBadRequest')
def test_handoff_type_error(self, mock_hrbr):
from django.http import HttpRequest
request = HttpRequest()
# Mock app
app = mock.MagicMock()
app.name = 'test_app_name'
# Mock _get_handoff_manager_for_app
handler1 = mock.MagicMock()
handler1().internal = False
handler1().side_effect = TypeError('test message')
manager = mock.MagicMock(get_handler=handler1)
self.hm._get_handoff_manager_for_app = mock.MagicMock(return_value=manager)
tethys_handoff.HandoffManager(app=app).handoff(request=request, handler_name='test_handler')
rts_call_args = mock_hrbr.call_args_list
# Check result
self.assertIn('HTTP 400 Bad Request: test message.',
rts_call_args[0][0][0])
@mock.patch('tethys_apps.base.handoff.HttpResponseBadRequest')
def test_handoff_error(self, mock_hrbr):
from django.http import HttpRequest
request = HttpRequest()
#
# # Mock app
app = mock.MagicMock()
app.name = 'test_app_name'
# Mock _get_handoff_manager_for_app
handler1 = mock.MagicMock()
        # TODO: confirm with Nathan whether this is how the test should be set up,
        # because internal = True has nothing to do with the error message.
handler1().internal = True
handler1().side_effect = TypeError('test message')
mapp = mock.MagicMock()
mapp.name = 'test manager name'
manager = mock.MagicMock(get_handler=handler1, app=mapp)
self.hm._get_handoff_manager_for_app = mock.MagicMock(return_value=manager)
tethys_handoff.HandoffManager(app=app).handoff(request=request, handler_name='test_handler')
rts_call_args = mock_hrbr.call_args_list
# Check result
check_message = "HTTP 400 Bad Request: No handoff handler '{0}' for app '{1}' found".\
format('test manager name', 'test_handler')
self.assertIn(check_message, rts_call_args[0][0][0])
@mock.patch('warnings.warn')
def test_get_valid_handlers(self, mock_warn):
app = mock.MagicMock(package='test_app')
# Mock handoff_handlers
handler1 = mock.MagicMock(handler='controllers.home', valid=True)
# Cover Import Error Case
handler2 = mock.MagicMock(handler='controllers1:home1', valid=False)
# Cover Deprecated format
handler3 = mock.MagicMock(handler='controllers:home', valid=False)
app.handoff_handlers.return_value = [handler1, handler2, handler3]
# mock _get_valid_handlers
result = tethys_handoff.HandoffManager(app=app)._get_valid_handlers()
# Check result
self.assertEqual('controllers.home', result[0].handler)
self.assertEqual('controllers:home', result[1].handler)
check_message = 'The handler attribute of a HandoffHandler should now be in the form:' \
' "my_first_app.controllers.my_handler". The form "handoff:my_handler" is now deprecated.'
mock_warn.assert_called_with(check_message, DeprecationWarning)
class TestHandoffHandler(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
result = tethys_handoff.HandoffHandler(name='test_name', handler='test_app.handoff.csv', internal=True)
# Check Result
self.assertEqual('test_name', result.name)
self.assertEqual('test_app.handoff.csv', result.handler)
self.assertTrue(result.internal)
self.assertIs(type(result.function), FunctionType)
def test_repr(self):
result = tethys_handoff.HandoffHandler(name='test_name', handler='test_app.handoff.csv',
internal=True).__repr__()
# Check Result
check_string = '<Handoff Handler: name=test_name, handler=test_app.handoff.csv>'
self.assertEqual(check_string, result)
def test_dict_json_arguments(self):
tethys_handoff.HandoffHandler.arguments = ['test_json', 'request']
result = tethys_handoff.HandoffHandler(name='test_name', handler='test_app.handoff.csv',
internal=True).__dict__()
# Check Result
check_dict = {'name': 'test_name', 'arguments': ['test_json']}
self.assertIsInstance(result, dict)
self.assertEqual(check_dict, result)
def test_arguments(self):
result = tethys_handoff.HandoffHandler(name='test_name', handler='test_app.handoff.csv', internal=True)\
.arguments
self.assertEqual(['request', 'csv_url'], result)
class TestGetHandoffManagerFroApp(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_not_app_name(self):
app = mock.MagicMock()
result = tethys_handoff.HandoffManager(app=app)._get_handoff_manager_for_app(app_name=None)
self.assertEqual(app, result.app)
@mock.patch('tethys_apps.base.handoff.tethys_apps')
def test_with_app(self, mock_ta):
app = mock.MagicMock(package='test_app')
app.get_handoff_manager.return_value = 'test_manager'
mock_ta.harvester.SingletonHarvester().apps = [app]
result = tethys_handoff.HandoffManager(app=app)._get_handoff_manager_for_app(app_name='test_app')
# Check result
self.assertEqual('test_manager', result)
class TestTestAppHandoff(TethysTestCase):
def set_up(self):
self.c = self.get_test_client()
self.user = self.create_test_user(username="joe", password="secret", email="joe@some_site.com")
self.c.force_login(self.user)
def tear_down(self):
self.user.delete()
def test_test_app_handoff(self):
response = self.c.get('/handoff/test-app/test_name/?csv_url=""')
self.assertEqual(302, response.status_code)
|
the-stack_106_27827 | import numpy as np
from vec_io import fvecs_read
from sorter import parallel_sort
from lsh import SRP
from transform import spherical_transform, simple_lsh
def intersect(gs, ids):
rc = np.mean([
len(np.intersect1d(g, list(id)))
for g, id in zip(gs, ids)])
return rc
def recalls(index, q_, gt):
ks = [20, 100]
ts = [16, 128, 1024]
print(" Probed \t Items \t", end="")
for top_k in ks:
print("top-%d\t" % (top_k), end="")
print()
for t in ts:
ids = index.search(q_, t)
items = np.mean([len(id) for id in ids])
print("%6d \t %6d \t" % (t, items), end="")
for top_k in ks:
rc = intersect(gt[:, :top_k], ids)
print("%.4f \t" % (rc / float(top_k)), end="")
print()
def load_data():
dataset = "netflix"
base = "/home/xinyan/program/data/"
# dataset = "imagenet"
# base = "/research/jcheng2/xinyan/data/"
x_path = "{}{}/{}_base.fvecs".format(base, dataset, dataset)
q_path = "{}{}/{}_query.fvecs".format(base, dataset, dataset)
x = fvecs_read(x_path)
q = fvecs_read(q_path)[:1000]
return x, q
def main():
x, q = load_data()
n, d = x.shape
np.random.seed(808)
w = np.random.uniform(size=d)
w = w / np.linalg.norm(w)
gt = parallel_sort(x, q, w, metric="weighted")
ks = 256
print("==================spherical_transform====================")
x_, q_ = spherical_transform(x, q, w)
n_, d_ = x_.shape
np.random.seed(808)
index = SRP(k=ks, d=d_)
index.add(x_)
recalls(index, q_, gt)
if __name__ == "__main__":
main()
|
the-stack_106_27828 | import torch
from torch import nn
from torch.nn import functional as F
import numpy as np
from torch.distributions import Categorical
from .layers import *
class BiGRULanguageModel(nn.Module):
def __init__(self, config, vocab, pad_token=0, device=0):
super().__init__()
self.hidden_size = config.lm_d_hidden
self.embed_size = config.lm_d_embed
self.n_vocab = len(vocab)
self.gpu = 0
self.device = device
# +2 because of <GO> tokens
self.encoder = DynamicEncoder(self.n_vocab + 2, self.embed_size, self.hidden_size, self.gpu)
self.fw_proj = nn.Linear(self.hidden_size, self.n_vocab + 2)
self.bw_proj = nn.Linear(self.hidden_size, self.n_vocab + 2)
self.loss = nn.CrossEntropyLoss(ignore_index=pad_token)
self.vocab = vocab
self.warning_flag = False
self.gpu = 0
# <GO> tokens
self.fw_start_token = self.n_vocab
self.bw_start_token = self.n_vocab + 1
self.pad_token = pad_token
def append_start_end_tokens(self, inp, inp_length):
batch_size = inp.size(1)
start_tokens = torch.LongTensor([self.fw_start_token] * batch_size).view(1, -1).to(inp.device) # [1,B]
end_tokens_pad = torch.LongTensor([self.pad_token] * batch_size).view(1, -1).to(inp.device) # [1,B]
new_inp = torch.cat([start_tokens, inp, end_tokens_pad], 0)
for b in range(batch_size):
new_inp[inp_length[b] + 1, b] = self.bw_start_token
new_inp_length = inp_length + 2
return new_inp, new_inp_length
def forward(self, batch):
inp = batch.text
inp = inp.to(self.device)
# append <GO> token
inp, inp_len = self.append_start_end_tokens(inp, batch.length)
inp_len_np = inp_len.cpu().numpy()
output = self.encoder(inp, inp_len_np)
fw_output, bw_output = output[:,:,:self.hidden_size], output[:,:,self.hidden_size:]
fw_proj, bw_proj = self.fw_proj(fw_output), self.bw_proj(bw_output)
inp_trunc = inp[:output.size(0)]
fw_loss = self.loss(fw_proj[:-1].view(-1,fw_proj.size(2)).contiguous(), inp_trunc[1:].view(-1).contiguous())
bw_loss = self.loss(bw_proj[1:].view(-1,bw_proj.size(2)).contiguous(), inp_trunc[:-1].view(-1).contiguous())
return fw_loss, bw_loss
def sample_single_sequence(self, method, direction, token_inp, hidden, length):
outputs = []
for t in range(length):
output, hidden = self.encoder.rollout(token_inp, hidden, direction=direction)
if direction == 'fw':
proj = self.fw_proj(output[:,:,:self.hidden_size])
elif direction == 'bw':
proj = self.bw_proj(output[:,:,self.hidden_size:])
assert(proj.size(0) == 1)
proj = proj.squeeze(0)
# outputs.append(proj)
if method == 'max':
_, token_inp = torch.max(proj)
outputs.append(token_inp)
elif method == 'random':
dist = Categorical(F.softmax(proj ,-1))
token_inp = dist.sample()
outputs.append(token_inp)
token_inp = token_inp.view(1,-1)
if direction == 'bw':
outputs = list(reversed(outputs))
outputs = torch.stack(outputs)
return outputs
def sample_n_sequences(self, method, direction, token_inp, hidden, length, sample_num):
outputs = []
token_inp = token_inp.repeat(1, sample_num) # [1, N]
hidden = hidden.repeat(1, sample_num, 1) # [x, N, H]
for t in range(length):
output, hidden = self.encoder.rollout(token_inp, hidden, direction=direction)
if direction == 'fw':
proj = self.fw_proj(output[:, :, :self.hidden_size])
elif direction == 'bw':
proj = self.bw_proj(output[:, :, self.hidden_size:])
proj = proj.squeeze(0)
if method == 'max':
_, token_inp = torch.max(proj,-1)
outputs.append(token_inp.view(-1))
elif method == 'random':
dist = Categorical(F.softmax(proj, -1))
token_inp = dist.sample()
outputs.append(token_inp)
token_inp = token_inp.view(1, -1)
if direction == 'bw':
outputs = list(reversed(outputs))
outputs = torch.stack(outputs)
return outputs
def sample_n(self, method, batch, max_sample_length, sample_num):
"""
this function to not assume input have <GO> tokens.
:param method:
:param batch:
:param max_sample_length:
:param sample_num:
:return:
"""
inp = batch.text
inp_len_np = batch.length.cpu().numpy()
pad_inp1 = torch.LongTensor([self.fw_start_token] * inp.size(1)).view(1,-1)
pad_inp2 = torch.LongTensor([self.pad_token] * inp.size(1)).view(1,-1)
if self.gpu >= 0:
inp = inp.to(self.gpu)
pad_inp1 = pad_inp1.to(self.gpu)
pad_inp2 = pad_inp2.to(self.gpu)
padded_inp = torch.cat([pad_inp1, inp, pad_inp2], 0)
padded_inp[inp_len_np + 1] = self.bw_start_token
assert padded_inp.max().item() < self.n_vocab + 2
assert inp_len_np[0] + 2 <= padded_inp.size(0)
padded_enc_out = self.encoder(padded_inp, inp_len_np + 2) # [T+2,B,H]
# extract forward hidden state
assert 0 <= batch.fw_pos.item() - 1 <= padded_enc_out.size(0) - 1
assert 0 <= batch.fw_pos.item() <= padded_enc_out.size(0) - 1
fw_hidden = padded_enc_out.index_select(0,batch.fw_pos - 1)
fw_hidden = torch.cat([fw_hidden[:,:,:self.hidden_size],fw_hidden[:,:,self.hidden_size:]], 0)
fw_next_token = padded_inp.index_select(0,batch.fw_pos).view(1,-1)
# extract backward hidden state
assert 0 <= batch.bw_pos.item() + 3 <= padded_enc_out.size(0) - 1
assert 0 <= batch.bw_pos.item() + 2 <= padded_enc_out.size(0) - 1
bw_hidden = padded_enc_out.index_select(0,batch.bw_pos + 3)
bw_hidden = torch.cat([bw_hidden[:,:,:self.hidden_size], bw_hidden[:,:,self.hidden_size:]], 0)
bw_next_token = padded_inp.index_select(0,batch.bw_pos + 2).view(1,-1)
fw_sample_outputs = self.sample_n_sequences(method, 'fw', fw_next_token, fw_hidden, max_sample_length, sample_num)
bw_sample_outputs = self.sample_n_sequences(method, 'bw', bw_next_token, bw_hidden, max_sample_length, sample_num)
self.filter_special_tokens(fw_sample_outputs)
self.filter_special_tokens(bw_sample_outputs)
return fw_sample_outputs, bw_sample_outputs
def filter_special_tokens(self, m):
for i in range(m.size(0)):
for j in range(m.size(1)):
if m[i,j] >= self.n_vocab - 2 or m[i,j] == self.vocab.get('[CLS]',0) \
or m[i,j] == self.vocab.get('[SEP]',0):
m[i,j] = 0
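# Usage sketch (illustrative only): `config` must expose lm_d_hidden / lm_d_embed,
# `vocab` is a token-to-id mapping, and `batch` must provide .text ([T, B] LongTensor)
# and .length ([B] LongTensor), as assumed by forward() above.
#
#   lm = BiGRULanguageModel(config, vocab, pad_token=0)
#   fw_loss, bw_loss = lm(batch)
#   (fw_loss + bw_loss).backward()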
|
the-stack_106_27831 | from functools import reduce
## Journal of Biomedical And Health Informatics (JBHI) 2020
import pdb
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.autograd import Variable
from bis3d_v2.networks.nets.msga_net.attention import (
PAM_Module,
CAM_Module,
semanticModule,
PAM_CAM_Layer,
MultiConv
)
from bis3d_v2.networks.nets.msga_net.resnext101_regular import ResNeXt101
class DAF_stack(nn.Module):
def __init__(self, in_channels, out_channels_end):
super(DAF_stack, self).__init__()
self.resnext = ResNeXt101(in_channels)
self.down4 = nn.Sequential(
nn.Conv2d(2048, 64, kernel_size=1), nn.BatchNorm2d(64), nn.PReLU()
)
self.down3 = nn.Sequential(
nn.Conv2d(1024, 64, kernel_size=1), nn.BatchNorm2d(64), nn.PReLU()
)
self.down2 = nn.Sequential(
nn.Conv2d(512, 64, kernel_size=1), nn.BatchNorm2d(64), nn.PReLU()
)
self.down1 = nn.Sequential(
nn.Conv2d(256, 64, kernel_size=1), nn.BatchNorm2d(64), nn.PReLU()
)
inter_channels = 64
out_channels=64
self.conv6_1 = nn.Sequential(nn.Dropout2d(0.1, False), nn.Conv2d(64, out_channels, 1))
self.conv6_2 = nn.Sequential(nn.Dropout2d(0.1, False), nn.Conv2d(64, out_channels, 1))
self.conv6_3 = nn.Sequential(nn.Dropout2d(0.1, False), nn.Conv2d(64, out_channels, 1))
self.conv6_4 = nn.Sequential(nn.Dropout2d(0.1, False), nn.Conv2d(64, out_channels, 1))
self.conv7_1 = nn.Sequential(nn.Dropout2d(0.1, False), nn.Conv2d(64, out_channels, 1))
self.conv7_2 = nn.Sequential(nn.Dropout2d(0.1, False), nn.Conv2d(64, out_channels, 1))
self.conv7_3 = nn.Sequential(nn.Dropout2d(0.1, False), nn.Conv2d(64, out_channels, 1))
self.conv7_4 = nn.Sequential(nn.Dropout2d(0.1, False), nn.Conv2d(64, out_channels, 1))
self.conv8_1=nn.Conv2d(64,64,1)
self.conv8_2=nn.Conv2d(64,64,1)
self.conv8_3=nn.Conv2d(64,64,1)
self.conv8_4=nn.Conv2d(64,64,1)
self.conv8_11=nn.Conv2d(64,64,1)
self.conv8_12=nn.Conv2d(64,64,1)
self.conv8_13=nn.Conv2d(64,64,1)
self.conv8_14=nn.Conv2d(64,64,1)
self.softmax_1 = nn.Softmax(dim=-1)
self.pam_attention_1_1= PAM_CAM_Layer(64, True)
self.cam_attention_1_1= PAM_CAM_Layer(64, False)
self.semanticModule_1_1 = semanticModule(128)
self.conv_sem_1_1 = nn.Conv2d(128, 64, kernel_size=3, padding=1)
self.conv_sem_1_2 = nn.Conv2d(128, 64, kernel_size=3, padding=1)
self.conv_sem_1_3 = nn.Conv2d(128, 64, kernel_size=3, padding=1)
self.conv_sem_1_4 = nn.Conv2d(128, 64, kernel_size=3, padding=1)
#Dual Attention mechanism
self.pam_attention_1_2 = PAM_CAM_Layer(64)
self.cam_attention_1_2 = PAM_CAM_Layer(64, False)
self.pam_attention_1_3 = PAM_CAM_Layer(64)
self.cam_attention_1_3 = PAM_CAM_Layer(64, False)
self.pam_attention_1_4 = PAM_CAM_Layer(64)
self.cam_attention_1_4 = PAM_CAM_Layer(64, False)
self.pam_attention_2_1 = PAM_CAM_Layer(64)
self.cam_attention_2_1 = PAM_CAM_Layer(64, False)
self.semanticModule_2_1 = semanticModule(128)
self.conv_sem_2_1 = nn.Conv2d(128, 64, kernel_size=3, padding=1)
self.conv_sem_2_2 = nn.Conv2d(128, 64, kernel_size=3, padding=1)
self.conv_sem_2_3 = nn.Conv2d(128, 64, kernel_size=3, padding=1)
self.conv_sem_2_4 = nn.Conv2d(128, 64, kernel_size=3, padding=1)
self.pam_attention_2_2 = PAM_CAM_Layer(64)
self.cam_attention_2_2 = PAM_CAM_Layer(64, False)
self.pam_attention_2_3 = PAM_CAM_Layer(64)
self.cam_attention_2_3 = PAM_CAM_Layer(64, False)
self.pam_attention_2_4 = PAM_CAM_Layer(64)
self.cam_attention_2_4 = PAM_CAM_Layer(64, False)
self.fuse1 = MultiConv(256, 64, False)
self.attention4 = MultiConv(128, 64)
self.attention3 = MultiConv(128, 64)
self.attention2 = MultiConv(128, 64)
self.attention1 = MultiConv(128, 64)
self.refine4 = MultiConv(128, 64, False)
self.refine3 = MultiConv(128, 64, False)
self.refine2 = MultiConv(128, 64, False)
self.refine1 = MultiConv(128, 64, False)
self.predict4 = nn.Conv2d(64, out_channels_end, kernel_size=1)
self.predict3 = nn.Conv2d(64, out_channels_end, kernel_size=1)
self.predict2 = nn.Conv2d(64, out_channels_end, kernel_size=1)
self.predict1 = nn.Conv2d(64, out_channels_end, kernel_size=1)
self.predict4_2 = nn.Conv2d(64, out_channels_end, kernel_size=1)
self.predict3_2 = nn.Conv2d(64, out_channels_end, kernel_size=1)
self.predict2_2 = nn.Conv2d(64, out_channels_end, kernel_size=1)
self.predict1_2 = nn.Conv2d(64, out_channels_end, kernel_size=1)
def forward(self, x):
layer0 = self.resnext.layer0(x)
layer1 = self.resnext.layer1(layer0)
layer2 = self.resnext.layer2(layer1)
layer3 = self.resnext.layer3(layer2)
layer4 = self.resnext.layer4(layer3)
down4 = F.upsample(self.down4(layer4), size=layer1.size()[2:], mode='bilinear')
down3 = F.upsample(self.down3(layer3), size=layer1.size()[2:], mode='bilinear')
down2 = F.upsample(self.down2(layer2), size=layer1.size()[2:], mode='bilinear')
down1 = self.down1(layer1)
predict4 = self.predict4(down4)
predict3 = self.predict3(down3)
predict2 = self.predict2(down2)
predict1 = self.predict1(down1)
fuse1 = self.fuse1(torch.cat((down4, down3, down2, down1), 1))
semVector_1_1,semanticModule_1_1 = self.semanticModule_1_1(torch.cat((down4, fuse1),1))
attn_pam4 = self.pam_attention_1_4(torch.cat((down4, fuse1), 1))
attn_cam4 = self.cam_attention_1_4(torch.cat((down4, fuse1), 1))
attention1_4=self.conv8_1((attn_cam4+attn_pam4)*self.conv_sem_1_1(semanticModule_1_1))
semVector_1_2, semanticModule_1_2 = self.semanticModule_1_1(torch.cat((down3, fuse1), 1))
attn_pam3 = self.pam_attention_1_3(torch.cat((down3, fuse1), 1))
attn_cam3 = self.cam_attention_1_3(torch.cat((down3, fuse1), 1))
attention1_3=self.conv8_2((attn_cam3+attn_pam3)*self.conv_sem_1_2(semanticModule_1_2))
semVector_1_3, semanticModule_1_3 = self.semanticModule_1_1(torch.cat((down2, fuse1), 1))
attn_pam2 = self.pam_attention_1_2(torch.cat((down2, fuse1), 1))
attn_cam2 = self.cam_attention_1_2(torch.cat((down2, fuse1), 1))
attention1_2=self.conv8_3((attn_cam2+attn_pam2)*self.conv_sem_1_3(semanticModule_1_3))
semVector_1_4, semanticModule_1_4 = self.semanticModule_1_1(torch.cat((down1, fuse1), 1))
attn_pam1 = self.pam_attention_1_1(torch.cat((down1, fuse1), 1))
attn_cam1 = self.cam_attention_1_1(torch.cat((down1, fuse1), 1))
attention1_1 = self.conv8_4((attn_cam1+attn_pam1) * self.conv_sem_1_4(semanticModule_1_4))
##new design with stacked attention
semVector_2_1, semanticModule_2_1 = self.semanticModule_2_1(torch.cat((down4, attention1_4 * fuse1), 1))
refine4_1 = self.pam_attention_2_4(torch.cat((down4,attention1_4*fuse1),1))
refine4_2 = self.cam_attention_2_4(torch.cat((down4,attention1_4*fuse1),1))
refine4 = self.conv8_11((refine4_1+refine4_2) * self.conv_sem_2_1(semanticModule_2_1))
semVector_2_2, semanticModule_2_2 = self.semanticModule_2_1(torch.cat((down3, attention1_3 * fuse1), 1))
refine3_1 = self.pam_attention_2_3(torch.cat((down3,attention1_3*fuse1),1))
refine3_2 = self.cam_attention_2_3(torch.cat((down3,attention1_3*fuse1),1))
refine3 = self.conv8_12((refine3_1+refine3_2) * self.conv_sem_2_2(semanticModule_2_2))
semVector_2_3, semanticModule_2_3 = self.semanticModule_2_1(torch.cat((down2, attention1_2 * fuse1), 1))
refine2_1 = self.pam_attention_2_2(torch.cat((down2,attention1_2*fuse1),1))
refine2_2 = self.cam_attention_2_2(torch.cat((down2,attention1_2*fuse1),1))
refine2 = self.conv8_13((refine2_1+refine2_2)*self.conv_sem_2_3(semanticModule_2_3))
semVector_2_4, semanticModule_2_4 = self.semanticModule_2_1(torch.cat((down1, attention1_1 * fuse1), 1))
refine1_1 = self.pam_attention_2_1(torch.cat((down1,attention1_1 * fuse1),1))
refine1_2 = self.cam_attention_2_1(torch.cat((down1,attention1_1 * fuse1),1))
refine1=self.conv8_14((refine1_1+refine1_2) * self.conv_sem_2_4(semanticModule_2_4))
predict4_2 = self.predict4_2(refine4)
predict3_2 = self.predict3_2(refine3)
predict2_2 = self.predict2_2(refine2)
predict1_2 = self.predict1_2(refine1)
predict1 = F.upsample(predict1, size=x.size()[2:], mode='bilinear')
predict2 = F.upsample(predict2, size=x.size()[2:], mode='bilinear')
predict3 = F.upsample(predict3, size=x.size()[2:], mode='bilinear')
predict4 = F.upsample(predict4, size=x.size()[2:], mode='bilinear')
predict1_2 = F.upsample(predict1_2, size=x.size()[2:], mode='bilinear')
predict2_2 = F.upsample(predict2_2, size=x.size()[2:], mode='bilinear')
predict3_2 = F.upsample(predict3_2, size=x.size()[2:], mode='bilinear')
predict4_2 = F.upsample(predict4_2, size=x.size()[2:], mode='bilinear')
# if self.training:
# return semVector_1_1,\
# semVector_2_1, \
# semVector_1_2, \
# semVector_2_2, \
# semVector_1_3, \
# semVector_2_3, \
# semVector_1_4, \
# semVector_2_4, \
# torch.cat((down1, fuse1), 1),\
# torch.cat((down2, fuse1), 1),\
# torch.cat((down3, fuse1), 1),\
# torch.cat((down4, fuse1), 1), \
# torch.cat((down1, attention1_1 * fuse1), 1), \
# torch.cat((down2, attention1_2 * fuse1), 1), \
# torch.cat((down3, attention1_3 * fuse1), 1), \
# torch.cat((down4, attention1_4 * fuse1), 1), \
# semanticModule_1_4, \
# semanticModule_1_3, \
# semanticModule_1_2, \
# semanticModule_1_1, \
# semanticModule_2_4, \
# semanticModule_2_3, \
# semanticModule_2_2, \
# semanticModule_2_1, \
# predict1, \
# predict2, \
# predict3, \
# predict4, \
# predict1_2, \
# predict2_2, \
# predict3_2, \
# predict4_2
return predict4_2
# else:
# return ((predict1_2 + predict2_2 + predict3_2 + predict4_2) / 4)
if __name__ == '__main__':
model = DAF_stack(in_channels=2, out_channels_end=2)
t1 = torch.rand(1, 2, 256, 256)
out = model(t1)
print(out.shape) |
the-stack_106_27833 | import sciunit
from sciunit.scores import BooleanScore
# import morphounit.capabilities as cap
import morphounit.plots as plots
import os
from subprocess import call
import shlex
import json
from datetime import datetime
import matplotlib.backends.backend_pdf
from neurom.apps.cut_plane_detection import find_cut_plane
from neurom import load_neuron
import numpy
#==============================================================================
class NeuroM_MorphoCheck(sciunit.Test):
"""
Tests morphologies using NeuroM's `morph_check` feature.
Returns `True` if all checks passed successfully; else `False`.
"""
score_type = BooleanScore
def __init__(self,
observation=None,
name="NeuroM MorphCheck",
base_directory=None):
description = ("Tests morphologies using NeuroM's `morph_check` feature")
# required_capabilities = (cap.HandlesNeuroM,)
self.observation = observation
if not base_directory:
base_directory = "."
self.base_directory = base_directory
self.figures = []
sciunit.Test.__init__(self, self.observation, name)
#----------------------------------------------------------------------
def generate_prediction(self, model, verbose=False):
"""Implementation of sciunit.Test.generate_prediction."""
self.model_version = model.model_version
self.path_test_output = os.path.join(self.base_directory, 'validation_results', 'neuroM_morph_hardChecks', self.model_version, datetime.now().strftime("%Y%m%d-%H%M%S"))
if not os.path.exists(self.path_test_output):
os.makedirs(self.path_test_output)
# note: observation here is either the contents of the config file or a local path
# if local path load contents
if not isinstance(self.observation, dict):
with open(self.observation) as f:
self.observation = json.load(f)
# save morph_check config as local file
morph_check_config_file = os.path.join(self.path_test_output, "morph_check_config.json")
with open(morph_check_config_file,'w') as f:
json.dump(self.observation["morph_check"], f, indent=4)
cut_plane_config = self.observation["cut_plane"]
morhpcheck_output_file = os.path.join(self.path_test_output, "morph_check_output.json")
call(shlex.split(f"morph_check -C {morph_check_config_file} -o {morhpcheck_output_file} {model.morph_path}"))
with open(morhpcheck_output_file) as json_data:
prediction = json.load(json_data)
cut_plane_output_json = find_cut_plane(load_neuron(model.morph_path), bin_width=cut_plane_config["bin_width"], display=True)
cut_plane_figure_list = []
for key in cut_plane_output_json["figures"].keys():
cut_plane_figure_list.append(cut_plane_output_json["figures"][key][0])
cutplane_output_pdf = os.path.join(self.path_test_output, "cut_plane_figures.pdf")
cut_plane_pdf = matplotlib.backends.backend_pdf.PdfPages(cutplane_output_pdf)
for fig in range(1, len(cut_plane_figure_list)+1):
cut_plane_pdf.savefig(fig)
cut_plane_pdf.close()
cutplane_output_file = os.path.join(self.path_test_output, "cut_plane_output.json")
cut_plane_output_json.pop("figures")
cut_plane_output_json["cut_leaves"] = cut_plane_output_json["cut_leaves"].tolist()
def convert(o):
if isinstance(o, numpy.int64): return int(o)
raise TypeError
with open(cutplane_output_file, "w") as outfile:
json.dump(cut_plane_output_json, outfile, indent=4, default=convert)
self.figures.append(morhpcheck_output_file)
self.figures.append(cutplane_output_file)
self.figures.append(cutplane_output_pdf)
return prediction
#----------------------------------------------------------------------
def compute_score(self, observation, prediction):
"""Implementation of sciunit.Test.score_prediction."""
score_dict = {"PASS":True, "FAIL":False}
self.score = BooleanScore(score_dict[prediction["STATUS"]])
self.score.description = "Boolean: True = Pass / False = Fail"
return self.score
#----------------------------------------------------------------------
def bind_score(self, score, model, observation, prediction):
score.related_data["figures"] = self.figures
score.related_data["passed"] = score.score
return score
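# Usage sketch (illustrative): `observation` is a dict (or path to a JSON file)
# with "morph_check" and "cut_plane" entries, and the model is expected to expose
# `morph_path` and `model_version`, as used in generate_prediction above.
#
#   test = NeuroM_MorphoCheck(observation="morph_check_config.json")
#   score = test.judge(model)   # BooleanScore: True = all morph checks passed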
|
the-stack_106_27834 | # coding: utf-8
"""
Talon.One API
The Talon.One API is used to manage applications and campaigns, as well as to integrate with your application. The operations in the _Integration API_ section are used to integrate with our platform, while the other operations are used to manage applications and campaigns. ### Where is the API? The API is available at the same hostname as these docs. For example, if you are reading this page at `https://mycompany.talon.one/docs/api/`, the URL for the [updateCustomerProfile][] operation is `https://mycompany.talon.one/v1/customer_profiles/id` [updateCustomerProfile]: #operation--v1-customer_profiles--integrationId--put # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from talon_one.configuration import Configuration
class ErrorSource(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'pointer': 'str',
'parameter': 'str',
'line': 'str',
'resource': 'str'
}
attribute_map = {
'pointer': 'pointer',
'parameter': 'parameter',
'line': 'line',
'resource': 'resource'
}
def __init__(self, pointer=None, parameter=None, line=None, resource=None, local_vars_configuration=None): # noqa: E501
"""ErrorSource - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._pointer = None
self._parameter = None
self._line = None
self._resource = None
self.discriminator = None
if pointer is not None:
self.pointer = pointer
if parameter is not None:
self.parameter = parameter
if line is not None:
self.line = line
if resource is not None:
self.resource = resource
@property
def pointer(self):
"""Gets the pointer of this ErrorSource. # noqa: E501
Pointer to the path in the payload that caused this error. # noqa: E501
:return: The pointer of this ErrorSource. # noqa: E501
:rtype: str
"""
return self._pointer
@pointer.setter
def pointer(self, pointer):
"""Sets the pointer of this ErrorSource.
Pointer to the path in the payload that caused this error. # noqa: E501
:param pointer: The pointer of this ErrorSource. # noqa: E501
:type: str
"""
self._pointer = pointer
@property
def parameter(self):
"""Gets the parameter of this ErrorSource. # noqa: E501
Query parameter that caused this error. # noqa: E501
:return: The parameter of this ErrorSource. # noqa: E501
:rtype: str
"""
return self._parameter
@parameter.setter
def parameter(self, parameter):
"""Sets the parameter of this ErrorSource.
Query parameter that caused this error. # noqa: E501
:param parameter: The parameter of this ErrorSource. # noqa: E501
:type: str
"""
self._parameter = parameter
@property
def line(self):
"""Gets the line of this ErrorSource. # noqa: E501
Line number in uploaded multipart file that caused this error. 'N/A' if unknown. # noqa: E501
:return: The line of this ErrorSource. # noqa: E501
:rtype: str
"""
return self._line
@line.setter
def line(self, line):
"""Sets the line of this ErrorSource.
Line number in uploaded multipart file that caused this error. 'N/A' if unknown. # noqa: E501
:param line: The line of this ErrorSource. # noqa: E501
:type: str
"""
self._line = line
@property
def resource(self):
"""Gets the resource of this ErrorSource. # noqa: E501
Pointer to the resource that caused this error # noqa: E501
:return: The resource of this ErrorSource. # noqa: E501
:rtype: str
"""
return self._resource
@resource.setter
def resource(self, resource):
"""Sets the resource of this ErrorSource.
Pointer to the resource that caused this error # noqa: E501
:param resource: The resource of this ErrorSource. # noqa: E501
:type: str
"""
self._resource = resource
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ErrorSource):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ErrorSource):
return True
return self.to_dict() != other.to_dict()
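# Illustrative usage (not part of the generated client): the field values are
# placeholders, and running this requires the talon_one package this module ships in.
if __name__ == "__main__":
    source = ErrorSource(pointer="/attributes/name", parameter="name", line="N/A")
    print(source.to_dict())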
|
the-stack_106_27835 | from __future__ import unicode_literals
import re
from setuptools import find_packages, setup
def get_version(filename):
with open(filename) as fh:
metadata = dict(re.findall("__([a-z]+)__ = '([^']+)'", fh.read()))
return metadata['version']
setup(
name='Mopidy-FrontPanel',
version=get_version('mopidy_frontpanel/__init__.py'),
url='https://github.com/doubteded/mopidy-frontpanel',
license='Apache License, Version 2.0',
author='Nick Bulleid',
author_email='[email protected]',
description='Extension for using a front panel for Mopidy',
long_description=open('README.rst').read(),
packages=find_packages(exclude=['tests', 'tests.*']),
zip_safe=False,
include_package_data=True,
install_requires=[
'setuptools',
'Mopidy >= 1.0',
'Pykka >= 1.1',
'luma.oled >= 2.5.1',
'gpiozero >= 1.4.1'
],
entry_points={
'mopidy.ext': [
'frontpanel = mopidy_frontpanel:Extension',
],
},
classifiers=[
'Environment :: No Input/Output (Daemon)',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Topic :: Multimedia :: Sound/Audio :: Players',
],
)
|
the-stack_106_27837 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2018 all rights reserved
#
"""
Build and test a simple tokenizer
"""
def test():
import pyre.parsing
COMMENT = r"#"
SEPARATOR = r":"
class Simple(pyre.parsing.scanner):
"""a simple scanner"""
comment = pyre.parsing.token(COMMENT)
separator = pyre.parsing.token(SEPARATOR)
# access the token base class
from pyre.parsing.Token import Token
# check that the token descriptors have been turned into subclasses of Token
assert issubclass(Simple.comment, Token)
assert issubclass(Simple.separator, Token)
# check that the tokenizer was built correctly
assert Simple.pyre_tokenizer.pattern == '|'.join([
"(?P<comment>#)",
"(?P<separator>:)",
"(?P<whitespace>\s+)",
])
# and return the class record
return Simple
# main
if __name__ == "__main__":
# skip pyre initialization since we don't rely on the executive
pyre_noboot = True
# do...
test()
# end of file
|
the-stack_106_27838 | from netapp.netapp_object import NetAppObject
class SecurityLoginGetIterKeyTd(NetAppObject):
"""
Key typedef for table login_rbac_zapis
"""
_key_3 = None
@property
def key_3(self):
"""
Field authmethod
"""
return self._key_3
@key_3.setter
def key_3(self, val):
if val != None:
self.validate('key_3', val)
self._key_3 = val
_key_2 = None
@property
def key_2(self):
"""
Field application
"""
return self._key_2
@key_2.setter
def key_2(self, val):
if val != None:
self.validate('key_2', val)
self._key_2 = val
_key_1 = None
@property
def key_1(self):
"""
Field username
"""
return self._key_1
@key_1.setter
def key_1(self, val):
if val != None:
self.validate('key_1', val)
self._key_1 = val
_key_0 = None
@property
def key_0(self):
"""
Field vserver
"""
return self._key_0
@key_0.setter
def key_0(self, val):
if val != None:
self.validate('key_0', val)
self._key_0 = val
@staticmethod
def get_api_name():
return "security-login-get-iter-key-td"
@staticmethod
def get_desired_attrs():
return [
'key-3',
'key-2',
'key-1',
'key-0',
]
def describe_properties(self):
return {
'key_3': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'key_2': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'key_1': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'key_0': { 'class': basestring, 'is_list': False, 'required': 'optional' },
}
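# Illustrative usage (hedged): the static helpers need no instance state; the
# property setters above additionally rely on validate() from the NetAppObject base.
if __name__ == "__main__":
    print(SecurityLoginGetIterKeyTd.get_api_name())
    print(SecurityLoginGetIterKeyTd.get_desired_attrs())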
|
the-stack_106_27839 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
"""
Created on Fri Aug 9 14:01:22 2019
@author: cherrabi
"""
from P2N_Lib import GenereListeFichiers # import
from P2N_Config import LoadConfig #
import os # import the os library, used here for paths and directory creation
from textblob import TextBlob # import TextBlob, a linguistic processing toolkit
from nltk.corpus import stopwords
import nltk
from sematch.semantic.similarity import WordNetSimilarity
from nltk.corpus import wordnet as wn
import pandas as pd
import re
import shutil
import sys
from nltk.corpus import stopwords
import numpy as np
import pandas as pd
import re
import umap
import matplotlib.pyplot as plt
import seaborn as sns
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import TfidfVectorizer
from nltk.tokenize import word_tokenize
from nltk.stem.wordnet import WordNetLemmatizer
import string
import gensim
from gensim import corpora
from gensim.corpora import Dictionary
from sklearn.decomposition import TruncatedSVD
import os
import re
import codecs
import logging
import time
from operator import add
from textblob import TextBlob # import TextBlob, a linguistic processing toolkit
from nltk.corpus import stopwords
from P2N_Lib import LoadBiblioFile
from P2N_Lib import GenereListeFichiers
from P2N_Config import LoadConfig
from nltk.corpus import wordnet
import spacy
import en_core_web_sm
from itertools import product
ListeBrevet = [] # The patent List
stop_words = set(stopwords.words('english'))
configFile = LoadConfig()
requete = configFile.requete
BiblioPath = configFile.ResultBiblioPath
GatherContent = configFile.GatherContent
GatherBiblio = configFile.GatherBiblio
GatherPatent = configFile.GatherPatent
GatherFamilly = configFile.GatherFamilly
IsEnableScript = configFile.GatherIramuteq
ResultBiblioPath = configFile.ResultBiblioPath
ndf = configFile.ndf
DataBrevet = LoadBiblioFile(BiblioPath, ndf)
InventorList = []
InventorList = DataBrevet['brevets']
# prepare the parsed data for the scientific-publication and inventive-production indicators
inventor_list = [auth['inventor'] for auth in DataBrevet['brevets']]
label_list = [auth['label'] for auth in DataBrevet['brevets']]
title_list = [auth['title'] for auth in DataBrevet['brevets']]
data_dict = { 'label' : label_list, 'title' : title_list, 'inventor' : inventor_list }
df = pd.DataFrame(data_dict)
df.to_csv("data_inventor.csv", header=False, index=False)
temporPath = configFile.temporPath
ResultAbstractPath = configFile.ResultAbstractPath
#ResultClaimsPath = configFile.ResultClaimsPath
# add the Trizifiier template directory under the request's results directory; normalize the path for Windows
ResultPathContent= configFile.ResultContentsPath.replace('\\', '/' )
ResultTemplateFlask = os.path.join(ResultPathContent,'Trizifiier').replace('\\','/')
bigram_measures = nltk.collocations.BigramAssocMeasures()
trigram_measures = nltk.collocations.TrigramAssocMeasures()
if not os.path.exists(ResultTemplateFlask):  # create the Trizifiier output directory
os.mkdir(ResultTemplateFlask)
if not os.path.exists(ResultTemplateFlask+'/templates'):  # create the templates directory
os.mkdir(ResultTemplateFlask+'/templates')
if not os.path.exists(ResultTemplateFlask+'/DataFormat'):  # create the DataFormat directory
os.mkdir(ResultTemplateFlask+'/DataFormat')
# set the temporary working directory
temporar = configFile.temporPath
wns = WordNetSimilarity()
i=0
# build file list
#direct = os.path.normpath(ResultBiblioPath)
#direct = os.path.normpath(ResultClaimsPath)
direct = os.path.normpath(ResultAbstractPath)
# list the path of each txt document in the submitted query's folder (Fr, En, Unk hold the French, English and unknown-language files)
Fr, En, Unk = GenereListeFichiers(direct)
def convert_tag(tag):
tag_dict = {'N': 'n', 'J': 'a', 'R': 'r', 'V': 'v'}
try:
return tag_dict[tag[0]]
except KeyError:
return None
CountFile_R = 0
CountFile_W = 0
FichierOrg={}
# count the number of characters in En
#if len(En)
PSW = []  # empty word list, to be filled in as the searches progress
# minimalistic HTML for result file in html format
dataF = """"""  # will hold all the abstracts from the query folder
import codecs
#DejaVus = dict()
f=open(ResultTemplateFlask + '/DataFormat/FileDataAnalysisTrizWikiE.csv','w')
entetes = [
u'i',
u'label',
u'classe',
u'Action',
u'indiceSimAction',
u'abstract',
u'urlEspacenet'
]
ligneEntete=",".join(entetes)+"\n"
f.write(ligneEntete)
d= pd.read_csv("trizOxfordData.csv",delimiter=";")
dnew= pd.read_csv("FileTrizNewList.csv",delimiter=",")
classes = pd.DataFrame(dnew,columns=['Ref_classe'])
classes_syn = pd.DataFrame(dnew,columns=['syn_classe'])
classesUnique = classes.drop_duplicates(keep = False)
expansionTriz = classes_syn.drop_duplicates(keep = False)
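
# Minimal sketch of the matching step used in the loop below: expand both word
# lists into WordNet synsets and keep the pair with the best Wu-Palmer similarity.
# The helper is illustrative only and is not called anywhere in this script.
def best_wup_pair(words_a, words_b):
    from itertools import product
    from nltk.corpus import wordnet
    syns_a = set(ss for word in words_a for ss in wordnet.synsets(word))
    syns_b = set(ss for word in words_b for ss in wordnet.synsets(word))
    if not syns_a or not syns_b:
        return None
    # wup_similarity can return None for incompatible synsets, hence the "or 0"
    return max((wordnet.wup_similarity(s1, s2) or 0, s1, s2)
               for s1, s2 in product(syns_a, syns_b))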
tal = spacy.load('en_core_web_sm')
# read the txt files in a loop and store their content in dataF
for fic in En:
with codecs.open(fic, 'r', 'utf8') as File:
        dataF = File.readlines()  # single file: the first line of the abstract is dropped below
# dataF = '\n'.join(dataF)
# FichierOrg = dataF
abstract = '\n'.join(dataF[1:])
NumberBrevet= fic.split('-')[1]
#NumberBrevet=NumberBrevet.replace('*Label_','')
NumberBrevet=NumberBrevet.replace('.txt','')
#sys.exit(0)
# tokenization
abstract = re.sub("[^a-zA-Z#]", " ",str(abstract))
brevet = tal(abstract)
#Blob = TextBlob(abstract)
#wordlist=Blob.words #should give best results@ DR
        # remove stop-words and words shorter than 3 characters
filtered_sentence = [mot.lemma_ for mot in brevet if mot.pos_ == "NOUN" or mot.pos_ == "VERB"]
#for w in wordlist:
#if w not in stop_words and len(w) > 3:
#filtered_sentence.append(w)
#Document-Term Matrix
#print(filtered_sentence)
#print(resultType)
urlEspacenet="https://worldwide.espacenet.com/searchResults?submitted=true&locale=fr_EP&DB=EPODOC&ST=advanced&TI=&AB=&PN="+format(NumberBrevet)
matriceListe = []
matricelistePaire = []
matricelistePaireSort=[]
matricelistePaireAction = []
matricelistePaireObject = []
for classe in expansionTriz.keys() :
ExpansionClasse = expansionTriz[classe]
allsyns1 = set(ss for word in ExpansionClasse for ss in wordnet.synsets(word))
allsyns2 = set(ss for word in filtered_sentence for ss in wordnet.synsets(word))
best = max((wordnet.wup_similarity(s1, s2) or 0, s1, s2) for s1, s2 in product(allsyns1, allsyns2))
#print("allsyns1 ========",allsyns1)
#print("\n")
#print("allsyns2========",allsyns2)
print("best: ", best)
print("\n")
sys.exit()
f.close()
sys.exit()
#open file data semantic classification
d= pd.read_csv(ResultTemplateFlask + "/DataFormat/FileDataAnalysisTrizWikiE.csv")
df = pd.DataFrame(d,columns=['i','label','Term','Action','indiceSimAction','abstract','urlEspacenet'])
df.to_csv(ResultTemplateFlask + '/DataFormat/tableauTriE.csv')
sys.exit(0)  # stop here: the CSV generated above is the desired output; from this point on, the class display will be reworked
# sorted data by id and term ascending
dfmax = df.sort_values(by=['i','Term','indiceSimAction'],ascending=[True,True,False])
dfmax.to_csv(ResultTemplateFlask + '/DataFormat/tableauTri.csv')
# keep only the top similarity score for each term / action
dresult = dfmax.drop_duplicates(['Term'],keep='first')
dresult.to_csv(ResultTemplateFlask + '/DataFormat/tableauDrop.csv')
dresultmaxI=dresult.sort_values(by='indiceSimAction')
# create the formatted data file used by the Tabulator HTML view
dresultmaxI.to_csv(ResultTemplateFlask + '/DataFormat/resultatParserV2.csv')
dd=pd.read_csv(ResultTemplateFlask + '/DataFormat/resultatParserV2.csv')
dff = pd.DataFrame(dd,columns=['i','label','Action','Term','Patent Tags','indiceSimAction','abstract','urlEspacenet'])
dfjson= pd.DataFrame(dd,columns=['label','Action','Term','Patent Tags','abstract','urlEspacenet'])
dfjson.to_json(ResultTemplateFlask +'/DataFormat/caraTrizWikisemantic.json', orient='records', lines=False)
#shutil.copyfile("templates/sources", ResultTemplateFlask+"/sources")
ResFolder = configFile.ResultPath.replace('\\', '//')
ResFolder = ResFolder.replace('//','/')
shutil.copy("templates/P2N-Trizifyer-semantic.html", ResFolder)
# prepend the "var json_data = " declaration so the JSON can be loaded by the datatable
src = open(ResultTemplateFlask +'/DataFormat/caraTrizWikisemantic.json','r')
lineadd = " var json_data = "
online=src.readlines()
online.insert(0,lineadd)
src.close()
src = open(ResultTemplateFlask +'/DataFormat/caraTrizWikisemantic.json','w')
src.writelines(online)
src.close()
|
the-stack_106_27840 | #!/usr/bin/env python
# --coding:utf-8--
# Copyright (c) 2020 vesoft inc. All rights reserved.
#
# This source code is licensed under Apache 2.0 License,
# attached with Common Clause Condition 1.0, found in the LICENSES directory.
import sys
import os
from datetime import date
current_dir = os.path.dirname(os.path.abspath(__file__))
root_dir = os.path.join(current_dir, '..')
sys.path.insert(0, root_dir)
from nebula2.Exception import InvalidKeyException
from nebula2.common.ttypes import (
Value,
NullType,
Time,
DateTime,
NSet,
Date,
NList,
NMap,
ErrorCode
)
from nebula2.common import ttypes
from nebula2.graph import ttypes as graphTtype
from unittest import TestCase
from nebula2.data.ResultSet import ResultSet
from nebula2.data.DataObject import (
ValueWrapper,
Node,
Relationship,
PathWrapper,
TimeWrapper,
DateTimeWrapper,
DateWrapper,
Null,
Segment,
DataSetWrapper
)
class TestBaseCase(TestCase):
@classmethod
def get_vertex_value(self, vid):
vertex = ttypes.Vertex()
vertex.vid = ttypes.Value(sVal=vid)
vertex.tags = list()
for i in range(0, 3):
tag = ttypes.Tag()
tag.name = ('tag{}'.format(i)).encode('utf-8')
tag.props = dict()
for j in range(0, 5):
value = ttypes.Value()
value.set_iVal(j)
tag.props[('prop{}'.format(j)).encode('utf-8')] = value
vertex.tags.append(tag)
return vertex
@classmethod
def get_edge_value(self, src_id, dst_id, is_reverse=False):
edge = ttypes.Edge()
if not is_reverse:
edge.src = ttypes.Value(sVal=src_id)
edge.dst = ttypes.Value(sVal=dst_id)
else:
edge.src = ttypes.Value(sVal=dst_id)
edge.dst = ttypes.Value(sVal=src_id)
edge.type = 1
edge.name = b'classmate'
edge.ranking = 100
edge.props = dict()
for i in range(0, 5):
value = ttypes.Value()
value.set_iVal(i)
edge.props[('prop{}'.format(i)).encode('utf-8')] = value
return edge
@classmethod
def get_path_value(self, start_id, steps=5):
path = ttypes.Path()
path.src = self.get_vertex_value(start_id)
path.steps = list()
for i in range(0, steps):
step = ttypes.Step()
step.dst = self.get_vertex_value(('vertex{}'.format(i)).encode('utf-8'))
step.type = 1 if i % 2 == 0 else -1
step.name = b'classmate'
step.ranking = 100
step.props = dict()
for i in range(0, 5):
value = ttypes.Value()
value.set_iVal(i)
step.props[('prop{}'.format(i)).encode('utf-8')] = value
path.steps.append(step)
return path
@classmethod
def get_data_set(self):
data_set = ttypes.DataSet()
data_set.column_names = [b"col1_empty",
b"col2_null",
b"col3_bool",
b"col4_int",
b"col5_double",
b"col6_string",
b"col7_list",
b"col8_set",
b"col9_map",
b"col10_time",
b"col11_date",
b"col12_datetime",
b"col13_vertex",
b"col14_edge",
b"col15_path"]
row = ttypes.Row()
row.values = []
value1 = ttypes.Value()
row.values.append(value1)
value2 = ttypes.Value()
value2.set_nVal(NullType.BAD_DATA)
row.values.append(value2)
value3 = ttypes.Value()
value3.set_bVal(False)
row.values.append(value3)
value4 = ttypes.Value()
value4.set_iVal(100)
row.values.append(value4)
value5 = ttypes.Value()
value5.set_fVal(10.01)
row.values.append(value5)
value6 = ttypes.Value()
value6.set_sVal(b"hello world")
row.values.append(value6)
value7 = ttypes.Value()
str_val1 = ttypes.Value()
str_val1.set_sVal(b"word")
str_val2 = ttypes.Value()
str_val2.set_sVal(b"car")
list_val = NList()
list_val.values = [str_val1, str_val2]
value7.set_lVal(list_val)
row.values.append(value7)
value8 = ttypes.Value()
set_val = NSet()
set_val.values = set()
set_val.values.add(str_val1)
set_val.values.add(str_val2)
value8.set_uVal(set_val)
row.values.append(value8)
value9 = ttypes.Value()
map = NMap()
map.kvs = {b"a": str_val1, b"b": str_val2}
value9.set_mVal(map)
row.values.append(value9)
value10 = ttypes.Value()
value10.set_tVal(Time(10, 10, 10, 10000))
row.values.append(value10)
value11 = ttypes.Value()
value11.set_dVal(date(2020, 10, 1))
row.values.append(value11)
value12 = ttypes.Value()
value12.set_dtVal(DateTime(2020, 10, 1, 10, 10, 10, 10000))
row.values.append(value12)
value13 = ttypes.Value()
value13.set_vVal(self.get_vertex_value(b"Tom"))
row.values.append(value13)
value14 = ttypes.Value()
value14.set_eVal(self.get_edge_value(b"Tom", b"Lily"))
row.values.append(value14)
value15 = ttypes.Value()
value15.set_pVal(self.get_path_value(b"Tom", 3))
row.values.append(value15)
data_set.rows = []
data_set.rows.append(row)
data_set.rows.append(row)
return data_set
@classmethod
def get_result_set(self):
resp = graphTtype.ExecutionResponse()
resp.error_code = ErrorCode.E_BAD_PERMISSION
resp.error_msg = b"Permission"
resp.comment = b"Permission"
resp.space_name = b"test"
resp.latency_in_us = 100
resp.data = self.get_data_set()
return ResultSet(resp, 100)
class TestValueWrapper(TestBaseCase):
def test_as_bool(self):
value = ttypes.Value()
value.set_bVal(False)
value_wrapper = ValueWrapper(value)
assert value_wrapper.is_bool()
node = value_wrapper.as_bool()
assert isinstance(node, bool)
def test_as_int(self):
value = ttypes.Value()
value.set_iVal(100)
value_wrapper = ValueWrapper(value)
assert value_wrapper.is_int()
node = value_wrapper.as_int()
assert isinstance(node, int)
def test_as_double(self):
value = ttypes.Value()
value.set_fVal(10.10)
value_wrapper = ValueWrapper(value)
assert value_wrapper.is_double()
node = value_wrapper.as_double()
assert isinstance(node, float)
def test_as_string(self):
value = ttypes.Value()
value.set_sVal(b'Tom')
value_wrapper = ValueWrapper(value)
assert value_wrapper.is_string()
str_val = value_wrapper.as_string()
assert isinstance(str_val, str)
def test_as_list(self):
value = ttypes.Value()
str_val1 = ttypes.Value()
str_val1.set_sVal(b"word")
str_val2 = ttypes.Value()
str_val2.set_sVal(b"car")
val_list = NList()
val_list.values = [str_val1, str_val2]
value.set_lVal(val_list)
value_wrapper = ValueWrapper(value)
assert value_wrapper.is_list()
list_val = value_wrapper.as_list()
assert isinstance(list_val, list)
expect_result = [ValueWrapper(ttypes.Value(sVal=b"word")),
ValueWrapper(ttypes.Value(sVal=b"car"))]
assert list_val == expect_result
def test_as_set(self):
value = ttypes.Value()
str_val1 = ttypes.Value()
str_val1.set_sVal(b"word")
str_val2 = ttypes.Value()
str_val2.set_sVal(b"car")
set_val = NSet()
set_val.values = set()
set_val.values.add(str_val1)
set_val.values.add(str_val2)
value.set_uVal(set_val)
value_wrapper = ValueWrapper(value)
assert value_wrapper.is_set()
set_val = value_wrapper.as_set()
assert isinstance(set_val, set)
expect_result = set()
expect_result.add(ValueWrapper(ttypes.Value(sVal=b"word")))
expect_result.add(ValueWrapper(ttypes.Value(sVal=b"car")))
assert set_val == expect_result
def test_as_map(self):
value = ttypes.Value()
str_val1 = ttypes.Value()
str_val1.set_sVal(b"word")
str_val2 = ttypes.Value()
str_val2.set_sVal(b"car")
map_val = NMap()
map_val.kvs = {b"a": str_val1, b"b": str_val2}
value.set_mVal(map_val)
value_wrapper = ValueWrapper(value)
assert value_wrapper.is_map()
map_val = value_wrapper.as_map()
assert isinstance(map_val, dict)
expect_result = dict()
expect_result["a"] = ValueWrapper(ttypes.Value(sVal=b"word"))
expect_result["b"] = ValueWrapper(ttypes.Value(sVal=b"car"))
assert map_val == expect_result
def test_as_time(self):
time = Time()
time.hour = 10
time.minute = 20
time.sec = 10
time.microsec = 100
value = ttypes.Value(tVal = time)
value_wrapper = ValueWrapper(value)
assert value_wrapper.is_time()
time_val = value_wrapper.as_time()
assert isinstance(time_val, TimeWrapper)
assert time_val.get_hour() == 10
assert time_val.get_minute() == 20
assert time_val.get_sec() == 10
assert time_val.get_microsec() == 100
assert '10:20:10.000100' == str(time_val)
def test_as_date(self):
date = Date()
date.year = 220
date.month = 2
date.day = 10
value = ttypes.Value(dVal=date)
value_wrapper = ValueWrapper(value)
assert value_wrapper.is_date()
date_val = value_wrapper.as_date()
assert isinstance(date_val, DateWrapper)
assert date_val.get_year() == 220
assert date_val.get_month() == 2
assert date_val.get_day() == 10
assert '220-02-10' == str(date_val)
def test_as_datetime(self):
datetime = DateTime()
datetime.year = 123
datetime.month = 2
datetime.day = 1
datetime.hour = 10
datetime.minute = 20
datetime.sec = 10
datetime.microsec = 100
value = ttypes.Value(dtVal=datetime)
value_wrapper = ValueWrapper(value)
assert value_wrapper.is_datetime()
datetime_val = value_wrapper.as_datetime()
assert isinstance(datetime_val, DateTimeWrapper)
assert datetime_val.get_hour() == 10
assert datetime_val.get_minute() == 20
assert datetime_val.get_sec() == 10
assert datetime_val.get_microsec() == 100
assert '123-02-01T10:20:10.000100' == str(datetime_val)
def test_as_node(self):
value = ttypes.Value()
value.set_vVal(self.get_vertex_value(b'Tom'))
value_wrapper = ValueWrapper(value)
assert value_wrapper.is_vertex()
node = value_wrapper.as_node()
assert isinstance(node, Node)
def test_as_relationship(self):
value = ttypes.Value(eVal=self.get_edge_value(b'Tom', b'Lily'))
value_wrapper = ValueWrapper(value)
assert value_wrapper.is_edge()
relationship = value_wrapper.as_relationship()
assert isinstance(relationship, Relationship)
# test with reversely
reversely_value = ttypes.Value(eVal=self.get_edge_value(b'Lily', b'Tom', True))
reversely_value_wrapper = ValueWrapper(reversely_value)
reversely_relationship = reversely_value_wrapper.as_relationship()
assert isinstance(reversely_relationship, Relationship)
assert reversely_relationship == relationship
# test with reversely no equal
reversely_value = ttypes.Value(eVal=self.get_edge_value(b'Tom', b'Lily', True))
reversely_value_wrapper = ValueWrapper(reversely_value)
reversely_relationship = reversely_value_wrapper.as_relationship()
assert isinstance(reversely_relationship, Relationship)
assert reversely_relationship != relationship
def test_as_path(self):
value = ttypes.Value()
value.set_pVal(self.get_path_value(b'Tom'))
        value_wrapper = ValueWrapper(value)
        assert value_wrapper.is_path()
        node = value_wrapper.as_path()
assert isinstance(node, PathWrapper)
class TestNode(TestBaseCase):
def test_node_api(self):
test_set = set()
test_set.add(Value())
node = Node(self.get_vertex_value(b'Tom'))
assert 'Tom' == node.get_id().as_string()
assert node.has_tag('tag2')
assert ['prop0', 'prop1', 'prop2', 'prop3', 'prop4'] == node.prop_names('tag2')
assert [0, 1, 2, 3, 4] == [(value.as_int()) for value in node.prop_values('tag2')]
assert ['tag0', 'tag1', 'tag2'] == node.tags()
expect_propertys = {}
for key in node.propertys('tag2').keys():
expect_propertys[key] = node.propertys('tag2')[key].as_int()
assert {'prop0': 0, 'prop1': 1, 'prop2': 2, 'prop3': 3, 'prop4': 4} == expect_propertys
class TestRelationship(TestBaseCase):
def test_relationship_api(self):
relationship = Relationship(self.get_edge_value(b'Tom', b'Lily'))
assert 'Tom' == relationship.start_vertex_id().as_string()
assert 'Lily' == relationship.end_vertex_id().as_string()
assert 100 == relationship.ranking()
assert 100 == relationship.ranking()
assert 'classmate' == relationship.edge_name()
assert ['prop0', 'prop1', 'prop2', 'prop3', 'prop4'] == relationship.keys()
expect_propertys = {}
for key in relationship.propertys().keys():
expect_propertys[key] = relationship.propertys()[key].as_int()
assert {'prop0': 0, 'prop1': 1, 'prop2': 2, 'prop3': 3, 'prop4': 4} == expect_propertys
class TestPath(TestBaseCase):
def test_path_api(self):
path = PathWrapper(self.get_path_value(b'Tom'))
assert Node(self.get_vertex_value(b'Tom')) == path.start_node()
assert 5 == path.length()
assert path.contain_node(Node(self.get_vertex_value(b'vertex3')))
assert path.contain_relationship(Relationship(self.get_edge_value(b'vertex3', b'vertex2')))
nodes = list()
nodes.append(path.start_node())
for i in range(0, 5):
            nodes.append(Node(self.get_vertex_value(('vertex{}'.format(i)).encode('utf-8'))))
relationships = list()
relationships.append(Relationship(self.get_edge_value(b'Tom', b'vertex0')))
for i in range(0, 4):
if i % 2 == 0:
relationships.append(Relationship(
self.get_edge_value(('vertex{}'.format(i + 1)).encode('utf-8'),
('vertex{}'.format(i)).encode('utf-8'))))
else:
relationships.append(Relationship(
self.get_edge_value(('vertex{}'.format(i)).encode('utf-8'),
('vertex{}'.format(i + 1)).encode('utf-8'))))
assert relationships == path.relationships()
class TestDatesetWrapper(TestBaseCase):
def test_all(self):
data_set_warpper1 = DataSetWrapper(self.get_data_set())
data_set_warpper2 = DataSetWrapper(self.get_data_set())
# test iterator and compare
row_count = 0
for i in range(data_set_warpper1.get_row_size()):
row_count = row_count + 1
assert data_set_warpper1.row_values(i)[0] == data_set_warpper2.row_values(i)[0]
assert data_set_warpper1.row_values(i)[1] == data_set_warpper2.row_values(i)[1]
assert data_set_warpper1.row_values(i)[2] == data_set_warpper2.row_values(i)[2]
assert data_set_warpper1.row_values(i)[3] == data_set_warpper2.row_values(i)[3]
assert data_set_warpper1.row_values(i)[4] == data_set_warpper2.row_values(i)[4]
assert data_set_warpper1.row_values(i)[5] == data_set_warpper2.row_values(i)[5]
assert data_set_warpper1.row_values(i)[6] == data_set_warpper2.row_values(i)[6]
assert data_set_warpper1.row_values(i)[7] == data_set_warpper2.row_values(i)[7]
assert data_set_warpper1.row_values(i)[8] == data_set_warpper2.row_values(i)[8]
assert data_set_warpper1.row_values(i)[9] == data_set_warpper2.row_values(i)[9]
assert data_set_warpper1.row_values(i)[10] == data_set_warpper2.row_values(i)[10]
assert data_set_warpper1.row_values(i)[11] == data_set_warpper2.row_values(i)[11]
assert data_set_warpper1.row_values(i)[12] == data_set_warpper2.row_values(i)[12]
assert data_set_warpper1.row_values(i)[13] == data_set_warpper2.row_values(i)[13]
assert data_set_warpper1.row_values(i)[14] == data_set_warpper2.row_values(i)[14]
assert data_set_warpper1.row_values(i)[9] != data_set_warpper2.row_values(i)[8]
assert 2 == row_count
assert 2 == data_set_warpper1.get_row_size()
assert len(data_set_warpper1.column_values("col6_string")) == 2
assert data_set_warpper1.column_values("col6_string")[0].is_string()
assert data_set_warpper1.column_values("col6_string")[0].as_string() == 'hello world'
assert data_set_warpper1.column_values("col6_string")[1].as_string() == 'hello world'
assert data_set_warpper1.row_values(0)[5].is_string()
assert data_set_warpper1.row_values(1)[5].is_string()
assert data_set_warpper1.row_values(0)[5].as_string() == 'hello world'
assert data_set_warpper1.row_values(1)[5].as_string() == 'hello world'
class TestResultset(TestBaseCase):
def test_all_interface(self):
result = self.get_result_set()
assert result.space_name() == "test"
assert result.comment() == "Permission"
assert result.error_msg() == "Permission"
assert result.error_code() == ErrorCode.E_BAD_PERMISSION
assert result.plan_desc() is None
assert result.latency() == 100
assert not result.is_empty()
assert not result.is_succeeded()
expect_keys = ["col1_empty",
"col2_null",
"col3_bool",
"col4_int",
"col5_double",
"col6_string",
"col7_list",
"col8_set",
"col9_map",
"col10_time",
"col11_date",
"col12_datetime",
"col13_vertex",
"col14_edge",
"col15_path"]
assert result.keys() == expect_keys
assert result.col_size() == 15
assert result.row_size() == 2
# test column_values
assert len(result.column_values("col6_string")) == 2
assert result.column_values("col6_string")[0].is_string()
assert result.column_values("col6_string")[0].as_string() == "hello world"
# test row_values
assert len(result.row_values(0)) == 15
assert result.row_values(0)[5].is_string()
assert result.row_values(0)[5].as_string() == "hello world"
# test rows
assert len(result.rows()) == 2
assert len(result.rows()[0].values) == 15
assert isinstance(result.rows()[0].values[0], Value)
assert isinstance(result.get_row_types(), list)
# test get_row_types
assert result.get_row_types() == [ttypes.Value.__EMPTY__,
ttypes.Value.NVAL,
ttypes.Value.BVAL,
ttypes.Value.IVAL,
ttypes.Value.FVAL,
ttypes.Value.SVAL,
ttypes.Value.LVAL,
ttypes.Value.UVAL,
ttypes.Value.MVAL,
ttypes.Value.TVAL,
ttypes.Value.DVAL,
ttypes.Value.DTVAL,
ttypes.Value.VVAL,
ttypes.Value.EVAL,
ttypes.Value.PVAL]
# test record
in_use = False
for record in result:
in_use = True
            assert record.size() == 15
# test keys()
assert record.keys() == expect_keys
# test values()
values = record.values()
assert len(record.values()) == 15
assert record.values()[0].is_empty()
assert record.values()[5].is_string()
assert record.values()[5].is_string()
assert record.values()[5].as_string() == "hello world"
# test get_value()
assert record.get_value(0).is_empty()
assert values[0].is_empty()
assert record.get_value(1).is_null()
assert record.get_value(1).as_null() == Null(Null.BAD_DATA)
null_value = Value(nVal=Null.BAD_DATA)
assert record.get_value(1) == ValueWrapper(null_value)
assert str(record.get_value(1).as_null()) == 'BAD_DATA'
# test get_value_by_key()
assert record.get_value_by_key('col2_null').is_null()
assert record.get_value_by_key('col3_bool').is_bool()
assert not record.get_value_by_key('col3_bool').as_bool()
# get_value_by_key with not exited key
try:
record.get_value_by_key('not existed')
assert False, 'Not expect here'
except InvalidKeyException as e:
assert True
assert e.message == "KeyError: `not existed'"
assert values[1].is_null()
assert record.get_value(2).is_bool()
assert not record.get_value(2).as_bool()
assert record.get_value(2).is_bool()
assert record.get_value(3).is_int()
assert record.get_value(3).as_int() == 100
assert record.get_value(4).is_double()
assert record.get_value(4).as_double() == 10.01
assert record.get_value(5).is_string()
assert record.get_value(5).as_string() == "hello world"
assert record.get_value(6).is_list()
assert record.get_value(7).is_set()
assert record.get_value(8).is_map()
assert record.get_value(9).is_time()
assert record.get_value(10).is_date()
assert record.get_value(11).is_datetime()
assert record.get_value(12).is_vertex()
assert record.get_value(13).is_edge()
assert record.get_value(14).is_path()
assert in_use
# test use iterator again
in_use = False
for record in result:
in_use = True
            assert record.size() == 15
assert in_use
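
# Standalone sketch (not part of the test suite): the mocked response built by
# TestBaseCase can also be consumed outside unittest, e.g. to print the column
# names of the fake result set. It only relies on classes defined above.
def _print_mock_columns():
    result = TestBaseCase.get_result_set()
    for name in result.keys():
        print(name)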
|
the-stack_106_27841 | #!/usr/bin/python
"""
IOC Report Configuration File
"""
# VirusTotal configurations
vt_api_key = ''
# Hybrid-Analysis configurations
ha_api_key = ''
ha_secret_key = ''
# These settings are used to create the reports. File paths should be entered
# as the absolute path.
input_file = 'sample_resources.txt'
csv_output_file = 'sample_osint_report.csv'
txt_output_file = 'sample_osint_report.txt'
feed_output_file = 'sample_feed_report.csv'
# Web server configurations
host = '127.0.0.1'
port = 8080
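
# Illustrative consumption (file and module names are assumptions): other parts of
# the tool would import these values directly, e.g.
#
#     import config
#     print(config.vt_api_key, config.input_file, config.host, config.port)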
|
the-stack_106_27843 | """
============
Boxplot Demo
============
Example boxplot code
"""
import numpy as np
import matplotlib.pyplot as plt
# Fixing random state for reproducibility
np.random.seed(19680801)
# fake up some data
spread = np.random.rand(50) * 100
center = np.ones(25) * 50
flier_high = np.random.rand(10) * 100 + 100
flier_low = np.random.rand(10) * -100
data = np.concatenate((spread, center, flier_high, flier_low), 0)
###############################################################################
fig1, ax1 = plt.subplots()
ax1.set_title('Basic Plot')
ax1.boxplot(data)
###############################################################################
fig2, ax2 = plt.subplots()
ax2.set_title('Notched boxes')
ax2.boxplot(data, notch=True)
###############################################################################
green_diamond = dict(markerfacecolor='g', marker='D')
fig3, ax3 = plt.subplots()
ax3.set_title('Changed Outlier Symbols')
ax3.boxplot(data, flierprops=green_diamond)
###############################################################################
fig4, ax4 = plt.subplots()
ax4.set_title('Hide Outlier Points')
ax4.boxplot(data, showfliers=False)
###############################################################################
red_square = dict(markerfacecolor='r', marker='s')
fig5, ax5 = plt.subplots()
ax5.set_title('Horizontal Boxes')
ax5.boxplot(data, vert=False, flierprops=red_square)
###############################################################################
fig6, ax6 = plt.subplots()
ax6.set_title('Shorter Whisker Length')
ax6.boxplot(data, flierprops=red_square, vert=False, whis=0.75)
###############################################################################
# Fake up some more data
spread = np.random.rand(50) * 100
center = np.ones(25) * 40
flier_high = np.random.rand(10) * 100 + 100
flier_low = np.random.rand(10) * -100
d2 = np.concatenate((spread, center, flier_high, flier_low), 0)
data.shape = (-1, 1)
d2.shape = (-1, 1)
###############################################################################
# Making a 2-D array only works if all the columns are the
# same length. If they are not, then use a list instead.
# This is actually more efficient because boxplot converts
# a 2-D array into a list of vectors internally anyway.
data = [data, d2, d2[::2,0]]
fig7, ax7 = plt.subplots()
ax7.set_title('Multiple Samples with Different sizes')
ax7.boxplot(data)
plt.show()
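
# To run this demo headless (illustrative), the figures can be written to disk
# instead of shown, e.g.:
#
#     fig7.savefig("boxplot_demo.png", dpi=150)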
|
the-stack_106_27844 | from __future__ import print_function
import sys
sys.path.append(r"../..")
from pymdwizard.gui import fgdc_date
def test_single_date_setgetdate(qtbot):
widget = fgdc_date.FGDCDate()
qtbot.addWidget(widget)
widget.ui.fgdc_caldate.setText('1234')
assert widget.get_date() == '1234'
widget.set_date('4567')
assert widget.ui.fgdc_caldate.text() == '4567'
def test_single_date_init(qtbot):
widget = fgdc_date.FGDCDate(label='testing', show_format=False)
qtbot.addWidget(widget)
assert widget.ui.label.text() == 'testing'
assert widget.ui.widget_format.isHidden()
widget = fgdc_date.FGDCDate(label='testing', show_format=True)
qtbot.addWidget(widget)
assert not widget.ui.widget_format.isHidden()
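
# Illustrative invocation (assumes pytest and pytest-qt are installed; the file
# name is an assumption):
#
#     pytest -q test_fgdc_date.py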
|
the-stack_106_27845 | # -*- coding: utf-8 -*-
# Author: Your Name <[email protected]>
import argparse
import os
import tensorflow as tf
from tensorpack import *
"""
This is a boiler-plate template.
All the code in this file is the most minimalistic way to solve a deep-learning problem with cross-validation.
"""
BATCH_SIZE = 16
SHAPE = 28
CHANNELS = 3
class Model(ModelDesc):
def inputs(self):
return [tf.TensorSpec((None, SHAPE, SHAPE, CHANNELS), tf.float32, 'input1'),
tf.TensorSpec((None,), tf.int32, 'input2')]
def build_graph(self, input1, input2):
cost = tf.identity(input1 - input2, name='total_costs')
summary.add_moving_summary(cost)
return cost
def optimizer(self):
lr = tf.get_variable('learning_rate', initializer=5e-3, trainable=False)
return tf.train.AdamOptimizer(lr)
def get_data(subset):
# something that yields [[SHAPE, SHAPE, CHANNELS], [1]]
ds = FakeData([[SHAPE, SHAPE, CHANNELS], [1]], 1000, random=False,
dtype=['float32', 'uint8'], domain=[(0, 255), (0, 10)])
ds = PrefetchDataZMQ(ds, 2)
ds = BatchData(ds, BATCH_SIZE)
return ds
def get_config():
logger.auto_set_dir()
ds_train = get_data('train')
ds_test = get_data('test')
return TrainConfig(
model=Model(),
data=QueueInput(ds_train),
callbacks=[
ModelSaver(),
InferenceRunner(ds_test, [ScalarStats('total_costs')]),
],
steps_per_epoch=len(ds_train),
max_epoch=100,
)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
parser.add_argument('--load', help='load model')
args = parser.parse_args()
if args.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
config = get_config()
if args.load:
config.session_init = SaverRestore(args.load)
launch_train_with_config(config, SimpleTrainer())
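
# Illustrative invocations (flag names follow the argparse setup above; the script
# file name and checkpoint path are assumptions):
#
#     python template.py --gpu 0
#     python template.py --gpu 0,1 --load train_log/template/checkpoint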
|
the-stack_106_27848 | # (c) 2019 Piotr Wojciechowski (@wojciechowskipiotr) <[email protected]>
# (c) Thierry Bouvet (@tbouvet)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from time import sleep
try:
from docker.errors import APIError, NotFound
except ImportError:
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
pass
from ansible.module_utils._text import to_native
from ansible_collections.community.docker.plugins.module_utils.common import (
AnsibleDockerClient,
LooseVersion,
)
class AnsibleDockerSwarmClient(AnsibleDockerClient):
def __init__(self, **kwargs):
super(AnsibleDockerSwarmClient, self).__init__(**kwargs)
def get_swarm_node_id(self):
"""
        Get the 'NodeID' of the Swarm node, or 'None' if the host is not in a Swarm. It returns the NodeID
        of the Docker host the module is executed on.
:return:
NodeID of host or 'None' if not part of Swarm
"""
try:
info = self.info()
except APIError as exc:
self.fail("Failed to get node information for %s" % to_native(exc))
if info:
json_str = json.dumps(info, ensure_ascii=False)
swarm_info = json.loads(json_str)
if swarm_info['Swarm']['NodeID']:
return swarm_info['Swarm']['NodeID']
return None
def check_if_swarm_node(self, node_id=None):
"""
        Check whether the host is part of a Docker Swarm. If 'node_id' is not provided, the method reads the
        Docker host system information and checks whether the relevant Swarm key exists in the output. If
        'node_id' is provided, it tries to read that node's information, assuming it runs on a Swarm manager.
        The get_node_inspect() method handles the exception if it is not executed on a Swarm manager.
:param node_id: Node identifier
:return:
bool: True if node is part of Swarm, False otherwise
"""
if node_id is None:
try:
info = self.info()
except APIError:
self.fail("Failed to get host information.")
if info:
json_str = json.dumps(info, ensure_ascii=False)
swarm_info = json.loads(json_str)
if swarm_info['Swarm']['NodeID']:
return True
if swarm_info['Swarm']['LocalNodeState'] in ('active', 'pending', 'locked'):
return True
return False
else:
try:
node_info = self.get_node_inspect(node_id=node_id)
except APIError:
return
if node_info['ID'] is not None:
return True
return False
def check_if_swarm_manager(self):
"""
Checks if node role is set as Manager in Swarm. The node is the docker host on which module action
is performed. The inspect_swarm() will fail if node is not a manager
:return: True if node is Swarm Manager, False otherwise
"""
try:
self.inspect_swarm()
return True
except APIError:
return False
def fail_task_if_not_swarm_manager(self):
"""
If host is not a swarm manager then Ansible task on this host should end with 'failed' state
"""
if not self.check_if_swarm_manager():
self.fail("Error running docker swarm module: must run on swarm manager node")
def check_if_swarm_worker(self):
"""
Checks if node role is set as Worker in Swarm. The node is the docker host on which module action
is performed. Will fail if run on host that is not part of Swarm via check_if_swarm_node()
:return: True if node is Swarm Worker, False otherwise
"""
if self.check_if_swarm_node() and not self.check_if_swarm_manager():
return True
return False
def check_if_swarm_node_is_down(self, node_id=None, repeat_check=1):
"""
        Checks whether the node status on the Swarm manager is 'down'. If node_id is provided, it queries the
        manager about the node specified in the parameter; otherwise it queries the manager about itself. If run
        on a Swarm worker node, or on a host that is not part of a Swarm, it will fail the playbook.
:param repeat_check: number of check attempts with 5 seconds delay between them, by default check only once
:param node_id: node ID or name, if None then method will try to get node_id of host module run on
:return:
True if node is part of swarm but its state is down, False otherwise
"""
if repeat_check < 1:
repeat_check = 1
if node_id is None:
node_id = self.get_swarm_node_id()
for retry in range(0, repeat_check):
if retry > 0:
sleep(5)
node_info = self.get_node_inspect(node_id=node_id)
if node_info['Status']['State'] == 'down':
return True
return False
def get_node_inspect(self, node_id=None, skip_missing=False):
"""
Returns Swarm node info as in 'docker node inspect' command about single node
:param skip_missing: if True then function will return None instead of failing the task
:param node_id: node ID or name, if None then method will try to get node_id of host module run on
:return:
Single node information structure
"""
if node_id is None:
node_id = self.get_swarm_node_id()
if node_id is None:
self.fail("Failed to get node information.")
try:
node_info = self.inspect_node(node_id=node_id)
except APIError as exc:
if exc.status_code == 503:
self.fail("Cannot inspect node: To inspect node execute module on Swarm Manager")
if exc.status_code == 404:
if skip_missing:
return None
self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
except Exception as exc:
self.fail("Error inspecting swarm node: %s" % exc)
json_str = json.dumps(node_info, ensure_ascii=False)
node_info = json.loads(json_str)
if 'ManagerStatus' in node_info:
if node_info['ManagerStatus'].get('Leader'):
# This is workaround of bug in Docker when in some cases the Leader IP is 0.0.0.0
# Check moby/moby#35437 for details
count_colons = node_info['ManagerStatus']['Addr'].count(":")
if count_colons == 1:
swarm_leader_ip = node_info['ManagerStatus']['Addr'].split(":", 1)[0] or node_info['Status']['Addr']
else:
swarm_leader_ip = node_info['Status']['Addr']
node_info['Status']['Addr'] = swarm_leader_ip
return node_info
def get_all_nodes_inspect(self):
"""
Returns Swarm node info as in 'docker node inspect' command about all registered nodes
:return:
Structure with information about all nodes
"""
try:
node_info = self.nodes()
except APIError as exc:
if exc.status_code == 503:
self.fail("Cannot inspect node: To inspect node execute module on Swarm Manager")
self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
except Exception as exc:
self.fail("Error inspecting swarm node: %s" % exc)
json_str = json.dumps(node_info, ensure_ascii=False)
node_info = json.loads(json_str)
return node_info
def get_all_nodes_list(self, output='short'):
"""
Returns list of nodes registered in Swarm
:param output: Defines format of returned data
:return:
If 'output' is 'short' then return data is list of nodes hostnames registered in Swarm,
if 'output' is 'long' then returns data is list of dict containing the attributes as in
output of command 'docker node ls'
"""
nodes_list = []
nodes_inspect = self.get_all_nodes_inspect()
if nodes_inspect is None:
return None
if output == 'short':
for node in nodes_inspect:
nodes_list.append(node['Description']['Hostname'])
elif output == 'long':
for node in nodes_inspect:
node_property = {}
node_property.update({'ID': node['ID']})
node_property.update({'Hostname': node['Description']['Hostname']})
node_property.update({'Status': node['Status']['State']})
node_property.update({'Availability': node['Spec']['Availability']})
if 'ManagerStatus' in node:
if node['ManagerStatus']['Leader'] is True:
node_property.update({'Leader': True})
node_property.update({'ManagerStatus': node['ManagerStatus']['Reachability']})
node_property.update({'EngineVersion': node['Description']['Engine']['EngineVersion']})
nodes_list.append(node_property)
else:
return None
return nodes_list
def get_node_name_by_id(self, nodeid):
return self.get_node_inspect(nodeid)['Description']['Hostname']
def get_unlock_key(self):
if self.docker_py_version < LooseVersion('2.7.0'):
return None
return super(AnsibleDockerSwarmClient, self).get_unlock_key()
def get_service_inspect(self, service_id, skip_missing=False):
"""
Returns Swarm service info as in 'docker service inspect' command about single service
:param service_id: service ID or name
:param skip_missing: if True then function will return None instead of failing the task
:return:
Single service information structure
"""
try:
service_info = self.inspect_service(service_id)
except NotFound as exc:
if skip_missing is False:
self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
else:
return None
except APIError as exc:
if exc.status_code == 503:
self.fail("Cannot inspect service: To inspect service execute module on Swarm Manager")
self.fail("Error inspecting swarm service: %s" % exc)
except Exception as exc:
self.fail("Error inspecting swarm service: %s" % exc)
json_str = json.dumps(service_info, ensure_ascii=False)
service_info = json.loads(json_str)
return service_info
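
# Illustrative usage from a calling Ansible module (sketch only; the real
# argument_spec and option handling are supplied by the module that instantiates
# the client):
#
#     client = AnsibleDockerSwarmClient(argument_spec=dict())
#     client.fail_task_if_not_swarm_manager()
#     nodes = client.get_all_nodes_list(output='long')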
|
the-stack_106_27851 | """Template for the build file used in android_sdk_repository."""
# Copyright 2016 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def create_config_setting_rule():
"""Create config_setting rule for windows.
These represent the matching --host_cpu values.
"""
name = "windows"
if not native.existing_rule(name):
native.config_setting(
name = name,
values = {"host_cpu": "x64_" + name},
)
def create_android_sdk_rules(
name,
build_tools_version,
build_tools_directory,
api_levels,
default_api_level):
"""Generate android_sdk rules for the API levels in the Android SDK.
Args:
name: string, the name of the repository being generated.
build_tools_version: string, the version of Android's build tools to use.
build_tools_directory: string, the directory name of the build tools in
sdk's build-tools directory.
api_levels: list of ints, the API levels from which to get android.jar
et al. and create android_sdk rules.
default_api_level: int, the API level to alias the default sdk to if
--android_sdk is not specified on the command line.
"""
create_config_setting_rule()
windows_only_files = [
"build-tools/%s/aapt.exe" % build_tools_directory,
"build-tools/%s/aidl.exe" % build_tools_directory,
"build-tools/%s/zipalign.exe" % build_tools_directory,
"platform-tools/adb.exe",
] + native.glob(["build-tools/%s/aapt2.exe" % build_tools_directory])
linux_only_files = [
"build-tools/%s/aapt" % build_tools_directory,
"build-tools/%s/aidl" % build_tools_directory,
"build-tools/%s/zipalign" % build_tools_directory,
"platform-tools/adb",
] + native.glob(
["extras", "build-tools/%s/aapt2" % build_tools_directory],
exclude_directories = 0,
)
# This filegroup is used to pass the minimal contents of the SDK to the
# Android integration tests. Note that in order to work on Windows, we cannot
# include directories and must keep the size small.
native.filegroup(
name = "files",
srcs = [
"build-tools/%s/lib/apksigner.jar" % build_tools_directory,
"build-tools/%s/lib/dx.jar" % build_tools_directory,
"build-tools/%s/mainDexClasses.rules" % build_tools_directory,
] + [
"platforms/android-%d/%s" % (api_level, filename)
for api_level in api_levels
for filename in ["android.jar", "framework.aidl"]
] + select({
":windows": windows_only_files,
"//conditions:default": linux_only_files,
}),
)
for api_level in api_levels:
if api_level >= 23:
# Android 23 removed most of org.apache.http from android.jar and moved it
# to a separate jar.
native.java_import(
name = "org_apache_http_legacy-%d" % api_level,
jars = ["platforms/android-%d/optional/org.apache.http.legacy.jar" % api_level],
)
if api_level >= 28:
# Android 28 removed most of android.test from android.jar and moved it
# to separate jars.
native.java_import(
name = "legacy_test-%d" % api_level,
jars = [
"platforms/android-%d/optional/android.test.base.jar" % api_level,
"platforms/android-%d/optional/android.test.mock.jar" % api_level,
"platforms/android-%d/optional/android.test.runner.jar" % api_level,
],
neverlink = 1,
)
native.android_sdk(
name = "sdk-%d" % api_level,
build_tools_version = build_tools_version,
proguard = "@bazel_tools//tools/jdk:proguard",
aapt = select({
":windows": "build-tools/%s/aapt.exe" % build_tools_directory,
"//conditions:default": ":aapt_binary",
}),
aapt2 = select({
":windows": "build-tools/%s/aapt2.exe" % build_tools_directory,
"//conditions:default": ":aapt2_binary",
}),
dx = ":dx_binary",
main_dex_list_creator = ":main_dex_list_creator",
adb = select({
":windows": "platform-tools/adb.exe",
"//conditions:default": "platform-tools/adb",
}),
framework_aidl = "platforms/android-%d/framework.aidl" % api_level,
aidl = select({
":windows": "build-tools/%s/aidl.exe" % build_tools_directory,
"//conditions:default": ":aidl_binary",
}),
android_jar = "platforms/android-%d/android.jar" % api_level,
shrinked_android_jar = "platforms/android-%d/android.jar" % api_level,
main_dex_classes = "build-tools/%s/mainDexClasses.rules" % build_tools_directory,
apksigner = ":apksigner",
zipalign = select({
":windows": "build-tools/%s/zipalign.exe" % build_tools_directory,
"//conditions:default": ":zipalign_binary",
}),
)
native.alias(
name = "org_apache_http_legacy",
actual = ":org_apache_http_legacy-%d" % default_api_level,
)
native.alias(
name = "sdk",
actual = ":sdk-%d" % default_api_level,
)
native.java_binary(
name = "apksigner",
main_class = "com.android.apksigner.ApkSignerTool",
runtime_deps = ["build-tools/%s/lib/apksigner.jar" % build_tools_directory],
)
native.filegroup(
name = "build_tools_libs",
srcs = native.glob([
"build-tools/%s/lib/**" % build_tools_directory,
# Build tools version 24.0.0 added a lib64 folder.
"build-tools/%s/lib64/**" % build_tools_directory,
]),
)
for tool in ["aapt", "aapt2", "aidl", "zipalign"]:
native.genrule(
name = tool + "_runner",
outs = [tool + "_runner.sh"],
srcs = [],
cmd = "\n".join([
"cat > $@ << 'EOF'",
"#!/bin/bash",
"set -eu",
# The tools under build-tools/VERSION require the libraries under
# build-tools/VERSION/lib, so we can't simply depend on them as a
# file like we do with aapt.
# On Windows however we can use these binaries directly because
# there's no runfiles support so Bazel just creates a junction to
# {SDK}/build-tools.
"SDK=$${0}.runfiles/%s" % name,
# If $${SDK} is not a directory, it means that this tool is running
# from a runfiles directory, in the case of
# android_instrumentation_test. Hence, use the androidsdk
# that's already present in the runfiles of the current context.
"if [[ ! -d $${SDK} ]] ; then",
" SDK=$$(pwd)/../%s" % name,
"fi",
"exec $${SDK}/build-tools/%s/%s $$*" % (build_tools_directory, tool),
"EOF\n",
]),
)
native.sh_binary(
name = tool + "_binary",
srcs = [tool + "_runner.sh"],
data = [
":build_tools_libs",
"build-tools/%s/%s" % (build_tools_directory, tool),
],
)
native.sh_binary(
name = "fail",
srcs = select({
":windows": [":generate_fail_cmd"],
"//conditions:default": [":generate_fail_sh"],
}),
)
native.genrule(
name = "generate_fail_sh",
executable = 1,
outs = ["fail.sh"],
cmd = "echo -e '#!/bin/bash\\nexit 1' >> $@; chmod +x $@",
)
native.genrule(
name = "generate_fail_cmd",
executable = 1,
outs = ["fail.cmd"],
cmd = "echo @exit /b 1 > $@",
)
native.genrule(
name = "main_dex_list_creator_source",
srcs = [],
outs = ["main_dex_list_creator.sh"],
cmd = "\n".join([
"cat > $@ <<'EOF'",
"#!/bin/bash",
"",
"MAIN_DEX_LIST=$$1",
"STRIPPED_JAR=$$2",
"JAR=$$3",
"" +
"JAVA_BINARY=$$0.runfiles/%s/main_dex_list_creator_java" % name,
"$$JAVA_BINARY $$STRIPPED_JAR $$JAR > $$MAIN_DEX_LIST",
"exit $$?",
"",
"EOF\n",
]),
)
native.sh_binary(
name = "main_dex_list_creator",
srcs = ["main_dex_list_creator.sh"],
data = [":main_dex_list_creator_java"],
)
native.java_binary(
name = "main_dex_list_creator_java",
main_class = "com.android.multidex.ClassReferenceListBuilder",
runtime_deps = [":dx_jar_import"],
)
native.java_binary(
name = "dx_binary",
main_class = "com.android.dx.command.Main",
runtime_deps = [":dx_jar_import"],
)
native.java_import(
name = "dx_jar_import",
jars = ["build-tools/%s/lib/dx.jar" % build_tools_directory],
)
TAGDIR_TO_TAG_MAP = {
"google_apis": "google",
"default": "android",
"android-tv": "tv",
"android-wear": "wear",
}
ARCHDIR_TO_ARCH_MAP = {
"x86": "x86",
"armeabi-v7a": "arm",
}
def create_system_images_filegroups(system_image_dirs):
"""Generate filegroups for the system images in the Android SDK.
Args:
system_image_dirs: list of strings, the directories containing system image
files to be used to create android_device rules.
"""
# These images will need to be updated as Android releases new system images.
# We are intentionally not adding future releases because there is no
# guarantee that they will work out of the box. Supported system images should
# be added here once they have been confirmed to work with the Bazel Android
# testing infrastructure.
system_images = [
(tag, str(api), arch)
for tag in ["android", "google"]
for api in [10] + list(range(15, 20)) + list(range(21, 29))
for arch in ("x86", "arm")
]
tv_images = [
("tv", str(api), arch)
for api in range(21, 25)
for arch in ("x86", "arm")
]
wear_images = [
("wear", str(api), "x86")
for api in range(20, 26)
] + [
("wear", str(api), "arm")
for api in range(24, 26)
]
supported_system_images = system_images + tv_images + wear_images
installed_system_images_dirs = {}
for system_image_dir in system_image_dirs:
apidir, tagdir, archdir = system_image_dir.split("/")[1:]
if "-" not in apidir:
continue
api = apidir.split("-")[1] # "android-24" --> "24", "android-O" --> "O"
if tagdir not in TAGDIR_TO_TAG_MAP:
continue
tag = TAGDIR_TO_TAG_MAP[tagdir]
if archdir not in ARCHDIR_TO_ARCH_MAP:
continue
arch = ARCHDIR_TO_ARCH_MAP[archdir]
if (tag, api, arch) in supported_system_images:
name = "emulator_images_%s_%s_%s" % (tag, api, arch)
installed_system_images_dirs[name] = system_image_dir
else:
# TODO(bazel-team): If the user has an unsupported system image installed,
# should we print a warning? This includes all 64-bit system-images.
pass
for (tag, api, arch) in supported_system_images:
name = "emulator_images_%s_%s_%s" % (tag, api, arch)
if name in installed_system_images_dirs:
system_image_dir = installed_system_images_dirs[name]
# For supported system images that exist in /sdk/system-images/, we
# create a filegroup with their contents.
native.filegroup(
name = name,
srcs = native.glob([
"%s/**" % system_image_dir,
]),
)
native.filegroup(
name = "%s_qemu2_extra" % name,
srcs = native.glob(["%s/kernel-ranchu" % system_image_dir]),
)
else:
# For supported system images that are not installed in the SDK, we
# create a "poison pill" genrule to display a helpful error message to
# a user who attempts to run a test against an android_device that
# they don't have the system image for installed.
native.genrule(
name = name,
outs = [
# Necessary so that the build doesn't fail in analysis because
# android_device expects a file named source.properties.
"poison_pill_for_%s/source.properties" % name,
],
cmd = """echo \
This rule requires that the Android SDK used by Bazel has the \
following system image installed: %s. Please install this system \
image through the Android SDK Manager and try again. ; \
exit 1
""" % name,
)
native.filegroup(
name = "%s_qemu2_extra" % name,
srcs = [],
)
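
# Illustrative call site (values are placeholders; in practice the call is
# generated into the repository BUILD file by android_sdk_repository when it
# instantiates this template):
#
# create_android_sdk_rules(
#     name = "androidsdk",
#     build_tools_version = "28.0.3",
#     build_tools_directory = "28.0.3",
#     api_levels = [28],
#     default_api_level = 28,
# )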
|
the-stack_106_27852 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from .. import BasicTokenizer, PretrainedTokenizer, WordpieceTokenizer
__all__ = ['ElectraTokenizer', ]
class ElectraTokenizer(PretrainedTokenizer):
"""
    Constructs an Electra tokenizer. It uses a basic tokenizer to do punctuation
    splitting, lower casing and so on, and then follows it with a WordPiece
    tokenizer to split words into subwords.
Args:
vocab_file (str): file path of the vocabulary
        do_lower_case (bool): Whether to lowercase the input text and strip
            accents. Default: `True`.
        unk_token (str): The special token for unknown words. Default: "[UNK]".
sep_token (str): The special token for separator token . Default: "[SEP]".
pad_token (str): The special token for padding. Default: "[PAD]".
cls_token (str): The special token for cls. Default: "[CLS]".
mask_token (str): The special token for mask. Default: "[MASK]".
Examples:
.. code-block:: python
from paddlenlp.transformers import ElectraTokenizer
tokenizer = ElectraTokenizer.from_pretrained('electra-small-discriminator')
# the following line get: ['he', 'was', 'a', 'puppet', '##eer']
tokens = tokenizer('He was a puppeteer')
# the following line get: 'he was a puppeteer'
tokenizer.convert_tokens_to_string(tokens)
"""
resource_files_names = {"vocab_file": "vocab.txt"} # for save_pretrained
pretrained_resource_files_map = {
"vocab_file": {
"electra-small":
"https://paddlenlp.bj.bcebos.com/models/transformers/electra/electra-small-vocab.txt",
"electra-base":
"https://paddlenlp.bj.bcebos.com/models/transformers/electra/electra-base-vocab.txt",
"electra-large":
"https://paddlenlp.bj.bcebos.com/models/transformers/electra/electra-large-vocab.txt",
"chinese-electra-base":
"http://paddlenlp.bj.bcebos.com/models/transformers/chinese-electra-base/vocab.txt",
"chinese-electra-small":
"http://paddlenlp.bj.bcebos.com/models/transformers/chinese-electra-small/vocab.txt",
}
}
pretrained_init_configuration = {
"electra-small": {
"do_lower_case": True
},
"electra-base": {
"do_lower_case": True
},
"electra-large": {
"do_lower_case": True
},
"chinese-electra-base": {
"do_lower_case": True
},
"chinese-electra-small": {
"do_lower_case": True
}
}
def __init__(self,
vocab_file,
do_lower_case=True,
unk_token="[UNK]",
sep_token="[SEP]",
pad_token="[PAD]",
cls_token="[CLS]",
mask_token="[MASK]"):
if not os.path.isfile(vocab_file):
raise ValueError(
"Can't find a vocabulary file at path '{}'. To load the "
"vocabulary from a pretrained model please use "
"`tokenizer = ElectraTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
.format(vocab_file))
self.vocab = self.load_vocabulary(vocab_file, unk_token=unk_token)
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
self.wordpiece_tokenizer = WordpieceTokenizer(
vocab=self.vocab, unk_token=unk_token)
@property
def vocab_size(self):
"""
return the size of vocabulary.
Returns:
int: the size of vocabulary.
"""
return len(self.vocab)
def _tokenize(self, text):
"""
End-to-end tokenization for Electra models.
Args:
text (str): The text to be tokenized.
Returns:
list: A list of string representing converted tokens.
"""
split_tokens = []
for token in self.basic_tokenizer.tokenize(text):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
return split_tokens
def __call__(self, text):
"""
End-to-end tokenization for Electra models.
Args:
text (str): The text to be tokenized.
Returns:
list: A list of string representing converted tokens.
"""
return self._tokenize(text)
def convert_tokens_to_string(self, tokens):
"""
Converts a sequence of tokens (list of string) in a single string. Since
the usage of WordPiece introducing `##` to concat subwords, also remove
`##` when converting.
Args:
tokens (list): A list of string representing tokens to be converted.
Returns:
str: Converted string from tokens.
"""
out_string = " ".join(tokens).replace(" ##", "").strip()
return out_string
def num_special_tokens_to_add(self, pair=False):
"""
Returns the number of added tokens when encoding a sequence with special tokens.
Note:
This encodes inputs and checks the number of added tokens, and is therefore not efficient. Do not put this
inside your training loop.
Args:
pair: Returns the number of added tokens in the case of a sequence pair if set to True, returns the
number of added tokens in the case of a single sequence if set to False.
Returns:
Number of tokens added to sequences
"""
token_ids_0 = []
token_ids_1 = []
return len(
self.build_inputs_with_special_tokens(token_ids_0, token_ids_1
if pair else None))
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens.
A BERT sequence has the following format:
::
- single sequence: ``[CLS] X [SEP]``
- pair of sequences: ``[CLS] A [SEP] B [SEP]``
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
:obj:`List[int]`: List of input_id with the appropriate special tokens.
"""
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
_cls = [self.cls_token_id]
_sep = [self.sep_token_id]
return _cls + token_ids_0 + _sep + token_ids_1 + _sep
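        # Worked example (illustrative values): for token_ids_0=[5, 6] and
        # token_ids_1=[7], the result is [cls, 5, 6, sep, 7, sep],
        # i.e. "[CLS] A [SEP] B [SEP]".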
def create_token_type_ids_from_sequences(self,
token_ids_0,
token_ids_1=None):
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task.
A BERT sequence pair mask has the following format:
::
0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
| first sequence | second sequence |
If :obj:`token_ids_1` is :obj:`None`, this method only returns the first portion of the mask (0s).
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
:obj:`List[int]`: List of token_type_id according to the given sequence(s).
"""
_sep = [self.sep_token_id]
_cls = [self.cls_token_id]
if token_ids_1 is None:
return len(_cls + token_ids_0 + _sep) * [0]
return len(_cls + token_ids_0 + _sep) * [0] + len(token_ids_1 +
_sep) * [1]
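        # Worked example (illustrative values): token_ids_0=[5, 6], token_ids_1=[7]
        # yields [0, 0, 0, 0, 1, 1]: zeros cover "[CLS] A [SEP]", ones cover "B [SEP]".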
def get_special_tokens_mask(self,
token_ids_0,
token_ids_1=None,
already_has_special_tokens=False):
"""
Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer ``encode`` methods.
Args:
token_ids_0 (List[int]): List of ids of the first sequence.
            token_ids_1 (List[int], optional): List of ids of the second sequence.
            already_has_special_tokens (bool, optional): Whether or not the token list is already
                formatted with special tokens for the model. Defaults to False.
Returns:
results (List[int]): The list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
if token_ids_1 is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model."
)
return list(
map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0,
token_ids_0))
if token_ids_1 is not None:
return [1] + ([0] * len(token_ids_0)) + [1] + (
[0] * len(token_ids_1)) + [1]
return [1] + ([0] * len(token_ids_0)) + [1]
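    # Illustrative sketch (hypothetical token ids, without pre-existing special tokens):
    #   get_special_tokens_mask([5, 6], [7, 8]) -> [1, 0, 0, 1, 0, 0, 1]
    # where 1 marks the positions that will hold special tokens after encoding.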
def encode(self,
text,
text_pair=None,
max_seq_len=None,
pad_to_max_seq_len=True,
truncation_strategy="longest_first",
return_position_ids=True,
return_segment_ids=True,
return_input_mask=True,
return_length=True,
return_overflowing_tokens=False,
return_special_tokens_mask=False):
"""
Returns a dictionary containing the encoded sequence or sequence pair and additional information:
the mask for sequence classification and the overflowing elements if a ``max_seq_len`` is specified.
Args:
text (:obj:`str`, :obj:`List[str]` or :obj:`List[int]`):
The first sequence to be encoded. This can be a string, a list of strings (tokenized string using
the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
method)
text_pair (:obj:`str`, :obj:`List[str]` or :obj:`List[int]`, `optional`, defaults to :obj:`None`):
Optional second sequence to be encoded. This can be a string, a list of strings (tokenized
string using the `tokenize` method) or a list of integers (tokenized string ids using the
`convert_tokens_to_ids` method)
            max_seq_len (:obj:`int`, `optional`, defaults to :obj:`None`):
If set to a number, will limit the total sequence returned so that it has a maximum length.
If there are overflowing tokens, those will be added to the returned dictionary
pad_to_max_seq_len (:obj:`bool`, `optional`, defaults to :obj:`True`):
If set to True, the returned sequences will be padded according to the model's padding side and
padding index, up to their max length. If no max length is specified, the padding is done up to the
model's max length.
truncation_strategy (:obj:`str`, `optional`, defaults to `longest_first`):
String selected in the following options:
                - 'longest_first' (default): Iteratively reduce the input sequences until the total length is under max_seq_len,
                    removing one token at a time from the longest sequence (when there is a pair of input sequences)
- 'only_first': Only truncate the first sequence
- 'only_second': Only truncate the second sequence
- 'do_not_truncate': Does not truncate (raise an error if the input sequence is longer than max_seq_len)
return_position_ids (:obj:`bool`, `optional`, defaults to :obj:`True`):
Set to True to return tokens position ids (default True).
return_segment_ids (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether to return token type IDs.
return_input_mask (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether to return the attention mask.
            return_length (:obj:`bool`, `optional`, defaults to :obj:`True`):
If set the resulting dictionary will include the length of each encoded inputs
return_overflowing_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
Set to True to return overflowing token information (default False).
return_special_tokens_mask (:obj:`bool`, `optional`, defaults to :obj:`False`):
Set to True to return special tokens mask information (default False).
Return:
A Dictionary of shape::
{
input_ids: list[int],
position_ids: list[int] if return_position_ids is True (default)
segment_ids: list[int] if return_segment_ids is True (default)
input_mask: list[int] if return_input_mask is True (default)
seq_len: int if return_length is True (default)
overflowing_tokens: list[int] if a ``max_seq_len`` is specified and return_overflowing_tokens is True
num_truncated_tokens: int if a ``max_seq_len`` is specified and return_overflowing_tokens is True
special_tokens_mask: list[int] if return_special_tokens_mask is True
}
With the fields:
- ``input_ids``: list of token ids to be fed to a model
- ``position_ids``: list of token position ids to be fed to a model
- ``segment_ids``: list of token type ids to be fed to a model
- ``input_mask``: list of indices specifying which tokens should be attended to by the model
- ``length``: the input_ids length
- ``overflowing_tokens``: list of overflowing tokens if a max length is specified.
            - ``num_truncated_tokens``: number of overflowing tokens when a ``max_seq_len`` is specified
- ``special_tokens_mask``: if adding special tokens, this is a list of [0, 1], with 0 specifying special added
tokens and 1 specifying sequence tokens.
"""
def get_input_ids(text):
if isinstance(text, str):
tokens = self._tokenize(text)
return self.convert_tokens_to_ids(tokens)
elif isinstance(text,
(list, tuple)) and len(text) > 0 and isinstance(
text[0], str):
return self.convert_tokens_to_ids(text)
elif isinstance(text,
(list, tuple)) and len(text) > 0 and isinstance(
text[0], int):
return text
else:
raise ValueError(
"Input is not valid. Should be a string, a list/tuple of strings or a list/tuple of integers."
)
ids = get_input_ids(text)
pair_ids = get_input_ids(text_pair) if text_pair is not None else None
pair = bool(pair_ids is not None)
len_ids = len(ids)
len_pair_ids = len(pair_ids) if pair else 0
encoded_inputs = {}
# Truncation: Handle max sequence length
total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(
pair=pair))
if max_seq_len and total_len > max_seq_len:
ids, pair_ids, overflowing_tokens = self.truncate_sequences(
ids,
pair_ids=pair_ids,
num_tokens_to_remove=total_len - max_seq_len,
truncation_strategy=truncation_strategy, )
if return_overflowing_tokens:
encoded_inputs["overflowing_tokens"] = overflowing_tokens
encoded_inputs["num_truncated_tokens"] = total_len - max_seq_len
# Add special tokens
sequence = self.build_inputs_with_special_tokens(ids, pair_ids)
segment_ids = self.create_token_type_ids_from_sequences(ids, pair_ids)
        # Build output dictionary
encoded_inputs["input_ids"] = sequence
if return_segment_ids:
encoded_inputs["segment_ids"] = segment_ids
if return_special_tokens_mask:
encoded_inputs[
"special_tokens_mask"] = self.get_special_tokens_mask(ids,
pair_ids)
if return_length:
encoded_inputs["seq_len"] = len(encoded_inputs["input_ids"])
# Check lengths
assert max_seq_len is None or len(encoded_inputs[
"input_ids"]) <= max_seq_len
# Padding
needs_to_be_padded = pad_to_max_seq_len and \
max_seq_len and len(encoded_inputs["input_ids"]) < max_seq_len
if needs_to_be_padded:
difference = max_seq_len - len(encoded_inputs["input_ids"])
if return_input_mask:
encoded_inputs["input_mask"] = [1] * len(encoded_inputs[
"input_ids"]) + [0] * difference
if return_segment_ids:
# 0 for padding token mask
encoded_inputs["segment_ids"] = (
encoded_inputs["segment_ids"] + [0] * difference)
if return_special_tokens_mask:
encoded_inputs["special_tokens_mask"] = encoded_inputs[
"special_tokens_mask"] + [1] * difference
encoded_inputs["input_ids"] = encoded_inputs["input_ids"] + [
self.pad_token_id
] * difference
else:
if return_input_mask:
encoded_inputs["input_mask"] = [1] * len(encoded_inputs[
"input_ids"])
if return_position_ids:
encoded_inputs["position_ids"] = list(
range(len(encoded_inputs["input_ids"])))
return encoded_inputs
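# Minimal usage sketch (illustrative; assumes ``tokenizer`` is an instance of the
# tokenizer class defined above). ``encode`` returns the feature dictionary described
# in its docstring, padded up to ``max_seq_len``:
#   features = tokenizer.encode("a short example sentence", max_seq_len=16)
#   features["input_ids"]     # [CLS] + wordpiece ids + [SEP] + pad ids
#   features["input_mask"]    # 1 for real tokens, 0 for padding
#   features["segment_ids"]   # all 0 for a single sequence
#   features["seq_len"]       # number of tokens before padding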
|
the-stack_106_27854 | """jase_im URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.conf.urls.static import static
from django.conf import settings
from blog import views
app_name = 'blog'
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^about/$', views.about, name='about'),
url(r'^tags/$', views.tag_list_show, name='tag_list_show'),
url(r'^tag/(?P<tag_slug>.+)/$', views.tag_show, name='tag_show'),
url(r'^post/add/$', views.add_post, name='addpost'),
url(r'^post/update/(?P<slug>.+)/$', views.update_post, name='update_post'),
url(r'^post/(?P<slug>.+)/$', views.post_detail, name='post_detail'),
url(r'^category/$', views.category, name='category'),
url(r'^register_profile/$', views.register_profile,
name='register_profile'),
url(r'^comment_submit/$', views.comment_submit, name='comment_submit'),
url(r'^archive/$', views.archive, name='archive'),
url(r'^user/(?P<username>.+)/$', views.user_show, name='user_show'),
url(
r'^mdeditor/',
include('mdeditor.urls'),
),
# api
url(r'^api/v1/posts/$', views.post_collection, name='api_posts'),
url(r'^api/v1/posts/(?P<pk>.+)/$', views.post_element, name='api_post_element'),
]
|
the-stack_106_27855 | """
Collection of functions to perform CRUD operations on a database in the background
"""
from operator import index
import yfinance as yf
import pandas as pd
from flask import Flask
import os
import sys
from models import db
from models import (Security, Transaction, Broker,
Event, CryptoCurrency, CryptoWallet, Dividend)
from tools import get_symbol_to_id_dict, webscrape_tipranks, get_mysql_uri
import yaml
#=================================================
def events_table_updater(db):
'''Uses yfinance library to collect split events and stores them in the database'''
sec_dict = get_symbol_to_id_dict(db)
for symbol in sec_dict:
events_df = pd.DataFrame(yf.Ticker(symbol).splits)
if len(events_df) < 1:
continue
        #Retrieve security from database to link to event
SEC_object = db.session.query(Security).filter(Security.symbol==symbol).first()
for row in events_df.itertuples():
date = row.Index #should be a datetime object
split_factor = row[1]
event_type = 'split'
#Check that the event is not already in the database
existing_match = db.session.query(Event).filter(Event.event_type == event_type,
Event.symbol_id == sec_dict[symbol],
Event.event_date == date).first()
if existing_match is None:
EVENT_obj = Event(event_type = event_type,
event_date = date,
split_factor = split_factor,)
#link event to security
SEC_object.events.append(EVENT_obj)
db.session.add(EVENT_obj)
print(f"\nSymbol: {SEC_object.symbol.upper()}\n--> Added:\n{EVENT_obj}")
else:
print(f"\nSymbol: {SEC_object.symbol.upper()}\n--> <{existing_match}> already exists in database")
db.session.commit()
def dividends_table_updater(db):
sec_dict = get_symbol_to_id_dict(db)
for symbol in sec_dict:
symbol_id = sec_dict[symbol]
#events_df = pd.DataFrame(yf.Ticker(symbol).splits)
#Retrive security from database to link to dividend
SEC_object = db.session.query(Security).filter(Security.symbol==symbol).first()
_, _, div_amount, ex_div_date, div_pay_date, schedule_type, *extras = webscrape_tipranks(symbol)
if schedule_type=='monthly':
pay_schedule = 0
elif schedule_type=='quarterly':
pay_schedule = (div_pay_date.month % 3) + 1 #this will make the value between [1-3]
elif schedule_type is None:
pay_schedule = -1
else:
pay_schedule = -1
DIVIDEND_OBJ = db.session.query(Dividend).filter(Dividend.symbol_id==symbol_id).first()
if DIVIDEND_OBJ is None:
DIVIDEND_OBJ = Dividend(dividend_amount =div_amount,
exdividend_date =ex_div_date,
payment_date =div_pay_date,
payment_schedule =pay_schedule,)
SEC_object.dividends.append(DIVIDEND_OBJ)
db.session.add(DIVIDEND_OBJ)
else:
update_dict = {
'dividend_amount' : div_amount,
'payment_schedule' : pay_schedule,
'exdividend_date' : ex_div_date,
'payment_date' : div_pay_date,
}
DIVIDEND_OBJ.dividend_amount = div_amount
db.session.query(Dividend).filter_by(symbol_id=symbol_id).update(update_dict)
db.session.commit()
if __name__ == '__main__':
#Here we define a database connection
try:
DB_TYPE = sys.argv[1]
except IndexError:
DB_TYPE = 'mysql'
supported_dbs = ['mysql','sqlite']
#DB_TYPE = 'mysql'
if DB_TYPE == 'mysql':
database_URI = get_mysql_uri(config_file='mysql_config.yml')
elif DB_TYPE == 'sqlite':
project_dir = os.path.dirname(os.path.abspath(__file__))
database_dir = os.path.join(project_dir, "asset_portfolio.db")
database_URI = f"sqlite:///{database_dir}"
else:
print(f'--> Database {DB_TYPE} not supported.\n\tDatabase options supported: {supported_dbs}')
sys.exit()
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = database_URI
try:
func_to_run = sys.argv[2]
except IndexError:
func_to_run = 'splits'
db.init_app(app)
with app.app_context():
if func_to_run=='splits':
events_table_updater(db)
elif func_to_run=='dividends':
dividends_table_updater(db) |
the-stack_106_27860 | # standard library
from datetime import datetime, date
# Django
from django.contrib import messages
from django.contrib.auth.decorators import login_required, user_passes_test
from django.http import JsonResponse, HttpResponseRedirect
from django.shortcuts import render, redirect
from django.urls import reverse, reverse_lazy
# local Django
from accounts.models import Profile
from apps.misc.models import Initiative
from apps.students.models import StudentAttendance
from apps.volunteers.models import Volunteer, VolunteerAttendance
from .models import Calendar, ClassworkHomework, Schedule, Section
# NON-VIEWS FUNCTIONS
def has_authenticated_profile(user):
"""User has a profiles and is authenticated by admin.
Necessary to access any page on site bar home page."""
return user.auth is True and Profile.objects.filter(user=user).exists()
def is_volunteer(user):
"""To be used in views accessible to volunteers only."""
return user.desig == 'v'
def captures(request):
return render(request, 'home/captures.html')
# VIEW FUNCTIONS
def index(request):
if request.user.is_authenticated:
return redirect('home:dashboard')
return render(request, 'home/index.html')
def new_index(request):
if request.user.is_authenticated:
return redirect('home:dashboard')
initiatives = Initiative.objects.all()
return render(request, 'new_home/index.html', {'initiatives': initiatives})
@login_required
@user_passes_test(
has_authenticated_profile,
login_url=reverse_lazy('accounts:complete_profile')
)
def dashboard(request):
# TO BE REMOVED...
# Update today's date in Calendar if not already there
today_cal = Calendar.objects.filter(date=date.today())
if today_cal.exists():
today_cal = today_cal[0]
else:
today_cal_new = Calendar(date=date.today())
today_cal_new.save()
today_cal = Calendar.objects.get(date=date.today())
# ...TILL HERE
# Dashboard Query
# HTTP Request is always GET
query_date_str = request.GET.get('d', '')
query_section = request.GET.get('s', '')
if query_date_str and query_section:
query_date = datetime.strptime(query_date_str, '%Y-%m-%d').date()
query_day = query_date.strftime("%w")
calendar = Calendar.objects.filter(date=query_date)
# If the section is not taught on selected day
# (URL parameters are altered manually)
schedule = Schedule.objects.filter(
day=query_day, section__section_id=query_section)
if schedule.exists():
schedule = schedule[0]
else:
return redirect('home:dashboard')
context = {
'selected_date': query_date,
'selected_section': query_section,
}
# If calendar instance for that day is not created
if not calendar.exists():
context['calendar_not_updated'] = True
return render(request, 'home/dashboard.html', context)
calendar = calendar[0]
# If No Class is Scheduled on that day
if not calendar.class_scheduled:
context['no_class_scheduled'] = True
context['calendar_remark'] = calendar.remark
return render(request, 'home/dashboard.html', context)
# Classwork/Homework info
cw_hw = ClassworkHomework.objects.filter(
cal_date=calendar, section__section_id=query_section).first()
context['cw_hw'] = cw_hw
# Subject Scheduled
subject_scheduled = schedule.get_subject_display()
context['subject_scheduled'] = subject_scheduled
# Students Attendance
student_attendance = StudentAttendance.objects.filter(
cal_date=calendar, present=True).order_by('student__school_class')
context['student_attendance'] = student_attendance
if student_attendance.exists():
stu_att_village = {}
stu_att_village['G'] = stu_att_village['M'] = stu_att_village['C'] = 0
stu_att_village['A'] = stu_att_village['S'] = 0
for stu_att in student_attendance:
stu_att_village[stu_att.student.village] += 1
# Mehgawan Side
stu_att_village['MS'] = (stu_att_village['M'] + stu_att_village['C']
+ stu_att_village['A'] + stu_att_village['S'])
context['stu_att_village'] = stu_att_village
# Volunteers Attendance
volun_attendance = VolunteerAttendance.objects.filter(
cal_date=calendar, present=True).order_by('volun__roll_no')
context['volun_attendance'] = volun_attendance
return render(request, 'home/dashboard.html', context)
elif query_date_str or query_section:
# If only one parameter is provided
return redirect('home:dashboard')
return render(request, 'home/dashboard.html')
@login_required
@user_passes_test(
has_authenticated_profile, redirect_field_name=None,
login_url=reverse_lazy('accounts:complete_profile')
)
@user_passes_test(
is_volunteer, redirect_field_name=None,
login_url=reverse_lazy('home:dashboard')
)
# @permission_required
def update_cwhw(request):
if request.method == 'POST':
""" This POST Request won't be Generated (because there will be no means to generated it in the template) if
1. Selected Date is not present in Calendar. (Already checked for above)
2. Selected Day (from Date) and Section not present in Schedule. (Already checked for above and
submit button will be disabled until AJAX is loaded completely)
3. No Class Scheduled on Selected Date. (Already checked for above)
4. User is not a Volunteer.
"""
profile = Profile.objects.get(user=request.user)
volun = Volunteer.objects.get(profile=profile)
# Date and section selected before pressing submit button
date_str = request.POST['date']
date = datetime.strptime(date_str, '%Y-%m-%d').date()
cal_date = Calendar.objects.get(date=date)
section_id = request.POST['section']
section = Section.objects.get(section_id=section_id)
# Update CW-HW
cw = request.POST['cw']
hw = request.POST['hw']
comment = request.POST['comment']
cw_hw = ClassworkHomework.objects.filter(
cal_date=cal_date, section=section)
if cw_hw.exists():
cw_hw = cw_hw[0]
else:
cw_hw = ClassworkHomework(
cal_date=cal_date, section=section, cw='', hw='', comment='')
if cw:
cw_hw.cw += f'{cw}\n - {profile.get_full_name}, {volun.roll_no}\n\n'
if hw:
cw_hw.hw += f'{hw}\n - {profile.get_full_name}, {volun.roll_no}\n\n'
if comment:
cw_hw.comment += f'{comment}\n - {profile.get_full_name}, {volun.roll_no}\n\n'
cw_hw.save()
messages.success(request, 'CW_HW update successful!')
redirect_url = reverse('home:dashboard') + f'?d={date}&s={section_id}'
return HttpResponseRedirect(redirect_url)
return redirect('home:dashboard')
@login_required
@user_passes_test(
has_authenticated_profile,
login_url=reverse_lazy('accounts:complete_profile')
)
def ajax_dashboard(request):
date_str = request.GET.get('class_date', None)
date = datetime.strptime(date_str, '%Y-%m-%d').date()
day = date.strftime("%w")
data = {}
schedule = Schedule.objects.filter(day=day).order_by('section__section_id')
for sch in schedule:
data[sch.section.section_id] = sch.section.name
return JsonResponse(data)
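# Illustrative response shape (the section ids/names below are hypothetical): for a date
# that falls on a scheduled day, the view returns a mapping of section_id to section name,
# e.g. {"1": "Section A", "2": "Section B"}.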
@login_required
@user_passes_test(
has_authenticated_profile,
login_url=reverse_lazy('accounts:complete_profile')
)
@user_passes_test(
is_volunteer, redirect_field_name=None,
login_url=reverse_lazy('home:dashboard')
)
def class_schedule(request):
days = Schedule.DAY
subjects = Schedule.SUBJECT
all_sections = Section.objects.order_by('section_id')
active_sections = all_sections.exclude(schedule=None)
schedule = Schedule.objects.order_by(
'day', 'section__section_id', 'subject')
context = {
'days': days,
'subjects': subjects,
'all_sections': all_sections,
'active_sections': active_sections,
'schedule': schedule,
}
return render(request, 'home/class_schedule.html', context)
|
the-stack_106_27861 | import math
import torch
from torch import nn
from torch.nn.parameter import Parameter
from torch.nn import init
from torch.autograd import Function
import _torch_ipex as core
class IpexMLPHandle:
def __init__(self, N, C, K, bn, bc, bk, dtype, fuse_bias, act_type):
self.handle = core.mlp_create_handle(N, C, K, bn, bc, bk, 1 if dtype == torch.float32 else 2, fuse_bias, act_type)
self.N = N
self.C = C
self.K = K
self.bn = bn
self.bc = bc
self.bk = bk
self.fuse_bias = fuse_bias
self.act_type = act_type
if act_type == 1:
self.relu_mask_tensor = core.mlp_set_relu_mask(self.handle)
def __del__(self):
if self.handle:
core.mlp_release_handle(self.handle)
self.handle = None
self.relu_mask_tensor = None
class IpexMLPFC(Function):
@staticmethod
def forward(ctx, input, weight, bias, handle):
#print("Inside XsmmFCForward")
#t1 = time.time()
input = input.contiguous()
weight = weight.contiguous()
bias = bias.contiguous()
output = core.mlp_forward(handle.handle, input, weight, bias)
#t2 = time.time()
#print("XsmmFCFWD: q=%.3f" % ((t2-t1)*1000.0))
ctx.ipex_mlp_handle = handle
ctx.save_for_backward(input, weight)
return output
@staticmethod
def backward(ctx, grad_output):
#print("Inside XsmmFCBackward")
handle = ctx.ipex_mlp_handle
del ctx.ipex_mlp_handle
input, weight = ctx.saved_variables
#t1 = time.time()
grad_output = grad_output.contiguous()
grad_input, grad_weight, grad_bias = core.mlp_backward(handle.handle, grad_output, input, weight)
#t2 = time.time()
#print("XsmmFCBWD: q=%.3f w=%.3f" % ((t2-t1)*1000.0, (t3-t2)*1000.0))
return (grad_input, grad_weight, grad_bias, None)
class IpexMLPLinear(nn.Module):
r"""PCL Linear module for using libxsmm blocked GEMM"""
__constants__ = ['bias', 'C', 'K']
def __init__(self, C, K, bias=True, act_type=None, output_stays_blocked=True, default_blocking=None):
super(IpexMLPLinear, self).__init__()
self.C = C
self.K = K
self.bc = 0 #self.get_blocking_factor(C, default_blocking) # 64 if C % 64 == 0 else C
self.bk = 0 #self.get_blocking_factor(K, default_blocking) # 64 if K % 64 == 0 else K
self.nbc = 0 # C // self.bc
self.nbk = 0 # K // self.bk
self.C_pad = 0
self.padded_C = self.C
self.N = 0
self.nbn = 0
self.bn = 0
self.default_blocking = default_blocking
self.ipex_mlp_handle = None
self.set_activation_type(act_type)
self.output_stays_blocked = output_stays_blocked
self.weight = Parameter(torch.Tensor(K, C))
if bias:
self.bias = Parameter(torch.Tensor(K))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def set_activation_type(self, act_type):
if not act_type:
self.act_type = 0
elif act_type == 'relu':
self.act_type = 1
elif act_type == 'sigmoid':
self.act_type = 2
else:
raise RuntimeError("XsmmLinear: Unknown activation type %s" % act_type)
def get_blocking_factor(self, dim_size, default_blocking=None):
blocking_prio_list = [64, 48, 32, 50]
if default_blocking:
blocking_prio_list = [default_blocking] + blocking_prio_list
for bs in blocking_prio_list:
if dim_size % bs == 0:
#print("Returning block size of %d for dim_size of %d" % ( bs, dim_size))
return bs
#print("Returning block size of %d for dim_size of %d" % ( dim_size, dim_size))
return dim_size
def is_dtype_supported(self, dtype):
if dtype == torch.float32:
return True
elif dtype == torch.bfloat16 and self.C % 2 == 0:
return True
else:
return False
def maybe_pad_input(self, input):
if input.dim() == 2 and input.size(1) != self.padded_C:
input = torch.cat([input, input.new_zeros([input.size(0), self.C_pad])], dim=1)
return input
def maybe_pad_weight(self, weight):
if weight.dim() == 2 and weight.size(1) != self.padded_C:
weight = torch.cat([weight, weight.new_zeros([self.K, self.C_pad])], dim=1)
# elif weight.dim() == 4 and weight.size(1) * weight.size(2) != self.padded_C:
# raise RuntimeError("Trying to ad 4D weights")
# elif weight.dim() == 5 and weight.size(1) * weight.size(2) * weight.size(4) != self.padded_C:
# raise RuntimeError("Trying to ad 5D weights")
return weight
def get_blocked_weight(self, to_dtype=None, block_for_dtype=None):
weight = self.weight
new_weight = None
if to_dtype:
weight = weight.to(to_dtype)
if not block_for_dtype:
block_for_dtype = weight.dtype
if self.bc == 0 or self.bk == 0:
self.update_blocking(block_for_dtype)
weight = self.maybe_pad_weight(weight)
if weight.dim() == 2:
if block_for_dtype == torch.bfloat16:
l_view = [self.nbk, self.bk, self.nbc, self.bc // 2, 2]
l_perm = [0, 2, 3, 1, 4]
new_weight = weight.view(l_view).permute(l_perm).contiguous()
elif block_for_dtype == torch.float32:
l_view = [self.nbk, self.bk, self.nbc, self.bc]
l_perm = [0, 2, 3, 1]
new_weight = weight.view(l_view).permute(l_perm).contiguous()
else:
raise RuntimeError("Invalid datatype for blocking: %s" % block_for_dtype)
elif weight.dim() == 4:
if block_for_dtype == torch.bfloat16:
l_view = [self.nbk, self.nbc, self.bc // 2, 2, self.bk]
l_perm = [0, 1, 2, 4, 3]
new_weight = weight.view(l_view).permute(l_perm).contiguous()
elif block_for_dtype == torch.float32:
# We are already in correct format, do nothing
new_weight = weight
else:
raise RuntimeError("Invalid datatype for blocking: %s" % block_for_dtype)
elif weight.dim() == 5:
if block_for_dtype == torch.bfloat16:
# We are already in correct format, do nothing
new_weight = weight
elif block_for_dtype == torch.float32:
l_view = [self.nbk, self.nbc, self.bc, self.bk]
l_perm = [0, 1, 2, 4, 3]
new_weight = weight.permute(l_perm).view(l_view).contiguous()
else:
raise RuntimeError("Invalid datatype for blocking: %s" % block_for_dtype)
return new_weight
def update_blocking(self, dtype):
if dtype == torch.bfloat16 and self.padded_C % 2 != 0:
self.C_pad = 1
self.padded_C = self.C + self.C_pad
self.bc = self.get_blocking_factor(self.padded_C, self.default_blocking)
if dtype == torch.bfloat16 and self.bc % 2 != 0: self.bc *= 2
self.nbc = self.padded_C // self.bc
self.bk = self.get_blocking_factor(self.K, self.default_blocking)
self.nbk = self.K // self.bk
def reset_weight_shape(self, block_for_dtype=None):
#if not self.is_dtype_supported(block_for_dtype):
# block_for_dtype = torch.float32
#self.update_bc(block_for_dtype)
self.weight = Parameter(self.get_blocked_weight(block_for_dtype=block_for_dtype))
def reset_parameters(self):
init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if self.bias is not None:
bound = 1 / math.sqrt(self.C)
init.uniform_(self.bias, -bound, bound)
def forward(self, input):
input_type = input.dtype
#if not self.is_dtype_supported(input_type):
# input = input.to(torch.float32)
if self.bc == 0 or self.bk == 0:
self.update_blocking(input_type)
input = self.maybe_pad_input(input)
if input.dtype == torch.bfloat16:
if self.bc % 2 != 0: raise RuntimeError("Bfloat16 requires even bc")
if input.dim() == 2:
N = input.size(0)
bn = self.get_blocking_factor(N, 48) #64 if N % 64 == 0 else N
input = input.view(N//bn, bn, self.nbc, self.bc).permute(0,2,1,3)
elif input.dim() == 4:
N = input.size(0) * input.size(2)
bn = input.size(2)
else:
print("Invalid Input dimensions (%d)" % input.dim())
input = input.contiguous()
if N != self.N or bn != self.bn:
# print("Create handle: ", N, self.padded_C, self.K, bn, self.bc, self.bk, input.dtype, 0 if self.bias is None else 1, self.act_type)
self.ipex_mlp_handle = IpexMLPHandle(N, self.padded_C, self.K, bn, self.bc, self.bk, input.dtype, 0 if self.bias is None else 1, self.act_type)
self.N = N
self.bn = bn
self.nbn = N // bn
wtensor = self.get_blocked_weight(to_dtype=input.dtype)
btensor = self.bias.to(input.dtype)
output = IpexMLPFC.apply(input, wtensor, btensor, self.ipex_mlp_handle)
if not self.output_stays_blocked:
#output = output.permute(0, 2, 1, 3).view(self.N, self.K).contiguous()
output = output.permute(0, 2, 1, 3).reshape(self.N, self.K).contiguous()
output = output.to(input_type)
return output
def extra_repr(self):
return 'C={}, K={}, bias={}'.format(
self.C, self.K, self.bias is not None
)
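# Minimal usage sketch (illustrative, not part of the original source; shapes are
# arbitrary examples). The blocked layer is used like a regular nn.Linear:
#   fc = IpexMLPLinear(C=256, K=512, bias=True, act_type='relu',
#                      output_stays_blocked=False)
#   x = torch.randn(64, 256)   # batch of 64 rows, feature dim must equal C
#   y = fc(x)                  # -> shape (64, 512); relu is applied by the underlying handle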
|
the-stack_106_27862 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (c) 2011-2018, wradlib developers.
# Distributed under the MIT License. See LICENSE.txt for more info.
import numpy as np
import wradlib.ipol as ipol
import wradlib.georef as georef
import unittest
import warnings
class InterpolationTest(unittest.TestCase):
def setUp(self):
# Kriging Variables
self.src = np.array([[0., 0.], [4., 0]])
self.trg = np.array([[0., 0.], [2., 0.], [1., 0], [4., 0]])
self.src_d = np.array([0., 1.])
self.trg_d = np.array([0., 1., 2., 3.])
self.vals = np.array([[1., 2., 3.],
[3., 2., 1.]])
# Need to use different test data because Linear requires more points
# depending on their spatial constellation (in order to create a
# convex hull)
self.src_lin = np.array([[0., 0.], [4., 0], [1., 1.]])
self.trg_lin = np.array([[0., 0.], [2., 0.], [1., 0], [4., 0]])
self.vals_lin = np.array([[1., 2., 3.], [3., 2., 1.], [1., 1., 1.]])
def test_parse_covariogram(self):
cov_model = '1.0 Exp(10.5) + 2.3 Sph(20.4) + 5.0 Nug(0.)'
h = 5.0
c = ipol.parse_covariogram(cov_model)
ci = sum([ipol.cov_exp(h, 1., 10.5),
ipol.cov_sph(h, 2.3, 20.4),
ipol.cov_nug(h, 5.0, 0.)])
self.assertTrue(c(h) == ci)
def test_cov_lin(self):
self.assertTrue(
np.allclose(ipol.cov_lin([0., 5., 10.]), np.array([1., 0., 0.])))
self.assertTrue(
np.allclose(ipol.cov_lin([0., 5., 10.], sill=2., rng=10.),
np.array([2., 1., 0.])))
def test_cov_sph(self):
self.assertTrue(
np.allclose(ipol.cov_sph([0., 5., 10.]), np.array([1., 0., 0.])))
self.assertTrue(
np.allclose(ipol.cov_sph([0., 5., 10.], sill=2., rng=10.),
np.array([2., 0.625, 0.])))
def test_cov_exp(self):
self.assertTrue(np.allclose(ipol.cov_exp([0., 5., 10.]), np.array(
[1., 6.73794700e-03, 4.53999298e-05])))
self.assertTrue(
np.allclose(ipol.cov_exp([0., 5., 10.], sill=2., rng=10.),
np.array([2., 1.21306132, 0.73575888])))
def test_cov_pow(self):
self.assertTrue(
np.allclose(ipol.cov_pow([0., 5., 10.]), np.array([1., -4., -9.])))
self.assertTrue(
np.allclose(ipol.cov_pow([0., 5., 10.], sill=2., rng=10.),
np.array([2.00000000e+00, -9.76562300e+06,
-1.00000000e+10])))
def test_cov_mat(self):
self.assertTrue(np.allclose(ipol.cov_mat([0., 5., 10.]),
np.array([1.00000000e+00, 8.49325705e-04,
7.21354153e-07])))
self.assertTrue(
np.allclose(ipol.cov_mat([0., 5., 10.], sill=2., rng=10.),
np.array([2., 0.98613738, 0.48623347])))
self.assertTrue(np.allclose(
ipol.cov_mat([0., 5., 10.], sill=2., rng=10., shp=0.25),
np.array([2., 0.74916629, 0.39961004])))
def test_cov_gau(self):
self.assertTrue(np.allclose(ipol.cov_gau([0., 5., 10.]),
np.array([1.00000000e+00, 1.38879439e-11,
3.72007598e-44])))
self.assertTrue(
np.allclose(ipol.cov_gau([0., 5., 10.], sill=2., rng=10.),
np.array([2., 1.55760157, 0.73575888])))
def test_cov_cau(self):
self.assertTrue(np.allclose(ipol.cov_cau([0., 5., 10.]),
np.array([1., 0.16666667, 0.09090909])))
self.assertTrue(
np.allclose(ipol.cov_cau([0., 5., 10.], sill=2., rng=10., ),
np.array([2., 1.33333333, 1.])))
self.assertTrue(np.allclose(
ipol.cov_cau([0., 5., 10.], sill=2., rng=10., alpha=0.5),
np.array([2., 0.6862915, 0.5])))
self.assertTrue(np.allclose(
ipol.cov_cau([0., 5., 10.], sill=2., rng=10., alpha=0.5, beta=1.5),
np.array([2., 0.40202025, 0.25])))
def test_Nearest_1(self):
"""testing the basic behaviour of the Idw class"""
ip = ipol.Nearest(self.src, self.trg)
# input more than one dataset
res = ip(self.vals)
self.assertTrue(
np.allclose(res, np.array([[1., 2., 3.],
[1., 2., 3.],
[1., 2., 3.],
[3., 2., 1.]])))
# input only one flat array
res = ip(self.vals[:, 2])
self.assertTrue(np.allclose(res, np.array([3., 3., 3., 1.])))
def test_Idw_1(self):
"""testing the basic behaviour of the Idw class"""
ip = ipol.Idw(self.src, self.trg)
# input more than one dataset
res = ip(self.vals)
self.assertTrue(
np.allclose(res, np.array([[1., 2., 3.],
[2., 2., 2.],
[1.2, 2., 2.8],
[3., 2., 1.]])))
# input only one flat array
res = ip(self.vals[:, 2])
self.assertTrue(np.allclose(res, np.array([3., 2., 2.8, 1.])))
def test_Linear_1(self):
"""testing the basic behaviour of the Linear class"""
ip = ipol.Linear(self.src_lin, self.trg_lin)
# input more than one dataset
res = ip(self.vals_lin)
self.assertTrue(
np.allclose(res, np.array([[1., 2., 3.],
[2., 2., 2.],
[1.5, 2., 2.5],
[3., 2., 1.]])))
# input only one flat array
res = ip(self.vals_lin[:, 2])
self.assertTrue(np.allclose(res, np.array([3., 2., 2.5, 1.])))
def test_OrdinaryKriging_1(self):
"""testing the basic behaviour of the OrdinaryKriging class"""
ip = ipol.OrdinaryKriging(self.src, self.trg, '1.0 Lin(2.0)')
# input more than one dataset
res = ip(self.vals)
self.assertTrue(np.all(res == np.array([[1., 2., 3.],
[2., 2., 2.],
[1.5, 2., 2.5],
[3., 2., 1.]])))
# input only one flat array
res = ip(self.vals[:, 2])
self.assertTrue(np.allclose(res, np.array([3., 2., 2.5, 1.])))
def test_ExternalDriftKriging_1(self):
"""testing the basic behaviour of the ExternalDriftKriging class
with drift terms constant over multiple fields"""
ip = ipol.ExternalDriftKriging(self.src, self.trg, '1.0 Lin(2.0)',
src_drift=self.src_d,
trg_drift=self.trg_d)
# input more than one dataset
res = ip(self.vals)
self.assertTrue(np.all(res == np.array([[1., 2., 3.],
[3., 2., 1.],
[5., 2., -1.],
[7., 2., -3.]])))
# input only one flat array
res = ip(self.vals[:, 2])
self.assertTrue(np.allclose(res, np.array([3., 1., -1., -3.])))
def test_ExternalDriftKriging_2(self):
"""testing the basic behaviour of the ExternalDriftKriging class
with drift terms varying over multiple fields"""
src_d = np.array([[0., 0., 0.],
[1., 1., 1.]])
trg_d = np.array([[0., 0., 0.],
[1., 1., 1.],
[2., 2., 2.],
[3., 3., 3.]])
ip = ipol.ExternalDriftKriging(self.src, self.trg, '1.0 Lin(2.0)',
src_drift=src_d,
trg_drift=trg_d)
res = ip(self.vals)
self.assertTrue(np.all(res == np.array([[1., 2., 3.],
[3., 2., 1.],
[5., 2., -1.],
[7., 2., -3.]])))
# input only one flat array
res = ip(self.vals[:, 2],
src_drift=src_d[:, 2], trg_drift=trg_d[:, 2])
self.assertTrue(np.allclose(res, np.array([3., 1., -1., -3.])))
def test_ExternalDriftKriging_3(self):
"""testing the basic behaviour of the ExternalDriftKriging class
with missing drift terms"""
ip = ipol.ExternalDriftKriging(self.src, self.trg, '1.0 Lin(2.0)',
src_drift=None,
trg_drift=None)
self.assertRaises(ValueError, ip, self.vals)
def test_MissingErrors(self):
self.assertRaises(ipol.MissingSourcesError,
ipol.Nearest, np.array([]), self.trg)
self.assertRaises(ipol.MissingTargetsError,
ipol.Nearest, self.src, np.array([]))
self.assertRaises(ipol.MissingSourcesError,
ipol.Idw, np.array([]), self.trg)
self.assertRaises(ipol.MissingTargetsError,
ipol.Idw, self.src, np.array([]))
self.assertRaises(ipol.MissingSourcesError,
ipol.Linear, np.array([]), self.trg)
self.assertRaises(ipol.MissingTargetsError,
ipol.Linear, self.src, np.array([]))
self.assertRaises(ipol.MissingSourcesError,
ipol.OrdinaryKriging, np.array([]), self.trg)
self.assertRaises(ipol.MissingTargetsError,
ipol.OrdinaryKriging, self.src, np.array([]))
self.assertRaises(ipol.MissingSourcesError,
ipol.ExternalDriftKriging, np.array([]), self.trg)
self.assertRaises(ipol.MissingTargetsError,
ipol.ExternalDriftKriging, self.src, np.array([]))
def test_nnearest_warning(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
ipol.Idw(self.src, self.trg, nnearest=len(self.src) + 1)
ipol.OrdinaryKriging(self.src, self.trg,
nnearest=len(self.src) + 1)
ipol.ExternalDriftKriging(self.src, self.trg,
nnearest=len(self.src) + 1)
for item in w:
self.assertTrue(issubclass(item.category, UserWarning))
self.assertTrue("nnearest" in str(item.message))
def test_IpolBase(self):
"""testing the basic behaviour of the base class"""
ip = ipol.IpolBase(self.src, self.trg)
res = ip(self.vals)
self.assertEqual(res, None)
# Check behaviour if args are passed as lists
src = [self.src[:, 0], self.src[:, 1]]
trg = [self.trg[:, 0], self.trg[:, 1]]
ip = ipol.IpolBase(src, trg)
self.assertEqual(len(self.src), ip.numsources)
# Check behaviour if dimension is > 2
ip = ipol.IpolBase(self.src, self.trg)
self.assertRaises(Exception, ipol.IpolBase,
np.arange(12).reshape((2, 3, 2)),
np.arange(20).reshape((2, 2, 5)))
class WrapperFunctionTest(unittest.TestCase):
def test_interpolate(self):
src = np.arange(10)[:, None]
trg = np.linspace(0, 20, 40)[:, None]
vals = np.hstack((np.sin(src), 10. + np.sin(src)))
vals[3:5, 1] = np.nan
print(np.any(np.isnan(vals.ravel())))
ipol_result = ipol.interpolate(src, trg, vals, ipol.Idw, nnearest=2)
np.testing.assert_allclose(ipol_result[3:5, 1],
np.array([10.880571, 10.909137]))
ipol_result = ipol.interpolate(src, trg, vals[:, 1], ipol.Idw,
nnearest=2)
np.testing.assert_allclose(ipol_result[3:5],
np.array([10.880571, 10.909137]))
vals = np.dstack((np.sin(src), 10. + np.sin(src)))
vals[3:5, :, 1] = np.nan
self.assertRaises(NotImplementedError,
lambda: ipol.interpolate(src, trg, vals, ipol.Idw,
nnearest=2))
def test_interpolate_polar(self):
data = np.arange(12.).reshape(4, 3)
masked_values = (data == 2) | (data == 9)
filled_a = ipol.interpolate_polar(data, mask=masked_values,
ipclass=ipol.Linear)
testfunc = ipol.interpolate_polar
self.assertRaises(ipol.MissingTargetsError,
lambda: testfunc(data, mask=None,
ipclass=ipol.Linear))
mdata = np.ma.array(data, mask=masked_values)
filled_b = ipol.interpolate_polar(mdata,
ipclass=ipol.Linear)
np.testing.assert_allclose(filled_a, filled_b)
class RegularToIrregularTest(unittest.TestCase):
def setUp(self):
NX = 2
nx = np.linspace(-NX + 0.5, NX - 0.5, num=2 * NX, endpoint=True)
vx = np.linspace(-NX, NX, num=2 * NX, endpoint=True)
meshx, meshy = np.meshgrid(nx, nx)
self.cartgrid = np.dstack((meshx, meshy))
self.values = np.repeat(vx[:, np.newaxis], 2 * NX, 1)
coord = georef.sweep_centroids(4, 1, NX, 0.)
xx = coord[..., 0]
yy = np.degrees(coord[..., 1])
xxx = xx * np.cos(np.radians(90. - yy))
x = xx * np.sin(np.radians(90. - yy))
y = xxx
self.newgrid = np.dstack((x, y))
self.result = np.array([[0.47140452, 1.41421356],
[0.47140452, 1.41421356],
[-0.47140452, -1.41421356],
[-0.47140452, -1.41421356]])
def test_cart_to_irregular_interp(self):
newvalues = ipol.cart_to_irregular_interp(self.cartgrid, self.values,
self.newgrid,
method='linear')
self.assertTrue(np.allclose(newvalues, self.result))
def test_cart_to_irregular_spline(self):
newvalues = ipol.cart_to_irregular_spline(self.cartgrid, self.values,
self.newgrid, order=1,
prefilter=False)
self.assertTrue(np.allclose(newvalues, self.result))
def test_cart_to_irregular_equality(self):
self.assertTrue(
np.allclose(ipol.cart_to_irregular_interp(self.cartgrid,
self.values,
self.newgrid,
method='linear'),
ipol.cart_to_irregular_spline(self.cartgrid,
self.values,
self.newgrid,
order=1,
prefilter=False)))
if __name__ == '__main__':
unittest.main()
|
the-stack_106_27864 | """Store constants."""
import voluptuous as vol
import re
import homeassistant.util.dt as dt_util
from homeassistant.helpers import config_validation as cv
from homeassistant.const import (
WEEKDAYS,
ATTR_ENTITY_ID,
SUN_EVENT_SUNRISE,
SUN_EVENT_SUNSET,
ATTR_SERVICE,
ATTR_SERVICE_DATA,
CONF_CONDITIONS,
CONF_ATTRIBUTE,
ATTR_NAME,
)
VERSION = "3.1.1"
DOMAIN = "scheduler"
SUN_ENTITY = "sun.sun"
DAY_TYPE_DAILY = "daily"
DAY_TYPE_WORKDAY = "workday"
DAY_TYPE_WEEKEND = "weekend"
WORKDAY_ENTITY = "binary_sensor.workday_sensor"
ATTR_CONDITION_TYPE = "condition_type"
CONDITION_TYPE_AND = "and"
CONDITION_TYPE_OR = "or"
ATTR_MATCH_TYPE = "match_type"
MATCH_TYPE_EQUAL = "is"
MATCH_TYPE_UNEQUAL = "not"
MATCH_TYPE_BELOW = "below"
MATCH_TYPE_ABOVE = "above"
ATTR_REPEAT_TYPE = "repeat_type"
REPEAT_TYPE_REPEAT = "repeat"
REPEAT_TYPE_SINGLE = "single"
REPEAT_TYPE_PAUSE = "pause"
EVENT = "scheduler_updated"
SERVICE_REMOVE = "remove"
SERVICE_EDIT = "edit"
SERVICE_ADD = "add"
OffsetTimePattern = re.compile("^([a-z]+)([-|\+]{1})([0-9:]+)$")
ATTR_START = "start"
ATTR_STOP = "stop"
ATTR_TIMESLOTS = "timeslots"
ATTR_WEEKDAYS = "weekdays"
ATTR_ENABLED = "enabled"
ATTR_SCHEDULE_ID = "schedule_id"
ATTR_ACTIONS = "actions"
ATTR_VALUE = "value"
EVENT_TIMER_FINISHED = "scheduler_timer_finished"
EVENT_TIMER_UPDATED = "scheduler_timer_updated"
EVENT_ITEM_UPDATED = "scheduler_item_updated"
EVENT_ITEM_CREATED = "scheduler_item_created"
EVENT_STARTED = "scheduler_started"
STATE_INIT = "init"
STATE_READY = "ready"
def validate_time(time):
res = OffsetTimePattern.match(time)
if not res:
if dt_util.parse_time(time):
return time
else:
raise vol.Invalid("Invalid time entered: {}".format(time))
else:
if res.group(1) not in [SUN_EVENT_SUNRISE, SUN_EVENT_SUNSET]:
raise vol.Invalid("Invalid time entered: {}".format(time))
elif res.group(2) not in ['+', '-']:
raise vol.Invalid("Invalid time entered: {}".format(time))
elif not dt_util.parse_time(res.group(3)):
raise vol.Invalid("Invalid time entered: {}".format(time))
else:
return time
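# Illustrative inputs accepted by validate_time (not exhaustive):
#   validate_time("08:30:00")       -> "08:30:00"        (plain time of day)
#   validate_time("sunrise+00:30")  -> "sunrise+00:30"   (offset from a sun event)
#   validate_time("sunset-01:00")   -> "sunset-01:00"
# Anything else (e.g. "noon+00:30") raises vol.Invalid.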
CONDITION_SCHEMA = vol.Schema(
{
vol.Required(ATTR_ENTITY_ID): cv.entity_id,
vol.Required(ATTR_VALUE): vol.Any(int, float, str),
vol.Optional(CONF_ATTRIBUTE): cv.string,
vol.Required(ATTR_MATCH_TYPE): vol.In(
[
MATCH_TYPE_EQUAL,
MATCH_TYPE_UNEQUAL,
MATCH_TYPE_BELOW,
MATCH_TYPE_ABOVE
]
),
}
)
ACTION_SCHEMA = vol.Schema(
{
vol.Optional(ATTR_ENTITY_ID): cv.entity_id,
vol.Required(ATTR_SERVICE): cv.entity_id,
vol.Optional(ATTR_SERVICE_DATA): dict,
}
)
TIMESLOT_SCHEMA = vol.Schema(
{
vol.Required(ATTR_START): validate_time,
vol.Optional(ATTR_STOP): validate_time,
vol.Optional(CONF_CONDITIONS): vol.All(
cv.ensure_list, vol.Length(min=1), [CONDITION_SCHEMA]
),
vol.Optional(ATTR_CONDITION_TYPE): vol.In(
[
CONDITION_TYPE_AND,
CONDITION_TYPE_OR,
]
),
vol.Required(ATTR_ACTIONS): vol.All(
cv.ensure_list, vol.Length(min=1), [ACTION_SCHEMA]
),
}
)
SCHEDULE_SCHEMA = vol.Schema(
{
vol.Required(ATTR_WEEKDAYS): vol.All(
cv.ensure_list,
vol.Unique(),
vol.Length(min=1),
[
vol.In(
WEEKDAYS + [
DAY_TYPE_WORKDAY,
DAY_TYPE_WEEKEND,
DAY_TYPE_DAILY,
]
)
],
),
vol.Required(ATTR_TIMESLOTS): vol.All(
cv.ensure_list, vol.Length(min=1), [TIMESLOT_SCHEMA]
),
vol.Required(ATTR_REPEAT_TYPE): vol.In(
[
REPEAT_TYPE_REPEAT,
REPEAT_TYPE_SINGLE,
REPEAT_TYPE_PAUSE,
]
),
vol.Optional(ATTR_NAME): vol.Any(cv.string, None),
}
)
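# Minimal validation sketch (illustrative payload, not taken from the original source):
#   SCHEDULE_SCHEMA({
#       ATTR_WEEKDAYS: [DAY_TYPE_DAILY],
#       ATTR_TIMESLOTS: [{
#           ATTR_START: "07:00:00",
#           ATTR_ACTIONS: [{ATTR_SERVICE: "light.turn_on", ATTR_ENTITY_ID: "light.kitchen"}],
#       }],
#       ATTR_REPEAT_TYPE: REPEAT_TYPE_REPEAT,
#   })
# A malformed entry (unknown weekday, bad time string, empty actions) raises vol.Invalid.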
|
the-stack_106_27865 | """
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
"""
from cfnlint.rules import CloudFormationLintRule
from cfnlint.rules import RuleMatch
class Configuration(CloudFormationLintRule):
"""Check if Parameters are configured correctly"""
id = 'E2001'
shortdesc = 'Parameters have appropriate properties'
description = 'Making sure the parameters are properly configured'
source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/parameters-section-structure.html'
tags = ['parameters']
valid_keys = {
'AllowedPattern': {
'Type': 'String'
},
'AllowedValues': {
'Type': 'List',
'ItemType': 'String',
},
'ConstraintDescription': {
'Type': 'String'
},
'Default': {
'Type': 'String'
},
'Description': {
'Type': 'String'
},
'MaxLength': {
'Type': 'Integer',
'ValidForTypes': [
'String',
'AWS::EC2::AvailabilityZone::Name',
'AWS::EC2::Image::Id',
'AWS::EC2::Instance::Id',
'AWS::EC2::KeyPair::KeyName',
'AWS::EC2::SecurityGroup::GroupName',
'AWS::EC2::SecurityGroup::Id',
'AWS::EC2::Subnet::Id',
'AWS::EC2::Volume::Id',
'AWS::EC2::VPC::Id',
'AWS::Route53::HostedZone::Id'
]
},
'MaxValue': {
'Type': 'Integer',
'ValidForTypes': ['Number']
},
'MinLength': {
'Type': 'Integer',
'ValidForTypes': [
'String',
'AWS::EC2::AvailabilityZone::Name',
'AWS::EC2::Image::Id',
'AWS::EC2::Instance::Id',
'AWS::EC2::KeyPair::KeyName',
'AWS::EC2::SecurityGroup::GroupName',
'AWS::EC2::SecurityGroup::Id',
'AWS::EC2::Subnet::Id',
'AWS::EC2::Volume::Id',
'AWS::EC2::VPC::Id',
'AWS::Route53::HostedZone::Id'
]
},
'MinValue': {
'Type': 'Integer',
'ValidForTypes': ['Number']
},
'NoEcho': {
'Type': 'Boolean'
},
'Type': {
'Type': 'String'
}
}
required_keys = [
'Type'
]
def check_type(self, value, path, props):
""" Check the type and handle recursion with lists """
results = []
prop_type = props.get('Type')
if value is None:
message = 'Property %s should be of type %s' % (
'/'.join(map(str, path)), prop_type)
results.append(RuleMatch(path, message))
return results
try:
if prop_type in ['List']:
if isinstance(value, list):
for i, item in enumerate(value):
results.extend(self.check_type(item, path[:] + [i], {
'Type': props.get('ItemType')
}))
else:
message = 'Property %s should be of type %s' % (
'/'.join(map(str, path)), prop_type)
results.append(RuleMatch(path, message))
if prop_type in ['String']:
if isinstance(value, (dict, list)):
message = 'Property %s should be of type %s' % (
'/'.join(map(str, path)), prop_type)
results.append(RuleMatch(path, message))
str(value)
elif prop_type in ['Boolean']:
if not isinstance(value, bool):
if value not in ['True', 'true', 'False', 'false']:
message = 'Property %s should be of type %s' % (
'/'.join(map(str, path)), prop_type)
results.append(RuleMatch(path, message))
elif prop_type in ['Integer']:
if isinstance(value, bool):
message = 'Property %s should be of type %s' % (
'/'.join(map(str, path)), prop_type)
results.append(RuleMatch(path, message))
                else: # has to be an Integer
int(value)
except Exception: # pylint: disable=W0703
message = 'Property %s should be of type %s' % (
'/'.join(map(str, path)), prop_type)
results.append(RuleMatch(path, message,))
return results
def match(self, cfn):
matches = []
for paramname, paramvalue in cfn.get_parameters().items():
if isinstance(paramvalue, dict):
for propname, propvalue in paramvalue.items():
if propname not in self.valid_keys:
message = 'Parameter {0} has invalid property {1}'
matches.append(RuleMatch(
['Parameters', paramname, propname],
message.format(paramname, propname)
))
else:
props = self.valid_keys.get(propname)
prop_path = ['Parameters', paramname, propname]
matches.extend(self.check_type(
propvalue, prop_path, props))
# Check that the property is needed for the current type
valid_for = props.get('ValidForTypes')
if valid_for is not None:
if paramvalue.get('Type') not in valid_for:
message = 'Parameter {0} has property {1} which is only valid for {2}'
matches.append(RuleMatch(
['Parameters', paramname, propname],
message.format(paramname, propname, valid_for)
))
for reqname in self.required_keys:
if reqname not in paramvalue.keys():
message = 'Parameter {0} is missing required property {1}'
matches.append(RuleMatch(
['Parameters', paramname],
message.format(paramname, reqname)
))
else:
message = 'Parameter {0} is not an object'
matches.append(RuleMatch(
['Parameters', paramname],
                    message.format(paramname)
))
return matches
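# Illustrative template fragment (hypothetical) that this rule would flag: the property
# "Typo" is not a valid parameter key, and "MaxValue" is only valid for Type "Number":
#   Parameters:
#     InstanceCount:
#       Type: String
#       Typo: something
#       MaxValue: 5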
|
the-stack_106_27866 | ## Brian Blaylock
## April 28, 2021
import warnings
import configparser
from pathlib import Path
warnings.warn(
"The hrrrb API is deprecated. Use the new Herbie API instead.",
DeprecationWarning
)
########################################################################
# Load custom xarray accessors
try:
import hrrrb.accessors
except:
warnings.warn("HRRR-B's xarray accessors could not be imported.")
pass
########################################################################
# Configure HRRR-B
# Configuration file is save in `~/config/hrrrb/config.cfg`
# `_default_save_dir` is the default path to save GRIB2 files.
config = configparser.ConfigParser()
_config_path = Path('~').expanduser() / '.config' / 'hrrrb' / 'config.cfg'
user_home_default = str(Path('~').expanduser() / 'data')
if not _config_path.exists():
_config_path.parent.mkdir(parents=True)
_config_path.touch()
config.read(_config_path)
config.add_section('download')
config.set('download', 'default_save_dir', user_home_default)
with open(_config_path, 'w') as configfile:
config.write(configfile)
print(f'⚙ Created config file [{_config_path}]',
f'with default download directory set as [{user_home_default}]')
config.read(_config_path)
try:
_default_save_dir = Path(config.get('download', 'default_save_dir'))
except:
print(f'🦁🐯🐻 oh my! {_config_path} looks weird,',
f'but I will add a new section')
config.add_section('download')
config.set('download', 'default_save_dir', user_home_default)
with open(_config_path, 'w') as configfile:
config.write(configfile)
_default_save_dir = Path(config.get('download', 'default_save_dir')) |
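# After the first run, ~/.config/hrrrb/config.cfg contains roughly (illustrative):
#   [download]
#   default_save_dir = /home/<username>/data
# Editing default_save_dir changes where downloaded GRIB2 files are stored.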
the-stack_106_27868 | """
Dataset setting and data loader for MNIST.
Adapted from https://github.com/corenel/pytorch-adda/tree/master/datasets
"""
import torch
from torchvision import datasets, transforms
import numpy as np
def get_mnist_shift(train, batch_size=32, drop_last=True, num_channel=3, image_size=28,
total_sample=5000, ratio=[0.3, 0.3, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05], logger=None):
"""Get MNIST dataset loader."""
# image pre-processing
pre_process = transforms.Compose([transforms.Resize(image_size),
transforms.Grayscale(num_channel),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.5] * num_channel,
std=[0.5] * num_channel)])
# dataset and data loader
mnist_dataset = datasets.MNIST(root='../dann_dataset/',
train=train,
transform=pre_process,
download=True)
mnist_data_loader = torch.utils.data.DataLoader(
dataset=mnist_dataset,
batch_size=1,
shuffle=True,
drop_last=False)
data = torch.zeros((len(mnist_data_loader), num_channel, image_size, image_size))
label = torch.zeros(len(mnist_data_loader))
for i, (data_, target) in enumerate(mnist_data_loader):
data[i] = data_
label[i] = target
# ----------------------Subsampling the dataset ---------------------------
c = len(torch.unique(label))
n = label.size(0)
ind = [[j for j in range(n) if label[j] == i] for i in range(c)]
nb_sample_class = [len(ind[i]) for i in range(c)]
logger.info(f'sample per class in data before subsampling: {nb_sample_class} / sum={np.sum(nb_sample_class)}')
logger.info(f'ratio*total: {np.array(ratio) * total_sample} / sum={np.sum(np.array(ratio) * total_sample)}')
all_index = torch.zeros(0).long()
for i in range(c):
perm = torch.randperm(nb_sample_class[i])
ind_classe = label.eq(i).nonzero()
ind = ind_classe[perm[:int(ratio[i] * total_sample)].long()]
all_index = torch.cat((all_index, ind))
label = label[all_index].squeeze()
data = data[all_index][:, 0, :, :, :]
full_data = torch.utils.data.TensorDataset(data, label.long())
mnist_data_loader = torch.utils.data.DataLoader(
dataset=full_data,
batch_size=batch_size,
shuffle=True,
drop_last=drop_last)
return mnist_data_loader
|
the-stack_106_27869 | import sqlite3
import os
import json
import pandas.io.sql as sqlio
class MapDb(object):
def __init__(self, folder="./db"):
self.conn_str = os.path.join(folder, 'dublin-bus.sqlite')
def connect(self):
return sqlite3.connect(self.conn_str, check_same_thread=False)
def disconnect(self):
pass
def insert_nodes(self, nodes):
conn = self.connect()
cur = conn.cursor()
cur.executemany('''
INSERT INTO node
(node_id, version, changeset, timestamp, uid, lat, lon)
values (?,?,?,?,?,?,?)
''', nodes)
conn.commit()
cur.close()
conn.close()
def insert_node_tags(self, node_tags):
conn = self.connect()
cur = conn.cursor()
cur.executemany('''
INSERT INTO node_tag
(node_id, tag_key, tag_value)
values (?,?,?)
''', node_tags)
conn.commit()
cur.close()
conn.close()
def insert_ways(self, ways):
conn = self.connect()
cur = conn.cursor()
cur.executemany('''
INSERT INTO way
(way_id, version, changeset, timestamp, uid)
values (?,?,?,?,?)
''', ways)
conn.commit()
cur.close()
conn.close()
def insert_way_nodes(self, way_nodes):
conn = self.connect()
cur = conn.cursor()
cur.executemany('''
INSERT INTO way_node
(way_id, node_uid)
values (?,?)
''', way_nodes)
conn.commit()
cur.close()
conn.close()
def insert_way_tagss(self, way_tags):
conn = self.connect()
cur = conn.cursor()
cur.executemany('''
INSERT INTO way_tag
(way_id, tag_key, tag_value)
values (?,?,?)
''', way_tags)
conn.commit()
cur.close()
conn.close()
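# Minimal usage sketch (illustrative; assumes the ./db folder and the tables referenced
# by the INSERT statements already exist in dublin-bus.sqlite):
#   db = MapDb(folder="./db")
#   db.insert_nodes([(1, 1, 10, "2021-01-01T00:00:00Z", 42, 53.35, -6.26)])
#   db.insert_node_tags([(1, "highway", "bus_stop")])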
|
the-stack_106_27870 | from concurrent.futures import ThreadPoolExecutor
from dataclasses import dataclass
from queue import Queue
from typing import Any, Dict, List, Tuple, Callable, Union
import numpy as np
import torch
import torch.distributed as dist
from torch.utils.data.dataset import IterableDataset
from transformers import BertTokenizerFast
class DatasetForMatching(IterableDataset):
def __init__(
self,
file_path: str,
tokenizer: Union[BertTokenizerFast, str] = "bert-base-uncased",
):
self.data_file = open(file_path, "r", encoding="utf-8")
if isinstance(tokenizer, str):
self.tokenizer = BertTokenizerFast.from_pretrained(tokenizer)
else:
self.tokenizer = tokenizer
def process(self, input_line):
# Input file format:
# Example:
# A simple algorithm for Boolean operations on polygons|'|Geometric modelling based on simplicial chains|'|Boolean operations on general planar polygons|'|Reentrant polygon clipping|'|Plane-sweep algorithms for intersecting geometric figures|'|A new algorithm for computing Boolean operations on polygons An analysis and algorithm for polygon clipping|'|Set Membership Classification: A Unified Approach to Geometric Intersection Problems|'|Reentrant polygon clipping|'|Hidden surface removal using polygon area sorting|'|Polygon comparison using a graph representation|'|A New Concept and Method for Line Clipping
# Balanced Multifilter Banks for Multiple Description Coding|'|Balanced multiwavelets|'|On minimal lattice factorizations of symmetric-antisymmetric multifilterbanks|'|High-order balanced multiwavelets: theory, factorization, and design|'|Single-Trial Multiwavelet Coherence in Application to Neurophysiological Time Series|'|The application of multiwavelet filterbanks to image processing Armlets and balanced multiwavelets: flipping filter construction|'|Multiwavelet prefilters. II. Optimal orthogonal prefilters|'|Regularity of multiwavelets|'|Balanced GHM-like multiscaling functions|'|A new prefilter design for discrete multiwavelet transforms|'|Balanced multiwavelets with short filters
query_and_neighbors, key_and_neighbors = input_line.strip('\n').split('\t')[:2]
query_and_neighbors = query_and_neighbors.split('|\'|')
key_and_neighbors = key_and_neighbors.split('|\'|')
tokens_query_and_neighbors = self.tokenizer.batch_encode_plus(query_and_neighbors, add_special_tokens=False)[
'input_ids']
tokens_key_and_neighbors = self.tokenizer.batch_encode_plus(key_and_neighbors, add_special_tokens=False)[
'input_ids']
return tokens_query_and_neighbors, tokens_key_and_neighbors
def __iter__(self):
for line in self.data_file:
yield self.process(line)
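# Minimal usage sketch (illustrative; the file path and hyper-parameters are placeholders):
#   dataset = DatasetForMatching("train.tsv", tokenizer="bert-base-uncased")
#   collator = DataCollatorForMatching(mlm=True, neighbor_num=5, token_length=32)
#   loader = torch.utils.data.DataLoader(dataset, batch_size=8, collate_fn=collator)
#   batch = next(iter(loader))   # dict of padded tensors, see DataCollatorForMatching.__call__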
@dataclass
class DataCollatorForMatching:
mlm: bool
neighbor_num: int
token_length: int
tokenizer: Union[BertTokenizerFast, str] = "bert-base-uncased"
mlm_probability: float = 0.15
random_seed: int = 42
def __post_init__(self):
if isinstance(self.tokenizer, str):
self.tokenizer = BertTokenizerFast.from_pretrained(self.tokenizer)
self.random_state = np.random.RandomState(seed=self.random_seed)
def __call__(self, samples: List[List[List[List[int]]]]) -> Dict[str, torch.Tensor]:
input_ids_query_and_neighbors_batch = []
attention_mask_query_and_neighbors_batch = []
mask_query_and_neighbors_batch = []
input_ids_key_and_neighbors_batch = []
attention_mask_key_and_neighbors_batch = []
mask_key_and_neighbors_batch = []
for i, sample in (enumerate(samples)):
input_ids_query_and_neighbors, attention_mask_query_and_neighbors, mask_query_and_neighbors, \
input_ids_key_and_neighbors, attention_mask_key_and_neighbors, mask_key_and_neighbors = self.create_training_sample(
sample)
input_ids_query_and_neighbors_batch.append(input_ids_query_and_neighbors)
attention_mask_query_and_neighbors_batch.append(attention_mask_query_and_neighbors)
mask_query_and_neighbors_batch.append(mask_query_and_neighbors)
input_ids_key_and_neighbors_batch.append(input_ids_key_and_neighbors)
attention_mask_key_and_neighbors_batch.append(attention_mask_key_and_neighbors)
mask_key_and_neighbors_batch.append(mask_key_and_neighbors)
if self.mlm:
input_ids_query_and_neighbors_batch, mlm_labels_query_batch = self.mask_tokens(
self._tensorize_batch(input_ids_query_and_neighbors_batch, self.tokenizer.pad_token_id),
self.tokenizer.mask_token_id)
input_ids_key_and_neighbors_batch, mlm_labels_key_batch = self.mask_tokens(
self._tensorize_batch(input_ids_key_and_neighbors_batch, self.tokenizer.pad_token_id),
self.tokenizer.mask_token_id)
else:
input_ids_query_and_neighbors_batch = self._tensorize_batch(input_ids_query_and_neighbors_batch,
self.tokenizer.pad_token_id)
input_ids_key_and_neighbors_batch = self._tensorize_batch(input_ids_key_and_neighbors_batch,
self.tokenizer.pad_token_id)
attention_mask_query_and_neighbors_batch = self._tensorize_batch(attention_mask_query_and_neighbors_batch, 0)
attention_mask_key_and_neighbors_batch = self._tensorize_batch(attention_mask_key_and_neighbors_batch, 0)
mask_query_and_neighbors_batch = self._tensorize_batch(mask_query_and_neighbors_batch, 0)
mask_key_and_neighbors_batch = self._tensorize_batch(mask_key_and_neighbors_batch, 0)
return {
"input_ids_query_and_neighbors_batch": input_ids_query_and_neighbors_batch,
"attention_mask_query_and_neighbors_batch": attention_mask_query_and_neighbors_batch,
"mlm_labels_query_batch": mlm_labels_query_batch if self.mlm else None,
"mask_query_and_neighbors_batch": mask_query_and_neighbors_batch,
"input_ids_key_and_neighbors_batch": input_ids_key_and_neighbors_batch,
"attention_mask_key_and_neighbors_batch": attention_mask_key_and_neighbors_batch,
"mlm_labels_key_batch": mlm_labels_key_batch if self.mlm else None,
"mask_key_and_neighbors_batch": mask_key_and_neighbors_batch,
}
def _tensorize_batch(self, sequences: Union[List[torch.Tensor], List[List[torch.Tensor]]],
padding_value) -> torch.Tensor:
if len(sequences[0].size()) == 1:
max_len_1 = max([s.size(0) for s in sequences])
out_dims = (len(sequences), max_len_1)
out_tensor = sequences[0].new_full(out_dims, padding_value)
for i, tensor in enumerate(sequences):
length_1 = tensor.size(0)
out_tensor[i, :length_1] = tensor
return out_tensor
elif len(sequences[0].size()) == 2:
max_len_1 = max([s.size(0) for s in sequences])
max_len_2 = max([s.size(1) for s in sequences])
out_dims = (len(sequences), max_len_1, max_len_2)
out_tensor = sequences[0].new_full(out_dims, padding_value)
for i, tensor in enumerate(sequences):
length_1 = tensor.size(0)
length_2 = tensor.size(1)
out_tensor[i, :length_1, :length_2] = tensor
return out_tensor
else:
            raise ValueError("can only pad batches of 1D or 2D tensor sequences")
def create_training_sample(self, sample: List[List[List[int]]]):
def process_node_and_neighbors(tokens_node_and_neighbors):
max_num_tokens = self.token_length - self.tokenizer.num_special_tokens_to_add(pair=False)
input_ids_node_and_neighbors, attention_mask_node_and_neighbors, mask_node_and_neighbors = [], [], []
for i, tokens in enumerate(tokens_node_and_neighbors):
if i > self.neighbor_num: break
input_ids_node_and_neighbors.append(
torch.tensor(self.tokenizer.build_inputs_with_special_tokens(tokens[:max_num_tokens])))
attention_mask_node_and_neighbors.append(torch.tensor([1] * len(input_ids_node_and_neighbors[-1])))
if len(tokens) == 0:
mask_node_and_neighbors.append(torch.tensor(0))
else:
mask_node_and_neighbors.append(torch.tensor(1))
input_ids_node_and_neighbors = self._tensorize_batch(input_ids_node_and_neighbors,
self.tokenizer.pad_token_id)
attention_mask_node_and_neighbors = self._tensorize_batch(attention_mask_node_and_neighbors, 0)
mask_node_and_neighbors = torch.stack(mask_node_and_neighbors)
return input_ids_node_and_neighbors, attention_mask_node_and_neighbors, mask_node_and_neighbors
tokens_query_and_neighbors, tokens_key_and_neighbors = sample
input_ids_query_and_neighbors, attention_mask_query_and_neighbors, mask_query_and_neighbors = process_node_and_neighbors(
tokens_query_and_neighbors)
input_ids_key_and_neighbors, attention_mask_key_and_neighbors, mask_key_and_neighbors = process_node_and_neighbors(
tokens_key_and_neighbors)
return input_ids_query_and_neighbors, attention_mask_query_and_neighbors, mask_query_and_neighbors, \
input_ids_key_and_neighbors, attention_mask_key_and_neighbors, mask_key_and_neighbors
def mask_tokens(self, inputs_origin: torch.Tensor, mask_id: int) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Prepare masked tokens inputs/labels for masked language modeling.
"""
inputs = inputs_origin.clone()
labels = torch.zeros((inputs.shape[0], inputs.shape[2]), dtype=torch.long) - 100
for i in range(len(inputs_origin)):
input_origin = inputs_origin[i][0]
input = inputs[i][0]
mask_num, valid_length = 0, 0
start_indexes = []
for index, x in enumerate(input_origin):
if int(x) not in self.tokenizer.all_special_ids:
valid_length += 1
start_indexes.append(index)
labels[i][index] = -99
self.random_state.shuffle(start_indexes)
if valid_length > 0:
while mask_num / valid_length < self.mlm_probability:
start_index = start_indexes.pop()
span_length = 1e9
while span_length > 10: span_length = np.random.geometric(0.2)
for j in range(start_index, min(start_index + span_length, len(input_origin))):
if labels[i][j] != -99: continue
labels[i][j] = input_origin[j].clone()
rand = self.random_state.random()
if rand < 0.8:
input[j] = mask_id
elif rand < 0.9:
input[j] = self.random_state.randint(0, self.tokenizer.vocab_size - 1)
mask_num += 1
if mask_num / valid_length >= self.mlm_probability:
break
labels[i] = torch.masked_fill(labels[i], labels[i] < 0, -100)
return inputs, labels
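# Hedged usage sketch (not part of the original file): exercises the collator above on a
# toy sample. The token ids are made up and the default "bert-base-uncased" tokenizer must
# be downloadable or cached locally; nothing here is executed at import time.
def _collator_usage_sketch():
    collator = DataCollatorForMatching(mlm=True, neighbor_num=2, token_length=16)
    # one sample = (tokens_query_and_neighbors, tokens_key_and_neighbors), each a list of
    # token-id lists: the node's own text first, then its neighbors
    sample = ([[2023, 2003, 1037], [7953]], [[2178, 7953], [2742]])
    batch = collator([sample, sample])
    # 3D tensor: (batch size, 1 + neighbors present, padded token length)
    return batch["input_ids_query_and_neighbors_batch"].shape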
@dataclass
class MultiProcessDataLoader:
dataset: IterableDataset
batch_size: int
collate_fn: Callable
local_rank: int
world_size: int
global_end: Any
blocking: bool = False
drop_last: bool = True
def _start(self):
self.local_end = False
self.aval_count = 0
self.outputs = Queue(10)
self.pool = ThreadPoolExecutor(1)
self.pool.submit(self._produce)
def _produce(self):
for batch in self._generate_batch():
self.outputs.put(batch)
self.aval_count += 1
self.pool.shutdown(wait=False)
raise
def _generate_batch(self):
batch = []
for i, sample in enumerate(self.dataset):
if i % self.world_size != self.local_rank: continue
batch.append(sample)
if len(batch) >= self.batch_size:
yield self.collate_fn(batch[:self.batch_size])
batch = batch[self.batch_size:]
else:
if len(batch) > 0 and not self.drop_last:
yield self.collate_fn(batch)
batch = []
self.local_end = True
def __iter__(self):
if self.blocking:
return self._generate_batch()
self._start()
return self
def __next__(self):
dist.barrier()
while self.aval_count == 0:
if self.local_end or self.global_end.value:
self.global_end.value = True
break
dist.barrier()
if self.global_end.value:
raise StopIteration
next_batch = self.outputs.get()
self.aval_count -= 1
return next_batch
@dataclass
class SingleProcessDataLoader:
dataset: IterableDataset
batch_size: int
collate_fn: Callable
blocking: bool = False
drop_last: bool = True
def _start(self):
self.local_end = False
self.aval_count = 0
self.outputs = Queue(10)
self.pool = ThreadPoolExecutor(1)
self.pool.submit(self._produce)
def _produce(self):
for batch in self._generate_batch():
self.outputs.put(batch)
self.aval_count += 1
self.pool.shutdown(wait=False)
raise
def _generate_batch(self):
batch = []
for i, sample in enumerate(self.dataset):
batch.append(sample)
if len(batch) >= self.batch_size:
yield self.collate_fn(batch[:self.batch_size])
batch = batch[self.batch_size:]
else:
if len(batch) > 0 and not self.drop_last:
yield self.collate_fn(batch)
batch = []
self.local_end = True
def __iter__(self):
if self.blocking:
return self._generate_batch()
self._start()
return self
def __next__(self):
while self.aval_count == 0:
if self.local_end: raise StopIteration
next_batch = self.outputs.get()
self.aval_count -= 1
return next_batch
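# Hedged wiring sketch (assumption: `my_dataset` is an instance of the iterable dataset
# defined at the top of this file, yielding (query, key) token structures):
#   collate = DataCollatorForMatching(mlm=True, neighbor_num=5, token_length=64)
#   loader = SingleProcessDataLoader(dataset=my_dataset, batch_size=8,
#                                    collate_fn=collate, blocking=True)
#   for batch in loader:  # blocking=True iterates in the caller's thread, no queue used
#       pass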
|
the-stack_106_27871 | import requests
import networkx as nx
import matplotlib.pyplot as plt
from pyvis.network import Network
import numpy as np
from Graph import *
import os
import re
class Crawler(Graph):
# Constructor
def __init__(self, template, root_page):
self.template = template
self.root_page = root_page
self.pages = set()
self.graph = nx.MultiDiGraph()
self.network = Network()
# Method to parse url and extract html
def parse(self, page):
'''
cmd = 'curl https://lomando.com/main.html | grep "\.html"'
os.system(cmd)
'''
response = requests.get(self.template + page)
        return re.findall(r"[a-zA-Z]+\.html", response.text)
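    # Hedged illustration (page names are made up): for a page whose HTML links to
    # "about.html" and "contact.html", parse() returns ['about.html', 'contact.html'];
    # duplicates are possible, which is why crawl() wraps the result in set().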
# Method to create the directed graph (BFS)
def crawl(self):
# Initializing queue with root
queue = [self.root_page]
# Going till queue empty
while (len(queue) > 0):
parent = queue.pop(0)
# Adding to pages and queue
self.pages.add(parent)
children = set(self.parse(parent))
queue = queue + list(children - self.pages)
# Adding graph nodes and edges
self.graph.add_node(parent)
for child in children:
self.graph.add_edge(parent, child)
# Method to return a sorted list of the visited pages
def sorted_pages(self):
return sorted(list(self.pages))
if __name__ == "__main__":
# Initializing
print("Testing initialization:")
crawler = Crawler("https://lomando.com/", "main.html")
print(crawler.template + crawler.root_page)
print()
# Test for parse method
print("Testing parse function:")
print(crawler.parse("main.html"))
print(set(crawler.parse("main.html")))
print()
# Test for crawl method
print("Testing crawl funciton:")
crawler.crawl()
print(crawler.sorted_pages())
print(crawler.graph)
# Testing draw with matplotlib
crawler.plot()
# Testing draw with pyvis
crawler.plot_pretty()
|
the-stack_106_27872 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""External function interface to MIOpen library."""
# pylint: disable-msg=C0103
import ctypes
import numpy as np
import tvm
import tvm._ffi
from tvm import te
def _get_np_int32_array_handle(arr):
"""Return a void_p handle for a numpy array
Parameters
----------
arr: numpy.NDArray
source numpy array
Returns
-------
ptr: ctypes.c_void_p
pointer to the data
"""
assert arr.dtype == np.int32
ptr = arr.ctypes.data_as(ctypes.POINTER(ctypes.c_int32))
return ctypes.cast(ptr, ctypes.c_void_p)
def conv2d_forward(x,
w,
stride_h=1,
stride_w=1,
pad_h=0,
pad_w=0,
dilation_h=1,
dilation_w=1,
conv_mode=0,
data_type=1,
group_count=1):
"""Create an extern op that compute 2D convolution with MIOpen
Parameters
----------
x: Tensor
input feature map
w: Tensor
convolution weight
stride_h: int
height stride
stride_w: int
width stride
pad_h: int
height pad
pad_w: int
weight pad
dilation_h: int
height dilation
dilation_w: int
width dilation
conv_mode: int
0: miopenConvolution
1: miopenTranspose
data_type: int
0: miopenHalf (fp16)
1: miopenFloat (fp32)
group_count: int
number of groups
Returns
-------
y: Tensor
The result tensor
"""
assert (0 <= conv_mode <= 2), "0: miopenConvolution / 1: miopenTranspose / 2: miopenGroupConv"
if group_count > 1:
conv_mode = 2
oshape = np.zeros((len(x.shape)), dtype=np.int32)
xshape = x.shape
wshape = w.shape
setup_func = tvm._ffi.get_global_func("tvm.contrib.miopen.conv2d.setup")
algo = setup_func(conv_mode,
data_type,
pad_h,
pad_w,
stride_h,
stride_w,
dilation_h,
dilation_w,
xshape[0].value,
xshape[1].value,
xshape[2].value,
xshape[3].value,
wshape[0].value,
wshape[1].value,
wshape[2].value,
wshape[3].value,
group_count,
_get_np_int32_array_handle(oshape))
return te.extern(
list(oshape), [x, w],
lambda ins, outs: tvm.tir.call_packed(
"tvm.contrib.miopen.conv2d.forward",
conv_mode,
data_type,
pad_h,
pad_w,
stride_h,
stride_w,
dilation_h,
dilation_w,
algo,
ins[0],
ins[1],
outs[0]), name="y")
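# Hedged usage sketch (shapes are illustrative assumptions; actually lowering and running
# the extern op requires a TVM build with ROCm/MIOpen enabled):
#   x = te.placeholder((1, 3, 32, 32), name="x")
#   w = te.placeholder((8, 3, 3, 3), name="w")
#   y = conv2d_forward(x, w, stride_h=1, stride_w=1, pad_h=1, pad_w=1)
#   s = te.create_schedule(y.op)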
|
the-stack_106_27874 | #!/usr/bin/env python3
# Copyright 2021-2022 Efabless Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from typing import Dict, List
sys.path.append(os.path.dirname(__file__))
import includedyaml as yaml # noqa: E402
class Tool(object):
by_name: Dict[str, "Tool"]
def __init__(
self,
name,
repo,
commit,
build_script="make && make install",
default_branch=None,
in_install=True,
in_container=True,
dependencies=[],
pdk=False,
):
self.name = name
self.repo = repo
self.commit = commit
self.build_script = build_script
self.default_branch = default_branch
self.in_install = in_install
self.in_container = in_container
self.dependencies = dependencies
self.pdk = pdk
def __repr__(self) -> str:
return f"<Tool {self.name} (using {self.repo_pretty or 'None'}@{self.commit or 'None'})>"
@property
def repo_pretty(self):
gh_prefix = "https://github.com/"
repo = self.repo
if repo is not None and repo.startswith(gh_prefix):
return repo[len(gh_prefix) :]
return repo
@property
def version_string(self) -> str:
return f"{self.repo or 'None'}:{self.commit or 'None'}"
def get_docker_tag(self, for_os: str) -> str:
return f"{self.name}-{self.commit}-{for_os}"
@property
def docker_args(self) -> List[str]:
return [
"--build-arg",
f"{self.name.upper()}_REPO={self.repo}",
"--build-arg",
f"{self.name.upper()}_COMMIT={self.commit}",
]
@staticmethod
def from_metadata_yaml(metadata_yaml: str) -> Dict[str, "Tool"]:
final_dict = {}
tool_list = yaml.load(metadata_yaml, Loader=yaml.SafeLoader)
for tool in tool_list:
final_dict[tool["name"]] = Tool(
name=tool["name"],
repo=tool["repo"],
commit=tool["commit"],
build_script=tool.get("build") or "",
default_branch=tool.get("default_branch") or None,
in_container=tool["in_container"]
if tool.get("in_container") is not None
else True,
in_install=tool["in_install"]
if tool.get("in_install") is not None
else True,
dependencies=tool.get("dependencies") or [],
pdk=tool.get("pdk") or False,
)
return final_dict
Tool.by_name = Tool.from_metadata_yaml(
open(os.path.join(os.path.dirname(__file__), "tool_metadata.yml")).read()
)
def main():
import os
import argparse
parser = argparse.ArgumentParser(description="Get Tool Info")
parser.add_argument("--containerized", action="store_true")
parser.add_argument("--docker-args", action="store_true")
parser.add_argument("--no-pdks", action="store_true")
parser.add_argument("--docker-tag-for-os", default=None)
parser.add_argument("--field", "-f")
parser.add_argument("tool")
args = parser.parse_args()
if args.no_pdks:
pdk_keys = []
for key, value in Tool.by_name.items():
if value.pdk:
pdk_keys.append(key)
for key in pdk_keys:
del Tool.by_name[key]
if args.containerized:
for tool in Tool.by_name.values():
if tool.in_container:
print(tool.name, end=" ")
exit(0)
try:
tool = Tool.by_name[args.tool]
except Exception:
print(f"Unknown tool {args.tool}.", file=sys.stderr)
exit(os.EX_DATAERR)
if args.docker_tag_for_os:
print(tool.get_docker_tag(for_os=args.docker_tag_for_os))
elif args.docker_args:
arg_list = tool.docker_args
# 1. Dependents
dependents = []
for dependent in Tool.by_name.values():
if tool.name in dependent.dependencies:
dependents.append(dependent)
for dependent in dependents:
arg_list += dependent.docker_args
# 2. Dependencies
for dependency_name in tool.dependencies:
dependency = Tool.by_name[dependency_name]
arg_list += dependency.docker_args
print(" ".join(arg_list), end="")
elif args.field:
field = tool.__dict__[args.field]
print(field, end="")
else:
parser.print_help(file=sys.stderr)
exit(os.EX_USAGE)
if __name__ == "__main__":
main()
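# Hedged CLI sketch (the script name and the tool name "openroad_app" are assumptions;
# any tool listed in tool_metadata.yml works, flag names come from the argparse setup above):
#   python3 get_tool.py --field commit openroad_app
#   python3 get_tool.py --docker-args openroad_app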
|
the-stack_106_27877 | """
In this code we include a lightweight adaption algorithm for dominating relationships computing. The code is adapted from Networkx
Please see the original one at https://networkx.org/documentation/stable/_modules/networkx/algorithms/dominance.html#dominance_frontiers
TODO: implement a datastructure such as G from networkx for control flow graph. so that networkx is not required in this library.
"""
import networkx as nx
from functools import reduce
def immediate_dominators(G, start):
"""Returns the immediate dominators of all nodes of a directed graph.
Parameters
----------
G : a DiGraph or MultiDiGraph
The graph where dominance is to be computed.
start : node
The start node of dominance computation.
Returns
-------
idom : dict keyed by nodes
A dict containing the immediate dominators of each node reachable from
`start`.
Raises
------
NetworkXNotImplemented
If `G` is undirected.
NetworkXError
If `start` is not in `G`.
Notes
-----
Except for `start`, the immediate dominators are the parents of their
corresponding nodes in the dominator tree.
Examples
--------
>>> G = nx.DiGraph([(1, 2), (1, 3), (2, 5), (3, 4), (4, 5)])
>>> sorted(nx.immediate_dominators(G, 1).items())
[(1, 1), (2, 1), (3, 1), (4, 3), (5, 1)]
References
----------
.. [1] K. D. Cooper, T. J. Harvey, and K. Kennedy.
A simple, fast dominance algorithm.
Software Practice & Experience, 4:110, 2001.
"""
if start not in G:
raise nx.NetworkXError("start is not in G")
idom = {start: start}
order = list(nx.dfs_postorder_nodes(G, start))
dfn = {u: i for i, u in enumerate(order)}
order.pop()
order.reverse()
def intersect(u, v):
while u != v:
while dfn[u] < dfn[v]:
u = idom[u]
while dfn[u] > dfn[v]:
v = idom[v]
return u
changed = True
while changed:
changed = False
for u in order:
new_idom = reduce(intersect, (v for v in G.pred[u] if v in idom))
if u not in idom or idom[u] != new_idom:
idom[u] = new_idom
changed = True
return idom
def dominance_frontiers(G, start):
"""Returns the dominance frontiers of all nodes of a directed graph.
Parameters
----------
G : a DiGraph or MultiDiGraph
The graph where dominance is to be computed.
start : node
The start node of dominance computation.
Returns
-------
df : dict keyed by nodes
A dict containing the dominance frontiers of each node reachable from
`start` as lists.
Raises
------
NetworkXNotImplemented
If `G` is undirected.
NetworkXError
If `start` is not in `G`.
Examples
--------
>>> G = nx.DiGraph([(1, 2), (1, 3), (2, 5), (3, 4), (4, 5)])
>>> sorted((u, sorted(df)) for u, df in nx.dominance_frontiers(G, 1).items())
[(1, []), (2, [5]), (3, [5]), (4, [5]), (5, [])]
References
----------
.. [1] K. D. Cooper, T. J. Harvey, and K. Kennedy.
A simple, fast dominance algorithm.
Software Practice & Experience, 4:110, 2001.
"""
idom = immediate_dominators(G, start)
df = {u: set() for u in idom}
for u in idom:
if len(G.pred[u]) >= 2:
for v in G.pred[u]:
if v in idom:
while v != idom[u]:
df[v].add(u)
v = idom[v]
return df
def main():
G = nx.DiGraph([(1, 2), (1, 3), (2, 5), (3, 4), (4, 5)])
res = sorted((u, sorted(df)) for u, df in dominance_frontiers(G, 1).items())
assert res == [(1, []), (2, [5]), (3, [5]), (4, [5]), (5, [])]
if __name__ == "__main__":
main() |
the-stack_106_27878 | import random
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.neighbors import NearestNeighbors
from flask import Flask, request, render_template
import cloudpickle
app = Flask(__name__, template_folder="templates")
with open("model.pkl", "rb") as f:
model = cloudpickle.load(f)
@app.route("/")
def index():
repos = model.cv.get_feature_names()
random_candy = ", ".join(random.choices(repos, k=5))
return render_template("index.html", random_candy=random_candy)
@app.route("/result", methods=["POST"])
def predict():
candy = request.form["candy"]
candy = ",".join([c.strip() for c in candy.split(", ")])
suggestions = model.predict([candy])[0]
random.shuffle(suggestions)
return render_template("result.html", suggestions=suggestions[:5])
if __name__ == "__main__":
app.run(debug=True, port=8000)
|
the-stack_106_27879 | # -*- coding: utf-8 -*-
# @Time : 2020/6/16 23:51
# @Author : zonas.wang
# @Email : [email protected]
# @File : inference.py
import math
import os
import os.path as osp
import time
import tensorflow as tf
import cv2
import numpy as np
import pyclipper
from shapely.geometry import Polygon
from tqdm import tqdm
from backend.text_detector_service.model import DBNet
from backend.text_detector_service.config import DBConfig
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
cfg = DBConfig()
def resize_image(image, image_short_side=736):
height, width, _ = image.shape
if height < width:
new_height = image_short_side
new_width = int(math.ceil(new_height / height * width / 32) * 32)
else:
new_width = image_short_side
new_height = int(math.ceil(new_width / width * height / 32) * 32)
resized_img = cv2.resize(image, (new_width, new_height))
return resized_img
def box_score_fast(bitmap, _box):
h, w = bitmap.shape[:2]
box = _box.copy()
    xmin = np.clip(np.floor(box[:, 0].min()).astype(np.int32), 0, w - 1)
    xmax = np.clip(np.ceil(box[:, 0].max()).astype(np.int32), 0, w - 1)
    ymin = np.clip(np.floor(box[:, 1].min()).astype(np.int32), 0, h - 1)
    ymax = np.clip(np.ceil(box[:, 1].max()).astype(np.int32), 0, h - 1)
mask = np.zeros((ymax - ymin + 1, xmax - xmin + 1), dtype=np.uint8)
box[:, 0] = box[:, 0] - xmin
box[:, 1] = box[:, 1] - ymin
cv2.fillPoly(mask, box.reshape(1, -1, 2).astype(np.int32), 1)
return cv2.mean(bitmap[ymin:ymax + 1, xmin:xmax + 1], mask)[0]
def unclip(box, unclip_ratio=1.5):
poly = Polygon(box)
distance = poly.area * unclip_ratio / poly.length
offset = pyclipper.PyclipperOffset()
offset.AddPath(box, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON)
expanded = np.array(offset.Execute(distance))
return expanded
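# Hedged illustration (coordinates are made up): unclip() offsets the box outward by
# distance = area * unclip_ratio / perimeter, roughly undoing the shrink applied to text
# polygons when DB-style ground truth is generated, e.g.
#   unclip(np.array([[0, 0], [10, 0], [10, 4], [0, 4]]), unclip_ratio=1.5)
# returns the expanded polygon as an array of integer vertex coordinates.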
def get_mini_boxes(contour):
if not contour.size:
return [], 0
bounding_box = cv2.minAreaRect(contour)
points = sorted(list(cv2.boxPoints(bounding_box)), key=lambda x: x[0])
index_1, index_2, index_3, index_4 = 0, 1, 2, 3
if points[1][1] > points[0][1]:
index_1 = 0
index_4 = 1
else:
index_1 = 1
index_4 = 0
if points[3][1] > points[2][1]:
index_2 = 2
index_3 = 3
else:
index_2 = 3
index_3 = 2
box = [points[index_1], points[index_2],
points[index_3], points[index_4]]
return box, min(bounding_box[1])
def polygons_from_bitmap(pred, bitmap, dest_width, dest_height, max_candidates=500, box_thresh=0.7):
pred = pred[..., 0]
bitmap = bitmap[..., 0]
height, width = bitmap.shape
boxes = []
scores = []
contours, _ = cv2.findContours((bitmap * 255).astype(np.uint8), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
for contour in contours[:max_candidates]:
epsilon = 0.001 * cv2.arcLength(contour, True)
approx = cv2.approxPolyDP(contour, epsilon, True)
points = approx.reshape((-1, 2))
if points.shape[0] < 4:
continue
score = box_score_fast(pred, points.reshape(-1, 2))
if box_thresh > score:
continue
if points.shape[0] > 2:
box = unclip(points, unclip_ratio=2.0)
if len(box) > 1:
continue
else:
continue
box = box.reshape(-1, 2)
_, sside = get_mini_boxes(box.reshape((-1, 1, 2)))
if sside < 5:
continue
box[:, 0] = np.clip(np.round(box[:, 0] / width * dest_width), 0, dest_width)
box[:, 1] = np.clip(np.round(box[:, 1] / height * dest_height), 0, dest_height)
boxes.append(box.tolist())
scores.append(score)
return boxes, scores
def main():
BOX_THRESH = 0.5
mean = np.array([103.939, 116.779, 123.68])
model_path = "checkpoints/2020-07-24/db_83_2.0894_1.9788.h5"
img_dir = 'datasets/test/input'
img_names = os.listdir(img_dir)
model = DBNet(cfg, model='inference')
model.load_weights(model_path, by_name=True, skip_mismatch=True)
for img_name in tqdm(img_names):
img_path = osp.join(img_dir, img_name)
image = cv2.imread(img_path)
src_image = image.copy()
h, w = image.shape[:2]
image = resize_image(image)
image = image.astype(np.float32)
image -= mean
image_input = np.expand_dims(image, axis=0)
image_input_tensor = tf.convert_to_tensor(image_input)
start_time = time.time()
p = model.predict(image_input_tensor)[0]
end_time = time.time()
print("time: ", end_time - start_time)
bitmap = p > 0.3
boxes, scores = polygons_from_bitmap(p, bitmap, w, h, box_thresh=BOX_THRESH)
for box in boxes:
cv2.drawContours(src_image, [np.array(box)], -1, (0, 255, 0), 2)
image_fname = osp.split(img_path)[-1]
cv2.imwrite('datasets/test/output/' + image_fname, src_image)
if __name__ == '__main__':
main()
|
the-stack_106_27880 | '''
DDS: DDS image loader
'''
__all__ = ('ImageLoaderDDS', )
from kivy.lib.ddsfile import DDSFile
from kivy.logger import Logger
from kivy.core.image import ImageLoaderBase, ImageData, ImageLoader
class ImageLoaderDDS(ImageLoaderBase):
@staticmethod
def extensions():
return ('dds', )
def load(self, filename):
try:
dds = DDSFile(filename=filename)
except:
Logger.warning('Image: Unable to load image <%s>' % filename)
raise
self.filename = filename
width, height = dds.size
im = ImageData(width, height, dds.dxt, dds.images[0], source=filename,
flip_vertical=False)
if len(dds.images) > 1:
images = dds.images
images_size = dds.images_size
for index in range(1, len(dds.images)):
w, h = images_size[index]
data = images[index]
im.add_mipmap(index, w, h, data, 0)
return [im]
# register
ImageLoader.register(ImageLoaderDDS)
|
the-stack_106_27884 | """methods/crf
CRF module
"""
import sys
import os
import pycrfsuite
from kleis.config.config import MODELS_PATH
from kleis.resources import dataset as kl
def crf_preprocess_candidates(candidates):
"""Receive annotated candidates and return features and labels list"""
features = []
labels = []
for candidate in candidates:
candidate_features = []
candidate_labels = []
for token_features, label in candidate:
candidate_features.append(token_features)
candidate_labels.append(label)
features.append(candidate_features)
labels.append(candidate_labels)
return features, labels
def pycrfsuite_train(annotated_candidates, name="candidates-model.pycrfsuite"):
"""Receive annotated candidates and train model"""
if not kl.path_exists(MODELS_PATH):
print("Info: Models path not found %s" % MODELS_PATH)
os.mkdir(MODELS_PATH)
model = MODELS_PATH + name
if not kl.path_exists(model):
print("Info: Model not found %s" % model)
features, labels = [], []
for candidates in annotated_candidates.values():
candidate_features, candidate_labels = crf_preprocess_candidates(candidates)
features.extend(candidate_features)
labels.extend(candidate_labels)
# pycrfsuite
trainer = pycrfsuite.Trainer(verbose=False)
for xseq, yseq in zip(features, labels):
trainer.append(xseq, yseq)
trainer.set_params({
'c1': 1.0, # coefficient for L1 penalty
'c2': 1e-3, # coefficient for L2 penalty
# 'max_iterations': 50, # stop earlier
# include transitions that are possible, but not observed
'feature.possible_transitions': True
})
trainer.params()
trainer.train(model)
tagger = pycrfsuite.Tagger()
tagger.open(model)
return tagger
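# Hedged usage sketch (the example sentence is an assumption; `annotated_candidates` and
# `pos_sequences` come from kleis' dataset helpers):
#   tagger = pycrfsuite_train(annotated_candidates, name="candidates-model.pycrfsuite")
#   keyphrases = pycrfsuite_label(tagger, pos_sequences,
#                                 "Conditional random fields for keyphrase extraction.")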
def pycrfsuite_label(tagger, pos_sequences, text, context_tokens=1,
features_method="simple", tagging_notation="BILOU", generic_label=True):
"""Receive tagger, pos sequences and text and return labeled text"""
tokens, tokens_span = kl.tokenize_en(text)
tags = kl.tag_text_en(tokens, tokens_span)
dataset_element_fake = {"tags": tags}
candidates_spans = kl.filter_pos_sequences(
dataset_element_fake,
pos_sequences,
annotated=False
)
candidates = kl.candidates_spans_features_labels_from(
candidates_spans, dataset_element_fake,
context_tokens=context_tokens,
features_method=features_method,
tagging_notation=tagging_notation,
generic_label=generic_label
)
candidates_features, _ = crf_preprocess_candidates(candidates)
keyphrases = []
for i, candidate_feaures in enumerate(candidates_features):
labeled_candidate = tagger.tag(candidate_feaures)
if is_keyphrase((labeled_candidate, candidates_spans[i]),
tags, pos_sequences, tagging_notation=tagging_notation):
keyphrase_label_span = labeled_keyphrase_span(
(labeled_candidate, candidates_spans[i]),
tags,
tagging_notation=tagging_notation
)
keyphrase_label, (keyphrase_span_start, keyphrase_span_end) = keyphrase_label_span
keyphrases.append(
("T%d" % (i + 1),
(keyphrase_label, (keyphrase_span_start, keyphrase_span_end)),
text[keyphrase_span_start:keyphrase_span_end])
)
return keyphrases
def is_keyphrase(labeled_candidate, tags, pos_sequences, tagging_notation="BILOU"):
"""Receive labeled candidate and return true or false"""
labels, candidate_spans = labeled_candidate
start, end = candidate_spans["span"]
expected_tokens = end - start
is_valid = False
if tagging_notation == "BIO" or tagging_notation == "BILOU":
postags = list(map(lambda t: t[1], tags[start:end]))
labels_valid = list(map(lambda l: l[2:],
filter(lambda l: l != "O" \
and l[-len("NON-KEYPHRASE"):] != "NON-KEYPHRASE",
labels)))
if len(labels_valid) == expected_tokens \
and postags == pos_sequences[candidate_spans["pos-seq-id"]]["tags"] \
and len(set(labels_valid)) == 1:
is_valid = True
return is_valid
def labeled_keyphrase_span(keyphrase, tags, tagging_notation="BILOU"):
"""Receive labeled keyphrase and return span"""
labeled_candidate, candidate_spans = keyphrase
start, end = candidate_spans["span"]
label = "KEYPHRASE"
if tagging_notation == "BIO" or tagging_notation == "BILOU":
label = list(set(list(filter(lambda lc: lc != "O", labeled_candidate))))[0][2:]
_, _, token_span_start, _ = tags[start]
_, _, token_span_end, _ = tags[end - 1]
span = (token_span_start[0], token_span_end[1])
return label, span
|
the-stack_106_27887 | from collections import defaultdict, OrderedDict
import csv
import logging
import os
import arff
import pandas as pd
class AlgorithmSelectionProblem(object):
def __init__(self, directory):
self.logger = logging.getLogger(__name__)
# Create data structures
self.dir_ = directory
self.algorithm_runs = None
self.configurations = None
self.metafeatures = None
self.read_funcs = {
# "description.txt": self._read_description,
"algorithm_runs.arff": self._read_algorithm_runs,
# "feature_costs.arff": self._read_feature_costs,
"feature_values.arff": self._read_feature_values,
# "feature_runstatus.arff": self._read_feature_runstatus,
# "ground_truth.arff": self._read_ground_truth,
# "cv.arff": self._read_cv,
"configurations.csv": self._read_configurations
}
self.found_files = []
# Read ASLib files
self._find_files()
self._read_files()
def _find_files(self):
'''
find all expected files in self.dir_
fills self.found_files
'''
expected = [
# "description.txt",
"algorithm_runs.arff",
"feature_values.arff",
# "feature_runstatus.arff",
]
optional = ["ground_truth.arff", "feature_costs.arff", "citation.bib",
"cv.arff", "configurations.csv"]
for expected_file in expected:
full_path = os.path.join(self.dir_, expected_file)
if not os.path.isfile(full_path):
self.logger.error(
"Not found: %s (has to be added)" % (full_path))
else:
self.found_files.append(full_path)
for expected_file in optional:
full_path = os.path.join(self.dir_, expected_file)
if not os.path.isfile(full_path):
self.logger.warning(
"Not found: %s (maybe you want to add it)" % (full_path))
else:
self.found_files.append(full_path)
def _read_files(self):
'''
iterates over all found files (self.found_files) and
calls the corresponding function to validate file
'''
for file_ in self.found_files:
read_func = self.read_funcs.get(os.path.basename(file_))
if read_func:
read_func(file_)
def _read_algorithm_runs(self, filename):
with open(filename) as fh:
arff_dict = arff.load(fh)
if arff_dict["attributes"][0][0].upper() != "INSTANCE_ID":
self.logger.error(
"instance_id as first attribute is missing in %s" % (filename))
if arff_dict["attributes"][1][0].upper() != "REPETITION":
self.logger.error(
"repetition as second attribute is missing in %s" % (filename))
if arff_dict["attributes"][2][0].upper() != "ALGORITHM":
self.logger.error(
"algorithm as third attribute is missing in %s" % (filename))
performance_measures = [pm[0] for pm in arff_dict['attributes'][3:-1]]
measure_instance_algorithm_triples = defaultdict(lambda: defaultdict(dict))
for data in arff_dict["data"]:
inst_name = str(data[0])
# repetition = data[1]
algorithm = str(data[2])
perf_list = data[3:-1]
# status = data[-1]
for i, performance_measure in enumerate(performance_measures):
measure_instance_algorithm_triples[performance_measure][
inst_name][algorithm] = perf_list[i]
# TODO: this does not support any repetitions!
measure_algorithm_matrices = OrderedDict()
for pm in performance_measures:
measure_algorithm_matrices[pm] = pd.DataFrame(
measure_instance_algorithm_triples[pm]).transpose()
self.algorithm_runs = measure_algorithm_matrices
def _read_feature_values(self, filename):
with open(filename) as fh:
arff_dict = arff.load(fh)
metafeatures = dict()
for data in arff_dict["data"]:
inst_name = data[0]
# repetition = data[1]
features = data[2:]
metafeatures[inst_name] = {feature[0]: feature_value
for feature, feature_value in
zip(arff_dict['attributes'][2:], features)}
self.metafeatures = pd.DataFrame(metafeatures).transpose()
def _read_configurations(self, filename):
with open(filename) as fh:
csv_reader = csv.DictReader(fh)
configurations = dict()
for line in csv_reader:
configuration = dict()
algorithm_id = line['idx']
for hp_name, value in line.items():
if not value or hp_name == 'idx':
continue
try:
value = int(value)
except Exception:
try:
value = float(value)
except Exception:
pass
configuration[hp_name] = value
configurations[algorithm_id] = configuration
self.configurations = configurations
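# Hedged usage sketch (the scenario directory and the "runtime" measure name are
# assumptions; any ASlib scenario containing the required files will work):
#   scenario = AlgorithmSelectionProblem("aslib_data/SAT11-HAND")
#   runs = scenario.algorithm_runs["runtime"]   # instances x algorithms DataFrame
#   feats = scenario.metafeatures               # instances x metafeatures DataFrame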
|
the-stack_106_27888 | from modules.content import *
def sort_base_by_number(data_base):
data_base = sorted(data_base)
return data_base
class User:
def __init__(self, telegram_id, username, number=None, name=None, sex=None, email=None,
phone_number=None, answers=None):
self.base_file = USERS_FILE_PATH
self.forms_file = FORMS_FILE_PATH
self.user_id = telegram_id
self.username = username
self.name = name
self.sex = sex
self.phone_number = phone_number
self.email = email
self.score = 0
if number is None:
self.number = int(self._get_last_number_in_base()) + 1
else:
self.number = number
if answers is None:
self.answers = {
'q1': "-",
'q2': "-",
'q3': "-",
'q4': "-",
'q5': "-",
'q6': "-",
'q7': "-",
}
else:
self.answers = answers
print(answers)
self.answers = self.str_answer_to_dict()
def __str__(self):
return f"{self.number},{self.user_id},{self.name},{self.sex},{self.username}," \
f"{self.phone_number},{self.email},{self.get_str_answers()}\n"
def add_phone_number(self, number: str):
self.phone_number = number
def add_email(self, email: str):
self.email = email
def change_base_file(self, filename: str):
self.base_file = filename
def add_sex(self, sex: str):
self.sex = sex
def add_name(self, name: str):
self.name = name
def update_answer(self, pair):
question = pair[0:2]
answer = pair[-1]
self.answers[question] = answer
def get_str_answers(self):
string = ''
for question in self.answers:
string += question + ":" + self.answers[question] + " "
return string
def get_answers_for_form(self) -> str:
answers = self._get_str_answers_for_form()
answers = answers.split(" ")
answers_separated_with_comma = ''
for answer in answers:
if answer != '':
try:
if self.sex == 'woman':
answers_separated_with_comma += woman_answers[answer].replace(',', '') + ","
if self.sex == 'man':
answers_separated_with_comma += man_answers[answer].replace(',', '') + ","
except ValueError or KeyError as er:
print(er)
continue
return answers_separated_with_comma[0:-1:]
def str_answer_to_dict(self):
# str = 'q1:- q2:1 q3:- q4:- q5:- q6:-'
answers = self.answers
tmp_list = answers.split(" ")
tmp_list.pop(-1)
print(tmp_list)
dict_answers = {i.split(":")[0]: i.split(":")[1] for i in tmp_list}
print(dict_answers)
return dict_answers
def show_telegram_id(self):
return f"{self.user_id}"
def _get_last_number_in_base(self):
with open(self.base_file, 'r') as file:
for user_line in file:
number, telegram_id, name, sex, username, phone_number, email, answers = user_line.split(',')
return number
def add_user_in_db(self):
with open(self.base_file, 'a') as file:
data_base_line = f"{self.number},{self.user_id},{self.name},{self.sex},{self.username}," \
f"{self.phone_number},{self.email},{self.get_str_answers()}\n"
file.write(data_base_line)
def add_user_in_forms(self):
with open(self.forms_file, 'a') as file:
forms_line = f"{self.name},{self.sex},{self.username}," \
f"{self.phone_number},{self.email},{self.get_answers_for_form()}\n"
print(forms_line)
file.write(forms_line)
def not_in_base(self):
with open(self.base_file, 'r') as file:
for user_line in file:
number, telegram_id, name, sex, username, phone_number, email, answers = user_line.split(',')
if str(self.user_id) == telegram_id:
print('User already registered')
return False
return True
def not_in_forms(self):
with open(self.forms_file, 'r') as file:
for user_line in file:
name, sex, username, phone_number, email, a1, a2, a3, a4, a5, a6 = user_line.split(',')
if str(self.phone_number) == phone_number:
print('User already registered')
return False
return True
def rewrite_user_info(self):
data_base = []
data_base_line = f"{self.number},{self.user_id},{self.name},{self.sex},{self.username}," \
f"{self.phone_number},{self.email},{self.get_str_answers()}\n"
with open(self.base_file, 'r') as file:
user_line_number = self.number
for user_line in file:
number, telegram_id, name, sex, username, phone_number, email, answers = user_line.split(',')
if user_line_number == number:
data_base.append(data_base_line)
else:
data_base.append(user_line)
data_base = sort_base_by_number(data_base)
i = data_base.index(data_base_head)
data_base.insert(0, data_base.pop(i))
with open(self.base_file, 'w') as file:
for user_line in data_base:
file.write(user_line)
def get_welcome_message(self):
if self.username is not None:
            message = f'''Hi, {self.username}!
I'm Lyova the chatbot, your "co-pilot" in the unpredictable world of relationships. I know a lot and don't chat much! 😎
And how are you doing? Take my short test and we'll find out together!'''
        else:
            message = f'''Hi!
I'm Lyova the chatbot, your "co-pilot" in the unpredictable world of relationships. I know a lot and don't chat much! 😎
And how are you doing? Take my short test and we'll find out together!'''
return message
def form_filled(self):
print(self.phone_number, self.email, self.name)
return self.phone_number is not None and self.email is not None and self.name is not None \
and self.phone_number != 'None' and self.email != 'None' and self.name != 'None'
def _get_man_result(self):
result = 0
answers = self.get_str_answers()
answers_list = answers.split(" ")
for answer in answers_list:
try:
result += man_points[answer]
except KeyError:
pass
print(result)
if result == 100:
return result
if 99 >= result >= 80:
return 80
if 79 >= result >= 60:
return 60
if 59 >= result >= 0:
return 0
def _get_woman_result(self):
result = 0
answers = self.get_str_answers()
answers_list = answers.split(" ")
for answer in answers_list:
try:
result += woman_points[answer]
except KeyError:
pass
print(result)
if result == 100:
return result
if 99 >= result >= 80:
return 80
if 79 >= result >= 60:
return 60
if 59 >= result >= 0:
return 0
def get_result_from_answers(self):
result = None
if self.sex == 'man':
result = self._get_man_result()
message = man_results[result]
return message
if self.sex == 'woman':
result = self._get_woman_result()
message = woman_results[result]
return message
else:
return man_results[result]
def get_form_message(self):
message = f"{self.name},{self.sex},{self.username}," \
f"{self.phone_number},{self.email},{self.get_answers_for_form()}\n"
return message
def _get_str_answers_for_form(self):
answers = self.get_str_answers()
answers = answers.split(" ")
answers.pop(0)
# q1a1 q2a1 q3a1 q4a1 q5a1 q6a1
answers_for_form = ''
for answer in answers:
if answer != '':
question_n, answer_n = answer.split(":")
answers_for_form += f"q{int(question_n[-1]) - 1}a{answer_n} "
return answers_for_form
|
the-stack_106_27891 | class Pessoa:
def __init__(self, nome=None, idade=35):
self.idade = idade
self.nome = nome
def cumprimentar(self):
return f'Ola {id(self)}'
if __name__ == '__main__':
p = Pessoa('Renato')
print(Pessoa.cumprimentar(p))
print(id(p))
print(p.cumprimentar())
print(p.nome)
p.nome = 'Josemar'
print(p.nome)
print(p.idade)
|
the-stack_106_27893 | #!/usr/bin/env python
try:
from shlex import quote
except ImportError:
from pipes import quote
import subprocess
import shlex
import datetime
from time import sleep
# import virtkey
import os
def run_cmd(cmd, cwd=None, timeout=None, shell=False):
if shell:
ccmd = cmd
else:
ccmd = shlex.split(cmd)
print('execute command {}'.format(ccmd))
sleep(3)
end_time = 0
if timeout:
end_time = datetime.datetime.now() + datetime.timedelta(seconds=timeout)
sub = subprocess.Popen(ccmd, cwd=cwd, stdin=subprocess.PIPE, shell=shell, bufsize=4096)
while sub.poll() is None:
sleep(0.5)
if timeout:
if end_time <= datetime.datetime.now():
raise Exception('Timeout {}'.format(ccmd))
return str(sub.returncode)
def open_terminal():
v = virtkey.virtkey()
v.press_keysym(65507)
v.press_keysym(65505)
v.press_unicode(ord('t'))
sleep(0.1)
v.release_unicode(ord('t'))
v.release_keysym(65507)
v.release_keysym(65505)
def switch_terminal():
v = virtkey.virtkey()
v.press_keysym(65507)
v.press_keysym(65366)
sleep(0.1)
v.release_keysym(65507)
v.release_keysym(65366)
def send_tab_key():
v = virtkey.virtkey()
sleep(2)
print('click tab key two times')
v.press_keysym(65289)
v.release_keysym(65289)
v.press_keysym(65289)
v.release_keysym(65289)
print('end to click')
def launch_udsserver_terminal():
print('Start to launch udsserver')
os.system("gnome-terminal -t udsserver -e 'bash -c \"source ./launch_udsserver_terminal.sh; exec bash\"'")
def launch_syncclient_terminal():
print('Start to launch esyncclient')
os.system("gnome-terminal -t syncclient -e 'bash -c \"source ./launch_esyncclient_terminal.sh; exec bash\"'")
def launch_otamonitor_terminal():
print('Start to launch otamonitor')
os.system("gnome-terminal -t otamonitor -e 'bash -c \"source ./launch_otamonitor_terminal.sh; exec bash\"'")
def check_env():
if os.environ['ESYNC_HOME_DIR'] == '/home/autotest/Downloads/Excelforepackage/excelfore/esync':
        print('Set ESYNC HOME DIR successfully')
return True
else:
        print('Failed to set ESYNC HOME DIR')
return False
def set_ubuntu_env():
esync_path = '/home/autotest/Downloads/Excelforepackage/excelfore/esync'
print('export ESYNC_HOME_DIR={}'.format(quote(esync_path)))
sleep(3)
if os.environ['ESYNC_HOME_DIR'] == '/home/autotest/Downloads/Excelforepackage/excelfore/esync':
print('set ESYNC_HOME successfully')
else:
print('failed to set ESYNC_HOME')
def check_result(campaign_path):
# campaign_path = "/home/autotest/Downloads/dm_tree/DevInfo/Ext/Excelfore/CampaignState/CampaignCorrelator"
folder_num = 0
current_time = datetime.datetime.now()
all_folder = []
if os.path.exists(campaign_path):
dirlist = os.listdir(campaign_path)
print(dirlist)
if dirlist is not None:
for x in dirlist:
if os.path.isdir((os.path.join(campaign_path,x))):
return True
else:
return False
# print(x + ' ' + "is folder")
# folder_num += 1
# all_folder.append((os.path.join(campaign_path,x)))
# return all_folder
def find_newest_folder(path_file):
lists = os.listdir(path_file)
lists.sort(key=lambda fn: os.path.getmtime(path_file +'/'+fn))
# print(lists)
for x in reversed(lists):
if os.path.isdir((os.path.join(path_file,x))):
# print(x)
floder_newest = os.path.join(path_file,x)
break
return floder_newest
def check_update_res():
path_file = "/home/autotest/Downloads/dm_tree/DevInfo/Ext/Excelfore/CampaignState/CampaignCorrelator"
state_path = '/State/value'
if find_newest_folder(path_file):
value_path = find_newest_folder(path_file) + state_path
with open(value_path,'r') as f:
vaule_data = f.read()
# print(vaule_data)
if vaule_data == "90":
print('---Ota update successfully---')
return True
else:
print('---Failed to update ota---')
return False
if __name__ == '__main__':
# execute_cmd('cat test.py')
# set_environment('ESYNC_HOME_DIR','/home/excelfore/Documents/excelfore/esync')
# setEnv()
# run_cmd('export ESYNC_HOME_DIR=/home/excelfore/Documents/excelfore/esync',cwd='/home/autotest/Downloads', timeout=2, shell=True)
# run_cmd('./set_env.sh', timeout=2, shell=True)
# run_cmd('eval $(./foo.py) && echo $ESYNC_HOME_DIR', timeout=2, shell=True)
# run_cmd('gnome-terminal -e ls', shell=True)
# run_cmd('env | grep ESYNC_HOME_DIR',cwd='/home/autotest/Downloads', timeout=2, shell=True)
# open_terminal()
# sleep(3)
# # run_cmd('eval $(./set_ubuntu_env.py) && echo $ESYNC_HOME_DIR', timeout=2, shell=True)
# switch_terminal()
# sleep(2)
# run_cmd('./set_env.sh', shell=True)
# set_ubuntu_env()
# sleep(2)
# check_env()
# launch_udsserver_terminal()
# sleep(5)
# launch_syncclient_terminal()
# sleep(1)
#
# check_result()
# get_latest_folder()
# dir = "/home/autotest/Downloads/dm_tree/DevInfo/Ext/Excelfore/CampaignState/CampaignCorrelator"
# new_report(path)
# print(find_newest_folder(dir))
check_update_res()
|
the-stack_106_27894 | import hlir
import utils
import expr
import vmcall
import dataflow
import addressdispenser
###########################################################################
# HLIR REWRITES BELOW
# undo the compiler optimization which turns individual returns into jumps to
# a common return BB
def duplicate_terminating_successors(node):
if len(node.get_successors()) == 0:
pass
elif utils.has_imprecise_successors(node):
pass
else:
return False
# it must have more than one predecessor
if len(node.get_predecessors()) <= 1:
return False
changeable = [p for p in node.get_predecessors()
if not utils.has_imprecise_successors(p)]
# sometimes changeable has a ridiculous size (200+) so avoid splitting in
# those cases
if len(changeable) > 5:
return False
changed = False
for pred in changeable:
# we should make sure that the copy doesn't share the old instruction,
# nor its arguments, but uses copies instead, and the copy should have
# the same sp-delta
new_bb = node.copy()
new_bb.address = addressdispenser.get_new_address()
new_bb.next_bb = None
# it should replace the original..
pred.replace_successor(node, new_bb)
if pred.terminator.loc == expr.Lit(node.address):
pred.terminator.loc = expr.Lit(new_bb.address)
if pred.next_bb == node:
pred.next_bb = new_bb
changed = True
return changed
def revert_reconstruct(n):
if (n.terminator.type == hlir.ins_types.jump and n.terminator.loc == expr.Lit(0x0)):
n.terminator = hlir.make_vmcall(vmcall.vmcalls.revert, [], [])
for s in n.get_successors():
n.remove_successor(s)
return True
return False
def make_memseq_assignment(ins1, ins2):
if ins1.type != hlir.ins_types.assign:
return
if ins2.type != hlir.ins_types.assign:
return
a = ins1.results[0]
b = ins2.results[0]
if not isinstance(a, expr.Mem):
return
if not isinstance(b, expr.Mem):
return
# ensure the lengths are OK
if not isinstance(a.length, expr.Lit):
return
if not isinstance(b.length, expr.Lit):
return
# ensure that the memories are adjacent
if isinstance(a.address, expr.Lit) and isinstance(b.address, expr.Lit):
if b.address.literal != a.address.literal + a.length.literal:
return
elif isinstance(b.address, expr.Add):
if (dataflow.exprs_must_be_equal(b.address.operand2, a.address, same_bb=True) and
dataflow.exprs_must_be_equal(b.address.operand1, a.length, same_bb=True)):
pass
elif (dataflow.exprs_must_be_equal(b.address.operand1, a.address, same_bb=True) and
dataflow.exprs_must_be_equal(b.address.operand2, a.length, same_bb=True)):
pass
else:
return
else:
return
return hlir.make_assign(
expr.Mem(a.address, expr.Add(a.length, b.length)),
expr.Sequence([ins1.args[0], ins2.args[0]])
)
def generate_mem_seqs(node):
for ins1, ins2 in utils.instruction_pairs(node):
new_ins = make_memseq_assignment(ins1, ins2)
if not new_ins:
new_ins = make_memseq_assignment(ins2, ins1)
if not new_ins:
continue
index = node.get_instructions().index(ins1)
node.remove_instruction(ins1)
node.remove_instruction(ins2)
node.insert_instruction(new_ins, index)
return True
return False
def move_calldataloads_to_params(node):
if node.function.address == 0x0:
return False
changed = False
for ins in node.get_instructions() + [node.terminator]:
if ins.type != hlir.ins_types.vmcall:
continue
if ins.loc != vmcall.vmcalls.calldataload:
continue
if not isinstance(ins.args[0], expr.Lit):
continue
offset = ins.args[0].literal
if offset < 4:
continue
assert (offset % 0x20 == 4)
pn = (offset - 4) / 0x20
f = node.function
while f.num_params < pn + 1:
f.params.append(expr.Var())
f.num_params += 1
assert (len(f.params) == f.num_params)
p = f.params[pn].copy()
new_ins = hlir.make_assign(ins.results[0].copy(), p)
node.replace_instruction(ins, new_ins)
changed = True
return changed
def jcond_not_not(node):
if node.terminator.type != hlir.ins_types.jcond:
return False
exp = node.terminator.args[0]
if not isinstance(exp, expr.Not):
return False
if not isinstance(exp.operand, expr.Not):
return False
node.terminator.args[0] = exp.operand.operand
return True
# if(0){x}else{y} -----> y, if(1){x}else{y} -----> x
def simplify_if_lit(node):
if node.terminator.type != hlir.ins_types.jcond:
return False
exp = node.terminator.args[0]
if not isinstance(exp, expr.Lit):
return False
if exp.literal == 0:
target = node.next_bb
else:
target = [n for n in node.get_successors()
if n != node.next_bb][0]
node.terminator = hlir.make_jump(expr.Lit(target.address))
for s in node.get_successors():
node.remove_successor(s)
node.add_successor(target)
return True
def remove_useless_assignments(node):
changed = False
for ins in list(node.get_instructions()):
if ins.type == hlir.ins_types.assign:
if (isinstance(ins.args[0], expr.Id)
and isinstance(ins.results[0], expr.Id)):
if dataflow.ids_must_be_equal(ins.args[0], ins.results[0], True):
node.remove_instruction(ins)
changed = True
return changed
def assert_lit(node):
for ins in node.get_instructions():
if (ins.type == hlir.ins_types.assertion
and isinstance(ins.args[0], expr.Lit)
and ins.args[0].literal != 0):
node.remove_instruction(ins)
return True
return False
############################################################################
# EXPRESSION REWRITES BELOW
# turn this:
# storage(sha3((param0, 0x0)))
# into this:
# mapping0[param0]
def detect_mapping_access(node):
if not isinstance(node, expr.Storage):
return
addr = node.address
if not isinstance(addr, expr.PureFunctionCall):
return
if addr.name != vmcall.vmcalls.sha3:
return
if not isinstance(addr.args[0], expr.Sequence):
return
offset, mapping_num = addr.args[0].expressions
if not isinstance(mapping_num, expr.Lit):
return
return expr.MappingAccess(mapping_num.literal, offset)
def do_detect_array_access(op1, op2):
if not isinstance(op1, expr.PureFunctionCall):
return
if op1.name != vmcall.vmcalls.sha3:
return
if not isinstance(op1.args[0], expr.Lit):
return
num = op1.args[0].literal
offset = op2
result = expr.ArrayAccess(num, offset)
return result
# turn this:
# storage(var0 + sha3(0x0))
# into this:
# array0[var0]
def detect_array_access(node):
if not isinstance(node, expr.Storage):
return
addr = node.address
if not isinstance(addr, expr.Add):
return
fixed = do_detect_array_access(addr.operand1, addr.operand2)
if fixed:
return fixed
return do_detect_array_access(addr.operand2, addr.operand1)
def simplify_eq(node):
if (isinstance(node, expr.Eq)
and dataflow.exprs_must_be_equal(node.operand1, node.operand2, True)):
return expr.Lit(1)
def fold_constants(node):
if (isinstance(node, expr.BinaryOp)
and isinstance(node.operand1, expr.Lit)
and isinstance(node.operand2, expr.Lit)):
result = expr.Lit(node.evaluate(None).num())
return result
if (isinstance(node, expr.UnaryOp)
and isinstance(node.operand, expr.Lit)):
result = expr.Lit(node.evaluate(None).num())
return result
def fold_commutative_constants(node):
if not isinstance(node, expr.BinaryOp) or not node.is_commutative:
return
typ = node.__class__
if isinstance(node.operand1, typ):
n = fold_constants(typ(node.operand2, node.operand1.operand1))
if n: return typ(n, node.operand1.operand2)
n = fold_constants(typ(node.operand2, node.operand1.operand2))
if n: return typ(n, node.operand1.operand1)
if isinstance(node.operand2, typ):
n = fold_constants(typ(node.operand1, node.operand2.operand1))
if n: return typ(n, node.operand2.operand2)
n = fold_constants(typ(node.operand1, node.operand2.operand2))
if n: return typ(n, node.operand2.operand1)
# e.g. ((0x60 + free_mem_ptr) - free_mem_ptr) -----> 0x60
def simplify_plus_minus(node):
if not isinstance(node, expr.Sub):
return
a, b = node.operand1, node.operand2
if not isinstance(a, expr.Add):
return
if dataflow.exprs_must_be_equal(a.operand2, b, True):
return a.operand1
if dataflow.exprs_must_be_equal(a.operand1, b, True):
return a.operand2
if isinstance(a.operand1, expr.Lit) and isinstance(b, expr.Lit):
return expr.Add(a.operand2, expr.Lit(a.operand1.literal - b.literal))
if isinstance(a.operand2, expr.Lit) and isinstance(b, expr.Lit):
return expr.Add(a.operand1, expr.Lit(a.operand2.literal - b.literal))
def simplify_duplicate_and(node):
if not isinstance(node, expr.And):
return
a, b = node.operand1, node.operand2
if isinstance(b, expr.And):
if dataflow.exprs_must_be_equal(a, b.operand1, True):
return b
if dataflow.exprs_must_be_equal(a, b.operand2, True):
return b
if isinstance(a, expr.And):
if dataflow.exprs_must_be_equal(b, a.operand1, True):
return a
if dataflow.exprs_must_be_equal(b, a.operand2, True):
return a
def simplify_add(node):
if not isinstance(node, expr.Add):
return
if node.operand1 == expr.Lit(0):
return node.operand2
if node.operand2 == expr.Lit(0):
return node.operand1
def simplify_div(node):
if not isinstance(node, expr.Div):
return
if node.operand2 == expr.Lit(1):
return node.operand1
def simplify_mul(node):
if not isinstance(node, expr.Mul):
return
if node.operand1 == expr.Lit(1):
return node.operand2
if node.operand2 == expr.Lit(1):
return node.operand1
def simplify_and(node):
if not isinstance(node, expr.And):
return
if node.operand1 == expr.Lit((2**256)-1):
return node.operand2
if node.operand2 == expr.Lit((2**256)-1):
return node.operand1
def simplify_minus(node):
if isinstance(node, expr.Sub):
if dataflow.exprs_must_be_equal(node.operand1, node.operand2, same_bb=True):
return expr.Lit(0)
def simplify_minus_minus(node):
if not isinstance(node, expr.Sub):
return
a, b = node.operand1, node.operand2
if not isinstance(a, expr.Sub):
return
a1, a2 = a.operand1, a.operand2
if not isinstance(a2, expr.Lit) or not isinstance(b, expr.Lit):
return
return expr.Sub(a1, expr.Lit(a2.literal + b.literal))
def simplify_expr_seqs(node):
if not isinstance(node, expr.Sequence):
return
if not any(isinstance(e, expr.Sequence) for e in node.expressions):
return
new_expressions = []
for e in node.expressions:
if isinstance(e, expr.Sequence):
new_expressions += e.expressions
else:
new_expressions.append(e)
node.expressions = new_expressions
return node
############################################################################
# these rewrites work on HLIR node
hlir_node_rewrites = [
simplify_if_lit,
generate_mem_seqs,
assert_lit,
remove_useless_assignments,
revert_reconstruct,
# TODO: this really belongs near CFA..?
duplicate_terminating_successors,
# TODO: move these to readability module
move_calldataloads_to_params,
jcond_not_not,
]
expr_rewrites = [
fold_constants,
fold_commutative_constants,
simplify_plus_minus,
simplify_duplicate_and,
simplify_eq,
simplify_and,
simplify_mul,
simplify_div,
simplify_add,
simplify_minus,
simplify_minus_minus,
simplify_expr_seqs,
# TODO: move this to readability.. maybe?
detect_mapping_access,
detect_array_access,
]
def rewrite_node(node):
assert (isinstance(node, hlir.HLIRNode))
result = False
# first rewrite the node itself
for rewrite in hlir_node_rewrites:
r = rewrite(node)
#if r: print(rewrite)
result |= r
# then rewrite any of its expressions
for rewrite in expr_rewrites:
r = utils.visit_and_modify_expressions(node, rewrite)
#if r: print(rewrite)
result |= r
return result
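# Hedged usage sketch (assumption: `nodes` is the collection of hlir.HLIRNode objects in a
# function's control-flow graph, built elsewhere in this project); the rewrites above are
# meant to be applied to a fixpoint:
#   changed = True
#   while changed:
#       changed = False
#       for n in nodes:
#           changed |= rewrite_node(n)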
|
the-stack_106_27897 | def get_anchor_root(spec, state):
anchor_block_header = state.latest_block_header.copy()
if anchor_block_header.state_root == spec.Bytes32():
anchor_block_header.state_root = spec.hash_tree_root(state)
return spec.hash_tree_root(anchor_block_header)
def add_block_to_store(spec, store, signed_block):
pre_state = store.block_states[signed_block.message.parent_root]
block_time = pre_state.genesis_time + signed_block.message.slot * spec.SECONDS_PER_SLOT
if store.time < block_time:
spec.on_tick(store, block_time)
spec.on_block(store, signed_block)
def add_attestation_to_store(spec, store, attestation):
parent_block = store.blocks[attestation.data.beacon_block_root]
pre_state = store.block_states[spec.hash_tree_root(parent_block)]
block_time = pre_state.genesis_time + parent_block.slot * spec.SECONDS_PER_SLOT
next_epoch_time = block_time + spec.SLOTS_PER_EPOCH * spec.SECONDS_PER_SLOT
if store.time < next_epoch_time:
spec.on_tick(store, next_epoch_time)
spec.on_attestation(store, attestation)
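# Hedged usage sketch (assumption: `spec` is a pyspec module and the anchor state/block come
# from the test genesis helpers; mirrors how these fork-choice helpers are typically driven):
#   store = spec.get_forkchoice_store(anchor_state, anchor_block)
#   add_block_to_store(spec, store, signed_block)
#   add_attestation_to_store(spec, store, attestation)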
|
the-stack_106_27898 | """
The molten web framework is automatically traced by ``ddtrace`` when calling ``patch``::
from molten import App, Route
from ddtrace import patch_all; patch_all(molten=True)
def hello(name: str, age: int) -> str:
return f'Hello {age} year old named {name}!'
app = App(routes=[Route('/hello/{name}/{age}', hello)])
You may also enable molten tracing automatically via ``ddtrace-run``::
ddtrace-run python app.py
Configuration
~~~~~~~~~~~~~
.. py:data:: ddtrace.config.molten['distributed_tracing']
Whether to parse distributed tracing headers from requests received by your Molten app.
Default: ``True``
.. py:data:: ddtrace.config.molten['analytics_enabled']
Whether to analyze spans for Molten in App Analytics.
Can also be enabled with the ``DD_MOLTEN_ANALYTICS_ENABLED`` environment variable.
Default: ``None``
.. py:data:: ddtrace.config.molten['service_name']
The service name reported for your Molten app.
Can also be configured via the ``DD_MOLTEN_SERVICE_NAME`` environment variable.
Default: ``'molten'``
"""
from ...utils.importlib import require_modules
required_modules = ['molten']
with require_modules(required_modules) as missing_modules:
if not missing_modules:
from . import patch as _patch
patch = _patch.patch
unpatch = _patch.unpatch
__all__ = ['patch', 'unpatch']
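# Illustrative usage sketch (assumption, not part of the integration): patching
# molten manually instead of relying on patch_all() or ddtrace-run, e.g. so the
# instrumentation can be removed again in a test teardown.
#
#     from ddtrace.contrib.molten import patch, unpatch
#     patch()      # instrument molten before the App is created
#     ...          # build and serve the molten App as usual
#     unpatch()    # remove the instrumentation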
|
the-stack_106_27899 | """Run-time calculation of offset into Python string structure
Does a scan to find the digits of pi in a string structure
in order to produce an offset that can be used to produce
data-pointers from Python strings.
Porting note:
Currently this uses id( str a ) to get the base address
of the Python string. Python implementations where id( a )
is *not* the memory address of the string will not work!
"""
import ctypes
from OpenGL._bytes import bytes,unicode
PI_DIGITS = '31415926535897931'
def calculateOffset( ):
"""Calculates the data-pointer offset for strings
This does a sequential scan for 100 bytes from the id
of a string to find special data-value stored in the
string (the digits of PI). It produces a dataPointer
function which adds that offset to the id of the
passed strings.
"""
finalOffset = None
a = PI_DIGITS
# XXX NOT portable across Python implementations!!!
initial = id(a)
targetType = ctypes.POINTER( ctypes.c_char )
for offset in range( 100 ):
vector = ctypes.cast( initial+offset,targetType )
allMatched = True
for index,digit in enumerate( a ):
if vector[index] != digit:
allMatched = False
break
if allMatched:
finalOffset = offset
break
if finalOffset is not None:
def dataPointer( data ):
"""Return the data-pointer from the array using calculated offset
data -- a Python string
Returns the raw data-pointer to the internal buffer of the passed string
"""
if not isinstance( data, bytes ):
raise TypeError(
"""This function can only handle Python strings! Got %s"""%(
type(data),
)
)
return id(data) + finalOffset
# just for later reference...
dataPointer.offset = finalOffset
return dataPointer
raise RuntimeError(
"""Unable to determine dataPointer offset for strings!"""
)
dataPointer = calculateOffset()
if __name__ == "__main__":
a = 'this'
print((id(a), dataPointer( a ), dataPointer(a) - id(a)))
|
the-stack_106_27901 | import tensorflow as tf
import common
no_such_word = 'NOSUCH'
no_such_composite = no_such_word + ',' + no_such_word + ',' + no_such_word
class PathContextReader:
# This class is very important
class_word_table = None
class_target_word_table = None
class_path_table = None
def __init__(self, word_to_index, target_word_to_index, path_to_index, config, is_evaluating=False):
self.file_path = config.TEST_PATH if is_evaluating else (config.TRAIN_PATH + '.train.c2v')
self.batch_size = config.TEST_BATCH_SIZE if is_evaluating else min(config.BATCH_SIZE, config.NUM_EXAMPLES)
self.num_epochs = config.NUM_EPOCHS
self.reading_batch_size = config.READING_BATCH_SIZE if is_evaluating else min(config.READING_BATCH_SIZE, config.NUM_EXAMPLES)
self.num_batching_threads = config.NUM_BATCHING_THREADS
self.batch_queue_size = config.BATCH_QUEUE_SIZE
self.data_num_contexts = config.MAX_CONTEXTS
self.max_contexts = config.MAX_CONTEXTS
self.is_evaluating = is_evaluating
self.word_table = PathContextReader.get_word_table(word_to_index)
self.target_word_table = PathContextReader.get_target_word_table(target_word_to_index)
self.path_table = PathContextReader.get_path_table(path_to_index)
self.filtered_output = self.get_filtered_input()
@classmethod
def get_word_table(cls, word_to_index):
if cls.class_word_table is None:
cls.class_word_table = cls.initalize_hash_map(word_to_index, 0)
return cls.class_word_table
@classmethod
def get_target_word_table(cls, target_word_to_index):
if cls.class_target_word_table is None:
cls.class_target_word_table = cls.initalize_hash_map(target_word_to_index, 0)
return cls.class_target_word_table
@classmethod
def get_path_table(cls, path_to_index):
if cls.class_path_table is None:
cls.class_path_table = cls.initalize_hash_map(path_to_index, 0)
return cls.class_path_table
@classmethod
def initalize_hash_map(cls, word_to_index, default_value):
return tf.contrib.lookup.HashTable(
tf.contrib.lookup.KeyValueTensorInitializer(list(word_to_index.keys()), list(word_to_index.values()),
key_dtype=tf.string,
value_dtype=tf.int32), default_value)
def get_input_placeholder(self):
return self.input_placeholder
def start(self, session, data_lines=None):
self.coord = tf.train.Coordinator()
self.threads = tf.train.start_queue_runners(sess=session, coord=self.coord)
return self
def read_file(self):
row = self.get_row_input()
record_defaults = [[no_such_composite]] * (self.data_num_contexts + 1)
row_parts = tf.decode_csv(row, record_defaults=record_defaults, field_delim=' ')
word = row_parts[0] # (batch, )
contexts = tf.stack(row_parts[1:(self.max_contexts + 1)], axis=1) # (batch, max_contexts)
flat_contexts = tf.reshape(contexts, [-1]) # (batch * max_contexts, )
split_contexts = tf.string_split(flat_contexts, delimiter=',')
dense_split_contexts = tf.reshape(tf.sparse_tensor_to_dense(split_contexts,
default_value=no_such_word),
shape=[-1, self.max_contexts, 3]) # (batch, max_contexts, 3)
if self.is_evaluating:
target_word_label = word # (batch, ) of string
else:
target_word_label = self.target_word_table.lookup(word) # (batch, ) of int
path_source_strings = tf.slice(dense_split_contexts, [0, 0, 0], [-1, self.max_contexts, 1])
path_source_indices = self.word_table.lookup(path_source_strings) # (batch, max_contexts, 1)
path_strings = tf.slice(dense_split_contexts, [0, 0, 1], [-1, self.max_contexts, 1])
path_indices = self.path_table.lookup(path_strings) # (batch, max_contexts, 1)
path_target_strings = tf.slice(dense_split_contexts, [0, 0, 2], [-1, self.max_contexts, 1])
path_target_indices = self.word_table.lookup(path_target_strings) # (batch, max_contexts, 1)
return target_word_label, path_source_indices, path_target_indices, path_indices, \
path_source_strings, path_strings, path_target_strings
def get_row_input(self):
if self.is_evaluating: # test, read from queue (small data)
row = self.input_placeholder = tf.placeholder(tf.string)
else: # training, read from file
filename_queue = tf.train.string_input_producer([self.file_path], num_epochs=self.num_epochs, shuffle=False)
reader = tf.TextLineReader()
_, row = reader.read_up_to(filename_queue, num_records=self.reading_batch_size)
return row
def input_tensors(self):
return self.initialize_batch_outputs(self.filtered_output[:-3])
def get_filtered_batches(self):
return self.filtered_output
def initialize_batch_outputs(self, filtered_input):
return tf.train.shuffle_batch(filtered_input,
batch_size=self.batch_size,
enqueue_many=True,
capacity=self.batch_queue_size,
min_after_dequeue=int(self.batch_queue_size * 0.85),
num_threads=self.num_batching_threads,
allow_smaller_final_batch=True)
def get_filtered_input(self):
word_label, path_source_indices, path_target_indices, path_indices, \
source_strings, path_strings, target_strings = self.read_file()
any_contexts_is_valid = tf.logical_or(
tf.greater(tf.squeeze(tf.reduce_max(path_source_indices, 1), axis=1), 0),
tf.logical_or(
tf.greater(tf.squeeze(tf.reduce_max(path_target_indices, 1), axis=1), 0),
tf.greater(tf.squeeze(tf.reduce_max(path_indices, 1), axis=1), 0))
) # (batch, )
if self.is_evaluating:
cond = tf.where(any_contexts_is_valid)
else: # training
word_is_valid = tf.greater(word_label, 0) # (batch, )
cond = tf.where(tf.logical_and(word_is_valid, any_contexts_is_valid)) # (batch, 1)
valid_mask = tf.to_float( # (batch, max_contexts, 1)
tf.logical_or(tf.logical_or(tf.greater(path_source_indices, 0),
tf.greater(path_target_indices, 0)),
tf.greater(path_indices, 0))
)
filtered = \
tf.gather(word_label, cond), \
tf.squeeze(tf.gather(path_source_indices, cond), [1, 3]), \
tf.squeeze(tf.gather(path_indices, cond), [1, 3]), \
tf.squeeze(tf.gather(path_target_indices, cond), [1, 3]), \
tf.squeeze(tf.gather(valid_mask, cond), [1, 3]), \
tf.squeeze(tf.gather(source_strings, cond)), \
tf.squeeze(tf.gather(path_strings, cond)), \
tf.squeeze(tf.gather(target_strings, cond)) # (batch, max_contexts)
return filtered
def __enter__(self):
return self
def should_stop(self):
return self.coord.should_stop()
def __exit__(self, type, value, traceback):
print('Reader stopping')
self.coord.request_stop()
self.coord.join(self.threads)
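# Illustrative sketch (hypothetical helper, not part of the original module):
# wiring the reader into a TF1 session. The index dicts and `config` are assumed
# to come from the surrounding training code.
def run_training_input_pipeline(word_to_index, target_word_to_index, path_to_index, config):
    reader = PathContextReader(word_to_index, target_word_to_index, path_to_index, config)
    input_tensors = reader.input_tensors()
    with tf.Session() as sess:
        sess.run([tf.global_variables_initializer(),
                  tf.local_variables_initializer(),
                  tf.tables_initializer()])
        with reader.start(sess):
            try:
                while not reader.should_stop():
                    batch = sess.run(input_tensors)  # one shuffled batch of path contexts
            except tf.errors.OutOfRangeError:
                pass  # training file exhausted after the configured number of epochs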
|
the-stack_106_27904 | import sys
import click
from json import loads
import codecs
from ocrd import Resolver, Workspace
from ocrd.task_sequence import ProcessorTask, validate_tasks
from ocrd_utils import initLogging, parse_json_string_or_file
from ocrd_validators import (
OcrdToolValidator,
OcrdZipValidator,
PageValidator,
ParameterValidator,
WorkspaceValidator,
)
def _inform_of_result(report):
if not report.is_valid:
print(report.to_xml())
sys.exit(1)
@click.group("validate")
def validate_cli():
"""
All the validation in one CLI
"""
initLogging()
@validate_cli.command('tool-json')
@click.argument('ocrd_tool', required=False, nargs=1)
def validate_ocrd_tool(ocrd_tool):
'''
Validate OCRD_TOOL as an ocrd-tool.json file.
'''
if not ocrd_tool:
ocrd_tool = 'ocrd-tool.json'
with codecs.open(ocrd_tool, encoding='utf-8') as f:
ocrd_tool = loads(f.read())
_inform_of_result(OcrdToolValidator.validate(ocrd_tool))
@validate_cli.command('parameters')
@click.argument('ocrd_tool')
@click.argument('executable')
@click.argument('param_json')
def validate_parameters(ocrd_tool, executable, param_json):
'''
Validate PARAM_JSON against parameter definition of EXECUTABLE in OCRD_TOOL
'''
with codecs.open(ocrd_tool, encoding='utf-8') as f:
ocrd_tool = loads(f.read())
_inform_of_result(ParameterValidator(ocrd_tool['tools'][executable]).validate(parse_json_string_or_file(param_json)))
@validate_cli.command('page')
@click.argument('page', required=True, nargs=1)
@click.option('--page-textequiv-consistency', help="How strict to check PAGE multi-level textequiv consistency", type=click.Choice(['strict', 'lax', 'fix', 'off']), default='strict')
@click.option('--page-textequiv-strategy', help="Strategy to determine the correct textequiv", type=click.Choice(['first']), default='first')
@click.option('--check-baseline', help="Whether Baseline must be fully within TextLine/Coords", is_flag=True, default=False)
@click.option('--check-coords', help="Whether *Region/TextLine/Word/Glyph must each be fully contained within Border/*Region/TextLine/Word, resp.", is_flag=True, default=False)
def validate_page(page, **kwargs):
'''
Validate PAGE against OCR-D conventions
'''
_inform_of_result(PageValidator.validate(filename=page, **kwargs))
# @validate_cli.command('zip')
# @click.argument('src', type=click.Path(dir_okay=True, readable=True, resolve_path=True), required=True)
# @click.option('-Z', '--skip-unzip', help="Treat SRC as a directory not a ZIP", is_flag=True, default=False)
# @click.option('-B', '--skip-bag', help="Whether to skip all checks of manifests and files", is_flag=True, default=False)
# @click.option('-C', '--skip-checksums', help="Whether to omit checksum checks but still check basic BagIt conformance", is_flag=True, default=False)
# @click.option('-D', '--skip-delete', help="Whether to skip deleting the unpacked OCRD-ZIP dir after validation", is_flag=True, default=False)
# @click.option('-j', '--processes', help="Number of parallel processes", type=int, default=1)
# def validate(src, **kwargs):
# """
# Validate OCRD-ZIP
# SRC must exist and be an OCRD-ZIP, either a ZIP file or a directory.
# """
# _inform_of_result(OcrdZipValidator(Resolver(), src).validate(**kwargs))
# @validate_cli.command('workspace')
# @click.option('-a', '--download', is_flag=True, help="Download all files")
# @click.option('-s', '--skip', help="Tests to skip", default=[], multiple=True, type=click.Choice(['imagefilename', 'dimension', 'mets_unique_identifier', 'mets_file_group_names', 'mets_files', 'pixel_density', 'page', 'url']))
# @click.option('--page-textequiv-consistency', '--page-strictness', help="How strict to check PAGE multi-level textequiv consistency", type=click.Choice(['strict', 'lax', 'fix', 'off']), default='strict')
# @click.option('--page-coordinate-consistency', help="How fierce to check PAGE multi-level coordinate consistency", type=click.Choice(['poly', 'baseline', 'both', 'off']), default='poly')
# @click.argument('mets_url')
# def validate_workspace(mets_url, **kwargs):
# '''
# Validate a workspace
# '''
# _inform_of_result(WorkspaceValidator.validate(Resolver(), mets_url, **kwargs))
@validate_cli.command('tasks')
@click.option('--workspace', nargs=1, required=False, help='Workspace these tasks are to be run. If omitted, only validate syntax')
@click.argument('tasks', nargs=-1, required=True)
def validate_process(tasks, workspace):
'''
Validate a sequence of tasks passable to 'ocrd process'
'''
if workspace:
_inform_of_result(validate_tasks([ProcessorTask.parse(t) for t in tasks], Workspace(Resolver(), directory=workspace)))
else:
for t in [ProcessorTask.parse(t) for t in tasks]:
_inform_of_result(t.validate())
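# Illustrative sketch (not part of the original CLI module): invoking one of the
# validation commands programmatically with click's test runner; the path
# 'ocrd-tool.json' is just an example.
def demo_validate_tool_json():
    from click.testing import CliRunner
    runner = CliRunner()
    result = runner.invoke(validate_cli, ['tool-json', 'ocrd-tool.json'])
    print(result.output)
    return result.exit_code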
|
the-stack_106_27905 | ''' show_ospf.py
IOSXE parsers for the following show commands:
* show ip ospf
* show ip ospf interface
* show ip ospf interface {interface}
* show ip ospf sham-links
* show ip ospf virtual-links
* show ip ospf neighbor detail
* show ip ospf neighbor
* show ip ospf neighbor {interface}
* show ip ospf database
* show ip ospf database router
* show ip ospf database network
* show ip ospf database summary
* show ip ospf database external
* show ip ospf database opaque-area
* show ip ospf database opaque-area self-originate
* show ip ospf mpls ldp interface
* show ip ospf mpls traffic-eng link
* show ip ospf max-metric
* show ip ospf traffic
* show ip ospf interface brief
* show ip ospf {process_id} segment-routing adjacency-sid
* show ip ospf fast-reroute ti-lfa
* show ip ospf segment-routing protected-adjacencies
* show ip ospf segment-routing global-block
* show ip ospf {process_id} segment-routing global-block
* show ip ospf segment-routing
* show ip ospf database opaque-area adv-router {address}
'''
# Python
import re
import xmltodict
from netaddr import IPAddress, IPNetwork
# Metaparser
from genie.metaparser import MetaParser
from genie.metaparser.util.schemaengine import Schema, Any, Or, Optional
from genie.libs.parser.utils.common import Common
# ===========================================================
# Schema for:
# * 'show ip ospf {process_id} segment-routing local-block'
# ===========================================================
class ShowIpOspfSegmentRoutingLocalBlockSchema(MetaParser):
''' Schema for:
* 'show ip ospf {process_id} segment-routing local-block'
'''
schema = {
'instance': {
Any(): {
'router_id': str,
'areas': {
Any(): {
'router_id': {
Any(): {
'sr_capable': str,
Optional('srlb_base'): int,
Optional('srlb_range'): int,
},
},
},
},
},
},
}
# ===========================================================
# Schema for:
# * 'show ip ospf {process_id} segment-routing local-block'
# ===========================================================
class ShowIpOspfSegmentRoutingLocalBlock(ShowIpOspfSegmentRoutingLocalBlockSchema):
''' Parser for:
* 'show ip ospf {process_id} segment-routing local-block'
'''
cli_command = ['show ip ospf segment-routing local-block',
'show ip ospf {process_id} segment-routing local-block']
def cli(self, process_id=None, output=None):
if output is None:
if process_id:
cmd = self.cli_command[1].format(process_id=process_id)
else:
cmd = self.cli_command[0]
out = self.device.execute(cmd)
else:
out = output
# Init vars
ret_dict = {}
# OSPF Router with ID (10.4.1.1) (Process ID 65109)
p1 = re.compile(r'^OSPF +Router +with +ID +\((?P<router_id>(\S+))\)'
' +\(Process +ID +(?P<pid>(\S+))\)$')
# OSPF Segment Routing Local Blocks in Area 8
p2 = re.compile(r'^OSPF +Segment +Routing +Local +Blocks +in +Area'
' +(?P<area>(\d+))$')
# Router ID SR Capable SRLB Base SRLB Range
# --------------------------------------------------------
# *10.4.1.1 Yes 15000 1000
# 10.16.2.2 Yes 15000 1000
# 10.169.197.252 No
p3 = re.compile(r'^(?P<value>\*)?(?P<router_id>\S+) +(?P<sr_capable>Yes|No)'
'( +(?P<srlb_base>\d+) +(?P<srlb_range>\d+))?$')
for line in out.splitlines():
line = line.strip()
# OSPF Router with ID (10.4.1.1) (Process ID 65109)
m = p1.match(line)
if m:
group = m.groupdict()
inst_dict = ret_dict.setdefault('instance', {}).\
setdefault(group['pid'], {})
inst_dict['router_id'] = group['router_id']
continue
# OSPF Segment Routing Local Blocks in Area 8
m = p2.match(line)
if m:
area_dict = inst_dict.setdefault('areas', {}).\
setdefault(str(IPAddress(str(m.groupdict()['area']))), {})
continue
# Router ID SR Capable SRLB Base SRLB Range
# --------------------------------------------------------
# *10.4.1.1 Yes 15000 1000
# 10.16.2.2 Yes 15000 1000
m = p3.match(line)
if m:
group = m.groupdict()
smgt_dict = area_dict.setdefault('router_id', {}).\
setdefault(group['router_id'], {})
smgt_dict['sr_capable'] = group['sr_capable']
if group['srlb_base']:
smgt_dict['srlb_base'] = int(group['srlb_base'])
if group['srlb_range']:
smgt_dict['srlb_range'] = int(group['srlb_range'])
continue
return ret_dict
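# Illustrative sketch (not part of the original parser): feeding a captured CLI
# output straight into cli(), bypassing a live device. In a real pyATS job the
# parser is normally driven through device.parse(); the sample output below is a
# trimmed, hypothetical capture.
def demo_parse_srlb(device=None):
    sample = '''
        OSPF Router with ID (10.4.1.1) (Process ID 65109)

        OSPF Segment Routing Local Blocks in Area 8

        Router ID        SR Capable   SRLB Base   SRLB Range
        --------------------------------------------------------
        *10.4.1.1        Yes          15000       1000
         10.16.2.2       Yes          15000       1000
    '''
    return ShowIpOspfSegmentRoutingLocalBlock(device=device).cli(output=sample)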
# ==================
# Schema for:
# * 'show ip ospf'
# ==================
class ShowIpOspfSchema(MetaParser):
''' Schema for:
* 'show ip ospf'
'''
schema = {
'vrf':
{Any():
{'address_family':
{Any():
{'instance':
{Any():
{'router_id': str,
Optional('enable'): bool,
'nsr':
{'enable': bool},
'bfd':
{'enable': bool,
Optional('strict_mode'): bool},
Optional('domain_id_type'): str,
Optional('domain_id_value'): str,
Optional('start_time'): str,
Optional('nssa'): bool,
Optional('area_transit'): bool,
Optional('redistribution'):
{Optional('max_prefix'):
{Optional('num_of_prefix'): int,
Optional('prefix_thld'): int,
Optional('warn_only'): bool},
Optional('connected'):
{'enabled': bool,
Optional('subnets'): str,
Optional('metric'): int},
Optional('static'):
{'enabled': bool,
Optional('subnets'): str,
Optional('metric'): int},
Optional('bgp'):
{'bgp_id': int,
Optional('metric'): int,
Optional('subnets'): str,
Optional('nssa_only'): str,
},
Optional('isis'):
{'isis_pid': str,
Optional('subnets'): str,
Optional('metric'): int}},
Optional('database_control'):
{'max_lsa': int,
Optional('max_lsa_current'): int,
Optional('max_lsa_threshold_value'): int,
Optional('max_lsa_ignore_count'): int,
Optional('max_lsa_current_count'): int,
Optional('max_lsa_ignore_time'): int,
Optional('max_lsa_reset_time'): int,
Optional('max_lsa_limit'): int,
Optional('max_lsa_warning_only'): bool},
Optional('stub_router'):
{Optional('always'):
{'always': bool,
Optional('include_stub'): bool,
Optional('summary_lsa'): bool,
Optional('external_lsa'): bool,
Optional('summary_lsa_metric'): int,
Optional('external_lsa_metric'): int,
Optional('state'): str},
Optional('on_startup'):
{'on_startup': int,
Optional('include_stub'): bool,
Optional('summary_lsa'): bool,
Optional('summary_lsa_metric'): int,
Optional('external_lsa'): bool,
Optional('external_lsa_metric'): int,
'state': str},
},
Optional('spf_control'):
{Optional('incremental_spf'): bool,
'throttle':
{'spf':
{'start': int,
'hold': int,
'maximum': int},
'lsa':
{Optional('start'): int,
Optional('hold'): int,
Optional('maximum'): int,
Optional('arrival'): int},
},
},
Optional('auto_cost'):
{'enable': bool,
'reference_bandwidth': int,
'bandwidth_unit': str},
Optional('adjacency_stagger'):
{'initial_number': int,
'maximum_number': int,
Optional('no_initial_limit'): bool},
Optional('graceful_restart'):
{Any():
{'enable': bool,
'type': str,
Optional('helper_enable'): bool,
Optional('restart_interval'): int}},
Optional('event_log'):
{'enable': bool,
Optional('max_events'): int,
Optional('mode'): str,
},
Optional('numbers'):
{Optional('external_lsa'): int,
Optional('external_lsa_checksum'): str,
Optional('opaque_as_lsa'): int,
Optional('opaque_as_lsa_checksum'): str,
Optional('dc_bitless'): int,
Optional('do_not_age'): int},
Optional('total_areas'): int,
Optional('total_normal_areas'): int,
Optional('total_stub_areas'): int,
Optional('total_nssa_areas'): int,
Optional('total_areas_transit_capable'): int,
Optional('lsa_group_pacing_timer'): int,
Optional('interface_flood_pacing_timer'): int,
Optional('retransmission_pacing_timer'): int,
Optional('external_flood_list_length'): int,
Optional('db_exchange_summary_list_optimization'): bool,
Optional('elapsed_time'): str,
Optional('lls'): bool,
Optional('single_tos_route'): bool,
Optional('opqaue_lsa'): bool,
Optional('flags'):
{Optional('abr'): bool,
Optional('asbr'): bool},
Optional('areas'):
{Any():
{'area_id': str,
'area_type': str,
Optional('summary'): bool,
Optional('default_cost'): int,
Optional('authentication'): bool,
Optional('ranges'):
{Any():
{'prefix': str,
Optional('cost'): int,
'advertise': bool}},
Optional('rrr_enabled'): bool,
Optional('statistics'):
{Optional('spf_runs_count'): int,
Optional('spf_last_executed'): str,
Optional('interfaces_count'): int,
Optional('loopback_count'): int,
Optional('area_scope_lsa_count'): int,
Optional('area_scope_lsa_cksum_sum'): str,
Optional('area_scope_opaque_lsa_count'): int,
Optional('area_scope_opaque_lsa_cksum_sum'): str,
Optional('dcbitless_lsa_count'): int,
Optional('indication_lsa_count'): int,
Optional('donotage_lsa_count'): int,
Optional('flood_list_length'): int,
},
},
},
},
},
},
},
},
},
}
# ==================
# Parser for:
# * 'show ip ospf'
# ==================
class ShowIpOspf(ShowIpOspfSchema):
''' Parser for:
* 'show ip ospf'
'''
cli_command = 'show ip ospf'
exclude = ['area_scope_lsa_cksum_sum']
def cli(self, output=None):
if output is None:
out = self.device.execute(self.cli_command)
else:
out = output
# Init vars
ret_dict = {}
af = 'ipv4' # this is ospf - always ipv4
p1 = re.compile(r'(?:^VRF +(?P<vrf>(\S+)) +in +)?Routing +Process'
' +\"(?:ospf)? +(?P<instance>([a-zA-Z0-9\s]+))\"'
' +with +ID +(?P<router_id>(\S+))$')
p1_1 = re.compile(r'^Routing +Process +is +shutdown$')
p2 = re.compile(r'^Domain +ID +type +(?P<domain_id>(\S+)), +value'
' +(?P<value>(\S+))$')
p3 = re.compile(r'^Start +time: +(?P<start>([0-9\:\.]+)), +Time'
' +elapsed: +(?P<elapsed>(\S+))$')
p4 = re.compile(r'^Supports +only +single +TOS\(TOS0\) +routes$')
p5 = re.compile(r'^Supports +opaque +LSA$')
p6 = re.compile(r'^Supports +Link-local +Signaling +\(LLS\)$')
p7 = re.compile(r'^Supports +area +transit +capability$')
p8 = re.compile(r'^Supports +NSSA +\(compatible +with +RFC +3101\)$')
p9 = re.compile(r'^Supports +Database +Exchange +Summary +List'
' +Optimization +\(RFC +5243\)$')
p10 = re.compile(r'^Event-log +(?P<event_log>(enabled|disabled)),'
'(?: +Maximum +number +of +events:'
' +(?P<max_events>(\d+)),'
' +Mode: +(?P<mode>(\S+)))?$')
p11 = re.compile(r'^It +is +an'
'(?: +(?P<abr>(area border)))?'
'(?: +and)?'
'(?: +(?P<asbr>(autonomous system boundary)))?'
' +router$')
p12_1 = re.compile(r'^Redistributing +External +Routes +from,$')
p12_2 = re.compile(r'^(?P<type>(connected|static))(?: +with +metric'
' +mapped +to +(?P<metric>(\d+)))?$')
p12_2_1 = re.compile(r'^(?P<type>(connected|static|isis))'
', +includes +(?P<redist>(subnets)) +in +redistribution')
p12_3 = re.compile(r'^(?P<prot>(bgp|isis)) +(?P<pid>(\d+))'
'(?: +with +metric +mapped +to +(?P<metric>(\d+)))?'
'(?:, +includes +(?P<redist>(subnets)) +in +redistribution)?'
'(?:, +(?P<nssa>(nssa areas only)))?$')
p12_4 = re.compile(r'^Maximum +number +of +redistributed +prefixes'
' +(?P<num_prefix>(\d+))'
'(?: +\((?P<warn>(warning-only))\))?')
p12_5 = re.compile(r'^Threshold +for +warning +message'
' +(?P<thld>(\d+))\%$')
p13 = re.compile(r'^Router +is +not +originating +router-LSAs'
' +with +maximum +metric$')
p14_1 = re.compile(r'^Originating +router-LSAs +with +maximum'
' +metric$')
p14_2 = re.compile(r'^Condition:'
' +(?P<condition>(always|on \S+))'
'(?: +for +(?P<seconds>(\d+)) +seconds,)?'
' +State: +(?P<state>(\S+))$')
p14_3 = re.compile(r'^Advertise +stub +links +with +maximum +metric'
' +in +router\-LSAs$')
p14_4 = re.compile(r'^Advertise +summary\-LSAs +with +metric'
' +(?P<metric>(\d+))$')
p14_5 = re.compile(r'^Advertise +external\-LSAs +with +metric'
' +(?P<metric>(\d+))$')
p15 = re.compile(r'^Initial +SPF +schedule +delay +(?P<time>(\S+))'
' +msecs$')
p16 = re.compile(r'^Minimum +hold +time +between +two +consecutive'
' +SPFs +(?P<time>(\S+)) +msecs$')
p17 = re.compile(r'^Maximum +wait +time +between +two +consecutive'
' +SPFs +(?P<time>(\S+)) +msecs$')
p18 = re.compile(r'^Initial +LSA +throttle +delay +(?P<time>(\S+))'
' +msecs$')
p19 = re.compile(r'^Minimum +hold +time +for +LSA +throttle'
' +(?P<time>(\S+)) +msecs$')
p20 = re.compile(r'^Maximum +wait +time +for +LSA +throttle'
' +(?P<time>(\S+)) +msecs$')
p21 = re.compile(r'^Minimum +LSA +arrival'
' +(?P<arrival>(\S+)) +msecs$')
p22 = re.compile(r'^Incremental-SPF +(?P<incr>(disabled|enabled))$')
p23 = re.compile(r'LSA +group +pacing +timer'
' +(?P<pacing>(\d+)) +secs$')
p24 = re.compile(r'Interface +flood +pacing +timer'
' +(?P<interface>(\d+)) +msecs$')
p25 = re.compile(r'Retransmission +pacing +timer'
' +(?P<retransmission>(\d+)) +msecs$')
p26 = re.compile(r'EXCHANGE/LOADING +adjacency +limit: +initial'
' +(?P<initial>(\S+)), +process +maximum'
' +(?P<maximum>(\d+))$')
p27 = re.compile(r'^Number +of +external +LSA +(?P<ext>(\d+))\.'
' +Checksum +Sum +(?P<checksum>(\S+))$')
p28 = re.compile(r'^Number +of +opaque +AS +LSA +(?P<opq>(\d+))\.'
' +Checksum +Sum +(?P<checksum>(\S+))$')
p29 = re.compile(r'^Number +of +DCbitless +external +and +opaque'
' +AS +LSA +(?P<num>(\d+))$')
p30 = re.compile(r'^Number +of +DoNotAge +external +and +opaque'
' +AS +LSA +(?P<num>(\d+))$')
p31 = re.compile(r'^Number +of +areas +in +this +router +is'
' +(?P<total_areas>(\d+))\. +(?P<normal>(\d+))'
' +normal +(?P<stub>(\d+)) +stub +(?P<nssa>(\d+))'
' +nssa$')
p32 = re.compile(r'Number +of +areas +transit +capable +is'
' +(?P<num>(\d+))$')
p33 = re.compile(r'^Maximum +number +of +non +self-generated +LSA'
' +allowed +(?P<max_lsa>(\d+))$')
p33_1 = re.compile(r'^Current +number +of +non +self\-generated +LSA +(?P<max_lsa_current>\d+)$')
p33_2 = re.compile(r'^Threshold +for +warning +message +(?P<max_lsa_threshold_value>\d+)\%$')
p33_3 = re.compile(r'^Ignore\-time +(?P<max_lsa_ignore_time>\d+) +minutes,'
' +reset\-time +(?P<max_lsa_reset_time>\d+) +minutes$')
p33_4 = re.compile(r'^Ignore\-count +allowed +(?P<max_lsa_ignore_count>\d+),'
' +current ignore\-count +(?P<max_lsa_current_count>\d+)$')
p33_5 = re.compile(r'^Maximum +limit +of +redistributed +prefixes +(?P<max_lsa_limit>\d+) +\(warning\-only\)$')
p34 = re.compile(r'^External +flood +list +length +(?P<num>(\d+))$')
p35 = re.compile(r'^(?P<gr_type>(IETF|Cisco)) +Non-Stop +Forwarding'
' +(?P<enable>(enabled|disabled))$')
p36 = re.compile(r'^(?P<gr_type>(IETF|Cisco)) +NSF +helper +support'
' +(?P<gr_helper>(enabled|disabled))$')
p36_1 = re.compile(r'^restart-interval +limit *: +(?P<num>(\d+)) +sec$')
p37 = re.compile(r'^Reference +bandwidth +unit +is'
' +(?P<bd>(\d+)) +(?P<unit>(mbps))$')
p38 = re.compile(r'^Area +(?P<area>(\S+))(?: *\((I|i)nactive\))?$')
p39_1 = re.compile(r'^It +is +a +(?P<area_type>(\S+)) +area'
'(?:, +(?P<summary>(no +summary +LSA +in +this'
' +area)))?$')
p39_2 = re.compile(r'^generates +stub +default +route +with +cost'
' +(?P<default_cost>(\d+))$')
p40_1 = re.compile(r'^Area ranges are$')
p40_2 = re.compile(r'^(?P<prefix>([0-9\.\/]+)) +(Passive|Active)'
'(?:\((?P<cost>(\d+)) +\- +configured\))?'
' +(?P<advertise>(Advertise|DoNotAdvertise))$')
p41 = re.compile(r'^Number +of +interfaces +in +this +area +is'
' +(?P<num_intf>(\d+))(?:'
' *\((?P<loopback>(\d+)) +loopback\))?$')
p42 = re.compile(r'^Area +has +RRR +enabled$')
p43 = re.compile(r'^SPF +algorithm +executed +(?P<count>(\d+))'
' +times$')
p44 = re.compile(r'^SPF +algorithm +last +executed'
' +(?P<last_exec>(\S+)) +ago$')
p45 = re.compile(r'^Area +has +no +authentication$')
p46 = re.compile(r'^Number +of +LSA +(?P<lsa_count>(\d+))\.'
' +Checksum +Sum +(?P<checksum_sum>(\S+))$')
p47 = re.compile(r'^Number +of opaque +link +LSA'
' +(?P<opaque_count>(\d+))\. +Checksum +Sum'
' +(?P<checksum_sum>(\S+))$')
p48 = re.compile(r'^Number +of +DCbitless +LSA +(?P<count>(\d+))$')
p49 = re.compile(r'^Number +of +indication +LSA +(?P<count>(\d+))$')
p50 = re.compile(r'^Number +of +DoNotAge +LSA +(?P<count>(\d+))$')
p51 = re.compile(r'^Flood +list +length +(?P<len>(\d+))$')
p52 = re.compile(r'^Non-Stop +Routing +(?P<nsr>(enabled))$')
p53_1 = re.compile(r'^BFD +is +enabled +in +strict +mode$')
p53_2 = re.compile(r'^BFD +is +enabled$')
for line in out.splitlines():
line = line.strip()
# Routing Process "ospf 1" with ID 10.36.3.3
# VRF VRF1 in Routing Process "ospf 1" with ID 10.36.3.3
m = p1.match(line)
if m:
instance = str(m.groupdict()['instance'])
router_id = str(m.groupdict()['router_id'])
if m.groupdict()['vrf']:
vrf = str(m.groupdict()['vrf'])
else:
vrf = 'default'
# Set structure
if 'vrf' not in ret_dict:
ret_dict['vrf'] = {}
if vrf not in ret_dict['vrf']:
ret_dict['vrf'][vrf] = {}
if 'address_family' not in ret_dict['vrf'][vrf]:
ret_dict['vrf'][vrf]['address_family'] = {}
if af not in ret_dict['vrf'][vrf]['address_family']:
ret_dict['vrf'][vrf]['address_family'][af] = {}
if 'instance' not in ret_dict['vrf'][vrf]['address_family'][af]:
ret_dict['vrf'][vrf]['address_family'][af]['instance'] = {}
if instance not in ret_dict['vrf'][vrf]['address_family'][af]\
['instance']:
ret_dict['vrf'][vrf]['address_family'][af]['instance']\
[instance] = {}
# Set sub_dict
sub_dict = ret_dict['vrf'][vrf]['address_family'][af]\
['instance'][instance]
sub_dict['router_id'] = router_id
sub_dict['enable'] = True
# Set some default values
if 'nsr' not in sub_dict:
sub_dict['nsr'] = {}
sub_dict['nsr']['enable'] = False
if 'bfd' not in sub_dict:
sub_dict['bfd'] = {}
sub_dict['bfd']['enable'] = False
continue
# Routing Process is shutdown
m = p1_1.match(line)
if m:
sub_dict['enable'] = False
continue
# Domain ID type 0x0005, value 0.0.0.2
m = p2.match(line)
if m:
sub_dict['domain_id_type'] = str(m.groupdict()['domain_id'])
sub_dict['domain_id_value'] = str(m.groupdict()['value'])
continue
# Start time: 00:23:49.050, Time elapsed: 1d01h
m = p3.match(line)
if m:
sub_dict['start_time'] = str(m.groupdict()['start'])
sub_dict['elapsed_time'] = str(m.groupdict()['elapsed'])
continue
# Supports only single TOS(TOS0) routes
m = p4.match(line)
if m:
sub_dict['single_tos_route'] = True
continue
# Supports opaque LSA
m = p5.match(line)
if m:
sub_dict['opqaue_lsa'] = True
continue
# Supports Link-local Signaling (LLS)
m = p6.match(line)
if m:
sub_dict['lls'] = True
continue
# Supports area transit capability
m = p7.match(line)
if m:
sub_dict['area_transit'] = True
continue
# Supports NSSA (compatible with RFC 3101)
m = p8.match(line)
if m:
sub_dict['nssa'] = True
continue
# Supports Database Exchange Summary List Optimization (RFC 5243)
m = p9.match(line)
if m:
sub_dict['db_exchange_summary_list_optimization'] = True
continue
# Event-log disabled
# Event-log enabled, Maximum number of events: 1000, Mode: cyclic
m = p10.match(line)
if m:
if 'event_log' not in sub_dict:
sub_dict['event_log'] = {}
if 'enabled' in m.groupdict()['event_log']:
sub_dict['event_log']['enable'] = True
else:
sub_dict['event_log']['enable'] = False
if m.groupdict()['max_events']:
sub_dict['event_log']['max_events'] = \
int(m.groupdict()['max_events'])
if m.groupdict()['mode']:
sub_dict['event_log']['mode'] = str(m.groupdict()['mode'])
continue
# It is an area border router
# It is an autonomous system boundary router
# It is an area border and autonomous system boundary router
m = p11.match(line)
if m:
if 'flags' not in sub_dict:
sub_dict['flags'] = {}
if m.groupdict()['abr']:
sub_dict['flags']['abr'] = True
if m.groupdict()['asbr']:
sub_dict['flags']['asbr'] = True
continue
# Redistributing External Routes from,
m = p12_1.match(line)
if m:
if 'redistribution' not in sub_dict:
sub_dict['redistribution'] = {}
continue
# connected
# connected with metric mapped to 10
# static
# static with metric mapped to 10
m = p12_2.match(line)
if m:
the_type = str(m.groupdict()['type'])
if the_type not in sub_dict['redistribution']:
sub_dict['redistribution'][the_type] = {}
sub_dict['redistribution'][the_type]['enabled'] = True
if m.groupdict()['metric']:
sub_dict['redistribution'][the_type]['metric'] = \
int(m.groupdict()['metric'])
continue
# connected, includes subnets in redistribution
# static, includes subnets in redistribution
# isis, includes subnets in redistribution
m = p12_2_1.match(line)
if m:
the_type = str(m.groupdict()['type'])
if the_type not in sub_dict['redistribution']:
sub_dict['redistribution'][the_type] = {}
sub_dict['redistribution'][the_type]['enabled'] = True
sub_dict['redistribution'][the_type]['subnets'] = m.groupdict()['redist']
continue
# bgp 100 with metric mapped to 111
# isis 10 with metric mapped to 3333
# bgp 100 with metric mapped to 100, includes subnets in redistribution, nssa areas only
# bgp 100, includes subnets in redistribution
m = p12_3.match(line)
if m:
prot = str(m.groupdict()['prot'])
if prot not in sub_dict['redistribution']:
sub_dict['redistribution'][prot] = {}
if prot == 'bgp':
sub_dict['redistribution'][prot]['bgp_id'] = \
int(m.groupdict()['pid'])
else:
sub_dict['redistribution'][prot]['isis_pid'] = \
str(m.groupdict()['pid'])
# Set parsed values
if m.groupdict()['metric']:
sub_dict['redistribution'][prot]['metric'] = \
int(m.groupdict()['metric'])
if m.groupdict()['redist']:
sub_dict['redistribution'][prot]['subnets'] = \
str(m.groupdict()['redist'])
if m.groupdict()['nssa']:
sub_dict['redistribution'][prot]['nssa_only'] = True
continue
# Maximum number of redistributed prefixes 4000
# Maximum number of redistributed prefixes 3000 (warning-only)
m = p12_4.match(line)
if m:
if 'max_prefix' not in sub_dict['redistribution']:
sub_dict['redistribution']['max_prefix'] = {}
sub_dict['redistribution']['max_prefix']['num_of_prefix'] = \
int(m.groupdict()['num_prefix'])
if m.groupdict()['warn']:
sub_dict['redistribution']['max_prefix']['warn_only'] = True
else:
sub_dict['redistribution']['max_prefix']['warn_only'] = False
continue
# Threshold for warning message 70%
m = p12_5.match(line)
if m:
if 'max_prefix' not in sub_dict['redistribution']:
sub_dict['redistribution']['max_prefix'] = {}
sub_dict['redistribution']['max_prefix']['prefix_thld'] = \
int(m.groupdict()['thld'])
continue
# Router is not originating router-LSAs with maximum metric
m = p13.match(line)
if m:
if 'stub_router' not in sub_dict:
sub_dict['stub_router'] = {}
if 'always' not in sub_dict['stub_router']:
sub_dict['stub_router']['always'] = {}
# Set values
sub_dict['stub_router']['always']['always'] = False
sub_dict['stub_router']['always']['include_stub'] = False
sub_dict['stub_router']['always']['summary_lsa'] = False
sub_dict['stub_router']['always']['external_lsa'] = False
continue
# Originating router-LSAs with maximum metric
m = p14_1.match(line)
if m:
if 'stub_router' not in sub_dict:
sub_dict['stub_router'] = {}
continue
# Condition: always State: active
# Condition: always, State: active
# Condition: on start-up for 5 seconds, State: inactive
# Condition: on startup for 300 seconds, State: inactive
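# Note: p14_2 is redefined here (shadowing the earlier definition in this
# method) so that the comma after the condition and after "seconds" becomes
# optional, covering every sample line listed in the comments above.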
p14_2 = re.compile(r'^Condition:'
' +(?P<condition>(always|on \S+))'
'(?: +for +(?P<seconds>(\d+)) +seconds)?,?'
' +State: +(?P<state>(\S+))$')
m = p14_2.match(line)
if m:
condition = str(m.groupdict()['condition']).lower().replace("-", "")
condition = condition.replace(" ", "_")
if 'stub_router' not in sub_dict:
sub_dict['stub_router'] = {}
if condition not in sub_dict['stub_router']:
sub_dict['stub_router'][condition] = {}
sub_dict['stub_router'][condition]['state'] = \
str(m.groupdict()['state']).lower()
# Set 'condition' key
if condition == 'always':
sub_dict['stub_router'][condition][condition] = True
else:
sub_dict['stub_router'][condition][condition] = \
int(m.groupdict()['seconds'])
continue
# Advertise stub links with maximum metric in router-LSAs
m = p14_3.match(line)
if m:
sub_dict['stub_router'][condition]['include_stub'] = True
continue
# Advertise summary-LSAs with metric 16711680
m = p14_4.match(line)
if m:
sub_dict['stub_router'][condition]['summary_lsa'] = True
sub_dict['stub_router'][condition]['summary_lsa_metric'] = \
int(m.groupdict()['metric'])
continue
# Advertise external-LSAs with metric 16711680
m = p14_5.match(line)
if m:
sub_dict['stub_router'][condition]['external_lsa'] = True
sub_dict['stub_router'][condition]['external_lsa_metric'] = \
int(m.groupdict()['metric'])
continue
# Initial SPF schedule delay 50 msecs
m = p15.match(line)
if m:
start = int(float(m.groupdict()['time']))
if 'spf_control' not in sub_dict:
sub_dict['spf_control'] = {}
if 'throttle' not in sub_dict['spf_control']:
sub_dict['spf_control']['throttle'] = {}
if 'spf' not in sub_dict['spf_control']['throttle']:
sub_dict['spf_control']['throttle']['spf'] = {}
sub_dict['spf_control']['throttle']['spf']['start'] = start
continue
# Minimum hold time between two consecutive SPFs 200 msecs
m = p16.match(line)
if m:
hold = int(float(m.groupdict()['time']))
if 'spf_control' not in sub_dict:
sub_dict['spf_control'] = {}
if 'throttle' not in sub_dict['spf_control']:
sub_dict['spf_control']['throttle'] = {}
if 'spf' not in sub_dict['spf_control']['throttle']:
sub_dict['spf_control']['throttle']['spf'] = {}
sub_dict['spf_control']['throttle']['spf']['hold'] = hold
continue
# Maximum wait time between two consecutive SPFs 5000 msecs
m = p17.match(line)
if m:
maximum = int(float(m.groupdict()['time']))
if 'spf_control' not in sub_dict:
sub_dict['spf_control'] = {}
if 'throttle' not in sub_dict['spf_control']:
sub_dict['spf_control']['throttle'] = {}
if 'spf' not in sub_dict['spf_control']['throttle']:
sub_dict['spf_control']['throttle']['spf'] = {}
sub_dict['spf_control']['throttle']['spf']['maximum'] = maximum
continue
# Initial LSA throttle delay 50 msecs
m = p18.match(line)
if m:
start = int(float(m.groupdict()['time']))
if 'spf_control' not in sub_dict:
sub_dict['spf_control'] = {}
if 'throttle' not in sub_dict['spf_control']:
sub_dict['spf_control']['throttle'] = {}
if 'lsa' not in sub_dict['spf_control']['throttle']:
sub_dict['spf_control']['throttle']['lsa'] = {}
sub_dict['spf_control']['throttle']['lsa']['start'] = start
continue
# Minimum hold time for LSA throttle 200 msecs
m = p19.match(line)
if m:
hold = int(float(m.groupdict()['time']))
if 'spf_control' not in sub_dict:
sub_dict['spf_control'] = {}
if 'throttle' not in sub_dict['spf_control']:
sub_dict['spf_control']['throttle'] = {}
if 'lsa' not in sub_dict['spf_control']['throttle']:
sub_dict['spf_control']['throttle']['lsa'] = {}
sub_dict['spf_control']['throttle']['lsa']['hold'] = hold
continue
# Maximum wait time for LSA throttle 5000 msecs
m = p20.match(line)
if m:
maximum = int(float(m.groupdict()['time']))
if 'spf_control' not in sub_dict:
sub_dict['spf_control'] = {}
if 'throttle' not in sub_dict['spf_control']:
sub_dict['spf_control']['throttle'] = {}
if 'lsa' not in sub_dict['spf_control']['throttle']:
sub_dict['spf_control']['throttle']['lsa'] = {}
sub_dict['spf_control']['throttle']['lsa']['maximum'] = maximum
continue
# Minimum LSA interval 200 msecs. Minimum LSA arrival 100 msecs
# Minimum LSA arrival 100 msecs
m = p21.match(line)
if m:
if 'lsa' not in sub_dict['spf_control']['throttle']:
sub_dict['spf_control']['throttle']['lsa'] = {}
sub_dict['spf_control']['throttle']['lsa']['arrival'] = \
int(float(m.groupdict()['arrival']))
continue
# Incremental-SPF disabled
m = p22.match(line)
if m:
if 'spf_control' not in sub_dict:
sub_dict['spf_control'] = {}
if 'enabled' in m.groupdict()['incr']:
sub_dict['spf_control']['incremental_spf'] = True
else:
sub_dict['spf_control']['incremental_spf'] = False
continue
# LSA group pacing timer 240 secs
m = p23.match(line)
if m:
sub_dict['lsa_group_pacing_timer'] = \
int(float(m.groupdict()['pacing']))
continue
# Interface flood pacing timer 33 msecs
m = p24.match(line)
if m:
sub_dict['interface_flood_pacing_timer'] = \
int(float(m.groupdict()['interface']))
continue
# Retransmission pacing timer 66 msecs
m = p25.match(line)
if m:
sub_dict['retransmission_pacing_timer'] = \
int(float(m.groupdict()['retransmission']))
continue
# EXCHANGE/LOADING adjacency limit: initial 300, process maximum 300
m = p26.match(line)
if m:
if 'adjacency_stagger' not in sub_dict:
sub_dict['adjacency_stagger'] = {}
if m.groupdict()['initial'] == 'None':
sub_dict['adjacency_stagger']['no_initial_limit'] = True
else:
sub_dict['adjacency_stagger']['initial_number'] = \
int(m.groupdict()['initial'])
sub_dict['adjacency_stagger']['maximum_number'] = \
int(m.groupdict()['maximum'])
continue
# Number of external LSA 1. Checksum Sum 0x00607f
m = p27.match(line)
if m:
if 'numbers' not in sub_dict:
sub_dict['numbers'] = {}
sub_dict['numbers']['external_lsa'] = int(m.groupdict()['ext'])
sub_dict['numbers']['external_lsa_checksum'] = \
str(m.groupdict()['checksum'])
continue
# Number of opaque AS LSA 0. Checksum Sum 00000000
m = p28.match(line)
if m:
if 'numbers' not in sub_dict:
sub_dict['numbers'] = {}
sub_dict['numbers']['opaque_as_lsa'] = int(m.groupdict()['opq'])
sub_dict['numbers']['opaque_as_lsa_checksum'] = \
str(m.groupdict()['checksum'])
continue
# Number of DCbitless external and opaque AS LSA 0
m = p29.match(line)
if m:
if 'numbers' not in sub_dict:
sub_dict['numbers'] = {}
sub_dict['numbers']['dc_bitless'] = int(m.groupdict()['num'])
continue
# Number of DoNotAge external and opaque AS LSA 0
m = p30.match(line)
if m:
if 'numbers' not in sub_dict:
sub_dict['numbers'] = {}
sub_dict['numbers']['do_not_age'] = int(m.groupdict()['num'])
continue
# Number of areas in this router is 1. 1 normal 0 stub 0 nssa
m = p31.match(line)
if m:
sub_dict['total_areas'] = int(m.groupdict()['total_areas'])
sub_dict['total_normal_areas'] = int(m.groupdict()['normal'])
sub_dict['total_stub_areas'] = int(m.groupdict()['stub'])
sub_dict['total_nssa_areas'] = int(m.groupdict()['nssa'])
continue
# Number of areas transit capable is 0
m = p32.match(line)
if m:
sub_dict['total_areas_transit_capable'] = int(m.groupdict()['num'])
continue
# Maximum number of non self-generated LSA allowed 123
m = p33.match(line)
if m:
if 'database_control' not in sub_dict:
sub_dict['database_control'] = {}
sub_dict['database_control']['max_lsa'] = \
int(m.groupdict()['max_lsa'])
continue
# Current number of non self-generated LSA 0
m = p33_1.match(line)
if m:
if 'database_control' not in sub_dict:
sub_dict['database_control'] = {}
sub_dict['database_control']['max_lsa_current'] = \
int(m.groupdict()['max_lsa_current'])
continue
# Threshold for warning message 75%
m = p33_2.match(line)
if m:
if 'database_control' not in sub_dict:
sub_dict['database_control'] = {}
sub_dict['database_control']['max_lsa_threshold_value'] = \
int(m.groupdict()['max_lsa_threshold_value'])
continue
# Ignore-time 5 minutes, reset-time 10 minutes
m = p33_3.match(line)
if m:
if 'database_control' not in sub_dict:
sub_dict['database_control'] = {}
sub_dict['database_control']['max_lsa_ignore_time'] = \
int(m.groupdict()['max_lsa_ignore_time']) * 60
sub_dict['database_control']['max_lsa_reset_time'] = \
int(m.groupdict()['max_lsa_reset_time']) * 60
continue
# Ignore-count allowed 5, current ignore-count 0
m = p33_4.match(line)
if m:
if 'database_control' not in sub_dict:
sub_dict['database_control'] = {}
sub_dict['database_control']['max_lsa_ignore_count'] = \
int(m.groupdict()['max_lsa_ignore_count'])
sub_dict['database_control']['max_lsa_current_count'] = \
int(m.groupdict()['max_lsa_current_count'])
continue
# Maximum limit of redistributed prefixes 5000 (warning-only)
m = p33_5.match(line)
if m:
if 'database_control' not in sub_dict:
sub_dict['database_control'] = {}
sub_dict['database_control']['max_lsa_limit'] = int(m.groupdict()['max_lsa_limit'])
sub_dict['database_control']['max_lsa_warning_only'] = False
continue
# External flood list length 0
m = p34.match(line)
if m:
sub_dict['external_flood_list_length'] = int(m.groupdict()['num'])
continue
# Non-Stop Forwarding enabled
# IETF Non-Stop Forwarding enabled
m = p35.match(line)
if m:
gr_type = str(m.groupdict()['gr_type']).lower()
if 'enabled' in m.groupdict()['enable']:
enable = True
else:
enable = False
if 'graceful_restart' not in sub_dict:
sub_dict['graceful_restart'] = {}
if gr_type not in sub_dict['graceful_restart']:
sub_dict['graceful_restart'][gr_type] = {}
# Set keys
sub_dict['graceful_restart'][gr_type]['enable'] = True
sub_dict['graceful_restart'][gr_type]['type'] = gr_type
# IETF NSF helper support enabled
# Cisco NSF helper support enabled
m = p36.match(line)
if m:
gr_type = str(m.groupdict()['gr_type']).lower()
if 'enabled' in m.groupdict()['gr_helper']:
gr_helper = True
else:
gr_helper = False
if 'graceful_restart' not in sub_dict:
sub_dict['graceful_restart'] = {}
if gr_type not in sub_dict['graceful_restart']:
sub_dict['graceful_restart'][gr_type] = {}
# Set keys
sub_dict['graceful_restart'][gr_type]['type'] = gr_type
sub_dict['graceful_restart'][gr_type]['helper_enable'] = gr_helper
if 'enable' not in sub_dict['graceful_restart'][gr_type]:
sub_dict['graceful_restart'][gr_type]['enable'] = False
continue
# restart-interval limit: 11 sec
m = p36_1.match(line)
if m:
sub_dict['graceful_restart'][gr_type]['restart_interval'] = \
int(m.groupdict()['num'])
continue
# Reference bandwidth unit is 100 mbps
# Reference bandwidth unit is 4294967 mbps
m = p37.match(line)
if m:
bd = int(m.groupdict()['bd'])
if 'auto_cost' not in sub_dict:
sub_dict['auto_cost'] = {}
sub_dict['auto_cost']['reference_bandwidth'] = bd
sub_dict['auto_cost']['bandwidth_unit'] = str(m.groupdict()['unit'])
if bd == 100:
# This is the default - set to False
sub_dict['auto_cost']['enable'] = False
else:
sub_dict['auto_cost']['enable'] = True
continue
# Area BACKBONE(0)
# Area BACKBONE(0.0.0.0) (Inactive)
# Area 1
m = p38.match(line)
if m:
parsed_area = str(m.groupdict()['area'])
n = re.match('BACKBONE\((?P<area_num>(\S+))\)', parsed_area)
if n:
area = str(IPAddress(str(n.groupdict()['area_num'])))
else:
area = str(IPAddress(str(m.groupdict()['area'])))
# Create dict
if 'areas' not in sub_dict:
sub_dict['areas'] = {}
if area not in sub_dict['areas']:
sub_dict['areas'][area] = {}
# Set default values
sub_dict['areas'][area]['area_id'] = area
sub_dict['areas'][area]['area_type'] = 'normal'
continue
# It is a stub area
# It is a stub area, no summary LSA in this area
# It is a NSSA area
m = p39_1.match(line)
if m:
area_type = str(m.groupdict()['area_type']).lower()
sub_dict['areas'][area]['area_type'] = area_type
if area_type == 'stub':
if m.groupdict()['summary']:
sub_dict['areas'][area]['summary'] = False
else:
sub_dict['areas'][area]['summary'] = True
continue
# generates stub default route with cost 111
# generates stub default route with cost 222
m = p39_2.match(line)
if m:
sub_dict['areas'][area]['default_cost'] = \
int(m.groupdict()['default_cost'])
continue
# Area ranges are
m = p40_1.match(line)
if m:
if 'ranges' not in sub_dict['areas'][area]:
sub_dict['areas'][area]['ranges'] = {}
continue
# 10.4.1.0/24 Passive Advertise
# 10.4.0.0/16 Passive DoNotAdvertise
# 10.4.0.0/16 Active(10 - configured) Advertise
m = p40_2.match(line)
if m:
prefix = str(m.groupdict()['prefix'])
if 'ranges' not in sub_dict['areas'][area]:
sub_dict['areas'][area]['ranges'] = {}
if prefix not in sub_dict['areas'][area]['ranges']:
sub_dict['areas'][area]['ranges'][prefix] = {}
sub_dict['areas'][area]['ranges'][prefix]['prefix'] = prefix
if m.groupdict()['cost']:
sub_dict['areas'][area]['ranges'][prefix]['cost'] = \
int(m.groupdict()['cost'])
if 'Advertise' in m.groupdict()['advertise']:
sub_dict['areas'][area]['ranges'][prefix]['advertise'] = True
else:
sub_dict['areas'][area]['ranges'][prefix]['advertise'] = False
continue
# Number of interfaces in this area is 3
# Number of interfaces in this area is 3 (1 loopback)
m = p41.match(line)
if m:
if 'areas' not in sub_dict:
sub_dict['areas'] = {}
if area not in sub_dict['areas']:
sub_dict['areas'][area] = {}
if 'statistics' not in sub_dict['areas'][area]:
sub_dict['areas'][area]['statistics'] = {}
sub_dict['areas'][area]['statistics']['interfaces_count'] =\
int(m.groupdict()['num_intf'])
if m.groupdict()['loopback']:
sub_dict['areas'][area]['statistics']['loopback_count'] =\
int(m.groupdict()['loopback'])
continue
# Area has RRR enabled
m = p42.match(line)
if m:
sub_dict['areas'][area]['rrr_enabled'] = True
continue
# SPF algorithm executed 26 times
m = p43.match(line)
if m:
if 'statistics' not in sub_dict['areas'][area]:
sub_dict['areas'][area]['statistics'] = {}
sub_dict['areas'][area]['statistics']['spf_runs_count'] = \
int(m.groupdict()['count'])
continue
# SPF algorithm last executed 00:19:54.849 ago
m = p44.match(line)
if m:
if 'statistics' not in sub_dict['areas'][area]:
sub_dict['areas'][area]['statistics'] = {}
sub_dict['areas'][area]['statistics']['spf_last_executed'] = \
str(m.groupdict()['last_exec'])
continue
# Area has no authentication
m = p45.match(line)
if m:
continue
# Number of LSA 19. Checksum Sum 0x0a2fb5
m = p46.match(line)
if m:
if 'statistics' not in sub_dict['areas'][area]:
sub_dict['areas'][area]['statistics'] = {}
sub_dict['areas'][area]['statistics']['area_scope_lsa_count'] =\
int(m.groupdict()['lsa_count'])
sub_dict['areas'][area]['statistics']\
['area_scope_lsa_cksum_sum'] = \
str(m.groupdict()['checksum_sum'])
continue
# Number of opaque link LSA 0. Checksum Sum 00000000
m = p47.match(line)
if m:
if 'statistics' not in sub_dict['areas'][area]:
sub_dict['areas'][area]['statistics'] = {}
sub_dict['areas'][area]['statistics']\
['area_scope_opaque_lsa_count'] = \
int(m.groupdict()['opaque_count'])
sub_dict['areas'][area]['statistics']\
['area_scope_opaque_lsa_cksum_sum'] = \
str(m.groupdict()['checksum_sum'])
continue
# Number of DCbitless LSA 5
m = p48.match(line)
if m:
if 'statistics' not in sub_dict['areas'][area]:
sub_dict['areas'][area]['statistics'] = {}
sub_dict['areas'][area]['statistics']['dcbitless_lsa_count'] = \
int(m.groupdict()['count'])
continue
# Number of indication LSA 0
m = p49.match(line)
if m:
if 'statistics' not in sub_dict['areas'][area]:
sub_dict['areas'][area]['statistics'] = {}
sub_dict['areas'][area]['statistics']['indication_lsa_count'] =\
int(m.groupdict()['count'])
continue
# Number of DoNotAge LSA 0
m = p50.match(line)
if m:
if 'statistics' not in sub_dict['areas'][area]:
sub_dict['areas'][area]['statistics'] = {}
sub_dict['areas'][area]['statistics']['donotage_lsa_count'] = \
int(m.groupdict()['count'])
continue
# Flood list length 0
m = p51.match(line)
if m:
if 'statistics' not in sub_dict['areas'][area]:
sub_dict['areas'][area]['statistics'] = {}
sub_dict['areas'][area]['statistics']['flood_list_length'] = \
int(m.groupdict()['len'])
continue
# Non-Stop Routing enabled
m = p52.match(line)
if m:
sub_dict['nsr']['enable'] = True
continue
# BFD is enabled in strict mode
m = p53_1.match(line)
if m:
if 'bfd' not in sub_dict:
sub_dict['bfd'] = {}
sub_dict['bfd']['enable'] = True
sub_dict['bfd']['strict_mode'] = True
continue
# BFD is enabled
m = p53_2.match(line)
if m:
if 'bfd' not in sub_dict:
sub_dict['bfd'] = {}
sub_dict['bfd']['enable'] = True
continue
return ret_dict
# ============================
# Schema for:
# * 'show ip ospf interface brief'
# ============================
class ShowIpOspfInterfaceBriefSchema(MetaParser):
''' Schema for:
* 'show ip ospf interface brief'
'''
schema = {
'instance': {
Any(): {
'areas': {
Any(): {
'interfaces': {
Any(): {
'ip_address': str,
'cost': int,
'state': str,
'nbrs_full': int,
'nbrs_count': int,
},
},
},
},
},
},
}
class ShowIpOspfInterfaceBrief(ShowIpOspfInterfaceBriefSchema):
''' Parser for:
* 'show ip ospf interface brief'
'''
cli_command = 'show ip ospf interface brief'
def cli(self, output=None):
if output is None:
out = self.device.execute(self.cli_command)
else:
out = output
# Init vars
ret_dict = {}
p1 = re.compile(r'^(?P<interface>\S+) +(?P<instance>\S+) +(?P<area>\d+) +'
'(?P<address>\S+) +(?P<cost>\d+) +(?P<state>\S+) +(?P<nbrs_full>\d+)'
r'\/(?P<nbrs_count>\d+)$')
for line in out.splitlines():
line = line.strip()
m = p1.match(line)
if m:
group = m.groupdict()
interface = Common.convert_intf_name(
str(group['interface']))
instance = str(group['instance'])
ip_address = str(group['address'])
area = str(IPAddress(str(group['area'])))
state = group['state']
cost = int(group['cost'])
nbrs_full = int(group['nbrs_full'])
nbrs_count = int(group['nbrs_count'])
intf_dict = ret_dict.setdefault('instance', {}).\
setdefault(instance, {}).\
setdefault('areas', {}).\
setdefault(area, {}).\
setdefault('interfaces', {}).\
setdefault(interface, {})
intf_dict.update({'ip_address' : ip_address})
intf_dict.update({'cost' : cost})
intf_dict.update({'state' : state})
intf_dict.update({'nbrs_full' : nbrs_full})
intf_dict.update({'nbrs_count' : nbrs_count})
continue
return ret_dict
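# Illustrative example (hypothetical, not from a real device): a single
# "show ip ospf interface brief" row such as
#
#     Gi2          1     0               10.0.2.1/24        1     DR    0/0
#
# parses to approximately:
#
#     {'instance': {'1': {'areas': {'0.0.0.0': {'interfaces': {
#         'GigabitEthernet2': {'ip_address': '10.0.2.1/24', 'cost': 1,
#                              'state': 'DR', 'nbrs_full': 0, 'nbrs_count': 0}}}}}}}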
# ============================
# Schema for:
# * 'show ip ospf interface'
# * 'show ip ospf interface {interface}'
# ============================
class ShowIpOspfInterfaceSchema(MetaParser):
''' Schema for:
* 'show ip ospf interface'
* 'show ip ospf interface {interface}'
'''
schema = {
'vrf':
{Any():
{'address_family':
{Any():
{'instance':
{Any():
{'areas':
{Any():
{Optional('interfaces'):
{Any():
{'name': str,
'enable': bool,
'line_protocol': bool,
'ip_address': str,
Optional('interface_id'): int,
Optional('attached'): str,
'demand_circuit': bool,
'router_id': str,
'interface_type': str,
'bfd':
{'enable': bool},
Optional('if_cfg'): bool,
Optional('cost'): int,
Optional('transmit_delay'): int,
Optional('state'): str,
Optional('priority'): int,
Optional('dr_router_id'): str,
Optional('dr_ip_addr'): str,
Optional('bdr_router_id'): str,
Optional('bdr_ip_addr'): str,
Optional('hello_interval'): int,
Optional('dead_interval'): int,
Optional('wait_interval'): int,
Optional('retransmit_interval'): int,
Optional('passive'): bool,
Optional('oob_resync_timeout'): int,
Optional('hello_timer'): str,
Optional('index'): str,
Optional('flood_queue_length'): int,
Optional('next'): str,
Optional('lls'): bool,
Optional('last_flood_scan_length'): int,
Optional('max_flood_scan_length'): int,
Optional('last_flood_scan_time_msec'): int,
Optional('max_flood_scan_time_msec'): int,
Optional('total_dcbitless_lsa'): int,
Optional('donotage_lsa'): bool,
Optional('ti_lfa_protected'): bool,
Optional('ipfrr_candidate'): bool,
Optional('ipfrr_protected'): bool,
Optional('stub_host'): bool,
Optional('prefix_suppression'): bool,
Optional('ttl_security'):
{'enable': bool,
Optional('hops'): int},
Optional('graceful_restart'):
{Any():
{'type': str,
'helper': bool}},
Optional('topology'):
{Any():
{'cost': int,
'disabled': bool,
'shutdown': bool,
'name': str}},
Optional('statistics'):
{Optional('adj_nbr_count'): int,
Optional('nbr_count'): int,
Optional('num_nbrs_suppress_hello'): int,
},
Optional('neighbors'):
{Any():
{Optional('dr_router_id'): str,
Optional('bdr_router_id'): str,
},
},
Optional('authentication'):
{'auth_trailer_key':
{'crypto_algorithm': str,
Optional('youngest_key_id'): int,
},
},
Optional('teapp'): {
Optional('topology_id'): str,
Any(): {
Optional('affinity'): {
'length': int,
'bits': str,
},
Optional('extended_affinity'): {
'length': int,
'bits': str,
},
},
},
Optional('sr_policy_manager'): {
'te_opaque_lsa': str,
},
Optional('sr_mpls_enabled'): bool,
},
},
Optional('virtual_links'):
{Any():
{'name': str,
'enable': bool,
'line_protocol': bool,
'ip_address': str,
Optional('interface_id'): int,
Optional('attached'): str,
'demand_circuit': bool,
'router_id': str,
'interface_type': str,
'bfd':
{'enable': bool},
Optional('if_cfg'): bool,
Optional('cost'): int,
Optional('transmit_delay'): int,
Optional('state'): str,
Optional('priority'): int,
Optional('dr_router_id'): str,
Optional('dr_ip_addr'): str,
Optional('bdr_router_id'): str,
Optional('bdr_ip_addr'): str,
Optional('hello_interval'): int,
Optional('dead_interval'): int,
Optional('wait_interval'): int,
Optional('retransmit_interval'): int,
Optional('passive'): bool,
Optional('oob_resync_timeout'): int,
Optional('hello_timer'): str,
Optional('index'): str,
Optional('flood_queue_length'): int,
Optional('next'): str,
Optional('lls'): bool,
Optional('last_flood_scan_length'): int,
Optional('max_flood_scan_length'): int,
Optional('last_flood_scan_time_msec'): int,
Optional('max_flood_scan_time_msec'): int,
Optional('total_dcbitless_lsa'): int,
Optional('donotage_lsa'): bool,
Optional('ti_lfa_protected'): bool,
Optional('ipfrr_candidate'): bool,
Optional('ipfrr_protected'): bool,
Optional('stub_host'): bool,
Optional('prefix_suppression'): bool,
Optional('ttl_security'):
{'enable': bool,
Optional('hops'): int},
Optional('graceful_restart'):
{Any():
{'type': str,
'helper': bool}},
Optional('topology'):
{Any():
{'cost': int,
'disabled': bool,
'shutdown': bool,
'name': str}},
Optional('statistics'):
{Optional('adj_nbr_count'): int,
Optional('nbr_count'): int,
Optional('num_nbrs_suppress_hello'): int,
},
Optional('neighbors'):
{Any():
{Optional('dr_router_id'): str,
Optional('bdr_router_id'): str,
},
},
Optional('authentication'):
{'auth_trailer_key':
{'crypto_algorithm': str,
Optional('youngest_key_id'): int,
},
},
Optional('teapp'): {
Optional('topology_id'): str,
Any(): {
Optional('affinity'): {
'length': int,
'bits': str,
},
Optional('extended_affinity'): {
'length': int,
'bits': str,
},
},
},
Optional('sr_policy_manager'): {
'te_opaque_lsa': str,
},
Optional('sr_mpls_enabled'): bool,
},
},
Optional('sham_links'):
{Any():
{'name': str,
'enable': bool,
'line_protocol': bool,
'ip_address': str,
Optional('interface_id'): int,
Optional('attached'): str,
'demand_circuit': bool,
'router_id': str,
'interface_type': str,
'bfd':
{'enable': bool},
Optional('if_cfg'): bool,
Optional('cost'): int,
Optional('transmit_delay'): int,
Optional('state'): str,
Optional('priority'): int,
Optional('dr_router_id'): str,
Optional('dr_ip_addr'): str,
Optional('bdr_router_id'): str,
Optional('bdr_ip_addr'): str,
Optional('hello_interval'): int,
Optional('dead_interval'): int,
Optional('wait_interval'): int,
Optional('retransmit_interval'): int,
Optional('passive'): bool,
Optional('oob_resync_timeout'): int,
Optional('hello_timer'): str,
Optional('index'): str,
Optional('flood_queue_length'): int,
Optional('next'): str,
Optional('lls'): bool,
Optional('last_flood_scan_length'): int,
Optional('max_flood_scan_length'): int,
Optional('last_flood_scan_time_msec'): int,
Optional('max_flood_scan_time_msec'): int,
Optional('total_dcbitless_lsa'): int,
Optional('donotage_lsa'): bool,
Optional('ti_lfa_protected'): bool,
Optional('ipfrr_candidate'): bool,
Optional('ipfrr_protected'): bool,
Optional('stub_host'): bool,
Optional('prefix_suppression'): bool,
Optional('ttl_security'):
{'enable': bool,
Optional('hops'): int},
Optional('graceful_restart'):
{Any():
{'type': str,
'helper': bool}},
Optional('topology'):
{Any():
{'cost': int,
'disabled': bool,
'shutdown': bool,
'name': str}},
Optional('statistics'):
{Optional('adj_nbr_count'): int,
Optional('nbr_count'): int,
Optional('num_nbrs_suppress_hello'): int},
Optional('neighbors'):
{Any():
{Optional('dr_router_id'): str,
Optional('bdr_router_id'): str,
},
},
Optional('authentication'):
{'auth_trailer_key':
{'crypto_algorithm': str,
Optional('youngest_key_id'): int,
},
},
Optional('teapp'): {
Optional('topology_id'): str,
Any(): {
Optional('affinity'): {
'length': int,
'bits': str,
},
Optional('extended_affinity'): {
'length': int,
'bits': str,
},
},
},
Optional('sr_policy_manager'): {
'te_opaque_lsa': str,
},
Optional('sr_mpls_enabled'): bool,
},
},
},
},
},
},
},
},
},
},
}
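# A minimal sketch of the parsed structure this schema describes (illustrative
# only; which Optional keys appear depends on the actual device output):
#
#   {'vrf': {'default':
#       {'address_family': {'ipv4':
#           {'instance': {'1':
#               {'areas': {'0.0.0.0':
#                   {'interfaces': {'GigabitEthernet2':
#                       {'name': 'GigabitEthernet2',
#                        'enable': True,
#                        'line_protocol': True,
#                        'ip_address': '10.4.1.1/32',
#                        ...}}}}}}}}}}}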
# ===========================================
# Parser for:
# * 'show ip ospf interface'
# * 'show ip ospf interface {interface}'
# ===========================================
class ShowIpOspfInterface(ShowIpOspfInterfaceSchema):
''' Parser for:
* 'show ip ospf interface'
* 'show ip ospf interface {interface}'
'''
cli_command = ['show ip ospf interface {interface}',
'show ip ospf interface']
exclude = ['hello_timer', 'dead_timer',
'bdr_ip_addr', 'bdr_router_id', 'last_flood_scan_length',
'last_flood_scan_time_msec',
'max_flood_scan_length', 'max_flood_scan_time_msec', 'state']
def cli(self, interface=None, output=None):
if output is None:
if interface:
cmd = self.cli_command[0].format(interface=interface)
else:
cmd = self.cli_command[1]
out = self.device.execute(cmd)
else:
out = output
# Init vars
ret_dict = {}
af = 'ipv4' # this is ospf - always ipv4
# Mapping dict
bool_dict = {'up': True, 'down': False, 'unknown': False}
p1 = re.compile(r'^(?P<interface>(\S+)) +is( +administratively)?'
' +(?P<enable>(unknown|up|down)), +line +protocol'
' +is +(?P<line_protocol>(up|down))'
'(?: +\(\S+\))?$')
p2 = re.compile(r'^Internet +Address +(?P<address>(\S+)),'
'(?: +Interface +ID +(?P<intf_id>(\d+)),)?'
' +Area +(?P<area>(\S+))(?:, +Attached +via'
' +(?P<attach>(.*)))?$')
p2_1 = re.compile(r'^Attached +via +(?P<attached>([a-zA-Z0-9\s]+))$')
p3 = re.compile(r'^Process +ID +(?P<pid>(\S+)),'
'(?: +VRF +(?P<vrf>(\S+)))?'
' +Router +ID +(?P<router_id>(\S+)),'
' +Network +Type +(?P<interface_type>(\S+)),'
' +Cost: +(?P<cost>(\d+))$')
p5 = re.compile(r'^Configured as demand circuit$')
p6 = re.compile(r'^Run as demand circuit$')
p7 = re.compile(r'^DoNotAge +LSA +not +allowed +\(Number +of'
' +DCbitless +LSA +is +(?P<num>(\d+))\)\.$')
p8 = re.compile(r'^Enabled +by +interface +config, +including'
' +secondary +ip +addresses$')
p9 = re.compile(r'^Transmit +Delay is +(?P<delay>(\d+)) +sec,'
' +State +(?P<state>(\S+))'
'(?:, +Priority +(?P<priority>(\d+)))?'
'(?:, +BFD +(?P<bfd>(enabled|disabled)))?$')
p10 = re.compile(r'^Designated +(R|r)outer +\(ID\)'
' +(?P<dr_router_id>(\S+)), +(I|i)nterface'
' +(A|a)ddress +(?P<dr_ip_addr>(\S+))$')
p11 = re.compile(r'^Backup +(D|d)esignated +(R|r)outer +\(ID\)'
' +(?P<bdr_router_id>(\S+)), +(I|i)nterface'
' +(A|a)ddress +(?P<bdr_ip_addr>(\S+))$')
p12 = re.compile(r'^Timer +intervals +configured,'
' +Hello +(?P<hello>(\d+)),'
' +Dead +(?P<dead>(\d+)),'
' +Wait +(?P<wait>(\d+)),'
' +Retransmit +(?P<retransmit>(\d+))$')
p12_1 = re.compile(r'^oob-resync +timeout +(?P<oob>(\d+))$')
p12_2 = re.compile(r'^Hello +due +in +(?P<hello_timer>(\S+))$')
p13 = re.compile(r'^Supports +Link-local +Signaling +\(LLS\)$')
p14 = re.compile(r'^(?P<gr_type>(Cisco|IETF)) +NSF +helper +support'
' +(?P<helper>(enabled|disabled))$')
p15 = re.compile(r'^Index +(?P<index>(\S+)),'
' +flood +queue +length +(?P<length>(\d+))$')
p16 = re.compile(r'^Next +(?P<next>(\S+))$')
p17 = re.compile(r'^Last +flood +scan +length +is +(?P<num>(\d+)),'
' +maximum +is +(?P<max>(\d+))$')
p18 = re.compile(r'^Last +flood +scan +time +is +(?P<time1>(\d+))'
' +msec, +maximum +is +(?P<time2>(\d+)) +msec$')
p19 = re.compile(r'^Neighbor +Count +is +(?P<nbr_count>(\d+)),'
' +Adjacent +neighbor +count +is'
' +(?P<adj_nbr_count>(\d+))$')
p20_1 = re.compile(r'^Adjacent +with +neighbor +(?P<nbr>(\S+))'
' +\((B|b)ackup +(D|d)esignated +(R|r)outer\)$')
p20_2 = re.compile(r'^Adjacent +with +neighbor +(?P<nbr>(\S+))'
' +\((D|d)esignated +(R|r)outer\)$')
p20_3 = re.compile(r'^Adjacent +with +neighbor +(?P<nbr>(\S+))'
' +\(Hello suppressed\)$')
p21 = re.compile(r'^Suppress +hello +for +(?P<sup>(\d+))'
' +neighbor\(s\)$')
p22 = re.compile(r'^Loopback +interface +is +treated +as +a +stub'
' +Host$')
p23 = re.compile(r'^Can +be +protected +by per-+prefix +Loop-Free'
' +FastReroute$')
p24 = re.compile(r'^Can +be +used +for +per-prefix +Loop-Free'
' +FastReroute +repair +paths$')
p25 = re.compile(r'^Not +Protected +by +per-prefix +TI-LFA$')
p26 = re.compile(r'^Prefix-suppression +is +(?P<ps>(enabled|disabled))$')
p27 = re.compile(r'^Strict +TTL +checking'
' +(?P<strict_ttl>(enabled|disabled))'
'(?:, +up +to +(?P<hops>(\d+)) +hops +allowed)?$')
p28_1 = re.compile(r'^Simple +password +authentication +enabled$')
p28_2 = re.compile(r'^Cryptographic +authentication +enabled$')
p28_3 = re.compile(r'^Youngest +key +id +is +(?P<id>(\d+))$')
p28_4 = re.compile(r'^Rollover +in +progress, +(?P<num>(\d+))'
                            ' +neighbor\(s\) +using +the +old +key\(s\):$')
p28_5 = re.compile(r'^key +id +1 +algorithm +MD5$')
# Segment Routing enabled for MPLS forwarding
p29 = re.compile(r'^Segment +Routing +enabled +for +MPLS +forwarding$')
# TEAPP:
p30 = re.compile(r'^TEAPP:$')
# Topology Id:0x0
p30_1 = re.compile(r'^Topology +Id: *(?P<topology_id>[\w]+)$')
# TEAPP:SRTE
p30_2 = re.compile(r'^TEAPP: *(?P<teapp>[\w]+)$')
# Affinity: length 32, bits 0x00000010
p30_3 = re.compile(r'^Affinity: *length +(?P<length>\d+), +bits +(?P<bits>\w+)$')
# Extended affinity: length 32, bits 0x00000010
p30_4 = re.compile(r'^Extended +affinity: *length +(?P<length>\d+), +bits +(?P<bits>\w+)$')
# SR Policy Manager:
p31 = re.compile(r'^SR +Policy +Manager:$')
# TE Opaque LSA: Source of link information OSPF
p31_1 = re.compile(r'^TE +Opaque +LSA: +(?P<te_opaque_lsa>[\S\s]+)$')
for line in out.splitlines():
line = line.strip()
# Loopback0 is up, line protocol is up
# GigabitEthernet2 is up, line protocol is up
# Port-channel2.100 is administratively down, line protocol is down
# OSPF_SL1 is up, line protocol is up
# OSPF_VL3 is up, line protocol is up
# TenGigabitEthernet3/0/1 is up, line protocol is up (connected)
# TenGigabitEthernet1/8 is down, line protocol is down (notconnect)
# TenGigabitEthernet2/6.3052 is administratively down, line protocol is down (disabled)
# TenGigabitEthernet1/15 is down, line protocol is down (err-disabled)
m = p1.match(line)
if m:
interface = str(m.groupdict()['interface'])
enable = str(m.groupdict()['enable'])
line_protocol = str(m.groupdict()['line_protocol'])
# Determine if 'interface' or 'sham_link' or 'virtual_link'
if re.search('SL', interface):
x = re.match('(?P<ignore>\S+)_SL(?P<num>(\d+))', interface)
if x:
intf_type = 'sham_links'
name = 'SL' + str(x.groupdict()['num'])
elif re.search('VL', interface):
x = re.match('(?P<ignore>\S+)_VL(?P<num>(\d+))', interface)
if x:
intf_type = 'virtual_links'
name = 'VL' + str(x.groupdict()['num'])
else:
intf_type = 'interfaces'
name = interface
continue
# Internet Address 10.4.1.1/32, Interface ID 11, Area 0
# Internet Address 0.0.0.0/0, Area 0, Attached via Not Attached
# Internet Address 10.229.4.4/24, Area 1, Attached via Interface Enable
m = p2.match(line)
if m:
ip_address = str(m.groupdict()['address'])
area = str(IPAddress(str(m.groupdict()['area'])))
if m.groupdict()['intf_id']:
intf_id = int(m.groupdict()['intf_id'])
if m.groupdict()['attach']:
attached = str(m.groupdict()['attach']).lower()
continue
# Attached via Interface Enable
m = p2_1.match(line)
if m:
attached = str(m.groupdict()['attached']).lower()
continue
# Process ID 1, Router ID 10.64.4.4, Network Type VIRTUAL_LINK, Cost: 1
# Process ID 2, Router ID 10.229.11.11, Network Type SHAM_LINK, Cost: 111
# Process ID 1, Router ID 10.4.1.1, Network Type BROADCAST, Cost: 1
m = p3.match(line)
if m:
instance = str(m.groupdict()['pid'])
router_id = str(m.groupdict()['router_id'])
interface_type = str(m.groupdict()['interface_type']).lower()
interface_type = interface_type.replace("_", "-")
# Get interface values
if intf_type == 'interfaces':
intf_name = interface
elif intf_type == 'virtual_links':
# Init
vl_addr = None
vl_transit_area_id = None
# Execute command to get virtual-link address
cmd = 'show ip ospf virtual-links | i {interface}'.format(interface=interface)
out = self.device.execute(cmd)
for line in out.splitlines():
line = line.rstrip()
# Virtual Link OSPF_VL0 to router 10.100.5.5 is down
p = re.search('Virtual +Link +(?P<intf>(\S+)) +to +router'
' +(?P<address>(\S+)) +is +(up|down)'
'(?:.*)?', line)
if p:
if interface == str(p.groupdict()['intf']):
vl_addr = str(p.groupdict()['address'])
break
# Execute command to get virtual-link transit_area_id
if vl_addr is not None:
cmd = 'show running-config | i virtual-link | i {addr}'.format(addr=vl_addr)
out = self.device.execute(cmd)
for line in out.splitlines():
line = line.rstrip()
# area 1 virtual-link 10.100.5.5
q = re.search('area +(?P<q_area>(\d+)) +virtual-link'
' +(?P<addr>(\S+))(?: +(.*))?', line)
if q:
q_addr = str(q.groupdict()['addr'])
# Check parameters match
if q_addr == vl_addr:
vl_transit_area_id = str(IPAddress(str(q.groupdict()['q_area'])))
break
if vl_transit_area_id is not None:
intf_name = '{} {}'.format(vl_transit_area_id, router_id)
area = vl_transit_area_id
elif intf_type == 'sham_links':
# Init
sl_local_id = None
sl_remote_id = None
# Execute command to get sham-link remote_id
cmd = 'show ip ospf sham-links | i {interface}'.format(interface=interface)
out = self.device.execute(cmd)
for line in out.splitlines():
line = line.rstrip()
# Sham Link OSPF_SL1 to address 10.151.22.22 is up
p = re.search('Sham +Link +(?P<intf>(\S+)) +to +address'
' +(?P<remote>(\S+)) +is +(up|down)', line)
if p:
if interface == str(p.groupdict()['intf']):
sl_remote_id = str(p.groupdict()['remote'])
break
# Execute command to get sham-link local_id
if sl_remote_id is not None:
cmd = 'show running-config | i sham-link | i {remote}'.format(remote=sl_remote_id)
out = self.device.execute(cmd)
for line in out.splitlines():
line = line.rstrip()
# area 1 sham-link 10.229.11.11 10.151.22.22 cost 111 ttl-security hops 3
q = re.search('area +(?P<q_area>(\d+)) +sham-link'
' +(?P<local_id>(\S+))'
' +(?P<remote_id>(\S+)) +(.*)', line)
if q:
q_area = str(IPAddress(str(q.groupdict()['q_area'])))
q_remote_id = str(q.groupdict()['remote_id'])
# Check parameters match
if q_area == area and q_remote_id == sl_remote_id:
sl_local_id = str(q.groupdict()['local_id'])
break
# Set intf_name based on parsed values
if sl_local_id is not None:
intf_name = '{} {}'.format(sl_local_id, sl_remote_id)
# Get VRF information based on OSPF instance
cmd = 'show running-config | section router ospf {}'.format(instance)
out = self.device.execute(cmd)
for line in out.splitlines():
line = line.rstrip()
# Skip the show command line so as to not match
if re.search('show', line):
continue
# router ospf 1
# router ospf 2 vrf VRF1
p = re.search('router +ospf +(?P<instance>(\S+))'
'(?: +vrf +(?P<vrf>(\S+)))?', line)
if p:
p_instance = str(p.groupdict()['instance'])
if p_instance == instance:
if p.groupdict()['vrf']:
vrf = str(p.groupdict()['vrf'])
break
else:
vrf = 'default'
break
# Build dictionary
if 'vrf' not in ret_dict:
ret_dict['vrf'] = {}
if vrf not in ret_dict['vrf']:
ret_dict['vrf'][vrf] = {}
if 'address_family' not in ret_dict['vrf'][vrf]:
ret_dict['vrf'][vrf]['address_family'] = {}
if af not in ret_dict['vrf'][vrf]['address_family']:
ret_dict['vrf'][vrf]['address_family'][af] = {}
if 'instance' not in ret_dict['vrf'][vrf]['address_family'][af]:
ret_dict['vrf'][vrf]['address_family'][af]['instance'] = {}
if instance not in ret_dict['vrf'][vrf]['address_family'][af]\
['instance']:
ret_dict['vrf'][vrf]['address_family'][af]['instance']\
[instance] = {}
if 'areas' not in ret_dict['vrf'][vrf]['address_family']\
[af]['instance'][instance]:
ret_dict['vrf'][vrf]['address_family'][af]['instance']\
[instance]['areas'] = {}
if area not in ret_dict['vrf'][vrf]['address_family'][af]\
['instance'][instance]['areas']:
ret_dict['vrf'][vrf]['address_family'][af]['instance']\
[instance]['areas'][area] = {}
if intf_type not in ret_dict['vrf'][vrf]['address_family']\
[af]['instance'][instance]['areas'][area]:
ret_dict['vrf'][vrf]['address_family'][af]['instance']\
[instance]['areas'][area][intf_type] = {}
if intf_name not in ret_dict['vrf'][vrf]['address_family'][af]\
['instance'][instance]['areas'][area][intf_type]:
ret_dict['vrf'][vrf]['address_family'][af]['instance']\
[instance]['areas'][area][intf_type][intf_name] = {}
# Set sub_dict
sub_dict = ret_dict['vrf'][vrf]['address_family'][af]\
['instance'][instance]['areas'][area]\
[intf_type][intf_name]
# Delete variables to avoid overwrite issues for next intf
del area
del intf_name
# Set values found in this regex
sub_dict['router_id'] = router_id
sub_dict['interface_type'] = interface_type
if m.groupdict()['cost']:
sub_dict['cost'] = int(m.groupdict()['cost'])
                # Set default keys
sub_dict['demand_circuit'] = False
if 'bfd' not in sub_dict:
sub_dict['bfd'] = {}
sub_dict['bfd']['enable'] = False
# Set previously parsed keys
try:
sub_dict['name'] = name
del name
except Exception:
pass
try:
sub_dict['ip_address'] = ip_address
del ip_address
except Exception:
pass
try:
sub_dict['interface_id'] = intf_id
del intf_id
except Exception:
pass
try:
sub_dict['attached'] = attached
del attached
except Exception:
pass
try:
sub_dict['enable'] = bool_dict[enable]
except Exception:
pass
try:
sub_dict['line_protocol'] = bool_dict[line_protocol]
except Exception:
pass
continue
# Topology-MTID Cost Disabled Shutdown Topology Name
# 0 1 no no Base
p4 = re.compile(r'^(?P<mtid>(\d+)) +(?P<topo_cost>(\d+))'
' +(?P<disabled>(yes|no)) +(?P<shutdown>(yes|no))'
' +(?P<topo_name>(\S+))$')
m = p4.match(line)
if m:
mtid = int(m.groupdict()['mtid'])
if 'topology' not in sub_dict:
sub_dict['topology'] = {}
if mtid not in sub_dict['topology']:
sub_dict['topology'][mtid] = {}
sub_dict['topology'][mtid]['cost'] = int(m.groupdict()['topo_cost'])
sub_dict['topology'][mtid]['name'] = str(m.groupdict()['topo_name'])
if 'yes' in m.groupdict()['disabled']:
sub_dict['topology'][mtid]['disabled'] = True
else:
sub_dict['topology'][mtid]['disabled'] = False
if 'yes' in m.groupdict()['shutdown']:
sub_dict['topology'][mtid]['shutdown'] = True
else:
sub_dict['topology'][mtid]['shutdown'] = False
continue
# Configured as demand circuit
m = p5.match(line)
if m:
sub_dict['demand_circuit'] = True
continue
# Run as demand circuit
m = p6.match(line)
if m:
sub_dict['demand_circuit'] = True
continue
# DoNotAge LSA not allowed (Number of DCbitless LSA is 1).
m = p7.match(line)
if m:
sub_dict['donotage_lsa'] = False
sub_dict['total_dcbitless_lsa'] = int(m.groupdict()['num'])
continue
# Enabled by interface config, including secondary ip addresses
m = p8.match(line)
if m:
sub_dict['if_cfg'] = True
continue
# Transmit Delay is 1 sec, State POINT_TO_POINT
# Transmit Delay is 1 sec, State DR, Priority 1
# Transmit Delay is 1 sec, State DR, Priority 111, BFD enabled
m = p9.match(line)
if m:
sub_dict['transmit_delay'] = int(m.groupdict()['delay'])
state = str(m.groupdict()['state']).lower()
state = state.replace("_", "-")
sub_dict['state'] = state
if m.groupdict()['priority']:
sub_dict['priority'] = int(m.groupdict()['priority'])
if m.groupdict()['bfd']:
if 'bfd' not in sub_dict:
sub_dict['bfd'] = {}
if 'enabled' in m.groupdict()['bfd']:
sub_dict['bfd']['enable'] = True
else:
sub_dict['bfd']['enable'] = False
continue
# Designated Router (ID) 10.36.3.3, Interface address 10.2.3.3
m = p10.match(line)
if m:
sub_dict['dr_router_id'] = str(m.groupdict()['dr_router_id'])
sub_dict['dr_ip_addr'] = str(m.groupdict()['dr_ip_addr'])
continue
# Backup Designated router (ID) 10.16.2.2, Interface address 10.2.3.2
m = p11.match(line)
if m:
sub_dict['bdr_router_id'] = str(m.groupdict()['bdr_router_id'])
sub_dict['bdr_ip_addr'] = str(m.groupdict()['bdr_ip_addr'])
continue
# Timer intervals configured, Hello 10, Dead 40, Wait 40, Retransmit 5
m = p12.match(line)
if m:
sub_dict['hello_interval'] = int(m.groupdict()['hello'])
sub_dict['dead_interval'] = int(m.groupdict()['dead'])
sub_dict['wait_interval'] = int(m.groupdict()['wait'])
sub_dict['retransmit_interval'] = int(m.groupdict()['retransmit'])
continue
# oob-resync timeout 40
m = p12_1.match(line)
if m:
sub_dict['oob_resync_timeout'] = int(m.groupdict()['oob'])
continue
# Hello due in 00:00:00
m = p12_2.match(line)
if m:
sub_dict['passive'] = False
sub_dict['hello_timer'] = str(m.groupdict()['hello_timer'])
continue
# Supports Link-local Signaling (LLS)
m = p13.match(line)
if m:
sub_dict['lls'] = True
continue
# Cisco NSF helper support enabled
# IETF NSF helper support enabled
m = p14.match(line)
if m:
gr_type = str(m.groupdict()['gr_type']).lower()
if 'graceful_restart' not in sub_dict:
sub_dict['graceful_restart'] = {}
if gr_type not in sub_dict['graceful_restart']:
sub_dict['graceful_restart'][gr_type] = {}
sub_dict['graceful_restart'][gr_type]['type'] = gr_type
if 'enabled' in m.groupdict()['helper']:
sub_dict['graceful_restart'][gr_type]['helper'] = True
else:
sub_dict['graceful_restart'][gr_type]['helper'] = False
continue
# Index 2/2, flood queue length 0
m = p15.match(line)
if m:
sub_dict['index'] = str(m.groupdict()['index'])
sub_dict['flood_queue_length'] = int(m.groupdict()['length'])
continue
# Next 0(0)/0(0)
m = p16.match(line)
if m:
sub_dict['next'] = str(m.groupdict()['next'])
continue
# Last flood scan length is 0, maximum is 11
m = p17.match(line)
if m:
sub_dict['last_flood_scan_length'] = int(m.groupdict()['num'])
sub_dict['max_flood_scan_length'] = int(m.groupdict()['max'])
continue
# Last flood scan time is 0 msec, maximum is 1 msec
m = p18.match(line)
if m:
sub_dict['last_flood_scan_time_msec'] = \
int(m.groupdict()['time1'])
sub_dict['max_flood_scan_time_msec'] = \
int(m.groupdict()['time2'])
continue
# Neighbor Count is 1, Adjacent neighbor count is 1
m = p19.match(line)
if m:
if 'statistics' not in sub_dict:
sub_dict['statistics'] = {}
sub_dict['statistics']['nbr_count'] = \
int(m.groupdict()['nbr_count'])
sub_dict['statistics']['adj_nbr_count'] = \
int(m.groupdict()['adj_nbr_count'])
continue
# Adjacent with neighbor 10.16.2.2 (Backup Designated Router)
m = p20_1.match(line)
if m:
neighbor = str(m.groupdict()['nbr'])
if 'neighbors' not in sub_dict:
sub_dict['neighbors'] = {}
if neighbor not in sub_dict['neighbors']:
sub_dict['neighbors'][neighbor] = {}
sub_dict['neighbors'][neighbor]['bdr_router_id'] = neighbor
continue
# Adjacent with neighbor 10.36.3.3 (Designated Router)
m = p20_2.match(line)
if m:
neighbor = str(m.groupdict()['nbr'])
if 'neighbors' not in sub_dict:
sub_dict['neighbors'] = {}
if neighbor not in sub_dict['neighbors']:
sub_dict['neighbors'][neighbor] = {}
sub_dict['neighbors'][neighbor]['dr_router_id'] = neighbor
continue
# Adjacent with neighbor 10.64.4.4 (Hello suppressed)
m = p20_3.match(line)
if m:
neighbor = str(m.groupdict()['nbr'])
if 'neighbors' not in sub_dict:
sub_dict['neighbors'] = {}
if neighbor not in sub_dict['neighbors']:
sub_dict['neighbors'][neighbor] = {}
continue
# Suppress hello for 0 neighbor(s)
m = p21.match(line)
if m:
if 'statistics' not in sub_dict:
sub_dict['statistics'] = {}
sub_dict['statistics']['num_nbrs_suppress_hello'] = \
int(m.groupdict()['sup'])
continue
# Loopback interface is treated as a stub Host
m = p22.match(line)
if m:
sub_dict['stub_host'] = True
continue
# Can be protected by per-prefix Loop-Free FastReroute
m = p23.match(line)
if m:
sub_dict['ipfrr_protected'] = True
continue
# Can be used for per-prefix Loop-Free FastReroute repair paths
m = p24.match(line)
if m:
sub_dict['ipfrr_candidate'] = True
continue
# Not Protected by per-prefix TI-LFA
m = p25.match(line)
if m:
sub_dict['ti_lfa_protected'] = False
continue
# Prefix-suppression is enabled
m = p26.match(line)
if m:
if 'enabled' in m.groupdict()['ps']:
sub_dict['prefix_suppression'] = True
else:
sub_dict['prefix_suppression'] = False
# Strict TTL checking enabled, up to 3 hops allowed
m = p27.match(line)
if m:
if 'ttl_security' not in sub_dict:
sub_dict['ttl_security'] = {}
if 'enabled' in m.groupdict()['strict_ttl']:
sub_dict['ttl_security']['enable'] = True
else:
sub_dict['ttl_security']['enable'] = False
if m.groupdict()['hops']:
sub_dict['ttl_security']['hops'] = int(m.groupdict()['hops'])
continue
# Simple password authentication enabled
m = p28_1.match(line)
if m:
if 'authentication' not in sub_dict:
sub_dict['authentication'] = {}
if 'auth_trailer_key' not in sub_dict['authentication']:
sub_dict['authentication']['auth_trailer_key'] = {}
sub_dict['authentication']['auth_trailer_key']\
['crypto_algorithm'] = 'simple'
continue
# Cryptographic authentication enabled
m = p28_2.match(line)
if m:
if 'authentication' not in sub_dict:
sub_dict['authentication'] = {}
if 'auth_trailer_key' not in sub_dict['authentication']:
sub_dict['authentication']['auth_trailer_key'] = {}
sub_dict['authentication']['auth_trailer_key']\
['crypto_algorithm'] = 'md5'
continue
# Youngest key id is 2
m = p28_3.match(line)
if m:
if 'authentication' not in sub_dict:
sub_dict['authentication'] = {}
if 'auth_trailer_key' not in sub_dict['authentication']:
sub_dict['authentication']['auth_trailer_key'] = {}
sub_dict['authentication']['auth_trailer_key']\
['youngest_key_id'] = int(m.groupdict()['id'])
continue
# Rollover in progress, 1 neighbor(s) using the old key(s):
m = p28_4.match(line)
if m:
continue
# key id 1 algorithm MD5
m = p28_5.match(line)
if m:
if 'authentication' not in sub_dict:
sub_dict['authentication'] = {}
if 'auth_trailer_key' not in sub_dict['authentication']:
sub_dict['authentication']['auth_trailer_key'] = {}
sub_dict['authentication']['auth_trailer_key']\
['crypto_algorithm'] = 'md5'
continue
# Segment Routing enabled for MPLS forwarding
m = p29.match(line)
if m:
sub_dict.update({'sr_mpls_enabled': True})
continue
# TEAPP:
m = p30.match(line)
if m:
teapp_dict = sub_dict.setdefault('teapp', {})
continue
# Topology Id:0x0
m = p30_1.match(line)
if m:
topology_id = m.groupdict()['topology_id']
teapp_dict = sub_dict.setdefault('teapp', {})
teapp_dict.update({'topology_id': topology_id})
continue
# TEAPP:SRTE
m = p30_2.match(line)
if m:
teapp = m.groupdict()['teapp']
teapp_dict = sub_dict.setdefault('teapp', {})
item_dict = teapp_dict.setdefault(teapp, {})
continue
# Affinity: length 32, bits 0x00000010
m = p30_3.match(line)
if m:
length = int(m.groupdict()['length'])
bits = m.groupdict()['bits']
aff_dict = item_dict.setdefault('affinity', {})
aff_dict.update({'length': length})
aff_dict.update({'bits': bits})
continue
# Extended affinity: length 32, bits 0x00000010
m = p30_4.match(line)
if m:
length = int(m.groupdict()['length'])
bits = m.groupdict()['bits']
exa_dict = item_dict.setdefault('extended_affinity', {})
exa_dict.update({'length': length})
exa_dict.update({'bits': bits})
continue
# SR Policy Manager:
m = p31.match(line)
if m:
mgn_dict = sub_dict.setdefault('sr_policy_manager', {})
continue
# TE Opaque LSA: Source of link information OSPF
m = p31_1.match(line)
if m:
mgn_dict.update({'te_opaque_lsa': m.groupdict()['te_opaque_lsa']})
return ret_dict
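# Illustrative usage (a sketch; assumes a connected pyATS/Genie device object
# named "device", since the parser issues follow-up show commands to resolve
# sham-link/virtual-link names and the owning VRF):
#
#   parsed = device.parse('show ip ospf interface GigabitEthernet2')
#   # or, instantiating the class directly:
#   parsed = ShowIpOspfInterface(device=device).parse(interface='GigabitEthernet2')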
# ================================
# Super parser for:
# * 'show ip ospf virtual-links'
# * 'show ip ospf sham-links'
# ================================
class ShowIpOspfLinksParser(MetaParser):
''' Parser for:
* 'show ip ospf virtual-links'
* 'show ip ospf sham-links'
'''
def cli(self, cmd, link_type,output=None):
assert link_type in ['virtual_links', 'sham_links']
if output is None:
out = self.device.execute(cmd)
else:
out = output
# Init vars
ret_dict = {}
af = 'ipv4'
        # crypto_algorithm dict
crypto_dict = {'cryptographic': 'md5', 'simple password': 'simple'}
p1 = re.compile(r'^(Virtual|Sham) +Link +(?P<interface>(\S+)) +to'
' +(address|router) +(?P<address>(\S+)) +is'
' +(?P<link_state>(up|down))$')
p2 = re.compile(r'^Area +(?P<area>(\S+)),? +source +address'
' +(?P<source_address>(\S+))$')
p3 = re.compile(r'^Run +as +demand +circuit$')
p4 = re.compile(r'^DoNotAge +LSA +not +allowed'
' +\(Number +of +DCbitless +LSA +is +(?P<dcbitless>(\d+))\).'
'(?: +Cost +of +using +(?P<cost>(\d+)))?'
'(?: State +(?P<state>(\S+)))?$')
p5 = re.compile(r'^Transit +area +(?P<area>(\S+)),'
'(?: +via +interface +(?P<intf>(\S+)))?$')
p6 = re.compile(r'^(?P<mtid>(\d+)) +(?P<topo_cost>(\d+))'
' +(?P<disabled>(yes|no)) +(?P<shutdown>(yes|no))'
' +(?P<topo_name>(\S+))$')
p7 = re.compile(r'^Transmit +Delay +is +(?P<transmit_delay>(\d+))'
' +sec, +State +(?P<state>(\S+)),?$')
p8 = re.compile(r'^Timer +intervals +configured,'
' +Hello +(?P<hello>(\d+)),'
' +Dead +(?P<dead>(\d+)),'
' +Wait +(?P<wait>(\d+)),'
'(?: +Retransmit +(?P<retransmit>(\d+)))?$')
p9 = re.compile(r'^Strict +TTL +checking'
' +(?P<strict_ttl>(enabled|disabled))'
'(?:, +up +to +(?P<hops>(\d+)) +hops +allowed)?$')
p10 = re.compile(r'^Hello +due +in +(?P<hello_timer>(\S+))$')
p11 = re.compile(r'^Adjacency +State +(?P<adj_state>(\S+))$')
p12 = re.compile(r'^Index +(?P<index>(\S+)), +retransmission +queue'
' +length +(?P<length>(\d+)), +number +of'
' +retransmission +(?P<retrans>(\d+))$')
p13 = re.compile(r'^First +(?P<first>(\S+)) +Next +(?P<next>(\S+))$')
p14 = re.compile(r'^Last +retransmission +scan +length +is'
' +(?P<len>(\d+)), +maximum +is +(?P<max>(\d+))$')
p15 = re.compile(r'^Last +retransmission +scan +time +is'
' +(?P<time>(\d+)) +msec, +maximum +is'
' +(?P<max>(\d+)) +msec$')
for line in out.splitlines():
line = line.strip()
# Sham Link OSPF_SL0 to address 10.151.22.22 is up
# Virtual Link OSPF_VL0 to router 10.64.4.4 is up
m = p1.match(line)
if m:
address = str(m.groupdict()['address'])
sl_remote_id = vl_router_id = address
interface = str(m.groupdict()['interface'])
link_state = str(m.groupdict()['link_state'])
instance = None
# Get link name
n = re.match('(?P<ignore>\S+)_(?P<name>(S|V)L(\d+))', interface)
if n:
real_link_name = str(n.groupdict()['name'])
else:
real_link_name = interface
# Get OSPF process ID from 'show ip ospf interface'
cmd = 'show ip ospf interface | section {}'.format(interface)
out = self.device.execute(cmd)
for line in out.splitlines():
line = line.rstrip()
# Process ID 2, Router ID 10.229.11.11, Network Type SHAM_LINK, Cost: 111
p = re.search('Process +ID +(?P<instance>(\S+)), +Router'
' +(.*)', line)
if p:
instance = str(p.groupdict()['instance'])
break
# Get VRF information using the ospf instance
if instance is not None:
cmd = 'show running-config | section router ospf {}'.format(instance)
out = self.device.execute(cmd)
for line in out.splitlines():
line = line.rstrip()
# Skip the show command line so as to not match
if re.search('show', line):
continue
# router ospf 1
# router ospf 2 vrf VRF1
p = re.search('router +ospf +(?P<instance>(\S+))'
'(?: +vrf +(?P<vrf>(\S+)))?', line)
if p:
p_instance = str(p.groupdict()['instance'])
if p_instance == instance:
if p.groupdict()['vrf']:
vrf = str(p.groupdict()['vrf'])
break
else:
vrf = 'default'
break
# Build dict
if 'vrf' not in ret_dict:
ret_dict['vrf'] = {}
if vrf not in ret_dict['vrf']:
ret_dict['vrf'][vrf] = {}
if 'address_family' not in ret_dict['vrf'][vrf]:
ret_dict['vrf'][vrf]['address_family'] = {}
if af not in ret_dict['vrf'][vrf]['address_family']:
ret_dict['vrf'][vrf]['address_family'][af] = {}
if 'instance' not in ret_dict['vrf'][vrf]['address_family'][af]:
ret_dict['vrf'][vrf]['address_family'][af]['instance'] = {}
if instance not in ret_dict['vrf'][vrf]['address_family'][af]\
['instance']:
ret_dict['vrf'][vrf]['address_family'][af]['instance']\
[instance] = {}
continue
# Area 1, source address 10.21.33.33
# Area 1 source address 10.229.11.11
m = p2.match(line)
if m:
area = str(IPAddress(str(m.groupdict()['area'])))
source_address = str(m.groupdict()['source_address'])
# Set link_name for sham_link
link_name = '{} {}'.format(source_address, sl_remote_id)
# Build dict
if 'areas' not in ret_dict['vrf'][vrf]['address_family'][af]\
['instance'][instance]:
ret_dict['vrf'][vrf]['address_family'][af]['instance']\
[instance]['areas'] = {}
if area not in ret_dict['vrf'][vrf]['address_family'][af]\
['instance'][instance]['areas']:
ret_dict['vrf'][vrf]['address_family'][af]['instance']\
[instance]['areas'][area] = {}
if link_type not in ret_dict['vrf'][vrf]['address_family'][af]\
['instance'][instance]['areas'][area]:
ret_dict['vrf'][vrf]['address_family'][af]['instance']\
[instance]['areas'][area][link_type] = {}
if link_name not in ret_dict['vrf'][vrf]['address_family'][af]\
['instance'][instance]['areas'][area][link_type]:
ret_dict['vrf'][vrf]['address_family'][af]['instance']\
[instance]['areas'][area][link_type][link_name] = {}
# Set sub_dict
sub_dict = ret_dict['vrf'][vrf]['address_family'][af]\
['instance'][instance]['areas'][area]\
[link_type][link_name]
# Set values
sub_dict['transit_area_id'] = area
sub_dict['local_id'] = source_address
sub_dict['demand_circuit'] = False
# Set previously parsed values
try:
sub_dict['name'] = real_link_name
sub_dict['remote_id'] = sl_remote_id
sub_dict['link_state'] = link_state
except Exception:
pass
continue
# Run as demand circuit
m = p3.match(line)
if m:
if link_type == 'sham_links':
sub_dict['demand_circuit'] = True
else:
demand_circuit = True
continue
# DoNotAge LSA not allowed (Number of DCbitless LSA is 7).
# DoNotAge LSA not allowed (Number of DCbitless LSA is 1). Cost of using 111 State POINT_TO_POINT,
m = p4.match(line)
if m:
dcbitless_lsa_count = int(m.groupdict()['dcbitless'])
donotage_lsa = 'not allowed'
if m.groupdict()['cost']:
cost = int(m.groupdict()['cost'])
if m.groupdict()['state']:
link_state = str(m.groupdict()['state']).lower()
# Set values for sham_links
if link_type == 'sham_links':
sub_dict['dcbitless_lsa_count'] = dcbitless_lsa_count
sub_dict['donotage_lsa'] = donotage_lsa
if m.groupdict()['cost']:
sub_dict['cost'] = cost
if m.groupdict()['state']:
sub_dict['state'] = link_state
continue
# Transit area 1
# Transit area 1, via interface GigabitEthernet0/1
m = p5.match(line)
if m:
area = str(IPAddress(str(m.groupdict()['area'])))
# Set link_name for virtual_link
link_name = '{} {}'.format(area, vl_router_id)
# Create dict
if 'areas' not in ret_dict['vrf'][vrf]['address_family'][af]\
['instance'][instance]:
ret_dict['vrf'][vrf]['address_family'][af]['instance']\
[instance]['areas'] = {}
if area not in ret_dict['vrf'][vrf]['address_family'][af]\
['instance'][instance]['areas']:
ret_dict['vrf'][vrf]['address_family'][af]['instance']\
[instance]['areas'][area] = {}
if link_type not in ret_dict['vrf'][vrf]['address_family'][af]\
['instance'][instance]['areas'][area]:
ret_dict['vrf'][vrf]['address_family'][af]['instance']\
[instance]['areas'][area][link_type] = {}
if link_name not in ret_dict['vrf'][vrf]['address_family'][af]\
['instance'][instance]['areas'][area][link_type]:
ret_dict['vrf'][vrf]['address_family'][af]['instance']\
[instance]['areas'][area][link_type][link_name] = {}
# Set sub_dict
sub_dict = ret_dict['vrf'][vrf]['address_family'][af]\
['instance'][instance]['areas'][area]\
[link_type][link_name]
# Set values
sub_dict['transit_area_id'] = area
sub_dict['demand_circuit'] = False
if m.groupdict()['intf']:
sub_dict['interface'] = str(m.groupdict()['intf'])
# Set previously parsed values
try:
sub_dict['name'] = real_link_name
except Exception:
pass
try:
sub_dict['router_id'] = vl_router_id
except Exception:
pass
try:
sub_dict['dcbitless_lsa_count'] = dcbitless_lsa_count
except Exception:
pass
try:
sub_dict['donotage_lsa'] = donotage_lsa
except Exception:
pass
try:
sub_dict['demand_circuit'] = demand_circuit
except Exception:
pass
try:
sub_dict['link_state'] = link_state
except Exception:
pass
continue
# Topology-MTID Cost Disabled Shutdown Topology Name
# 0 1 no no Base
m = p6.match(line)
if m:
mtid = int(m.groupdict()['mtid'])
if 'topology' not in sub_dict:
sub_dict['topology'] = {}
if mtid not in sub_dict['topology']:
sub_dict['topology'][mtid] = {}
sub_dict['topology'][mtid]['cost'] = int(m.groupdict()['topo_cost'])
sub_dict['topology'][mtid]['name'] = str(m.groupdict()['topo_name'])
if 'yes' in m.groupdict()['disabled']:
sub_dict['topology'][mtid]['disabled'] = True
else:
sub_dict['topology'][mtid]['disabled'] = False
if 'yes' in m.groupdict()['shutdown']:
sub_dict['topology'][mtid]['shutdown'] = True
else:
sub_dict['topology'][mtid]['shutdown'] = False
continue
# Transmit Delay is 1 sec, State POINT_TO_POINT,
m = p7.match(line)
if m:
sub_dict['transmit_delay'] = int(m.groupdict()['transmit_delay'])
state = str(m.groupdict()['state']).lower()
state = state.replace("_", "-")
sub_dict['state'] = state
continue
# Timer intervals configured, Hello 3, Dead 13, Wait 13, Retransmit 5
# Timer intervals configured, Hello 4, Dead 16, Wait 16, Retransmit 44
# Timer intervals configured, Hello 10, Dead 40, Wait 40,
m = p8.match(line)
if m:
if m.groupdict()['hello']:
sub_dict['hello_interval'] = int(m.groupdict()['hello'])
if m.groupdict()['dead']:
sub_dict['dead_interval'] = int(m.groupdict()['dead'])
if m.groupdict()['wait']:
sub_dict['wait_interval'] = int(m.groupdict()['wait'])
if m.groupdict()['retransmit']:
sub_dict['retransmit_interval'] = int(m.groupdict()['retransmit'])
continue
# Strict TTL checking enabled, up to 3 hops allowed
m = p9.match(line)
if m:
if 'ttl_security' not in sub_dict:
sub_dict['ttl_security'] = {}
if 'enabled' in m.groupdict()['strict_ttl']:
sub_dict['ttl_security']['enable'] = True
else:
sub_dict['ttl_security']['enable'] = False
if m.groupdict()['hops']:
sub_dict['ttl_security']['hops'] = int(m.groupdict()['hops'])
continue
# Hello due in 00:00:03:179
m = p10.match(line)
if m:
sub_dict['hello_timer'] = str(m.groupdict()['hello_timer'])
continue
# Adjacency State FULL
m = p11.match(line)
if m:
sub_dict['adjacency_state'] = str(m.groupdict()['adj_state']).lower()
continue
# Index 1/2/2, retransmission queue length 0, number of retransmission 2
m = p12.match(line)
if m:
sub_dict['index'] = str(m.groupdict()['index'])
sub_dict['retrans_qlen'] = int(m.groupdict()['length'])
sub_dict['total_retransmission'] = int(m.groupdict()['retrans'])
continue
# First 0x0(0)/0x0(0)/0x0(0) Next 0x0(0)/0x0(0)/0x0(0)
m = p13.match(line)
if m:
sub_dict['first'] = str(m.groupdict()['first'])
sub_dict['next'] = str(m.groupdict()['next'])
continue
# Last retransmission scan length is 1, maximum is 1
m = p14.match(line)
if m:
sub_dict['last_retransmission_scan_length'] = \
int(m.groupdict()['len'])
sub_dict['last_retransmission_max_length'] = \
int(m.groupdict()['max'])
continue
# Last retransmission scan time is 0 msec, maximum is 0 msec
m = p15.match(line)
if m:
sub_dict['last_retransmission_scan_time'] = \
int(m.groupdict()['time'])
sub_dict['last_retransmission_max_scan'] = \
int(m.groupdict()['max'])
continue
return ret_dict
# =============================
# Schema for:
# * 'show ip ospf sham-links'
# =============================
class ShowIpOspfShamLinksSchema(MetaParser):
''' Schema for:
* 'show ip ospf sham-links'
'''
schema = {
'vrf':
{Any():
{'address_family':
{Any():
{'instance':
{Any():
{'areas':
{Any():
{'sham_links':
{Any():
{'name': str,
'link_state': str,
'local_id': str,
'remote_id': str,
'transit_area_id': str,
Optional('hello_interval'): int,
Optional('dead_interval'): int,
Optional('wait_interval'): int,
Optional('retransmit_interval'): int,
Optional('transmit_delay'): int,
'cost': int,
'state': str,
Optional('hello_timer'): str,
Optional('demand_circuit'): bool,
Optional('dcbitless_lsa_count'): int,
Optional('donotage_lsa'): str,
Optional('adjacency_state'): str,
Optional('ttl_security'):
{'enable': bool,
Optional('hops'): int},
Optional('index'): str,
Optional('first'): str,
Optional('next'): str,
Optional('last_retransmission_max_length'): int,
Optional('last_retransmission_max_scan'): int,
Optional('last_retransmission_scan_length'): int,
Optional('last_retransmission_scan_time'): int,
Optional('total_retransmission'): int,
Optional('retrans_qlen'): int,
Optional('topology'):
{Any():
{'cost': int,
'disabled': bool,
'shutdown': bool,
'name': str,
},
},
},
},
},
},
},
},
},
},
},
},
}
# =============================
# Parser for:
# * 'show ip ospf sham-links'
# =============================
class ShowIpOspfShamLinks(ShowIpOspfShamLinksSchema, ShowIpOspfLinksParser):
''' Parser for:
* 'show ip ospf sham-links'
'''
cli_command = 'show ip ospf sham-links'
def cli(self, output=None):
return super().cli(cmd=self.cli_command, link_type='sham_links',output=output)
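# Illustrative usage (a sketch; assumes a connected device, since the shared
# links parser issues follow-up show commands to resolve the VRF and link IDs):
#
#   parsed = device.parse('show ip ospf sham-links')
#   # sham links are keyed as '<local_id> <remote_id>' under
#   # ['vrf'][vrf]['address_family']['ipv4']['instance'][pid]['areas'][area]['sham_links']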
# ================================
# Schema for:
# * 'show ip ospf virtual-links'
# ================================
class ShowIpOspfVirtualLinksSchema(MetaParser):
''' Schema for:
* 'show ip ospf virtual-links'
'''
schema = {
'vrf':
{Any():
{'address_family':
{Any():
{'instance':
{Any():
{'areas':
{Any():
{'virtual_links':
{Any():
{'name': str,
'link_state': str,
'router_id': str,
'transit_area_id': str,
Optional('hello_interval'): int,
Optional('dead_interval'): int,
Optional('wait_interval'): int,
Optional('retransmit_interval'): int,
'transmit_delay': int,
'state': str,
'demand_circuit': bool,
Optional('cost'): int,
Optional('hello_timer'): str,
Optional('interface'): str,
Optional('dcbitless_lsa_count'): int,
Optional('donotage_lsa'): str,
Optional('adjacency_state'): str,
Optional('ttl_security'):
{'enable': bool,
Optional('hops'): int},
Optional('index'): str,
Optional('first'): str,
Optional('next'): str,
Optional('last_retransmission_max_length'): int,
Optional('last_retransmission_max_scan'): int,
Optional('last_retransmission_scan_length'): int,
Optional('last_retransmission_scan_time'): int,
Optional('total_retransmission'): int,
Optional('retrans_qlen'): int,
Optional('topology'):
{Any():
{'cost': int,
'disabled': bool,
'shutdown': bool,
'name': str,
},
},
},
},
},
},
},
},
},
},
},
},
}
# ================================
# Parser for:
# * 'show ip ospf virtual-links'
# ================================
class ShowIpOspfVirtualLinks(ShowIpOspfVirtualLinksSchema, ShowIpOspfLinksParser):
''' Parser for:
* 'show ip ospf virtual-links'
'''
cli_command = 'show ip ospf virtual-links'
def cli(self, output=None):
return super().cli(cmd=self.cli_command, link_type='virtual_links', output=output)
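# Illustrative usage (a sketch; assumes a connected device):
#
#   parsed = device.parse('show ip ospf virtual-links')
#   # virtual links are keyed as '<transit_area_id> <router_id>' under
#   # ['vrf'][vrf]['address_family']['ipv4']['instance'][pid]['areas'][area]['virtual_links']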
# ==================================
# Schema for:
# * 'show ip ospf neighbor detail'
# =========================================
class ShowIpOspfNeighborDetailSchema(MetaParser):
''' Schema for:
* 'show ip ospf neighbor detail'
'''
schema = {
'vrf':
{Any():
{'address_family':
{Any():
{'instance':
{Any():
{'areas':
{Any():
{Optional('interfaces'):
{Any():
{'neighbors':
{Any():
{'neighbor_router_id': str,
'address': str,
'interface': str,
'priority': int,
'state': str,
'dr_ip_addr': str,
'bdr_ip_addr': str,
Optional('interface_id'): str,
Optional('hello_options'): str,
Optional('sr_adj_label'): str,
Optional('dbd_options'): str,
Optional('dead_timer'): str,
Optional('uptime'): str,
Optional('index'): str,
Optional('first'): str,
Optional('next'): str,
Optional('ls_ack_list'): str,
Optional('statistics'):
{Optional('nbr_event_count'): int,
Optional('nbr_retrans_qlen'): int,
Optional('total_retransmission'): int,
Optional('last_retrans_scan_length'): int,
Optional('last_retrans_max_scan_length'): int,
Optional('last_retrans_scan_time_msec'): int,
Optional('last_retrans_max_scan_time_msec'): int},
},
},
},
},
Optional('sham_links'):
{Any():
{'neighbors':
{Any():
{'neighbor_router_id': str,
'address': str,
'interface': str,
'priority': int,
'state': str,
'dr_ip_addr': str,
'bdr_ip_addr': str,
Optional('interface_id'): str,
Optional('hello_options'): str,
Optional('dbd_options'): str,
Optional('dead_timer'): str,
Optional('uptime'): str,
Optional('index'): str,
Optional('first'): str,
Optional('next'): str,
Optional('ls_ack_list'): str,
Optional('statistics'):
{Optional('nbr_event_count'): int,
Optional('nbr_retrans_qlen'): int,
Optional('total_retransmission'): int,
Optional('last_retrans_scan_length'): int,
Optional('last_retrans_max_scan_length'): int,
Optional('last_retrans_scan_time_msec'): int,
Optional('last_retrans_max_scan_time_msec'): int},
},
},
},
},
Optional('virtual_links'):
{Any():
{'neighbors':
{Any():
{'neighbor_router_id': str,
'address': str,
'interface': str,
'priority': int,
'state': str,
'dr_ip_addr': str,
'bdr_ip_addr': str,
Optional('interface_id'): str,
Optional('hello_options'): str,
Optional('dbd_options'): str,
Optional('dead_timer'): str,
Optional('uptime'): str,
Optional('index'): str,
Optional('first'): str,
Optional('next'): str,
Optional('ls_ack_list'): str,
Optional('statistics'):
{Optional('nbr_event_count'): int,
Optional('nbr_retrans_qlen'): int,
Optional('total_retransmission'): int,
Optional('last_retrans_scan_length'): int,
Optional('last_retrans_max_scan_length'): int,
Optional('last_retrans_scan_time_msec'): int,
Optional('last_retrans_max_scan_time_msec'): int},
},
},
},
},
},
},
},
},
},
},
},
},
}
# ================================
# Parser for:
# * 'show ip ospf neighbor detail'
# ================================
class ShowIpOspfNeighborDetail(ShowIpOspfNeighborDetailSchema):
''' Parser for:
* 'show ip ospf neighbor detail'
'''
cli_command = ['show ip ospf neighbor detail', 'show ip ospf neighbor {neighbor} detail']
exclude = ['hello_timer', 'dead_timer', 'bdr_ip_addr',
'bdr_router_id', 'index', 'last_retrans_max_scan_length',
'last_retrans_max_scan_time_msec', 'total_retransmission',
'uptime', 'last_retrans_scan_length', 'last_retrans_scan_time_msec']
def cli(self, neighbor='', output=None):
if output is None:
# Execute command on device
if neighbor:
out = self.device.execute(self.cli_command[1].format(neighbor=neighbor))
else:
out = self.device.execute(self.cli_command[0])
else:
out = output
# Init vars
ret_dict = {}
af = 'ipv4' # this is ospf - always ipv4
p1 = re.compile(r'^Neighbor +(?P<neighbor>(\S+)), +interface'
' +address +(?P<address>(\S+))'
'(?:, +interface-id +(?P<intf_id>(\S+)))?$')
p2 = re.compile(r'^In +the +area +(?P<area>(\S+)) +via +interface'
' +(?P<interface>(\S+))$')
p3 = re.compile(r'^Neighbor +priority +is +(?P<priority>(\d+)),'
' +State +is +(?P<state>(\S+)),'
' +(?P<num>(\d+)) +state +changes$')
p4 = re.compile(r'^DR +is +(?P<dr_ip_addr>(\S+))'
' +BDR +is +(?P<bdr_ip_addr>(\S+))$')
p5 = re.compile(r'^Options +is +(?P<options>(\S+)) +in +Hello'
' +\(E-bit\)$')
p6 = re.compile(r'^Options +is +(?P<options>(\S+)) +in +DBD'
' +\(E-bit, O-bit\)$')
p7 = re.compile(r'^Dead +timer +due +in +(?P<dead_timer>(\S+))$')
p8 = re.compile(r'^Neighbor +is +up +for +(?P<uptime>(\S+))$')
p9 = re.compile(r'^Index +(?P<index>(\S+)) +retransmission +queue'
' +length +(?P<ql>(\d+)), +number +of'
' +retransmission +(?P<num_retrans>(\d+))$')
p10 = re.compile(r'^First +(?P<first>(\S+)) +Next +(?P<next>(\S+))$')
p11 = re.compile(r'^Last +retransmission +scan +length +is'
' +(?P<num1>(\d+)), +maximum +is'
' +(?P<num2>(\d+))$')
p12 = re.compile(r'^Last +retransmission +scan +time +is'
' +(?P<num1>(\d+)) +msec, +maximum +is'
' +(?P<num2>(\d+)) +msec$')
p13 = re.compile(r'^SR +adj +label +(?P<sr_adj_label>\d+)$')
for line in out.splitlines():
line = line.strip()
# Neighbor 10.16.2.2, interface address 10.1.2.2
# Neighbor 192.168.111.1, interface address 192.168.70.1, interface-id 192
# Neighbor 192.168.255.9, interface address 10.0.109.9, interface-id unknown
m = p1.match(line)
if m:
neighbor = str(m.groupdict()['neighbor'])
address = str(m.groupdict()['address'])
if m.groupdict()['intf_id']:
interface_id = str(m.groupdict()['intf_id'])
continue
# In the area 0 via interface GigabitEthernet2
m = p2.match(line)
if m:
area = str(IPAddress(str(m.groupdict()['area'])))
interface = str(m.groupdict()['interface'])
instance = None
router_id = None
# Get OSPF process ID from 'show ip ospf interface'
cmd = 'show ip ospf interface | section {}'.format(interface)
out = self.device.execute(cmd)
for line in out.splitlines():
line = line.rstrip()
# Process ID 2, Router ID 10.229.11.11, Network Type SHAM_LINK, Cost: 111
p = re.search('Process +ID +(?P<instance>(\S+)), +Router +ID'
' +(?P<router_id>(\S+)) +(.*)', line)
if p:
instance = str(p.groupdict()['instance'])
router_id = str(p.groupdict()['router_id'])
break
# Get VRF information using the ospf instance
if instance is not None:
cmd = 'show running-config | section router ospf {}'.format(instance)
out = self.device.execute(cmd)
for line in out.splitlines():
line = line.rstrip()
# Skip the show command line so as to not match
if re.search('show', line):
continue
# router ospf 1
# router ospf 2 vrf VRF1
p = re.search('router +ospf +(?P<instance>(\S+))'
'(?: +vrf +(?P<vrf>(\S+)))?', line)
if p:
p_instance = str(p.groupdict()['instance'])
if p_instance == instance:
if p.groupdict()['vrf']:
vrf = str(p.groupdict()['vrf'])
break
else:
vrf = 'default'
break
# Build dict
if 'vrf' not in ret_dict:
ret_dict['vrf'] = {}
if vrf not in ret_dict['vrf']:
ret_dict['vrf'][vrf] = {}
if 'address_family' not in ret_dict['vrf'][vrf]:
ret_dict['vrf'][vrf]['address_family'] = {}
if af not in ret_dict['vrf'][vrf]['address_family']:
ret_dict['vrf'][vrf]['address_family'][af] = {}
if 'instance' not in ret_dict['vrf'][vrf]['address_family'][af]:
ret_dict['vrf'][vrf]['address_family'][af]['instance'] = {}
if instance not in ret_dict['vrf'][vrf]['address_family'][af]\
['instance']:
ret_dict['vrf'][vrf]['address_family'][af]['instance']\
[instance] = {}
# Determine if 'interface' or 'virtual_links' or 'sham_links'
if re.search('VL', interface):
# Init
intf_type = 'virtual_links'
vl_addr = None
vl_transit_area_id = None
# Execute command to get virtual-link address
cmd = 'show ip ospf virtual-links | i {interface}'.format(interface=interface)
out = self.device.execute(cmd)
for line in out.splitlines():
line = line.rstrip()
# Virtual Link OSPF_VL0 to router 10.100.5.5 is down
p = re.search('Virtual +Link +(?P<intf>(\S+)) +to +router'
' +(?P<address>(\S+)) +is +(up|down)'
'(?:.*)?', line)
if p:
if interface == str(p.groupdict()['intf']):
vl_addr = str(p.groupdict()['address'])
break
# Execute command to get virtual-link transit_area_id
if vl_addr is not None and router_id is not None:
cmd = 'show running-config | i virtual-link | i {addr}'.format(addr=vl_addr)
out = self.device.execute(cmd)
for line in out.splitlines():
line = line.rstrip()
# area 1 virtual-link 10.100.5.5
q = re.search('area +(?P<q_area>(\d+)) +virtual-link'
' +(?P<addr>(\S+))(?: +(.*))?', line)
if q:
q_addr = str(q.groupdict()['addr'])
# Check parameters match
if q_addr == vl_addr:
vl_transit_area_id = str(IPAddress(str(q.groupdict()['q_area'])))
break
if vl_transit_area_id is not None:
intf_name = '{} {}'.format(vl_transit_area_id, router_id)
area = vl_transit_area_id
elif re.search('SL', interface):
# Init
intf_type = 'sham_links'
sl_local_id = None
sl_remote_id = None
# Execute command to get sham-link remote_id
cmd = 'show ip ospf sham-links | i {interface}'.format(interface=interface)
out = self.device.execute(cmd)
for line in out.splitlines():
line = line.rstrip()
# Sham Link OSPF_SL1 to address 10.151.22.22 is up
p = re.search('Sham +Link +(?P<intf>(\S+)) +to +address'
' +(?P<remote>(\S+)) +is +(up|down)', line)
if p:
if interface == str(p.groupdict()['intf']):
sl_remote_id = str(p.groupdict()['remote'])
break
# Execute command to get sham-link local_id
if sl_remote_id is not None:
cmd = 'show running-config | i sham-link | i {remote}'.format(remote=sl_remote_id)
out = self.device.execute(cmd)
for line in out.splitlines():
line = line.rstrip()
# area 1 sham-link 10.229.11.11 10.151.22.22 cost 111 ttl-security hops 3
q = re.search('area +(?P<q_area>(\d+)) +sham-link'
' +(?P<local_id>(\S+))'
' +(?P<remote_id>(\S+)) +(.*)', line)
if q:
q_area = str(IPAddress(str(q.groupdict()['q_area'])))
q_remote_id = str(q.groupdict()['remote_id'])
# Check parameters match
if q_area == area and q_remote_id == sl_remote_id:
sl_local_id = str(q.groupdict()['local_id'])
break
# Set intf_name based on parsed values
if sl_local_id is not None:
intf_name = '{} {}'.format(sl_local_id, sl_remote_id)
else:
# Set values for dict
intf_type = 'interfaces'
intf_name = interface
if 'areas' not in ret_dict['vrf'][vrf]['address_family'][af]\
['instance'][instance]:
ret_dict['vrf'][vrf]['address_family'][af]['instance']\
[instance]['areas'] = {}
if area not in ret_dict['vrf'][vrf]['address_family'][af]\
['instance'][instance]['areas']:
ret_dict['vrf'][vrf]['address_family'][af]['instance']\
[instance]['areas'][area] = {}
if intf_type not in ret_dict['vrf'][vrf]['address_family'][af]\
['instance'][instance]['areas'][area]:
ret_dict['vrf'][vrf]['address_family'][af]['instance']\
[instance]['areas'][area][intf_type] = {}
if intf_name not in ret_dict['vrf'][vrf]['address_family'][af]\
['instance'][instance]['areas'][area][intf_type]:
ret_dict['vrf'][vrf]['address_family'][af]['instance']\
[instance]['areas'][area][intf_type][intf_name] = {}
if 'neighbors' not in ret_dict['vrf'][vrf]['address_family']\
[af]['instance'][instance]['areas'][area][intf_type]\
[intf_name]:
ret_dict['vrf'][vrf]['address_family'][af]['instance']\
[instance]['areas'][area][intf_type][intf_name]\
['neighbors'] = {}
if neighbor not in ret_dict['vrf'][vrf]['address_family'][af]\
['instance'][instance]['areas'][area][intf_type]\
[intf_name]['neighbors']:
ret_dict['vrf'][vrf]['address_family'][af]['instance']\
[instance]['areas'][area][intf_type][intf_name]\
['neighbors'][neighbor] = {}
# Set sub_dict
sub_dict = ret_dict['vrf'][vrf]['address_family'][af]\
['instance'][instance]['areas'][area][intf_type]\
[intf_name]['neighbors'][neighbor]
# Set values
sub_dict['neighbor_router_id'] = neighbor
sub_dict['interface'] = interface
try:
sub_dict['address'] = address
del address
except Exception:
pass
try:
sub_dict['interface_id'] = interface_id
del interface_id
except Exception:
pass
continue
# Neighbor priority is 1, State is FULL, 6 state changes
m = p3.match(line)
if m:
sub_dict['priority'] = int(m.groupdict()['priority'])
state = str(m.groupdict()['state']).lower()
state = state.replace('_', '-')
sub_dict['state'] = state
if 'statistics' not in sub_dict:
sub_dict['statistics'] = {}
sub_dict['statistics']['nbr_event_count'] = \
int(m.groupdict()['num'])
continue
# DR is 10.2.3.3 BDR is 10.2.3.2
m = p4.match(line)
if m:
sub_dict['dr_ip_addr'] = str(m.groupdict()['dr_ip_addr'])
sub_dict['bdr_ip_addr'] = str(m.groupdict()['bdr_ip_addr'])
continue
# Options is 0x2 in Hello (E-bit)
m = p5.match(line)
if m:
sub_dict['hello_options'] = str(m.groupdict()['options'])
continue
# Options is 0x42 in DBD (E-bit, O-bit)
m = p6.match(line)
if m:
sub_dict['dbd_options'] = str(m.groupdict()['options'])
continue
# Dead timer due in 00:00:38
m = p7.match(line)
if m:
sub_dict['dead_timer'] = str(m.groupdict()['dead_timer'])
continue
# Neighbor is up for 08:22:07
m = p8.match(line)
if m:
sub_dict['uptime'] = str(m.groupdict()['uptime'])
continue
# Index 1/2/2, retransmission queue length 0, number of retransmission 0
m = p9.match(line)
if m:
sub_dict['index'] = str(m.groupdict()['index'])
if 'statistics' not in sub_dict:
sub_dict['statistics'] = {}
sub_dict['statistics']['nbr_retrans_qlen'] = \
int(m.groupdict()['ql'])
sub_dict['statistics']['total_retransmission'] = \
int(m.groupdict()['num_retrans'])
continue
# First 0x0(0)/0x0(0)/0x0(0) Next 0x0(0)/0x0(0)/0x0(0)
m = p10.match(line)
if m:
sub_dict['first'] = str(m.groupdict()['first'])
sub_dict['next'] = str(m.groupdict()['next'])
continue
# Last retransmission scan length is 0, maximum is 0
m = p11.match(line)
if m:
if 'statistics' not in sub_dict:
sub_dict['statistics'] = {}
sub_dict['statistics']['last_retrans_scan_length'] = \
int(m.groupdict()['num1'])
sub_dict['statistics']['last_retrans_max_scan_length'] = \
int(m.groupdict()['num2'])
continue
# Last retransmission scan time is 0 msec, maximum is 0 msec
m = p12.match(line)
if m:
if 'statistics' not in sub_dict:
sub_dict['statistics'] = {}
sub_dict['statistics']['last_retrans_scan_time_msec'] = \
int(m.groupdict()['num1'])
sub_dict['statistics']['last_retrans_max_scan_time_msec'] = \
int(m.groupdict()['num2'])
continue
# SR adj label 10
m = p13.match(line)
if m:
sub_dict['sr_adj_label'] = str(m.groupdict()['sr_adj_label'])
continue
return ret_dict
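# Illustrative usage (a sketch; assumes a connected device):
#
#   parsed = device.parse('show ip ospf neighbor detail')
#   # or, for a single neighbor:
#   parsed = device.parse('show ip ospf neighbor 10.16.2.2 detail')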
# ===========================
# Schema for:
# * 'show ip ospf database'
# ===========================
class ShowIpOspfDatabaseSchema(MetaParser):
''' Schema for:
* 'show ip ospf database'
'''
schema = {
'vrf':
{Any():
{'address_family':
{Any():
{'instance':
{Any():
{Optional('areas'):
{Any():
{'database':
{'lsa_types':
{Any():
{'lsa_type': int,
'lsas':
{Any():
{'lsa_id': str,
'adv_router': str,
'ospfv2':
{'header':
{'lsa_id': str,
'adv_router': str,
'age': int,
'seq_num': str,
'checksum': str,
Optional('link_count'): int,
},
},
},
},
},
},
},
},
},
},
},
},
},
},
},
}
# ==========================
# Parser for:
# * 'show ip ospf database'
# ==========================
class ShowIpOspfDatabase(ShowIpOspfDatabaseSchema):
''' Parser for:
* 'show ip ospf database'
'''
cli_command = 'show ip ospf database'
exclude = ['age']
def cli(self, output=None):
if output is None:
# Execute command on device
out = self.device.execute(self.cli_command)
else:
out = output
# Init vars
ret_dict = {}
address_family = 'ipv4'
default_mt_id = 0
# 1: Router
# 2: Network Link
# 3: Summary
# 3: Summary Network
# 3: Summary Net
# 4: Summary ASB
# 5: Type-5 AS External
# 10: Opaque Area
lsa_type_mapping = {
'router': 1,
'net': 2,
'summary': 3,
'summary net': 3,
'summary asb': 4,
'external': 5,
'opaque': 10,
}
# OSPF Router with ID (172.16.1.214) (Process ID 65109)
# OSPF Router with ID (10.36.3.3) (Process ID 1, VRF VRF1)
p1 = re.compile(r'^OSPF +Router +with +ID +\((?P<router_id>(\S+))\)'
' +\(Process +ID +(?P<instance>(\d+))'
'(?:, +VRF +(?P<vrf>(\S+)))?\)$')
# Router Link States (Area 0)
# Net Link States (Area 0)
# Summary Net Link States (Area 8)
# Summary ASB Link States (Area 8)
p2 = re.compile(r'^(?P<lsa_type>([a-zA-Z\s]+)) +Link +States +\(Area'
' +(?P<area>(\S+))\)$')
# Link ID ADV Router Age Seq# Checksum Link count
# 10.13.202.64 10.120.202.64 2794 0x80000043 0x002254 3
# 10.1.1.2 10.169.197.253 70 0x8000003F 0x0015EF
p3 = re.compile(r'^(?P<link_id>(\S+)) +(?P<adv_router>(\S+))'
' +(?P<age>(\d+)) +(?P<seq>(\S+)) +(?P<checksum>(\S+))'
'(?: *(?P<link_count>(\d+)))?$')
for line in out.splitlines():
line = line.strip()
# Load for five secs: 71%/0%; one minute: 11%; five minutes: 9%
# Time source is NTP, 20:29:26.348 EST Fri Nov 11 2016
# OSPF Router with ID (10.36.3.3) (Process ID 1, VRF VRF1)
m = p1.match(line)
if m:
group = m.groupdict()
router_id = str(group['router_id'])
instance = str(group['instance'])
if group['vrf']:
vrf = str(group['vrf'])
else:
vrf = 'default'
# Create dict
ospf_dict = ret_dict.setdefault('vrf', {}).\
setdefault(vrf, {}).\
setdefault('address_family', {}).\
setdefault(address_family, {}).\
setdefault('instance', {}).\
setdefault(instance, {})
continue
# Router Link States (Area 0)
# Net Link States (Area 0)
# Summary Net Link States (Area 8)
# Summary ASB Link States (Area 8)
m = p2.match(line)
if m:
group = m.groupdict()
lsa_type_key = group['lsa_type'].lower()
if lsa_type_key in lsa_type_mapping:
lsa_type = lsa_type_mapping[lsa_type_key]
else:
continue
# Set area
if group['area']:
try:
int(group['area'])
area = str(IPAddress(str(group['area'])))
except Exception:
area = str(group['area'])
else:
area = '0.0.0.0'
# Create dict structure
lsa_type_dict = ospf_dict.setdefault('areas', {}).\
setdefault(area, {}).\
setdefault('database', {}).\
setdefault('lsa_types', {}).\
setdefault(lsa_type, {})
# Set lsa_type
lsa_type_dict['lsa_type'] = lsa_type
continue
# Link ID ADV Router Age Seq# Checksum Link count
# 10.13.202.64 10.120.202.64 2794 0x80000043 0x002254 3
# 10.1.1.2 10.169.197.253 70 0x8000003F 0x0015EF
m = p3.match(line)
if m:
group = m.groupdict()
lsa_id = group['link_id']
# Create dict
lsas_dict = lsa_type_dict.setdefault('lsas', {}).\
setdefault(lsa_id, {})
lsas_dict['lsa_id'] = lsa_id
lsas_dict['adv_router'] = group['adv_router']
# ospfv2 dict
ospfv2_dict = lsas_dict.setdefault('ospfv2', {}).\
setdefault('header', {})
ospfv2_dict['lsa_id'] = lsa_id
ospfv2_dict['adv_router'] = group['adv_router']
ospfv2_dict['age'] = int(group['age'])
ospfv2_dict['seq_num'] = group['seq']
ospfv2_dict['checksum'] = group['checksum']
if group['link_count']:
ospfv2_dict['link_count'] = int(group['link_count'])
continue
return ret_dict
# =====================================
# Super parser for:
# * 'show ip ospf database external'
# * 'show ip ospf database network'
# * 'show ip ospf database summary'
# * 'show ip ospf database router'
# * 'show ip ospf database opaque'
# * 'show ip ospf database opaque-area self-originate'
# =====================================
class ShowIpOspfDatabaseTypeParser(MetaParser):
''' Parser for:
* 'show ip ospf database external'
* 'show ip ospf database network'
* 'show ip ospf database summary'
* 'show ip ospf database router'
* 'show ip ospf database opaque'
* 'show ip ospf database opaque-area self-originate'
'''
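# Shared line-by-line implementation: the concrete parsers defined below
# (router, external, network, summary, opaque-area) each call this cli()
# with the matching db_type, e.g. super().cli(db_type='router', out=output),
# so all 'show ip ospf database <type>' commands reuse one state machine.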
def cli(self, db_type, out=None):
assert db_type in ['external', 'network', 'summary', 'router',
'opaque']
# Init vars
ret_dict = {}
af = 'ipv4'
default_mt_id = 0
capabilities_flag = False
tlv_type_flag = False
sub_tlv_type_flag = False
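# These flags track which multi-line block (Capabilities list, TLV, or
# Sub-TLV) is currently being parsed, so shared fields such as 'Length'
# and 'Flags' are stored in the corresponding dictionary.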
# Router
# Network Link
# Summary Network
# Opaque Area
# Type-5 AS External
lsa_type_mapping = {
'router': 1,
'network': 2,
'summary': 3,
'external': 5,
'opaque': 10,
}
p1 = re.compile(r'^OSPF +Router +with +ID +\((?P<router_id>(\S+))\)'
' +\(Process +ID +(?P<instance>(\d+))'
'(?:, +VRF +(?P<vrf>(\S+)))?\)$')
p2 = re.compile(r'^(?P<lsa_type_name>(.*)) +Link +States'
'(?: +\(Area +(?P<area>(\S+))\))?$')
p3_1 = re.compile(r'^Routing +Bit +Set +on +this +LSA$')
p3_2 = re.compile(r'^LS +age: +(?P<age>(\d+))$')
p3_2_1 = re.compile(r'^LS +age: +\w+\((?P<age>(\d+))\)$')
p4 = re.compile(r'^Options:(?: +(?P<option>([a-zA-Z0-9]+)))?'
'(?: *\((?P<option_desc>(.*))\))?$')
p5_1 = re.compile(r'^LS +Type: +(?P<lsa_type>(.*))$')
p5_2 = re.compile(r'^Link +State +ID: +(?P<lsa_id>(\S+))'
'(?: +\(.*\))?$')
p6 = re.compile(r'^Advertising +Router: +(?P<adv_router>(\S+))$')
p7 = re.compile(r'^LS +Seq +Number: +(?P<ls_seq_num>(\S+))$')
p8 = re.compile(r'^Checksum: +(?P<checksum>(\S+))$')
p9 = re.compile(r'^Length *: +(?P<length>(\d+))$')
p10 = re.compile(r'^Network +Mask: +\/(?P<net_mask>(\S+))$')
p11_1 = re.compile(r'^Metric +Type: +2 +\(.*\)$')
p11_2 = re.compile(r'^Metric +Type: +1 +\(.*\)$')
p12 = re.compile(r'^TOS:? +(?P<tos>(\d+))(?:(\s+|\t+)Metric(?:s)?:'
' +(?P<metric>(\d+)))?$')
p13 = re.compile(r'^Metric: +(?P<metric>(\d+))$')
p14 = re.compile(r'^Forward +Address: +(?P<addr>(\S+))$')
p15 = re.compile(r'^External +Route +Tag: +(?P<tag>(\d+))$')
p16 = re.compile(r'^Attached +Router: +(?P<att_router>(\S+))$')
p17 = re.compile(r'^Number +of +(l|L)inks *: +(?P<num>(\d+))$')
p18 = re.compile(r'^Link +connected +to: +a +(?P<type>(.*))$')
p18_1 = re.compile(r'^Link\s+connected +to\s*: +(?P<type>(.*))$')
p19_1 = re.compile(r'^\(Link +ID\) +Network\/(s|S)ubnet +(n|N)umber:'
' +(?P<link_id>(\S+))$')
p19_2 = re.compile(r'^\(Link +ID\) +(D|d)esignated +(R|r)outer'
' +(a|A)ddress: +(?P<link_id>(\S+))$')
p19_3 = re.compile(r'^\(Link +ID\) +(N|n)eighboring +(R|r)outer'
' +(I|i)D: +(?P<link_id>(\S+))$')
p20_1 = re.compile(r'^\(Link +Data\) +Network +Mask:'
' +(?P<link_data>(\S+))$')
p20_2 = re.compile(r'^\(Link +Data\) +Router +Interface +address:'
' +(?P<link_data>(\S+))$')
# MTID 32 Metrics: 1
# MTID : 0
p21 = re.compile(r'MTID\s*:*\s*(?P<mtid>\d+)\s*(?:(Metrics*\s*:*\s*(?P<metric>\d+)))?')
p21_1 = re.compile(r'^Number +of +MTID +metrics: +(?P<num>(\d+))$')
p22 = re.compile(r'^Opaque +Type: +(?P<type>(\d+))(?: +\((Traffic Engineering)\))?$')
p23 = re.compile(r'^Opaque +ID: +(?P<id>(\d+))$')
p24 = re.compile(r'^Fragment +number *: +(?P<num>(\d+))$')
p25 = re.compile(r'^MPLS +TE +router +ID *: +(?P<mpls>(\S+))$')
p26_1 = re.compile(r'^AS +Boundary +Router$')
p26_2 = re.compile(r'^Area +Border +Router$')
p27 = re.compile(r'^Link +connected +to\s*\:*\s+(?P<link>(.*))$')
p28 = re.compile(r'^Link +ID *: +(?P<id>(\S+))$')
p29 = re.compile(r'^Interface +Address *: +(?P<addr>(\S+))$')
p30 = re.compile(r'^Admin +Metric *: +(?P<te_metric>(\d+))$')
p31 = re.compile(r'^Maximum +(B|b)andwidth *:'
' +(?P<max_band>(\d+))$')
p32 = re.compile(r'^Maximum +(R|r)eservable +(B|b)andwidth'
'(?: +global)? *: +(?P<max_res_band>(\d+))$')
p33 = re.compile(r'^Affinity +Bit *: +(?P<admin_group>(\S+))$')
p33_1 = re.compile(r'^IGP +Metric *: +(?P<igp_metric>(\d+))$')
p33_2 = re.compile(r'^Number +of +Priority *: +(?P<num>(\d+))$')
p34 = re.compile(r'^Priority +(?P<num1>(\d+)) *:'
' +(?P<band1>(\d+))(?: +Priority +(?P<num2>(\d+))'
' *: +(?P<band2>(\d+)))?$')
p35 = re.compile(r'^Unknown +Sub-TLV *: +Type += +(?P<type>(\d+)),'
' +Length += +(?P<length>(\d+))'
' +Value += +(?P<value>(.*))$')
p36 = re.compile(r'^Extended +Administrative +Group *: +Length *:'
' +(?P<eag_length>(\d+))$')
p37 = re.compile(r'^EAG\[(?P<group_num>(\d+))\]: +(?P<val>(\d+))$')
# Neighbor Address : 192.168.220.2
p38 = re.compile(r'Neighbor\s+Address\s*:\s*(?P<neighbor_address>\S+)')
# TLV Type: Router Information
# TLV Type: Segment Routing Algorithm
p39 = re.compile(r'TLV\s+Type\s*:\s*(?P<tlv_type>.+)')
# Router Information
p39_1 = re.compile(r'(R|r)outer\s+(I|i)nformation')
# Segment Routing Algorithm
p39_2 = re.compile(r'(S|s)egment\s+(R|r)outing\s+(A|a)lgorithm')
# Segment Routing Range
p39_3 = re.compile(r'(S|s)egment\s+(R|r)outing\s+(R|r)ange')
# Segment Routing Node MSD
p39_4 = re.compile(r'(S|s)egment\s+(R|r)outing\s+(N|n)ode\s+MSD')
# Segment Routing Local Block
p39_5 = re.compile(r'(S|s)egment\s+(R|r)outing\s+(L|l)ocal\s+(B|b)lock')
# Extended Prefix
p39_6 = re.compile(r'(E|e)xtended\s+(P|p)refix')
# Extended Link
p39_7 = re.compile(r'(E|e)xtended\s+(L|l)ink')
# Algorithm: SPF
# Algorithm: Strict SPF
p40 = re.compile(r'Algo(?:(rithm))?\s*:\s*(?P<algorithm>.+)')
# Range Size: 1000
p41 = re.compile(r'Range\s+Size\s*:\s*(?P<range_size>\d+)')
# Flags : L-Bit, V-bit
p42 = re.compile(r'Flags\s*\:\s*(?P<flags>.+)')
# Weight : 0
p44 = re.compile(r'Weight\s*:\s*(?P<weight>\d+)')
# Label : 19
p45 = re.compile(r'Label\s*:\s*(?P<label>\d+)')
# (Link Data) Interface IP address: 192.168.220.1
p46 = re.compile(r'\(Link\s+Data\)\s+Interface\s+IP\s+address\s*:\s*(?P<link_data>\S+)')
# Prefix : 10.4.1.1/32
p47 = re.compile(r'Prefix\s*:\s*(?P<prefix>\S+)')
# AF : 0
p48 = re.compile(r'AF\s*:\s*(?P<af>\S+)')
# Route-type: Intra
p49 = re.compile(r'Route\-type\s*:\s*(?P<route_type>.+)')
# Sub-TLV Type: Remote Intf Addr
# Sub-TLV Type: Local / Remote Intf ID
p50 = re.compile(r'Sub\-TLV\s+Type\s*:\s*(?P<sub_tlv_type>.+)')
# Remote Interface Address : 192.168.0.1
p51 = re.compile(r'Remote\s+Interface\s+Address\s*:\s*(?P<remote_interface_address>\S+)')
# Local Interface ID : 20
p52 = re.compile(r'Local\s+Interface\s+ID\s*:\s*(?P<local_interface_id>\S+)')
# Remote Interface ID : 20
p53 = re.compile(r'Remote\s+Interface\s+ID\s*:\s*(?P<remote_interface_id>\S+)')
# SID : 1
p54 = re.compile(r'SID\s*:\s*(?P<sid>\S+)')
# Graceful Restart Helper
p55 = re.compile(r'(G|g)raceful\s+(R|r)estart\s+(H|h)elper')
# Stub Router Support
p56 = re.compile(r'(S|s)tub\s+(R|r)outer\s+(S|s)upport')
# SPF
p57 = re.compile(r'SPF')
# Strict SPF
p58 = re.compile(r'Strict\s+SPF')
# Sub-type: Node Max Sid Depth, Value: 13
p59 = re.compile(r'Sub\-type\s*:\s*Node\s+Max\s+Sid\s+Depth\,\s+Value:\s*(?P<value>\d+)')
for line in out.splitlines():
line = line.strip()
# OSPF Router with ID (10.36.3.3) (Process ID 1)
# OSPF Router with ID (10.36.3.3) (Process ID 1, VRF VRF1)
m = p1.match(line)
if m:
router_id = str(m.groupdict()['router_id'])
instance = str(m.groupdict()['instance'])
if m.groupdict()['vrf']:
vrf = str(m.groupdict()['vrf'])
else:
vrf = 'default'
if 'vrf' not in ret_dict:
ret_dict['vrf'] = {}
if vrf not in ret_dict['vrf']:
ret_dict['vrf'][vrf] = {}
if 'address_family' not in ret_dict['vrf'][vrf]:
ret_dict['vrf'][vrf]['address_family'] = {}
if af not in ret_dict['vrf'][vrf]['address_family']:
ret_dict['vrf'][vrf]['address_family'][af] = {}
if 'instance' not in ret_dict['vrf'][vrf]['address_family'][af]:
ret_dict['vrf'][vrf]['address_family'][af]['instance'] = {}
if instance not in ret_dict['vrf'][vrf]['address_family'][af]\
['instance']:
ret_dict['vrf'][vrf]['address_family'][af]['instance']\
[instance] = {}
continue
# Router Link States (Area 0)
# Net Link States (Area 1)
# Summary Net Link States (Area 0.0.0.0)
# Type-5 AS External Link States
# Type-10 Opaque Link Area Link States (Area 0)
m = p2.match(line)
if m:
lsa_type = lsa_type_mapping[db_type]
# Set area
if m.groupdict()['area']:
try:
int(m.groupdict()['area'])
area = str(IPAddress(str(m.groupdict()['area'])))
except Exception:
area = str(m.groupdict()['area'])
else:
area = '0.0.0.0'
# Create dict structure
if 'areas' not in ret_dict['vrf'][vrf]['address_family'][af]\
['instance'][instance]:
ret_dict['vrf'][vrf]['address_family'][af]['instance']\
[instance]['areas'] = {}
if area not in ret_dict['vrf'][vrf]['address_family'][af]\
['instance'][instance]['areas']:
ret_dict['vrf'][vrf]['address_family'][af]['instance']\
[instance]['areas'][area] = {}
if 'database' not in ret_dict['vrf'][vrf]['address_family'][af]\
['instance'][instance]['areas'][area]:
ret_dict['vrf'][vrf]['address_family'][af]['instance']\
[instance]['areas'][area]['database'] = {}
if 'lsa_types' not in ret_dict['vrf'][vrf]['address_family']\
[af]['instance'][instance]['areas'][area]['database']:
ret_dict['vrf'][vrf]['address_family'][af]['instance']\
[instance]['areas'][area]['database']['lsa_types'] = {}
if lsa_type not in ret_dict['vrf'][vrf]['address_family'][af]\
['instance'][instance]['areas'][area]['database']\
['lsa_types']:
ret_dict['vrf'][vrf]['address_family'][af]['instance']\
[instance]['areas'][area]['database']['lsa_types']\
[lsa_type] = {}
# Set sub_dict
sub_dict = ret_dict['vrf'][vrf]['address_family'][af]\
['instance'][instance]['areas'][area]['database']\
['lsa_types'][lsa_type]
# Set lsa_type
sub_dict['lsa_type'] = lsa_type
continue
# Routing Bit Set on this LSA
m = p3_1.match(line)
if m:
routing_bit_enable = True
continue
# LS age: 1565
m = p3_2.match(line)
if m:
tlv_type_flag = False
sub_tlv_type_flag = False
age = int(m.groupdict()['age'])
continue
# LS age: MAXAGE(3601)
m = p3_2_1.match(line)
if m:
tlv_type_flag = False
sub_tlv_type_flag = False
age = int(m.groupdict()['age'])
continue
# Options: 0x20 (No TOS-capability, DC)
# Options: (No TOS-capability, DC)
m = p4.match(line)
if m:
option = str(m.groupdict()['option'])
option_desc = str(m.groupdict()['option_desc'])
continue
# LS Type: Type-5 AS-External
m = p5_1.match(line)
if m:
lsa_type = lsa_type_mapping[db_type]
continue
# Link State ID: 10.4.1.1
# Link State ID: 10.94.44.44 (Network address)
# Link State ID: 10.1.2.1 (Designated Router address)
# Link State ID: 10.1.2.1 (address of Designated Router)
m = p5_2.match(line)
if m:
lsa_id = str(m.groupdict()['lsa_id'])
continue
# Advertising Router: 10.64.4.4
m = p6.match(line)
if m:
adv_router = str(m.groupdict()['adv_router'])
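# Each LSA is keyed by the composite string '<lsa_id> <adv_router>'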
lsa = '{} {}'.format(lsa_id, adv_router)
# Reset counters for this lsa
link_tlv_counter = 0
unknown_tlvs_counter = 0
# Create schema structure
if 'lsas' not in sub_dict:
sub_dict['lsas'] = {}
if lsa not in sub_dict['lsas']:
sub_dict['lsas'][lsa] = {}
# Set keys under 'lsa'
sub_dict['lsas'][lsa]['adv_router'] = adv_router
try:
sub_dict['lsas'][lsa]['lsa_id'] = lsa_id
except Exception:
pass
# Set db_dict
if 'ospfv2' not in sub_dict['lsas'][lsa]:
sub_dict['lsas'][lsa]['ospfv2'] = {}
if 'body' not in sub_dict['lsas'][lsa]['ospfv2']:
sub_dict['lsas'][lsa]['ospfv2']['body'] = {}
if db_type not in sub_dict['lsas'][lsa]['ospfv2']['body']:
sub_dict['lsas'][lsa]['ospfv2']['body'][db_type] = {}
db_dict = sub_dict['lsas'][lsa]['ospfv2']['body'][db_type]
# Create 'topologies' sub_dict if 'summary' or 'external'
if db_type in ['summary', 'external']:
if 'topologies' not in db_dict:
db_dict['topologies'] = {}
if default_mt_id not in db_dict['topologies']:
db_dict['topologies'][default_mt_id] = {}
db_topo_dict = db_dict['topologies'][default_mt_id]
db_topo_dict['mt_id'] = default_mt_id
# Set header dict
if 'header' not in sub_dict['lsas'][lsa]['ospfv2']:
sub_dict['lsas'][lsa]['ospfv2']['header'] = {}
header_dict = sub_dict['lsas'][lsa]['ospfv2']['header']
# Set previously parsed values
try:
header_dict['routing_bit_enable'] = routing_bit_enable
del routing_bit_enable
except Exception:
pass
try:
header_dict['age'] = age
del age
except Exception:
pass
try:
header_dict['option'] = option
del option
except Exception:
pass
try:
header_dict['option_desc'] = option_desc
del option_desc
except Exception:
pass
try:
header_dict['type'] = lsa_type
del lsa_type
except Exception:
pass
try:
header_dict['lsa_id'] = lsa_id
del lsa_id
except Exception:
pass
try:
header_dict['adv_router'] = adv_router
del adv_router
except Exception:
pass
try:
header_dict['opaque_type'] = opaque_type
del opaque_type
except Exception:
pass
try:
header_dict['opaque_id'] = opaque_id
del opaque_id
except Exception:
pass
# LS Seq Number: 0x80000002
m = p7.match(line)
if m:
header_dict['seq_num'] = str(m.groupdict()['ls_seq_num'])
continue
# Checksum: 0x7d61
m = p8.match(line)
if m:
header_dict['checksum'] = str(m.groupdict()['checksum'])
continue
# Length: 36
# Length : 36
m = p9.match(line)
if m:
length = int(m.groupdict()['length'])
if sub_tlv_type_flag:
sub_tlv_types_dict['length'] = length
elif tlv_type_flag:
tlv_type_dict['length'] = length
else:
header_dict['length'] = length
continue
# Network Mask: /32
m = p10.match(line)
if m:
dummy = '{}/{}'.format('0.0.0.0', m.groupdict()['net_mask'])
db_dict['network_mask'] = str(IPNetwork(dummy).netmask)
continue
# Metric Type: 2 (Larger than any link state path)
m = p11_1.match(line)
if m:
db_topo_dict['flags'] = "E"
continue
# Metric Type: 1 (Comparable directly to link state metric)
m = p11_2.match(line)
if m:
# Do nothing
continue
# TOS: 0
# TOS: 0 Metric: 1
m = p12.match(line)
if m:
if db_type == 'router':
if m.groupdict()['tos']:
db_dict['links'][link_id]['topologies'][default_mt_id]\
['tos'] = int(m.groupdict()['tos'])
if m.groupdict()['metric']:
db_dict['links'][link_id]['topologies'][default_mt_id]\
['metric'] = int(m.groupdict()['metric'])
continue
else:
db_topo_dict['tos'] = int(m.groupdict()['tos'])
if m.groupdict()['metric']:
db_topo_dict['metric'] = int(m.groupdict()['metric'])
continue
# Metric: 20
m = p13.match(line)
if m:
db_topo_dict['metric'] = int(m.groupdict()['metric'])
continue
# Forward Address: 0.0.0.0
m = p14.match(line)
if m:
db_topo_dict['forwarding_address'] = str(m.groupdict()['addr'])
continue
# External Route Tag: 0
m = p15.match(line)
if m:
db_topo_dict['external_route_tag'] = int(m.groupdict()['tag'])
continue
# Attached Router: 10.84.66.66
m = p16.match(line)
if m:
attached_router = str(m.groupdict()['att_router'])
if 'attached_routers' not in db_dict:
db_dict['attached_routers'] = {}
if attached_router not in db_dict['attached_routers']:
db_dict['attached_routers'][attached_router] = {}
continue
# Number of links: 3
# Number of Links: 3
m = p17.match(line)
if m:
db_dict['num_of_links'] = int(m.groupdict()['num'])
continue
# Link connected to: a Stub Network
m = p18.match(line)
if m:
link_type = str(m.groupdict()['type']).lower()
continue
# Link connected to: another Router (point-to-point)
m = p18_1.match(line)
if m:
if tlv_type_flag:
sub_link_type = str(m.groupdict()['type']).lower()
if 'another router' in sub_link_type:
opaque_link_type = 1
tlv_type_dict['link_name'] = sub_link_type
tlv_type_dict['link_type'] = opaque_link_type
continue
link_type = str(m.groupdict()['type']).lower()
continue
# (Link ID) Network/subnet number: 10.4.1.1
m = p19_1.match(line)
if m:
link_id = str(m.groupdict()['link_id'])
# Create dict structures
if 'links' not in db_dict:
db_dict['links'] = {}
if link_id not in db_dict['links']:
db_dict['links'][link_id] = {}
db_dict['links'][link_id]['link_id'] = link_id
# Set previously parsed values
try:
db_dict['links'][link_id]['type'] = link_type
except Exception:
pass
# Create topology dict under link_id
if 'topologies' not in db_dict['links'][link_id]:
db_dict['links'][link_id]['topologies'] = {}
if default_mt_id not in db_dict['links'][link_id]['topologies']:
db_dict['links'][link_id]['topologies'][default_mt_id] = {}
db_dict['links'][link_id]['topologies'][default_mt_id]['mt_id'] = default_mt_id
continue
# (Link ID) Designated Router address: 10.166.7.6
m = p19_2.match(line)
if m:
link_id = str(m.groupdict()['link_id'])
# If 'TLV Type' found in output this flag is set to true
if tlv_type_flag:
tlv_type_dict['link_id'] = link_id
continue
# Create dict structures
if 'links' not in db_dict:
db_dict['links'] = {}
if link_id not in db_dict['links']:
db_dict['links'][link_id] = {}
db_dict['links'][link_id]['link_id'] = link_id
# Set previously parsed values
try:
db_dict['links'][link_id]['type'] = link_type
except Exception:
pass
# Create topology dict under link_id
if 'topologies' not in db_dict['links'][link_id]:
db_dict['links'][link_id]['topologies'] = {}
if default_mt_id not in db_dict['links'][link_id]['topologies']:
db_dict['links'][link_id]['topologies'][default_mt_id] = {}
db_dict['links'][link_id]['topologies'][default_mt_id]['mt_id'] = default_mt_id
continue
# (Link ID) Neighboring Router ID: 10.151.22.22
m = p19_3.match(line)
if m:
link_id = str(m.groupdict()['link_id'])
if tlv_type_flag:
tlv_type_dict['link_id'] = link_id
continue
# Create dict structures
if 'links' not in db_dict:
db_dict['links'] = {}
if link_id not in db_dict['links']:
db_dict['links'][link_id] = {}
db_dict['links'][link_id]['link_id'] = link_id
# Set previously parsed values
try:
db_dict['links'][link_id]['type'] = link_type
except Exception:
pass
# Create topology dict under link_id
if 'topologies' not in db_dict['links'][link_id]:
db_dict['links'][link_id]['topologies'] = {}
if default_mt_id not in db_dict['links'][link_id]['topologies']:
db_dict['links'][link_id]['topologies'][default_mt_id] = {}
db_dict['links'][link_id]['topologies'][default_mt_id]['mt_id'] = default_mt_id
continue
# (Link Data) Network Mask: 255.255.255.255
m = p20_1.match(line)
if m:
db_dict['links'][link_id]['link_data'] = \
str(m.groupdict()['link_data'])
continue
# (Link Data) Router Interface address: 10.166.7.6
m = p20_2.match(line)
if m:
db_dict['links'][link_id]['link_data'] = \
str(m.groupdict()['link_data'])
continue
# MTID 32 Metrics: 1
# MTID : 0
m = p21.match(line)
if m:
mtid = int(m.groupdict()['mtid'])
if sub_tlv_type_flag:
sub_tlv_types_dict['mt_id'] = int(mtid)
continue
if db_type == 'router':
if mtid not in db_dict['links'][link_id]['topologies']:
db_dict['links'][link_id]['topologies'][mtid] = {}
db_dict['links'][link_id]['topologies'][mtid]['mt_id'] = mtid
db_dict['links'][link_id]['topologies'][mtid]['metric'] = \
int(m.groupdict()['metric'])
elif db_type == 'summary':
if 'topologies' not in db_dict:
db_dict['topologies'] = {}
if mtid not in db_dict['topologies']:
db_dict['topologies'][mtid] = {}
db_topo_dict = db_dict['topologies'][mtid]
db_topo_dict['mt_id'] = mtid
db_topo_dict['metric'] = int(m.groupdict()['metric'])
continue
# Number of MTID metrics: 0
m = p21_1.match(line)
if m:
db_dict['links'][link_id]['num_mtid_metrics'] = \
int(m.groupdict()['num'])
continue
# Number of TOS metrics: 0
p21_2 = re.compile(r'^Number +of +TOS +metrics: +(?P<num>(\d+))$')
m = p21_2.match(line)
if m:
db_dict['links'][link_id]['num_tos_metrics'] = \
int(m.groupdict()['num'])
continue
# Opaque Type: 1
m = p22.match(line)
if m:
opaque_type = int(m.groupdict()['type'])
continue
# Opaque ID: 38
m = p23.match(line)
if m:
opaque_id = int(m.groupdict()['id'])
continue
# Fragment number: 0
m = p24.match(line)
if m:
header_dict['fragment_number'] = int(m.groupdict()['num'])
continue
# MPLS TE router ID : 10.4.1.1
m = p25.match(line)
if m:
db_dict['mpls_te_router_id'] = str(m.groupdict()['mpls'])
continue
# AS Boundary Router
m = p26_1.match(line)
if m:
header_dict['as_boundary_router'] = True
continue
# Area Border Router
m = p26_2.match(line)
if m:
header_dict['area_border_router'] = True
continue
# Link connected to Broadcast network
m = p27.match(line)
if m:
link_tlv_counter += 1
if 'link_tlvs' not in db_dict:
db_dict['link_tlvs'] = {}
if link_tlv_counter not in db_dict['link_tlvs']:
db_dict['link_tlvs'][link_tlv_counter] = {}
# Set link type
opaque_link = str(m.groupdict()['link']).lower()
if opaque_link == 'broadcast network':
opaque_link_type = 2
else:
opaque_link_type = 1
db_dict['link_tlvs'][link_tlv_counter]\
['link_type'] = opaque_link_type
db_dict['link_tlvs'][link_tlv_counter]\
['link_name'] = opaque_link
# Set remote_if_ipv4_addrs (if needed)
if opaque_link_type == 2:
if 'remote_if_ipv4_addrs' not in db_dict['link_tlvs']\
[link_tlv_counter]:
db_dict['link_tlvs'][link_tlv_counter]\
['remote_if_ipv4_addrs'] = {}
db_dict['link_tlvs'][link_tlv_counter]\
['remote_if_ipv4_addrs']['0.0.0.0'] = {}
continue
# Link ID : 10.1.4.4
m = p28.match(line)
if m:
db_dict['link_tlvs'][link_tlv_counter]['link_id'] = \
str(m.groupdict()['id'])
continue
# Interface Address : 10.1.4.1
m = p29.match(line)
if m:
addr = str(m.groupdict()['addr'])
if 'local_if_ipv4_addrs' not in db_dict['link_tlvs']\
[link_tlv_counter]:
db_dict['link_tlvs'][link_tlv_counter]\
['local_if_ipv4_addrs'] = {}
if addr not in db_dict['link_tlvs'][link_tlv_counter]\
['local_if_ipv4_addrs']:
db_dict['link_tlvs'][link_tlv_counter]\
['local_if_ipv4_addrs'][addr] = {}
continue
# Admin Metric : 1
m = p30.match(line)
if m:
db_dict['link_tlvs'][link_tlv_counter]['te_metric'] = \
int(m.groupdict()['te_metric'])
continue
# Maximum Bandwidth : 125000000
# Maximum bandwidth : 125000000
m = p31.match(line)
if m:
db_dict['link_tlvs'][link_tlv_counter]['max_bandwidth'] = \
int(m.groupdict()['max_band'])
continue
# Maximum reservable bandwidth : 93750000
# Maximum reservable bandwidth global: 93750000
m = p32.match(line)
if m:
db_dict['link_tlvs'][link_tlv_counter]\
['max_reservable_bandwidth'] = \
int(m.groupdict()['max_res_band'])
continue
# Affinity Bit : 0x0
m = p33.match(line)
if m:
db_dict['link_tlvs'][link_tlv_counter]['admin_group'] = \
str(m.groupdict()['admin_group'])
continue
# IGP Metric : 1
m = p33_1.match(line)
if m:
db_dict['link_tlvs'][link_tlv_counter]['igp_metric'] = \
int(m.groupdict()['igp_metric'])
continue
# Number of Priority : 8
m = p33_2.match(line)
if m:
db_dict['link_tlvs'][link_tlv_counter]['total_priority'] = \
int(m.groupdict()['num'])
continue
# Priority 0 : 93750000 Priority 1 : 93750000
m = p34.match(line)
if m:
value1 = '{} {}'.format(str(m.groupdict()['num1']), str(m.groupdict()['band1']))
value2 = '{} {}'.format(str(m.groupdict()['num2']), str(m.groupdict()['band2']))
if 'unreserved_bandwidths' not in db_dict['link_tlvs']\
[link_tlv_counter]:
db_dict['link_tlvs'][link_tlv_counter]\
['unreserved_bandwidths'] = {}
if value1 not in db_dict['link_tlvs'][link_tlv_counter]\
['unreserved_bandwidths']:
db_dict['link_tlvs'][link_tlv_counter]\
['unreserved_bandwidths'][value1] = {}
db_dict['link_tlvs'][link_tlv_counter]\
['unreserved_bandwidths'][value1]['priority'] = \
int(m.groupdict()['num1'])
db_dict['link_tlvs'][link_tlv_counter]\
['unreserved_bandwidths'][value1]\
['unreserved_bandwidth'] = int(m.groupdict()['band1'])
if value2 not in db_dict['link_tlvs'][link_tlv_counter]\
['unreserved_bandwidths']:
db_dict['link_tlvs'][link_tlv_counter]\
['unreserved_bandwidths'][value2] = {}
db_dict['link_tlvs'][link_tlv_counter]\
['unreserved_bandwidths'][value2]['priority'] = \
int(m.groupdict()['num2'])
db_dict['link_tlvs'][link_tlv_counter]\
['unreserved_bandwidths'][value2]\
['unreserved_bandwidth'] = int(m.groupdict()['band2'])
continue
# Unknown Sub-TLV : Type = 32770, Length = 4 Value = 00 00 00 01
m = p35.match(line)
if m:
unknown_tlvs_counter += 1
if 'unknown_tlvs' not in db_dict['link_tlvs'][link_tlv_counter]:
db_dict['link_tlvs'][link_tlv_counter]['unknown_tlvs'] = {}
if unknown_tlvs_counter not in db_dict['link_tlvs']\
[link_tlv_counter]['unknown_tlvs']:
db_dict['link_tlvs'][link_tlv_counter]['unknown_tlvs']\
[unknown_tlvs_counter] = {}
db_dict['link_tlvs'][link_tlv_counter]['unknown_tlvs']\
[unknown_tlvs_counter]['type'] = int(m.groupdict()['type'])
db_dict['link_tlvs'][link_tlv_counter]['unknown_tlvs']\
[unknown_tlvs_counter]['length'] = int(m.groupdict()['length'])
db_dict['link_tlvs'][link_tlv_counter]['unknown_tlvs']\
[unknown_tlvs_counter]['value'] = str(m.groupdict()['value'])
continue
# Extended Administrative Group : Length: 8
m = p36.match(line)
if m:
if 'extended_admin_group' not in db_dict['link_tlvs']\
[link_tlv_counter]:
db_dict['link_tlvs'][link_tlv_counter]\
['extended_admin_group'] = {}
db_dict['link_tlvs'][link_tlv_counter]['extended_admin_group']\
['length'] = int(m.groupdict()['eag_length'])
continue
# EAG[0]: 0
m = p37.match(line)
if m:
group_num = int(m.groupdict()['group_num'])
if 'groups' not in db_dict['link_tlvs'][link_tlv_counter]\
['extended_admin_group']:
db_dict['link_tlvs'][link_tlv_counter]\
['extended_admin_group']['groups'] = {}
if group_num not in db_dict['link_tlvs'][link_tlv_counter]\
['extended_admin_group']['groups']:
db_dict['link_tlvs'][link_tlv_counter]\
['extended_admin_group']['groups'][group_num] = {}
db_dict['link_tlvs'][link_tlv_counter]['extended_admin_group']\
['groups'][group_num]['value'] = int(m.groupdict()['val'])
continue
# Neighbor Address : 192.168.220.2
m = p38.match(line)
if m:
db_dict['link_tlvs'][link_tlv_counter]['remote_if_ipv4_addrs'] = {m.groupdict()['neighbor_address']: {}}
continue
# TLV Type: Extended Link
# TLV Type: Segment Routing Node MSD
m = p39.match(line)
if m:
tlv_type_flag = True
sub_tlv_type_flag = False
group = m.groupdict()
tlv_type = group['tlv_type']
# Router Information
if p39_1.match(tlv_type):
tlv_type_field = 'router_capabilities_tlv'
# Segment Routing Algorithm
elif p39_2.match(tlv_type):
tlv_type_field = 'sr_algorithm_tlv'
# Segment Routing Range
elif p39_3.match(tlv_type):
tlv_type_field = 'sid_range_tlvs'
# Segment Routing Node MSD
elif p39_4.match(tlv_type):
tlv_type_field = 'node_msd_tlvs'
# Segment Routing Local Block
elif p39_5.match(tlv_type):
tlv_type_field = 'local_block_tlvs'
# Extended Prefix
elif p39_6.match(tlv_type):
tlv_type_field = 'extended_prefix_tlvs'
# Extended Link
elif p39_7.match(tlv_type):
tlv_type_field = 'extended_link_tlvs'
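# Store this TLV under the next free integer index for its type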
tlv_types_index = db_dict.get(tlv_type_field, {}).keys()
if tlv_types_index:
index = max(tlv_types_index) + 1
else:
index = 1
tlv_type_dict = db_dict\
.setdefault(tlv_type_field, {})\
.setdefault(index, {})
tlv_type_dict['tlv_type'] = tlv_type
continue
if 'Capabilities' in line:
capabilities_flag = True
continue
if capabilities_flag:
if not line:
capabilities_flag = False
continue
capability_field = None
# Graceful Restart Helper
if p55.match(line):
capability_field = 'graceful_restart_helper'
# Stub Router Support
elif p56.match(line):
capability_field = 'stub_router'
if not capability_field:
continue
capabilities_dict = tlv_type_dict\
.setdefault('information_capabilities', {})
capabilities_dict[capability_field] = True
continue
# Algorithm: SPF
# Algorithm: Strict SPF
m = p40.match(line)
if m:
group = m.groupdict()
algorithm = group['algorithm']
algorithm = algorithm.strip()
if sub_tlv_type_flag:
sub_tlv_types_dict['algo'] = algorithm
continue
algo_field = None
# SPF
if p57.match(algorithm):
algo_field = 'spf'
# Strict SPF
if p58.match(algorithm):
algo_field = 'strict_spf'
if not algo_field:
continue
algorithm_dict = tlv_type_dict.setdefault('algorithm', {})
algorithm_dict[algo_field] = True
continue
# Range Size: 1000
m = p41.match(line)
if m:
group = m.groupdict()
range_size = group['range_size']
tlv_type_dict['range_size'] = int(range_size)
continue
# Flags : L-Bit, V-bit
m = p42.match(line)
if m:
group = m.groupdict()
flags = group['flags']
if sub_tlv_type_flag:
sub_tlv_types_dict['flags'] = flags
continue
tlv_type_dict['flags'] = flags
continue
# Weight : 0
m = p44.match(line)
if m:
group = m.groupdict()
weight = int(group['weight'])
if sub_tlv_type_flag:
sub_tlv_types_dict['weight'] = weight
continue
tlv_type_dict['weight'] = weight
continue
# Label : 19
m = p45.match(line)
if m:
group = m.groupdict()
label = group['label']
sub_tlv_types_dict['label'] = int(label)
continue
# (Link Data) Interface IP address: 192.168.220.1
m = p46.match(line)
if m:
group = m.groupdict()
tlv_type_dict['link_data'] = group['link_data']
continue
# Prefix : 10.4.1.1/32
m = p47.match(line)
if m:
group = m.groupdict()
prefix = group['prefix']
tlv_type_dict['prefix'] = prefix
continue
# AF : 0
m = p48.match(line)
if m:
group = m.groupdict()
# Use a local name so the outer address-family key ('ipv4') is not clobbered
prefix_af = int(group['af'])
tlv_type_dict['af'] = prefix_af
continue
# Route-type: Intra
m = p49.match(line)
if m:
group = m.groupdict()
route_type = group['route_type']
tlv_type_dict['route_type'] = route_type
continue
# Sub-TLV Type: Remote Intf Addr
# Sub-TLV Type: Local / Remote Intf ID
m = p50.match(line)
if m:
tlv_type_flag = False
sub_tlv_type_flag = True
group = m.groupdict()
sub_tlv_type = group['sub_tlv_type']
sub_tlv_types_index = tlv_type_dict.get('sub_tlvs', {}).keys()
if sub_tlv_types_index:
index = max(sub_tlv_types_index) + 1
else:
index = 1
sub_tlv_types_dict = tlv_type_dict.setdefault('sub_tlvs', {}).setdefault(index, {})
sub_tlv_types_dict['type'] = sub_tlv_type
continue
# Remote Interface Address : 192.168.0.1
m = p51.match(line)
if m:
group = m.groupdict()
remote_interface_address = group['remote_interface_address']
sub_tlv_types_dict['remote_interface_address'] = remote_interface_address
continue
# Local Interface ID : 20
m = p52.match(line)
if m:
group = m.groupdict()
local_interface_id = int(group['local_interface_id'])
sub_tlv_types_dict['local_interface_id'] = local_interface_id
continue
# Remote Interface ID : 20
m = p53.match(line)
if m:
group = m.groupdict()
remote_interface_id = int(group['remote_interface_id'])
sub_tlv_types_dict['remote_interface_id'] = remote_interface_id
continue
# SID : 1
m = p54.match(line)
if m:
group = m.groupdict()
sid = int(group['sid'])
sub_tlv_types_dict['sid'] = sid
continue
# Sub-type: Node Max Sid Depth, Value: 13
m = p59.match(line)
if m:
group = m.groupdict()
sub_type_value = int(group['value'])
sub_type_dict = tlv_type_dict.setdefault('sub_type', {})
sub_type_dict['node_max_sid_depth_value'] = sub_type_value
continue
return ret_dict
# ==================================
# Schema for:
# * 'show ip ospf database router'
# ==================================
class ShowIpOspfDatabaseRouterSchema(MetaParser):
''' Schema for:
* 'show ip ospf database router'
'''
schema = {
'vrf':
{Any():
{'address_family':
{Any():
{'instance':
{Any():
{Optional('areas'):
{Any():
{'database':
{'lsa_types':
{Any():
{'lsa_type': int,
'lsas':
{Any():
{'lsa_id': str,
'adv_router': str,
'ospfv2':
{'header':
{'option': str,
'option_desc': str,
'lsa_id': str,
'age': int,
'type': int,
'adv_router': str,
'seq_num': str,
'checksum': str,
'length': int,
Optional('routing_bit_enable'): bool,
Optional('as_boundary_router'): bool,
Optional('area_border_router'): bool,
},
'body':
{'router':
{Optional('flags'): str,
'num_of_links': int,
Optional('links'):
{Any():
{'link_id': str,
'link_data': str,
'type': str,
Optional('num_mtid_metrics'): int,
Optional('num_tos_metrics'): int,
'topologies':
{Any():
{'mt_id': int,
Optional('metric'): int,
Optional('tos'): int,
},
},
},
},
},
},
},
},
},
},
},
},
},
},
},
},
},
},
},
},
}
# ==================================
# Parser for:
# * 'show ip ospf database router'
# ==================================
class ShowIpOspfDatabaseRouter(ShowIpOspfDatabaseRouterSchema, ShowIpOspfDatabaseTypeParser):
''' Parser for:
* 'show ip ospf database router'
'''
cli_command = 'show ip ospf database router'
exclude = ['age', 'seq_num', 'checksum', 'links']
def cli(self, output=None):
if not output:
output = self.device.execute(self.cli_command)
return super().cli(db_type='router', out=output)
# ====================================
# Schema for:
# * 'show ip ospf database external'
# ====================================
class ShowIpOspfDatabaseExternalSchema(MetaParser):
''' Schema for:
* 'show ip ospf database external'
'''
schema = {
'vrf':
{Any():
{'address_family':
{Any():
{'instance':
{Any():
{Optional('areas'):
{Any():
{'database':
{'lsa_types':
{Any():
{'lsa_type': int,
'lsas':
{Any():
{'lsa_id': str,
'adv_router': str,
'ospfv2':
{'header':
{'option': str,
'option_desc': str,
'lsa_id': str,
'age': int,
'type': int,
'adv_router': str,
'seq_num': str,
'checksum': str,
'length': int,
Optional('routing_bit_enable'): bool,
},
'body':
{'external':
{'network_mask': str,
'topologies':
{Any():
{'mt_id': int,
Optional('flags'): str,
'metric': int,
'forwarding_address': str,
'external_route_tag': int},
},
},
},
},
},
},
},
},
},
},
},
},
},
},
},
},
},
}
# ====================================
# Parser for:
# * 'show ip ospf database external'
# ====================================
class ShowIpOspfDatabaseExternal(ShowIpOspfDatabaseExternalSchema, ShowIpOspfDatabaseTypeParser):
''' Parser for:
* 'show ip ospf database external'
'''
cli_command = 'show ip ospf database external'
def cli(self, output=None):
if not output:
output = self.device.execute(self.cli_command)
return super().cli(db_type='external', out=output)
# ===================================
# Schema for:
# * 'show ip ospf database network'
# ===================================
class ShowIpOspfDatabaseNetworkSchema(MetaParser):
''' Schema for:
* 'show ip ospf database network'
'''
schema = {
'vrf':
{Any():
{'address_family':
{Any():
{'instance':
{Any():
{Optional('areas'):
{Any():
{'database':
{'lsa_types':
{Any():
{'lsa_type': int,
'lsas':
{Any():
{'lsa_id': str,
'adv_router': str,
'ospfv2':
{'header':
{'option': str,
'option_desc': str,
'lsa_id': str,
'age': int,
'type': int,
'adv_router': str,
'seq_num': str,
'checksum': str,
'length': int,
Optional('routing_bit_enable'): bool,
},
'body':
{'network':
{'network_mask': str,
'attached_routers':
{Any(): {},
},
},
},
},
},
},
},
},
},
},
},
},
},
},
},
},
},
}
# ===================================
# Parser for:
# * 'show ip ospf database network'
# ===================================
class ShowIpOspfDatabaseNetwork(ShowIpOspfDatabaseNetworkSchema, ShowIpOspfDatabaseTypeParser):
''' Parser for:
* 'show ip ospf database network'
'''
cli_command = 'show ip ospf database network'
exclude = ['age', 'seq_num', 'checksum', 'lsas']
def cli(self, output=None):
if not output:
output = self.device.execute(self.cli_command)
return super().cli(db_type='network', out=output)
# ===================================
# Schema for:
# * 'show ip ospf database summary'
# ===================================
class ShowIpOspfDatabaseSummarySchema(MetaParser):
''' Schema for:
* 'show ip ospf database summary'
'''
schema = {
'vrf':
{Any():
{'address_family':
{Any():
{'instance':
{Any():
{Optional('areas'):
{Any():
{'database':
{'lsa_types':
{Any():
{'lsa_type': int,
'lsas':
{Any():
{'lsa_id': str,
'adv_router': str,
'ospfv2':
{'header':
{'option': str,
'option_desc': str,
'lsa_id': str,
'age': int,
'type': int,
'adv_router': str,
'seq_num': str,
'checksum': str,
'length': int,
Optional('routing_bit_enable'): bool,
},
'body':
{'summary':
{'network_mask': str,
'topologies':
{Any():
{'mt_id': int,
'metric': int},
},
},
},
},
},
},
},
},
},
},
},
},
},
},
},
},
},
}
# ===================================
# Parser for:
# * 'show ip ospf database summary'
# ===================================
class ShowIpOspfDatabaseSummary(ShowIpOspfDatabaseSummarySchema, ShowIpOspfDatabaseTypeParser):
''' Parser for:
* 'show ip ospf database summary'
'''
cli_command = 'show ip ospf database summary'
exclude = ['age', 'seq_num', 'checksum']
def cli(self, output=None):
if not output:
output = self.device.execute(self.cli_command)
return super().cli(db_type='summary', out=output)
# =======================================
# Schema for:
# * 'show ip ospf database opaque-area'
# =======================================
class ShowIpOspfDatabaseOpaqueAreaSchema(MetaParser):
''' Schema for:
* 'show ip ospf database opaque-area'
* 'show ip ospf database opaque-area self-originate'
'''
schema = {
'vrf': {
Any(): {
'address_family': {
Any(): {
'instance': {
Any(): {
Optional('areas'): {
Any(): {
'database': {
'lsa_types': {
Any(): {
'lsa_type': int,
'lsas': {
Any(): {
'lsa_id': str,
'adv_router': str,
'ospfv2': {
'header': {
'option': str,
'option_desc': str,
'lsa_id': str,
'age': int,
'type': int,
'adv_router': str,
'seq_num': str,
'checksum': str,
'length': int,
Optional('opaque_type'): int,
'opaque_id': int,
Optional('fragment_number'): int,
},
'body': {
'opaque': {
Optional('mpls_te_router_id'): str,
Optional('links'): {
Any(): {
'link_id': str,
'topologies': {
Any(): {
'mt_id': int
}
},
}
},
Optional('num_of_links'): int,
Optional('router_capabilities_tlv'): {
Any(): {
'length': int,
'tlv_type': str,
Optional('information_capabilities'): {
Optional('graceful_restart'): bool,
Optional('graceful_restart_helper'): bool,
Optional('stub_router'): bool,
Optional('traffic_enginnering'): bool,
Optional('p2p_over_lan'): bool,
Optional('experimental_te'): bool,
}
}
},
Optional('sr_algorithm_tlv'): {
Any(): {
'tlv_type': str,
'length': int,
Optional('algorithm'): {
Optional('spf'): bool,
Optional('strict_spf'): bool,
}
}
},
Optional('sid_range_tlvs'): {
Any(): {
'tlv_type': str,
'length': int,
'range_size': int,
'sub_tlvs': {
Any(): {
'type': str,
'length': int,
'label': int,
}
}
}
},
Optional('node_msd_tlvs'): {
Any(): {
'tlv_type': str,
'length': int,
'sub_type': {
'node_max_sid_depth_value': int
}
}
},
Optional('local_block_tlvs'): {
Any(): {
'tlv_type': str,
'range_size': int,
'length': int,
'sub_tlvs': {
Any(): {
'type': str,
'length': int,
'label': int
}
}
}
},
Optional('extended_prefix_tlvs'): {
Any(): {
'tlv_type': str,
'route_type': str,
'length': int,
'flags': str,
'prefix': str,
'af': int,
Optional('sub_tlvs'): {
Any(): {
'type': str,
'length': int,
'flags': str,
Optional('mt_id'): int,
'algo': str,
'sid': int,
}
}
}
},
Optional('extended_link_tlvs'): {
Any(): {
'link_id': str,
'link_data': str,
'length': int,
Optional('link_name'): str,
'link_type': int,
'tlv_type': str,
'sub_tlvs': {
Any(): {
'type': str,
Optional('length'): int,
Optional('flags'): str,
Optional('mt_id'): int,
Optional('weight'): int,
Optional('label'): int,
Optional('remote_interface_address'): str,
Optional('local_interface_id'): int,
Optional('remote_interface_id'): int,
}
}
}
},
Optional('link_tlvs'): {
Any(): {
Optional('link_type'): int,
Optional('link_name'): str,
Optional('link_id'): str,
Optional('te_metric'): int,
Optional('max_bandwidth'): int,
Optional('max_reservable_bandwidth'): int,
Optional('admin_group'): str,
Optional('igp_metric'): int,
Optional('total_priority'): int,
Optional('local_if_ipv4_addrs'): {
Any(): {}
},
Optional('remote_if_ipv4_addrs'): {
Any(): {}
},
Optional('unreserved_bandwidths'): {
Any(): {
'priority': int,
'unreserved_bandwidth': int,
}
},
Optional('unknown_tlvs'): {
Any(): {
'type': int,
'length': int,
'value': str,
}
},
Optional('extended_admin_group'): {
'length': int,
Optional('groups'): {
Any(): {
'value': int
}
},
},
}
},
}
},
},
}
},
}
}
}
}
}
}
}
}
}
}
}
}
# =======================================
# Parser for:
# * 'show ip ospf database opaque-area'
# =======================================
class ShowIpOspfDatabaseOpaqueArea(ShowIpOspfDatabaseOpaqueAreaSchema, ShowIpOspfDatabaseTypeParser):
''' Parser for:
* 'show ip ospf database opaque-area'
'''
cli_command = 'show ip ospf database opaque-area'
def cli(self, output=None):
if not output:
output = self.device.execute(self.cli_command)
return super().cli(db_type='opaque', out=output)
# =====================================
# Schema for:
# * 'show ip ospf mpls ldp interface'
# =====================================
class ShowIpOspfMplsLdpInterfaceSchema(MetaParser):
''' Schema for:
* "show ip ospf mpls ldp interface"
'''
schema = {
'vrf':
{Any():
{'address_family':
{Any():
{'instance':
{Any():
{'mpls':
{'ldp':
{'autoconfig': bool,
'autoconfig_area_id': str,
},
},
'areas':
{Any():
{'interfaces':
{Any():
{'mpls':
{'ldp':
{'autoconfig': bool,
'autoconfig_area_id': str,
'igp_sync': bool,
'holddown_timer': bool,
'state': str,
Optional('state_info'): str,
},
},
},
},
},
},
},
},
},
},
},
},
}
# =====================================
# Parser for:
# * 'show ip ospf mpls ldp interface'
# =====================================
class ShowIpOspfMplsLdpInterface(ShowIpOspfMplsLdpInterfaceSchema):
''' Parser for:
* 'show ip ospf mpls ldp interface'
'''
cli_command = ['show ip ospf mpls ldp interface', 'show ip ospf mpls ldp interface {interface}']
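# Usage sketch (hypothetical device object; the interface argument is optional
# and, via MetaParser.parse(), is forwarded to cli()):
#   parsed = ShowIpOspfMplsLdpInterface(device=device).parse()
#   parsed = ShowIpOspfMplsLdpInterface(device=device).parse(interface='GigabitEthernet2')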
def cli(self, interface='', output=None):
if output is None:
# Execute command on device
if interface:
out = self.device.execute(self.cli_command[1].format(interface=interface))
else:
out = self.device.execute(self.cli_command[0])
else:
out = output
# Init vars
ret_dict = {}
af = 'ipv4' # this is ospf - always ipv4
p1 = re.compile(r'^(?P<interface>(Lo.*|.*Gig.*|.*(SL|VL).*|'
'Cellular.*|FastEthernet.*|LISP.*|Po.*|Tunnel.*|'
'VirtualPortGroup.*|Vlan.*))$')
p2 = re.compile(r'^Process +ID +(?P<instance>(\S+)),'
'(?: +VRF +(?P<vrf>(\S+)),)?'
' +Area +(?P<area>(\S+))$')
p3 = re.compile(r'^LDP +is'
' +(?P<auto_config>(not configured|configured))'
' +through +LDP +autoconfig$')
p5 = re.compile(r'^Holddown +timer +is (?P<val>([a-zA-Z\s]+))$')
# Interface is down and pending LDP
p6 = re.compile(r'^Interface +is (?P<state>(up|down))( +and +(?P<state_info>[\w\s]*))?$')
for line in out.splitlines():
line = line.strip()
# Loopback0
# GigabitEthernet2
# TenGigabitEthernet3/0/1
# TwoGigabitEthernet
# FiveGigabitEthernet
# TwentyFiveGigE
# FortyGigabitEthernet
# HundredGigE
# OSPF_SL1
# OSPF_VL1
# --extra--
# Cellular
# FastEthernet
# LISP
# Port-channel
# Tunnel
# VirtualPortGroup
# Vlan
m = p1.match(line)
if m:
interface = str(m.groupdict()['interface'])
continue
# Process ID 1, Area 0
# Process ID 100, Area 0.0.0.0
# Process ID 2, VRF VRF1, Area 1
m = p2.match(line)
if m:
instance = str(m.groupdict()['instance'])
try:
int(m.groupdict()['area'])
area = str(IPAddress(str(m.groupdict()['area'])))
except Exception:
area = m.groupdict()['area']
if m.groupdict()['vrf']:
vrf = str(m.groupdict()['vrf'])
else:
vrf = 'default'
# Create dict
if 'vrf' not in ret_dict:
ret_dict['vrf'] = {}
if vrf not in ret_dict['vrf']:
ret_dict['vrf'][vrf] = {}
if 'address_family' not in ret_dict['vrf'][vrf]:
ret_dict['vrf'][vrf]['address_family'] = {}
if af not in ret_dict['vrf'][vrf]['address_family']:
ret_dict['vrf'][vrf]['address_family'][af] = {}
if 'instance' not in ret_dict['vrf'][vrf]['address_family'][af]:
ret_dict['vrf'][vrf]['address_family'][af]['instance'] = {}
if instance not in ret_dict['vrf'][vrf]['address_family'][af]\
['instance']:
ret_dict['vrf'][vrf]['address_family'][af]['instance']\
[instance] = {}
# Create mpls dict
if 'mpls' not in ret_dict['vrf'][vrf]['address_family'][af]\
['instance'][instance]:
ret_dict['vrf'][vrf]['address_family'][af]['instance']\
[instance]['mpls'] = {}
if 'ldp' not in ret_dict['vrf'][vrf]['address_family'][af]\
['instance'][instance]['mpls']:
ret_dict['vrf'][vrf]['address_family'][af]['instance']\
[instance]['mpls']['ldp'] = {}
mpls_ldp_dict = ret_dict['vrf'][vrf]['address_family'][af]\
['instance'][instance]['mpls']['ldp']
# Set values to mpls_ldp_dict
mpls_ldp_dict['autoconfig_area_id'] = area
if 'areas' not in ret_dict['vrf'][vrf]['address_family'][af]\
['instance'][instance]:
ret_dict['vrf'][vrf]['address_family'][af]['instance']\
[instance]['areas'] = {}
if area not in ret_dict['vrf'][vrf]['address_family'][af]\
['instance'][instance]['areas']:
ret_dict['vrf'][vrf]['address_family'][af]['instance']\
[instance]['areas'][area] = {}
if 'interfaces' not in ret_dict['vrf'][vrf]['address_family']\
[af]['instance'][instance]['areas'][area]:
ret_dict['vrf'][vrf]['address_family'][af]['instance']\
[instance]['areas'][area]['interfaces'] = {}
if interface not in ret_dict['vrf'][vrf]['address_family'][af]\
['instance'][instance]['areas'][area]['interfaces']:
ret_dict['vrf'][vrf]['address_family'][af]['instance']\
[instance]['areas'][area]['interfaces'][interface] = {}
if 'mpls' not in ret_dict['vrf'][vrf]['address_family'][af]\
['instance'][instance]['areas'][area]['interfaces']\
[interface]:
ret_dict['vrf'][vrf]['address_family'][af]['instance']\
[instance]['areas'][area]['interfaces'][interface]\
['mpls'] = {}
if 'ldp' not in ret_dict['vrf'][vrf]['address_family'][af]\
['instance'][instance]['areas'][area]['interfaces']\
[interface]['mpls']:
ret_dict['vrf'][vrf]['address_family'][af]['instance']\
[instance]['areas'][area]['interfaces'][interface]\
['mpls']['ldp'] = {}
# Create intf_dict
intf_dict = ret_dict['vrf'][vrf]['address_family'][af]\
['instance'][instance]['areas'][area]\
['interfaces'][interface]['mpls']['ldp']
# Set values to intf_dict
intf_dict['autoconfig_area_id'] = area
continue
# LDP is not configured through LDP autoconfig
# LDP is configured through LDP autoconfig
m = p3.match(line)
if m:
if m.groupdict()['auto_config'] == 'configured':
intf_dict['autoconfig'] = True
mpls_ldp_dict['autoconfig'] = True
else:
intf_dict['autoconfig'] = False
mpls_ldp_dict['autoconfig'] = False
continue
# LDP-IGP Synchronization : Not required
# LDP-IGP Synchronization : Required
p4 = re.compile(r'^LDP-IGP +Synchronization *:'
' +(?P<igp_sync>(Not required|Required))$')
m = p4.match(line)
if m:
if m.groupdict()['igp_sync'] == 'Required':
intf_dict['igp_sync'] = True
else:
intf_dict['igp_sync'] = False
continue
# Holddown timer is disabled
m = p5.match(line)
if m:
if 'enabled' in m.groupdict()['val']:
intf_dict['holddown_timer'] = True
else:
intf_dict['holddown_timer'] = False
continue
# Interface is up
m = p6.match(line)
if m:
state_info = m.groupdict()['state_info']
intf_dict['state'] = str(m.groupdict()['state'])
if state_info:
intf_dict['state_info'] = str(state_info)
continue
return ret_dict
# ========================================
# Schema for:
# * 'show ip ospf mpls traffic-eng link'
# ========================================
class ShowIpOspfMplsTrafficEngLinkSchema(MetaParser):
''' Schema for:
* 'show ip ospf mpls traffic-eng link'
'''
schema = {
'vrf':
{Any():
{'address_family':
{Any():
{'instance':
{Any():
{'mpls':
{'te':
{'router_id': str},
},
'areas':
{Any():
{'mpls':
{'te':
{'enable': bool,
Optional('total_links'): int,
Optional('area_instance'): int,
Optional('link_hash_bucket'):
{Any():
{'link_fragments':
{Any():
{'link_instance': int,
'network_type': str,
'link_id': str,
'interface_address': str,
'te_admin_metric': int,
'igp_admin_metric': int,
'max_bandwidth': int,
'max_reservable_bandwidth': int,
'affinity_bit': str,
'total_priority': int,
Optional('unreserved_bandwidths'):
{Any():
{'priority': int,
'unreserved_bandwidth': int,
},
},
},
},
},
},
},
},
},
},
},
},
},
},
},
},
}
# ========================================
# Parser for:
# * 'show ip ospf mpls traffic-eng link'
# ========================================
class ShowIpOspfMplsTrafficEngLink(ShowIpOspfMplsTrafficEngLinkSchema):
''' Parser for:
* 'show ip ospf mpls traffic-eng link'
'''
cli_command = 'show ip ospf mpls traffic-eng link'
def cli(self, output=None):
if output is None:
# Execute command on device
out = self.device.execute(self.cli_command)
else:
out = output
# Init vars
ret_dict = {}
af = 'ipv4' # this is ospf - always ipv4
p1 = re.compile(r'^OSPF +Router +with +ID +\((?P<router_id>(\S+))\)'
' +\(Process +ID +(?P<instance>(\S+))\)$')
p2 = re.compile(r'^Area +(?P<area>(\d+)) +has +(?P<links>(\d+))'
' +MPLS +TE +links. +Area +instance +is'
' +(?P<area_instance>(\d+))\.$')
p3 = re.compile(r'^Area +(?P<area>(\S+)) +MPLS +TE +not +initialized$')
p4 = re.compile(r'^Links +in +hash +bucket +(?P<hash>(\d+))\.$')
p5 = re.compile(r'^Link +is +associated +with +fragment'
' +(?P<fragment>(\d+))\. +Link +instance +is'
' +(?P<link_instance>(\d+))$')
p6 = re.compile(r'^Link +connected +to +(?P<type>([a-zA-Z\s]+))$')
p7 = re.compile(r'^Link +ID *: +(?P<link_id>(\S+))$')
p8 = re.compile(r'^Interface +Address *: +(?P<addr>(\S+))$')
p9 = re.compile(r'^Admin +Metric +te: +(?P<te>(\d+)) +igp:'
' +(?P<igp>(\d+))$')
p14 = re.compile(r'^Maximum +(B|b)andwidth *: +(?P<mband>(\d+))$')
p10 = re.compile(r'^Maximum +(R|r)eservable +(B|b)andwidth *:'
' +(?P<res_band>(\d+))$')
p11 = re.compile(r'^Affinity +Bit *: +(?P<admin_group>(\S+))$')
p12 = re.compile(r'^Number +of +Priority +: +(?P<priority>(\d+))$')
p13 = re.compile(r'^Priority +(?P<num1>(\d+)) *:'
' +(?P<band1>(\d+))(?: +Priority +(?P<num2>(\d+))'
' *: +(?P<band2>(\d+)))?$')
for line in out.splitlines():
line = line.strip()
# OSPF Router with ID (10.4.1.1) (Process ID 1)
m = p1.match(line)
if m:
router_id = str(m.groupdict()['router_id'])
instance = str(m.groupdict()['instance'])
# Get VRF information using the ospf instance
cmd = 'show running-config | section router ospf {}'.format(instance)
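# Note: a second CLI command is issued here to map the OSPF process ID to
# its VRF, since the traffic-eng link output itself does not name the VRF.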
cfg_out = self.device.execute(cmd)
for cfg_line in cfg_out.splitlines():
cfg_line = cfg_line.rstrip()
# Skip the show command line so as to not match
if re.search('show', cfg_line):
continue
# router ospf 1
# router ospf 2 vrf VRF1
p = re.search('router +ospf +(?P<instance>(\S+))'
'(?: +vrf +(?P<vrf>(\S+)))?', cfg_line)
if p:
p_instance = str(p.groupdict()['instance'])
if p_instance == instance:
if p.groupdict()['vrf']:
vrf = str(p.groupdict()['vrf'])
break
else:
vrf = 'default'
break
# Create dict
if 'vrf' not in ret_dict:
ret_dict['vrf'] = {}
if vrf not in ret_dict['vrf']:
ret_dict['vrf'][vrf] = {}
if 'address_family' not in ret_dict['vrf'][vrf]:
ret_dict['vrf'][vrf]['address_family'] = {}
if af not in ret_dict['vrf'][vrf]['address_family']:
ret_dict['vrf'][vrf]['address_family'][af] = {}
if 'instance' not in ret_dict['vrf'][vrf]['address_family'][af]:
ret_dict['vrf'][vrf]['address_family'][af]['instance'] = {}
if instance not in ret_dict['vrf'][vrf]['address_family'][af]\
['instance']:
ret_dict['vrf'][vrf]['address_family'][af]['instance']\
[instance] = {}
if 'mpls' not in ret_dict['vrf'][vrf]['address_family'][af]\
['instance'][instance]:
ret_dict['vrf'][vrf]['address_family'][af]['instance']\
[instance]['mpls'] = {}
if 'te' not in ret_dict['vrf'][vrf]['address_family'][af]\
['instance'][instance]['mpls']:
ret_dict['vrf'][vrf]['address_family'][af]['instance']\
[instance]['mpls']['te'] = {}
ret_dict['vrf'][vrf]['address_family'][af]['instance']\
[instance]['mpls']['te']['router_id'] = router_id
continue
# Area 0 has 2 MPLS TE links. Area instance is 2.
m = p2.match(line)
if m:
area = str(IPAddress(str(m.groupdict()['area'])))
total_links = int(m.groupdict()['links'])
area_instance = int(m.groupdict()['area_instance'])
# Create dict
if 'areas' not in ret_dict['vrf'][vrf]['address_family'][af]\
['instance'][instance]:
ret_dict['vrf'][vrf]['address_family'][af]['instance']\
[instance]['areas'] = {}
if area not in ret_dict['vrf'][vrf]['address_family'][af]\
['instance'][instance]['areas']:
ret_dict['vrf'][vrf]['address_family'][af]['instance']\
[instance]['areas'][area] = {}
if 'mpls' not in ret_dict['vrf'][vrf]['address_family'][af]\
['instance'][instance]['areas'][area]:
ret_dict['vrf'][vrf]['address_family'][af]['instance']\
[instance]['areas'][area]['mpls'] = {}
if 'te' not in ret_dict['vrf'][vrf]['address_family'][af]\
['instance'][instance]['areas'][area]['mpls']:
ret_dict['vrf'][vrf]['address_family'][af]['instance']\
[instance]['areas'][area]['mpls']['te'] = {}
# Set values
ret_dict['vrf'][vrf]['address_family'][af]['instance']\
[instance]['areas'][area]['mpls']['te']['enable'] = True
ret_dict['vrf'][vrf]['address_family'][af]['instance']\
[instance]['areas'][area]['mpls']['te']['total_links'] = \
total_links
ret_dict['vrf'][vrf]['address_family'][af]['instance']\
[instance]['areas'][area]['mpls']['te']['area_instance'] = \
area_instance
continue
# Area 1 MPLS TE not initialized
# Area 0.0.0.0 MPLS TE not initialized
m = p3.match(line)
if m:
try:
int(m.groupdict()['area'])
area = str(IPAddress(str(m.groupdict()['area'])))
except Exception:
area = m.groupdict()['area']
# Create dict
if 'areas' not in ret_dict['vrf'][vrf]['address_family'][af]\
['instance'][instance]:
ret_dict['vrf'][vrf]['address_family'][af]['instance']\
[instance]['areas'] = {}
if area not in ret_dict['vrf'][vrf]['address_family'][af]\
['instance'][instance]['areas']:
ret_dict['vrf'][vrf]['address_family'][af]['instance']\
[instance]['areas'][area] = {}
if 'mpls' not in ret_dict['vrf'][vrf]['address_family'][af]\
['instance'][instance]['areas'][area]:
ret_dict['vrf'][vrf]['address_family'][af]['instance']\
[instance]['areas'][area]['mpls'] = {}
if 'te' not in ret_dict['vrf'][vrf]['address_family'][af]\
['instance'][instance]['areas'][area]['mpls']:
ret_dict['vrf'][vrf]['address_family'][af]['instance']\
[instance]['areas'][area]['mpls']['te'] = {}
# Set values
ret_dict['vrf'][vrf]['address_family'][af]['instance']\
[instance]['areas'][area]['mpls']['te']['enable'] = False
continue
# Links in hash bucket 8.
m = p4.match(line)
if m:
link_hash_bucket = int(m.groupdict()['hash'])
if 'link_hash_bucket' not in ret_dict['vrf'][vrf]\
['address_family'][af]['instance'][instance]['areas']\
[area]['mpls']['te']:
ret_dict['vrf'][vrf]['address_family'][af]['instance']\
[instance]['areas'][area]['mpls']['te']\
['link_hash_bucket'] = {}
if link_hash_bucket not in ret_dict['vrf'][vrf]\
['address_family'][af]['instance'][instance]['areas']\
[area]['mpls']['te']['link_hash_bucket']:
ret_dict['vrf'][vrf]['address_family'][af]['instance']\
[instance]['areas'][area]['mpls']['te']\
['link_hash_bucket'][link_hash_bucket] = {}
link_dict = ret_dict['vrf'][vrf]['address_family'][af]\
['instance'][instance]['areas'][area]['mpls']\
['te']['link_hash_bucket'][link_hash_bucket]
continue
# Link is associated with fragment 2. Link instance is 2
m = p5.match(line)
if m:
link_fragment = int(m.groupdict()['fragment'])
if 'link_fragments' not in link_dict:
link_dict['link_fragments'] = {}
if link_fragment not in link_dict['link_fragments']:
link_dict['link_fragments'][link_fragment] = {}
sub_dict = link_dict['link_fragments'][link_fragment]
sub_dict['link_instance'] = int(m.groupdict()['link_instance'])
continue
# Link connected to Broadcast network
m = p6.match(line)
if m:
sub_dict['network_type'] = str(m.groupdict()['type']).lower()
continue
# Link ID : 10.1.2.1
m = p7.match(line)
if m:
sub_dict['link_id'] = str(m.groupdict()['link_id'])
continue
# Interface Address : 10.1.2.1
m = p8.match(line)
if m:
sub_dict['interface_address'] = str(m.groupdict()['addr'])
continue
# Admin Metric te: 1 igp: 1
m = p9.match(line)
if m:
sub_dict['te_admin_metric'] = int(m.groupdict()['te'])
sub_dict['igp_admin_metric'] = int(m.groupdict()['igp'])
continue
# Maximum bandwidth : 125000000
m = p14.match(line)  # modified from p9 to p14
if m:
sub_dict['max_bandwidth'] = int(m.groupdict()['mband'])
continue
# Maximum reservable bandwidth : 93750000
m = p10.match(line)
if m:
sub_dict['max_reservable_bandwidth'] = \
int(m.groupdict()['res_band'])
continue
# Affinity Bit : 0x0
m = p11.match(line)
if m:
sub_dict['affinity_bit'] = str(m.groupdict()['admin_group'])
continue
# Number of Priority : 8
m = p12.match(line)
if m:
sub_dict['total_priority'] = int(m.groupdict()['priority'])
continue
# Priority 0 : 93750000 Priority 1 : 93750000
m = p13.match(line)
if m:
value1 = '{} {}'.format(str(m.groupdict()['num1']), str(m.groupdict()['band1']))
value2 = '{} {}'.format(str(m.groupdict()['num2']), str(m.groupdict()['band2']))
if 'unreserved_bandwidths' not in sub_dict:
sub_dict['unreserved_bandwidths'] = {}
if value1 not in sub_dict['unreserved_bandwidths']:
sub_dict['unreserved_bandwidths'][value1] = {}
sub_dict['unreserved_bandwidths'][value1]['priority'] = \
int(m.groupdict()['num1'])
sub_dict['unreserved_bandwidths'][value1]\
['unreserved_bandwidth'] = int(m.groupdict()['band1'])
if value2 not in sub_dict['unreserved_bandwidths']:
sub_dict['unreserved_bandwidths'][value2] = {}
sub_dict['unreserved_bandwidths'][value2]['priority'] = \
int(m.groupdict()['num2'])
sub_dict['unreserved_bandwidths'][value2]\
['unreserved_bandwidth'] = int(m.groupdict()['band2'])
continue
return ret_dict
# =============================
# Schema for:
# * 'show ip ospf max-metric'
# =============================
class ShowIpOspfMaxMetricSchema(MetaParser):
''' Schema for:
* 'show ip ospf max-metric'
'''
schema = {
'vrf':
{Any():
{'address_family':
{Any():
{'instance':
{Any():
{'router_id': str,
'base_topology_mtid':
{Any():
{'start_time': str,
'time_elapsed': str,
'router_lsa_max_metric':
{Any():
{Optional('condition'): str,
Optional('state'): str,
Optional('advertise_lsa_metric'): int,
Optional('unset_reason'): str,
Optional('unset_time'): str,
Optional('unset_time_elapsed'): str,
Optional('time_remaining'): str,
},
},
},
},
},
},
},
},
},
},
}
# =============================
# Parser for:
# * 'show ip ospf max-metric'
# =============================
class ShowIpOspfMaxMetric(ShowIpOspfMaxMetricSchema):
''' Parser for:
* 'show ip ospf max-metric'
'''
cli_command = 'show ip ospf max-metric'
def cli(self, output=None):
if output is None:
# Execute command on device
out = self.device.execute(self.cli_command)
else:
out = output
# Init vars
ret_dict = {}
address_family = 'ipv4'
# Load for five secs: 71%/0%; one minute: 11%; five minutes: 9%
# Time source is NTP, 20:29:26.348 EST Fri Nov 11 2016
# OSPF Router with ID (172.16.1.214) (Process ID 65109)
# OSPF Router with ID (10.36.3.3) (Process ID 1, VRF VRF1)
p1 = re.compile(r'^OSPF +Router +with +ID +\((?P<router_id>(\S+))\)'
' +\(Process +ID +(?P<instance>(\d+))'
'(?:, +VRF +(?P<vrf>(\S+)))?\)$')
# Base Topology (MTID 0)
p2 = re.compile(r'^Base +Topology +\(MTID +(?P<mtid>(\d+))\)$')
# Start time: 00:01:58.314, Time elapsed: 00:54:43.858
p3 = re.compile(r'^Start +time: +(?P<start_time>(\S+)), +Time +elapsed:'
' +(?P<time_elapsed>(\S+))$')
# Originating router-LSAs with maximum metric
# Originating router-LSAs with maximum metric, Time remaining: 00:03:55
p4_1 = re.compile(r'^Originating +router-LSAs +with +maximum +metric(, +Time +remaining: +(?P<time_remaining>([\d\:]+)))?$')
# Router is not originating router-LSAs with maximum metric
p4_2 = re.compile(r'^Router +is +not +originating +router-LSAs +with'
' +maximum +metric$')
# Condition: on startup for 5 seconds, State: inactive
p5 = re.compile(r'^Condition: +(?P<condition>(.*)), +State:'
' +(?P<state>([a-zA-Z\s]+))$')
# Advertise summary-LSAs with metric 16711680
p6 = re.compile(r'^Advertise +summary-LSAs +with +metric'
' +(?P<metric>(\d+))$')
# Unset reason: timer expired, Originated for 5 seconds
p7 = re.compile(r'^Unset +reason: (?P<reason>(.*))$')
# Unset time: 00:02:03.314, Time elapsed: 00:54:38.858
p8 = re.compile(r'^Unset +time: +(?P<time>(\S+)), +Time +elapsed:'
' +(?P<elapsed>(\S+))$')
for line in out.splitlines():
line = line.strip()
# OSPF Router with ID (10.36.3.3) (Process ID 1, VRF VRF1)
m = p1.match(line)
if m:
group = m.groupdict()
router_id = str(group['router_id'])
instance = str(group['instance'])
if group['vrf']:
vrf = str(group['vrf'])
else:
vrf = 'default'
# Create dict
ospf_dict = ret_dict.setdefault('vrf', {}).\
setdefault(vrf, {}).\
setdefault('address_family', {}).\
setdefault(address_family, {}).\
setdefault('instance', {}).\
setdefault(instance, {})
ospf_dict['router_id'] = router_id
continue
# Base Topology (MTID 0)
m = p2.match(line)
if m:
mtid = m.groupdict()['mtid']
mtid_dict = ospf_dict.setdefault('base_topology_mtid', {}).\
setdefault(mtid, {})
continue
# Start time: 00:01:58.314, Time elapsed: 00:54:43.858
m = p3.match(line)
if m:
group = m.groupdict()
mtid_dict['start_time'] = group['start_time']
mtid_dict['time_elapsed'] = group['time_elapsed']
continue
# Originating router-LSAs with maximum metric
# Originating router-LSAs with maximum metric, Time remaining: 00:03:55
m = p4_1.match(line)
if m:
rtr_lsa_dict = mtid_dict.\
setdefault('router_lsa_max_metric', {}).\
setdefault(True, {})
if m.groupdict()['time_remaining']:
rtr_lsa_dict['time_remaining'] = m.groupdict()['time_remaining']
continue
# Router is not originating router-LSAs with maximum metric
m = p4_2.match(line)
if m:
rtr_lsa_dict = mtid_dict.\
setdefault('router_lsa_max_metric', {}).\
setdefault(False, {})
continue
# Condition: on startup for 5 seconds, State: inactive
m = p5.match(line)
if m:
group = m.groupdict()
rtr_lsa_dict['condition'] = group['condition']
rtr_lsa_dict['state'] = group['state']
continue
# Advertise summary-LSAs with metric 16711680
m = p6.match(line)
if m:
rtr_lsa_dict['advertise_lsa_metric'] = int(m.groupdict()['metric'])
# Unset reason: timer expired, Originated for 5 seconds
m = p7.match(line)
if m:
rtr_lsa_dict['unset_reason'] = m.groupdict()['reason']
continue
# Unset time: 00:02:03.314, Time elapsed: 00:54:38.858
m = p8.match(line)
if m:
group = m.groupdict()
rtr_lsa_dict['unset_time'] = group['time']
rtr_lsa_dict['unset_time_elapsed'] = group['elapsed']
continue
return ret_dict
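# ==============================================================================
# Usage sketch (illustrative only, not part of the original parser module).
# The testbed file, device name and captured text below are assumptions.
# ==============================================================================
#
#     from genie.testbed import load
#     testbed = load('testbed.yaml')                  # hypothetical testbed file
#     device = testbed.devices['iosxe-1']             # hypothetical device name
#     device.connect()
#     parsed = device.parse('show ip ospf max-metric')
#
#     # Replaying previously captured CLI text instead of a live connection:
#     parsed = ShowIpOspfMaxMetric(device=device).cli(output=saved_cli_text)
#     instances = parsed['vrf']['default']['address_family']['ipv4']['instance']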
# ==========================
# Schema for:
# * 'show ip ospf traffic'
# ==========================
class ShowIpOspfTrafficSchema(MetaParser):
''' Schema for:
* 'show ip ospf traffic'
'''
schema = {
Optional('ospf_statistics'):
{'last_clear_traffic_counters': str,
'rcvd':
{'total': int,
'checksum_errors': int,
'hello': int,
'database_desc': int,
'link_state_req': int,
'link_state_updates': int,
'link_state_acks': int,
},
'sent':
{'total': int,
'hello': int,
'database_desc': int,
'link_state_req': int,
'link_state_updates': int,
'link_state_acks': int,
},
},
'vrf':
{Any():
{'address_family':
{Any():
{'instance':
{Any():
{
Optional('router_id'): str,
Optional('ospf_queue_statistics'):
{'limit':
{'inputq': int,
'outputq': int,
'updateq': int,
},
'drops':
{'inputq': int,
'outputq': int,
'updateq': int,
},
'max_delay_msec':
{'inputq': int,
'outputq': int,
'updateq': int,
},
'max_size':
{'total':
{'inputq': int,
'outputq': int,
'updateq': int,
},
'invalid':
{'inputq': int,
'outputq': int,
'updateq': int,
},
'hello':
{'inputq': int,
'outputq': int,
'updateq': int,
},
'db_des':
{'inputq': int,
'outputq': int,
'updateq': int,
},
'ls_req':
{'inputq': int,
'outputq': int,
'updateq': int,
},
'ls_upd':
{'inputq': int,
'outputq': int,
'updateq': int,
},
'ls_ack':
{'inputq': int,
'outputq': int,
'updateq': int,
},
},
'current_size':
{'total':
{'inputq': int,
'outputq': int,
'updateq': int,
},
'invalid':
{'inputq': int,
'outputq': int,
'updateq': int,
},
'hello':
{'inputq': int,
'outputq': int,
'updateq': int,
},
'db_des':
{'inputq': int,
'outputq': int,
'updateq': int,
},
'ls_req':
{'inputq': int,
'outputq': int,
'updateq': int,
},
'ls_upd':
{'inputq': int,
'outputq': int,
'updateq': int,
},
'ls_ack':
{'inputq': int,
'outputq': int,
'updateq': int,
},
},
},
Optional('interface_statistics'):
{'interfaces':
{Any():
{'last_clear_traffic_counters': str,
'ospf_packets_received_sent':
{'type':
{Any():
{'packets': int,
'bytes': int,
},
},
},
'ospf_header_errors':
{'length': int,
'instance_id': int,
'checksum': int,
'auth_type': int,
'version': int,
'bad_source': int,
'no_virtual_link': int,
'area_mismatch': int,
'no_sham_link': int,
'self_originated': int,
'duplicate_id': int,
'hello': int,
'mtu_mismatch': int,
'nbr_ignored': int,
'lls': int,
'unknown_neighbor': int,
'authentication': int,
'ttl_check_fail': int,
Optional('adjacency_throttle'): int,
Optional('bfd'): int,
'test_discard': int,
},
'ospf_lsa_errors':
{'type': int,
'length': int,
'data': int,
'checksum': int,
},
},
},
},
'summary_traffic_statistics':
{'ospf_packets_received_sent':
{'type':
{Any():
{'packets': int,
'bytes': int,
},
},
},
'ospf_header_errors':
{'length': int,
'instance_id': int,
'checksum': int,
'auth_type': int,
'version': int,
'bad_source': int,
'no_virtual_link': int,
'area_mismatch': int,
'no_sham_link': int,
'self_originated': int,
'duplicate_id': int,
'hello': int,
'mtu_mismatch': int,
'nbr_ignored': int,
'lls': int,
'unknown_neighbor': int,
'authentication': int,
'ttl_check_fail': int,
Optional('adjacency_throttle'): int,
Optional('bfd'): int,
'test_discard': int,
},
'ospf_lsa_errors':
{'type': int,
'length': int,
'data': int,
'checksum': int,
},
},
},
},
},
},
},
},
}
# ==========================
# Parser for:
# * 'show ip ospf traffic'
# ==========================
class ShowIpOspfTraffic(ShowIpOspfTrafficSchema):
''' Parser for:
* "show ip ospf traffic"
'''
cli_command = 'show ip ospf traffic'
def cli(self, output=None):
if output is None:
# Execute command on device
out = self.device.execute(self.cli_command)
else:
out = output
# Init vars
ret_dict = {}
address_family = 'ipv4'
vrf = 'default'
received = False ; sent = False
interface_stats = False ; summary_stats = False
max_size_stats = False ; current_size_stats = False
# OSPF statistics:
p1 = re.compile(r'^OSPF +statistics:$')
# Last clearing of OSPF traffic counters never
# Last clearing of interface traffic counters never
p2 = re.compile(r'^Last +clearing +of +(?P<type>(OSPF|interface)) +traffic'
' +counters +(?P<last_clear>([a-zA-Z0-9\:\s]+))$')
# Rcvd: 2112690 total, 0 checksum errors
p3 = re.compile(r'^Rcvd: +(?P<total>(\d+)) total, +(?P<csum_errors>(\d+))'
' +checksum +errors$')
# 2024732 hello, 938 database desc, 323 link state req
# 2381794 hello, 1176 database desc, 43 link state req
p4 = re.compile(r'^(?P<hello>(\d+)) +hello, +(?P<db_desc>(\d+))'
' +database +desc, +(?P<link_state_req>(\d+))'
' +link +state +req$')
# 11030 link state updates, 75666 link state acks
# 92224 link state updates, 8893 link state acks
p5 = re.compile(r'^(?P<link_state_updates>(\d+)) +link +state +updates,'
' +(?P<link_state_acks>(\d+)) +link +state +acks$')
# Sent: 2509472 total
p6 = re.compile(r'^Sent: +(?P<total>(\d+)) +total$')
# OSPF Router with ID (10.169.197.252) (Process ID 65109)
# OSPF Router with ID (10.36.3.3) (Process ID 1, VRF VRF1)
p7 = re.compile(r'^OSPF +Router +with +ID +\((?P<router_id>(\S+))\)'
' +\(Process +ID +(?P<instance>(\d+))'
'(?:, +VRF +(?P<vrf>(\S+)))?\)$')
# OSPF queue statistics for process ID 65109:
p8 = re.compile(r'^OSPF +queue +statistics +for +process +ID +(?P<pid>(\d+)):$')
# InputQ UpdateQ OutputQ
# Limit 0 200 0
# Drops 0 0 0
# Max delay [msec] 49 2 2
p9_1 = re.compile(r'^(?P<item>(Limit|Drops|Max delay \[msec\])) +'
'(?P<inputq>(\d+)) +(?P<updateq>(\d+)) +(?P<outputq>(\d+))$')
# Invalid 0 0 0
# Hello 0 0 0
# DB des 0 0 0
# LS req 0 0 0
# LS upd 0 0 0
# LS ack 14 14 6
p9_2 = re.compile(r'^(?P<item>(Invalid|Hello|DB des|LS '
'req|LS upd|LS ack)) +(?P<inputq>(\d+)) '
'+(?P<updateq>(\d+)) +(?P<outputq>(\d+))$')
# InputQ UpdateQ OutputQ
# Max size 14 14 6
# Current size 0 0 0
p9_3 = re.compile(r'^(?P<item>(Max size|Current size)) +(?P<inputq>(\d+))'
' +(?P<updateq>(\d+)) +(?P<outputq>(\d+))$')
# Interface statistics:
p10 = re.compile(r'^Interface +statistics:$')
# Interface GigabitEthernet0/0/6
p11 = re.compile(r'^Interface +(?P<intf>(\S+))$')
# OSPF packets received/sent
# Type Packets Bytes
# RX Invalid 0 0
# RX Hello 169281 8125472
# RX DB des 36 1232
# RX LS req 20 25080
# RX LS upd 908 76640
# RX LS ack 9327 8733808
# RX Total 179572 16962232
# TX Failed 0 0
# TX Hello 169411 13552440
# TX DB des 40 43560
# TX LS req 4 224
# TX LS upd 12539 12553264
# TX LS ack 899 63396
# TX Total 182893 26212884
p12 = re.compile(r'^(?P<type>([a-zA-Z\s]+)) +(?P<packets>(\d+))'
' +(?P<bytes>(\d+))$')
# OSPF header errors
p13 = re.compile(r'^OSPF +header +errors$')
# Length 0, Instance ID 0, Checksum 0, Auth Type 0,
p14 = re.compile(r'^Length +(?P<len>(\d+)), +Instance +ID'
' +(?P<iid>(\d+)), +Checksum +(?P<csum>(\d+)),'
' +Auth +Type +(?P<auth>(\d+)),?$')
# Version 0, Bad Source 0, No Virtual Link 0,
p15 = re.compile(r'^Version +(?P<version>(\d+)), +Bad +Source'
' +(?P<bad_source>(\d+)), +No +Virtual +Link'
' +(?P<no_virtual_link>(\d+)),?$')
# Area Mismatch 0, No Sham Link 0, Self Originated 0,
p16 = re.compile(r'^Area +Mismatch +(?P<area_mismatch>(\d+)),'
' +No +Sham +Link +(?P<no_sham_link>(\d+)),'
' +Self +Originated +(?P<self_originated>(\d+)),?$')
# Duplicate ID 0, Hello 0, MTU Mismatch 0,
p17 = re.compile(r'^Duplicate +ID +(?P<duplicate_id>(\d+)),'
' +Hello +(?P<hello>(\d+)), +MTU +Mismatch'
' +(?P<mtu_mismatch>(\d+)),$')
# Nbr Ignored 0, LLS 0, Unknown Neighbor 0,
p18 = re.compile(r'^Nbr +Ignored +(?P<nbr_ignored>(\d+)), +LLS'
' +(?P<lls>(\d+)), +Unknown +Neighbor'
' +(?P<unknown_neighbor>(\d+)),?$')
# Authentication 0, TTL Check Fail 0, Adjacency Throttle 0,
p19 = re.compile(r'^Authentication +(?P<authentication>(\d+)), +TTL'
' +Check +Fail +(?P<ttl_check_fail>(\d+)), +Adjacency'
' +Throttle +(?P<adjacency_throttle>(\d+)),?$')
# Authentication 0, TTL Check Fail 0, Test discard 0
p19_1 = re.compile(r'^Authentication +(?P<authentication>\d+), +TTL'
' +Check +Fail +(?P<ttl_check_fail>\d+), +Test discard'
' +(?P<test_discard>\d+),?$')
# BFD 0, Test discard 0
p20 = re.compile(r'^BFD +(?P<bfd>(\d+)), +Test +discard'
' +(?P<test_discard>(\d+))$')
# OSPF LSA errors
p21 = re.compile(r'^OSPF +LSA +errors$')
# Type 0, Length 0, Data 0, Checksum 0
p22 = re.compile(r'^Type +(?P<type>(\d+)), +Length +(?P<len>(\d+)),'
' +Data +(?P<data>(\d+)), +Checksum +(?P<csum>(\d+))$')
# Summary traffic statistics for process ID 65109:
p23 = re.compile(r'^Summary +traffic +statistics +for +process +ID'
' +(?P<pid>(\d+)):$')
for line in out.splitlines():
line = line.strip()
# OSPF statistics:
m = p1.match(line)
if m:
ospf_stats_dict = ret_dict.setdefault('ospf_statistics', {})
continue
# Last clearing of OSPF traffic counters never
# Last clearing of interface traffic counters never
m = p2.match(line)
if m:
if m.groupdict()['type'] == 'OSPF':
ospf_stats_dict['last_clear_traffic_counters'] = \
m.groupdict()['last_clear']
if m.groupdict()['type'] == 'interface':
intf_dict['last_clear_traffic_counters'] = \
m.groupdict()['last_clear']
continue
# Rcvd: 2112690 total, 0 checksum errors
m = p3.match(line)
if m:
group = m.groupdict()
rcvd_dict = ospf_stats_dict.setdefault('rcvd', {})
rcvd_dict['total'] = int(group['total'])
rcvd_dict['checksum_errors'] = int(group['csum_errors'])
received = True ; sent = False
continue
# 2024732 hello, 938 database desc, 323 link state req
# 2381794 hello, 1176 database desc, 43 link state req
m = p4.match(line)
if m:
group = m.groupdict()
if received:
sdict = rcvd_dict
elif sent:
sdict = sent_dict
else:
continue
sdict['hello'] = int(group['hello'])
sdict['database_desc'] = int(group['db_desc'])
sdict['link_state_req'] = int(group['link_state_req'])
continue
# 11030 link state updates, 75666 link state acks
# 92224 link state updates, 8893 link state acks
m = p5.match(line)
if m:
group = m.groupdict()
if received:
sdict = rcvd_dict
elif sent:
sdict = sent_dict
else:
continue
sdict['link_state_updates'] = int(group['link_state_updates'])
sdict['link_state_acks'] = int(group['link_state_acks'])
continue
# Sent: 2509472 total
m = p6.match(line)
if m:
group = m.groupdict()
sent_dict = ospf_stats_dict.setdefault('sent', {})
sent_dict['total'] = int(group['total'])
sent = True ; received = False
continue
# OSPF Router with ID (10.169.197.252) (Process ID 65109)
# OSPF Router with ID (10.36.3.3) (Process ID 1, VRF VRF1)
m = p7.match(line)
if m:
group = m.groupdict()
router_id = str(group['router_id'])
instance = str(group['instance'])
if group['vrf']:
vrf = str(group['vrf'])
else:
vrf = 'default'
# Create dict
ospf_dict = ret_dict.setdefault('vrf', {}).\
setdefault(vrf, {}).\
setdefault('address_family', {}).\
setdefault(address_family, {}).\
setdefault('instance', {}).\
setdefault(instance, {})
ospf_dict['router_id'] = router_id
continue
# OSPF queue statistics for process ID 65109:
m = p8.match(line)
if m:
queue_stats_dict = ospf_dict.setdefault('ospf_queue_statistics', {})
continue
# InputQ UpdateQ OutputQ
# Limit 0 200 0
# Drops 0 0 0
# Max delay [msec] 49 2 2
m = p9_1.match(line)
if m:
group = m.groupdict()
item = group['item'].strip().lower().replace(" ", "_").\
replace("[", "").replace("]", "")
tmp_dict = queue_stats_dict.setdefault(item, {})
tmp_dict['inputq'] = int(group['inputq'])
tmp_dict['updateq'] = int(group['updateq'])
tmp_dict['outputq'] = int(group['outputq'])
continue
# Invalid 0 0 0
# Hello 0 0 0
# DB des 0 0 0
# LS req 0 0 0
# LS upd 0 0 0
# LS ack 14 14 6
m = p9_2.match(line)
if m:
group = m.groupdict()
item = group['item'].strip().lower().replace(" ", "_").\
replace("[", "").replace("]", "")
if max_size_stats:
tmp_dict = max_size_queue_stats_dict.setdefault(item, {})
elif current_size_stats:
tmp_dict = current_size_queue_stats_dict.setdefault(item, {})
else:
tmp_dict = queue_stats_dict.setdefault(item, {})
tmp_dict['inputq'] = int(group['inputq'])
tmp_dict['updateq'] = int(group['updateq'])
tmp_dict['outputq'] = int(group['outputq'])
continue
# InputQ UpdateQ OutputQ
# Max size 14 14 6
# Current size 0 0 0
m = p9_3.match(line)
if m:
group = m.groupdict()
item = group['item'].strip().lower().replace(" ", "_")
tmp_dict = queue_stats_dict.setdefault(item, {})
if item == 'max_size':
max_size_stats = True
current_size_stats = False
max_size_queue_stats_dict = tmp_dict
elif item == 'current_size':
current_size_stats = True
max_size_stats = False
current_size_queue_stats_dict = tmp_dict
tmp_dict.setdefault('total', {})['inputq'] = int(group['inputq'])
tmp_dict.setdefault('total', {})['updateq'] = int(group['updateq'])
tmp_dict.setdefault('total', {})['outputq'] = int(group['outputq'])
continue
# Interface statistics:
m = p10.match(line)
if m:
intf_stats_dict = ospf_dict.setdefault('interface_statistics', {})
continue
# Interface GigabitEthernet0/0/6
m = p11.match(line)
if m:
intf = m.groupdict()['intf']
intf_dict = intf_stats_dict.setdefault('interfaces', {}).\
setdefault(intf, {})
interface_stats = True ; summary_stats = False
continue
# Type Packets Bytes
# RX Invalid 0 0
# RX Hello 169281 8125472
# RX DB des 36 1232
# RX LS req 20 25080
# RX LS upd 908 76640
# RX LS ack 9327 8733808
# RX Total 179572 16962232
# TX Failed 0 0
# TX Hello 169411 13552440
# TX DB des 40 43560
# TX LS req 4 224
# TX LS upd 12539 12553264
# TX LS ack 899 63396
# TX Total 182893 26212884
m = p12.match(line)
if m:
group = m.groupdict()
if interface_stats:
sdict = intf_dict
elif summary_stats:
sdict = summary_stats_dict
else:
continue
item_type = group['type'].strip().lower().replace(" ", "_")
tmp_dict = sdict.setdefault('ospf_packets_received_sent', {}).\
setdefault('type', {}).setdefault(item_type, {})
tmp_dict['packets'] = int(group['packets'])
tmp_dict['bytes'] = int(group['bytes'])
continue
# OSPF header errors
m = p13.match(line)
if m:
group = m.groupdict()
if interface_stats:
sdict = intf_dict
elif summary_stats:
sdict = summary_stats_dict
else:
continue
ospf_header_errors_dict = sdict.setdefault('ospf_header_errors', {})
continue
# Length 0, Instance ID 0, Checksum 0, Auth Type 0,
m = p14.match(line)
if m:
group = m.groupdict()
ospf_header_errors_dict['length'] = int(group['len'])
ospf_header_errors_dict['instance_id'] = int(group['iid'])
ospf_header_errors_dict['checksum'] = int(group['csum'])
ospf_header_errors_dict['auth_type'] = int(group['auth'])
continue
# Version 0, Bad Source 0, No Virtual Link 0,
m = p15.match(line)
if m:
group = m.groupdict()
ospf_header_errors_dict['version'] = int(group['version'])
ospf_header_errors_dict['bad_source'] = int(group['bad_source'])
ospf_header_errors_dict['no_virtual_link'] = int(group['no_virtual_link'])
continue
# Area Mismatch 0, No Sham Link 0, Self Originated 0,
m = p16.match(line)
if m:
group = m.groupdict()
ospf_header_errors_dict['area_mismatch'] = int(group['area_mismatch'])
ospf_header_errors_dict['no_sham_link'] = int(group['no_sham_link'])
ospf_header_errors_dict['self_originated'] = int(group['self_originated'])
continue
# Duplicate ID 0, Hello 0, MTU Mismatch 0,
m = p17.match(line)
if m:
group = m.groupdict()
ospf_header_errors_dict['duplicate_id'] = int(group['duplicate_id'])
ospf_header_errors_dict['hello'] = int(group['hello'])
ospf_header_errors_dict['mtu_mismatch'] = int(group['mtu_mismatch'])
continue
# Nbr Ignored 0, LLS 0, Unknown Neighbor 0,
m = p18.match(line)
if m:
group = m.groupdict()
ospf_header_errors_dict['nbr_ignored'] = int(group['nbr_ignored'])
ospf_header_errors_dict['lls'] = int(group['lls'])
ospf_header_errors_dict['unknown_neighbor'] = int(group['unknown_neighbor'])
continue
# Authentication 0, TTL Check Fail 0, Adjacency Throttle 0,
m = p19.match(line)
if m:
group = m.groupdict()
ospf_header_errors_dict['authentication'] = int(group['authentication'])
ospf_header_errors_dict['ttl_check_fail'] = int(group['ttl_check_fail'])
ospf_header_errors_dict['adjacency_throttle'] = int(group['adjacency_throttle'])
continue
# Authentication 0, TTL Check Fail 0, Test discard 0
m = p19_1.match(line)
if m:
group = m.groupdict()
ospf_header_errors_dict['authentication'] = int(group['authentication'])
ospf_header_errors_dict['ttl_check_fail'] = int(group['ttl_check_fail'])
ospf_header_errors_dict['test_discard'] = int(group['test_discard'])
continue
# BFD 0, Test discard 0
m = p20.match(line)
if m:
group = m.groupdict()
ospf_header_errors_dict['bfd'] = int(group['bfd'])
ospf_header_errors_dict['test_discard'] = int(group['test_discard'])
continue
# OSPF LSA errors
m = p21.match(line)
if m:
if interface_stats:
sdict = intf_dict
elif summary_stats:
sdict = summary_stats_dict
else:
continue
ospf_lsa_errors_dict = sdict.setdefault('ospf_lsa_errors', {})
continue
# Type 0, Length 0, Data 0, Checksum 0
m = p22.match(line)
if m:
group = m.groupdict()
ospf_lsa_errors_dict['type'] = int(group['type'])
ospf_lsa_errors_dict['length'] = int(group['len'])
ospf_lsa_errors_dict['data'] = int(group['data'])
ospf_lsa_errors_dict['checksum'] = int(group['csum'])
continue
# Summary traffic statistics for process ID 65109:
m = p23.match(line)
if m:
pid = m.groupdict()['pid']
ospf_dict = ret_dict.setdefault('vrf', {}).\
setdefault(vrf, {}).\
setdefault('address_family', {}).\
setdefault(address_family, {}).\
setdefault('instance', {}).\
setdefault(pid, {})
summary_stats_dict = ospf_dict.\
setdefault('summary_traffic_statistics', {})
interface_stats = False ; summary_stats = True
vrf = 'default'
continue
return ret_dict
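# ==============================================================================
# Usage sketch (illustrative only; the variable names below are assumptions).
# The parser can consume previously captured output via the ``output`` argument:
# ==============================================================================
#
#     parsed = ShowIpOspfTraffic(device=device).cli(output=captured_text)
#     rcvd_total = parsed['ospf_statistics']['rcvd']['total']
#     sent_total = parsed['ospf_statistics']['sent']['total']
#     intf_stats = parsed['vrf']['default']['address_family']['ipv4']\
#                        ['instance']['65109']['interface_statistics']['interfaces']
#
# The instance key ('65109' above) depends on the process ID in the output, and
# 'ospf_statistics' is only present when the global statistics header is shown.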
# ===========================
# Schema for:
# * 'show ip ospf neighbor'
# * 'show ip ospf neighbor {interface}'
# ===========================
class ShowIpOspfNeighborSchema(MetaParser):
''' Schema for:
* 'show ip ospf neighbor'
* 'show ip ospf neighbor {interface}'
'''
schema = {
'interfaces':
{Any():
{'neighbors':
{Any():
{'priority': int,
'state':str,
'dead_time':str,
'address':str,
},
},
},
},
}
# ===========================
# Parser for:
# * 'show ip ospf neighbor'
# * 'show ip ospf neighbor {interface}'
# ===========================
class ShowIpOspfNeighbor(ShowIpOspfNeighborSchema):
''' Parser for:
* 'show ip ospf neighbor'
* 'show ip ospf neighbor {interface}'
'''
cli_command = [
'show ip ospf neighbor {interface}',
'show ip ospf neighbor']
exclude = ['dead_time']
def cli(self, interface='', output=None):
if output is None:
# Execute command on device
if interface:
cmd = self.cli_command[0].format(interface=interface)
else:
cmd = self.cli_command[1]
out = self.device.execute(cmd)
else:
out = output
# Init vars
ret_dict = {}
# Neighbor ID Pri State Dead Time Address Interface
# 172.16.197.253 128 FULL/DR 00:00:30 172.16.165.49 GigabitEthernet0/0/1
# 10.169.197.252 0 FULL/ - 00:00:36 10.169.197.93 GigabitEthernet2
p1=re.compile(r'^(?P<neighbor>\S+) +(?P<pri>\d+) +(?P<state>\S+(?:\s+\S+)?)'
' +(?P<dead_time>\S+) +(?P<address>\S+) +(?P<interface>\S+)$')
for line in out.splitlines():
line = line.strip()
m = p1.match(line)
if m:
neighbor = m.groupdict()['neighbor']
interface = m.groupdict()['interface']
#Build Dict
intf_dict = ret_dict.setdefault('interfaces', {}).setdefault(interface, {})
nbr_dict = intf_dict.setdefault('neighbors', {}).setdefault(neighbor, {})
# Set values
nbr_dict['priority'] = int(m.groupdict()['pri'])
nbr_dict['state'] = str(m.groupdict()['state'])
nbr_dict['dead_time'] = str(m.groupdict()['dead_time'])
nbr_dict['address'] = str(m.groupdict()['address'])
continue
return ret_dict
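# ==============================================================================
# Example mapping (illustrative), based on the sample line quoted in the
# pattern comments above:
#     172.16.197.253 128 FULL/DR 00:00:30 172.16.165.49 GigabitEthernet0/0/1
# parses into:
#     {'interfaces':
#         {'GigabitEthernet0/0/1':
#             {'neighbors':
#                 {'172.16.197.253':
#                     {'priority': 128,
#                      'state': 'FULL/DR',
#                      'dead_time': '00:00:30',
#                      'address': '172.16.165.49'}}}}}
# ==============================================================================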
# =================================================
# Parser for:
# * 'show ip ospf database router self-originate'
# =================================================
class ShowIpOspfDatabaseRouterSelfOriginate(ShowIpOspfDatabaseRouterSchema, ShowIpOspfDatabaseTypeParser):
''' Parser for:
* 'show ip ospf database router self-originate'
'''
cli_command = 'show ip ospf database router self-originate'
exclude = ['age' , 'checksum', 'seq_num', 'dead_time']
def cli(self, output=None):
if not output:
output = self.device.execute(self.cli_command)
return super().cli(db_type='router', out=output)
class ShowIpOspfSegmentRoutingAdjacencySidSchema(MetaParser):
''' Schema for commands:
* show ip ospf {process_id} segment-routing adjacency-sid
'''
schema = {
'process_id': {
Any(): {
'router_id': str,
'adjacency_sids': {
Any(): {
'neighbor_id': str,
'neighbor_address': str,
'interface': str,
'flags': str,
Optional('backup_nexthop'): str,
Optional('backup_interface'): str,
}
}
}
}
}
class ShowIpOspfSegmentRoutingAdjacencySid(ShowIpOspfSegmentRoutingAdjacencySidSchema):
''' Parser for commands:
* show ip ospf {process_id} segment-routing adjacency-sid
'''
cli_command = [
'show ip ospf {process_id} segment-routing adjacency-sid',
'show ip ospf segment-routing adjacency-sid',
]
def cli(self, process_id=None, output=None):
if output is None:
if process_id:
command = self.cli_command[0].format(process_id=process_id)
else:
command = self.cli_command[1]
out = self.device.execute(command)
else:
out = output
# OSPF Router with ID (10.4.1.1) (Process ID 65109)
r1 = re.compile(r'OSPF\s+Router\s+with\s+ID\s+\((?P<router_id>\S+)\)\s+'
'\(Process\s+ID\s+(?P<process_id>\d+)\)')
# 16 10.16.2.2 Gi0/1/2 192.168.154.2 D U
# 17 10.16.2.2 Gi0/1/1 192.168.4.2 D U
r2 = re.compile(r'(?P<adj_sid>\d+)\s+(?P<neighbor_id>\S+)\s+'
'(?P<interface>\S+)\s+(?P<neighbor_address>\S+)\s+'
'(?P<flags>[SDPUGL\s]+)\s*(?:(?P<backup_nexthop>\S+))?'
'\s*(?:(?P<backup_interface>\S+))?')
parsed_output = {}
for line in out.splitlines():
line = line.strip()
# OSPF Router with ID (10.4.1.1) (Process ID 65109)
result = r1.match(line)
if result:
group = result.groupdict()
router_id = group['router_id']
process_id = group['process_id']
process_id_dict = parsed_output.setdefault('process_id', {})\
.setdefault(process_id, {})
process_id_dict['router_id'] = router_id
continue
# 16 10.16.2.2 Gi0/1/2 192.168.154.2 D U
# 17 10.16.2.2 Gi0/1/1 192.168.4.2 D U
result = r2.match(line)
if result:
group = result.groupdict()
adj_sid = group['adj_sid']
adjs_sid_dict = process_id_dict.setdefault('adjacency_sids', {})\
.setdefault(adj_sid, {})
adjs_sid_dict['neighbor_id'] = group['neighbor_id']
interface = group['interface']
adjs_sid_dict['interface'] = Common.convert_intf_name(str(interface))
adjs_sid_dict['neighbor_address'] = group['neighbor_address']
adjs_sid_dict['flags'] = group['flags']
backup_nexthop = group['backup_nexthop']
if backup_nexthop:
adjs_sid_dict['backup_nexthop'] = backup_nexthop
backup_interface = group['backup_interface']
if backup_interface:
adjs_sid_dict['backup_interface'] = backup_interface
continue
return parsed_output
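# ==============================================================================
# Note (illustrative): interface names are normalised through
# Common.convert_intf_name before being stored, so the abbreviated 'Gi0/1/2'
# from the sample output above is expected to come back in its expanded form
# (e.g. 'GigabitEthernet0/1/2'). A minimal navigation sketch, assuming
# ``captured_text`` holds saved CLI output:
#
#     parsed = ShowIpOspfSegmentRoutingAdjacencySid(device=device).cli(
#         output=captured_text)
#     for sid, data in parsed['process_id']['65109']['adjacency_sids'].items():
#         print(sid, data['interface'], data['neighbor_address'], data['flags'])
# ==============================================================================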
# =================================================
# Schema for:
# * 'show ip ospf fast-reroute ti-lfa'
# =================================================
class ShowIpOspfFastRerouteTiLfaSchema(MetaParser):
"""Schema for show ip ospf fast-reroute ti-lfa
"""
schema = {
'process_id': {
Any(): {
'router_id': str,
'ospf_object': {
Any(): {
'ipfrr_enabled': str,
'sr_enabled': str,
'ti_lfa_configured': str,
'ti_lfa_enabled': str,
}
}
}
}
}
# =================================================
# Parser for:
# * 'show ip ospf fast-reroute ti-lfa'
# =================================================
class ShowIpOspfFastRerouteTiLfa(ShowIpOspfFastRerouteTiLfaSchema):
"""Parser for show ip ospf fast-reroute ti-lfa
"""
cli_command = 'show ip ospf fast-reroute ti-lfa'
def cli(self, output=None):
if output is None:
out = self.device.execute(self.cli_command)
else:
out = output
# OSPF Router with ID (10.4.1.1) (Process ID 65109)
p1 = re.compile(r'^OSPF +Router +with +ID +\((?P<router_id>\S+)'
'\) +\(Process +ID +(?P<process_id>\d+)\)')
# Process ID (65109) no yes no no
# Area 8 no yes no no
# Loopback0 no no no no
# GigabitEthernet0/1/2 no yes no no
p2 = re.compile(r'^(?P<ospf_object>[\S\s]+) +(?P<ipfrr_enabled>(yes|no)'
'( +\(inactive\))?) +(?P<sr_enabled>(yes|no)( +\(inactive\))?) '
'+(?P<ti_lfa_configured>(yes|no)( +\(inactive\))?) +'
'(?P<ti_lfa_enabled>(yes|no)( +\(inactive\))?)$')
# initial variables
ret_dict = {}
for line in out.splitlines():
line = line.strip()
# OSPF Router with ID (10.4.1.1) (Process ID 65109)
m = p1.match(line)
if m:
group = m.groupdict()
router_id = group['router_id']
process_id = int(group['process_id'])
process_id_dict = ret_dict.setdefault('process_id', {}). \
setdefault(process_id, {})
process_id_dict.update({'router_id': router_id})
ospf_object_dict = process_id_dict.setdefault('ospf_object', {})
continue
# Process ID (65109) no yes no no
# Area 8 no yes no no
# Loopback0 no no no no
# GigabitEthernet0/1/2 no yes no no
m = p2.match(line)
if m:
group = m.groupdict()
ospf_object = group['ospf_object'].strip()
ipfrr_enabled = group['ipfrr_enabled']
sr_enabled = group['sr_enabled']
ti_lfa_configured = group['ti_lfa_configured']
ti_lfa_enabled = group['ti_lfa_enabled']
ospf_object = ospf_object_dict.setdefault(ospf_object, {})
ospf_object.update({'ipfrr_enabled': ipfrr_enabled })
ospf_object.update({'sr_enabled': sr_enabled })
ospf_object.update({'ti_lfa_configured': ti_lfa_configured })
ospf_object.update({'ti_lfa_enabled': ti_lfa_enabled })
continue
return ret_dict
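# ==============================================================================
# Note (illustrative): the four capability columns are stored exactly as
# matched by p2, so a value such as 'yes (inactive)' is kept verbatim rather
# than being reduced to a boolean. Minimal sketch, assuming saved CLI output:
#
#     parsed = ShowIpOspfFastRerouteTiLfa(device=device).cli(output=captured_text)
#     for obj_name, flags in parsed['process_id'][65109]['ospf_object'].items():
#         print(obj_name, flags['sr_enabled'], flags['ti_lfa_enabled'])
# ==============================================================================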
# ===============================================================
# Schema for 'show ip ospf segment-routing protected-adjacencies'
# ===============================================================
class ShowIpOspfSegmentRoutingProtectedAdjacenciesSchema(MetaParser):
''' Schema for show ip ospf segment-routing protected-adjacencies
'''
schema = {
'process_id': {
Any(): {
'router_id': str,
Optional('areas'): {
Any(): {
'neighbors': {
Any(): {
'interfaces': {
Any(): {
'address': str,
'adj_sid': int,
Optional('backup_nexthop'): str,
Optional('backup_interface'): str
}
}
}
}
}
}
}
}
}
# ========================================================
# Parser for:
# * 'show ip ospf segment-routing protected-adjacencies'
# ========================================================
class ShowIpOspfSegmentRoutingProtectedAdjacencies(ShowIpOspfSegmentRoutingProtectedAdjacenciesSchema):
""" Parser for show ip ospf segment-routing protected-adjacencies
"""
cli_command = 'show ip ospf segment-routing protected-adjacencies'
def cli(self, output=None):
if output is None:
out = self.device.execute(self.cli_command)
else:
out = output
# OSPF Router with ID (10.4.1.1) (Process ID 65109)
p1 = re.compile(r'OSPF +Router +with +ID +\((?P<router_id>\S+)\) +\('
'Process +ID +(?P<process_id>\d+)\)')
# Area with ID (8)
p2 = re.compile(r'^Area +with +ID \((?P<area_id>\d+)\)$')
# 10.234.30.22 Gi10 192.168.10.2 17 192.168.10.3 Gi14
p3 = re.compile(
r'^(?P<neighbor_id>\S+) +(?P<interface>\S+) +(?P<address>\S+) +('
r'?P<adj_sid>\d+)( +(?P<backup_nexthop>\S+))?( +(?P<backup_interface>\S+))?$')
# initial variables
ret_dict = {}
for line in out.splitlines():
line = line.strip()
# OSPF Router with ID (10.4.1.1) (Process ID 65109)
m = p1.match(line)
if m:
group = m.groupdict()
router_id = group['router_id']
process_id = int(group['process_id'])
process_id_dict = ret_dict.setdefault('process_id', {}). \
setdefault(process_id, {})
process_id_dict['router_id'] = router_id
continue
# Area with ID (8)
m = p2.match(line)
if m:
group = m.groupdict()
area_id = str(IPAddress(str(group['area_id'])))
area_dict = process_id_dict.setdefault('areas', {}). \
setdefault(area_id, {})
continue
# 10.234.30.22 Gi10 192.168.10.2 17 192.168.10.3 Gi14
m = p3.match(line)
if m:
group = m.groupdict()
neighbor_id = group['neighbor_id']
interface = group['interface']
address = group['address']
adj_sid = int(group['adj_sid'])
backup_nexthop = group['backup_nexthop']
backup_interface = group['backup_interface']
neighbor_dict = area_dict.setdefault('neighbors', {}). \
setdefault(neighbor_id, {}). \
setdefault('interfaces', {}). \
setdefault(Common.convert_intf_name(interface), {})
neighbor_dict.update({'address': address})
neighbor_dict.update({'adj_sid': adj_sid})
if backup_nexthop:
neighbor_dict.update({'backup_nexthop': backup_nexthop})
if backup_interface:
neighbor_dict.update({'backup_interface':
Common.convert_intf_name(backup_interface)})
continue
return ret_dict
class ShowIpOspfSegmentRoutingSidDatabaseSchema(MetaParser):
''' Schema for commands:
* show ip ospf segment-routing sid-database
'''
schema = {
'process_id': {
Any(): {
'router_id': str,
Optional('sids'): {
Any(): {
'index': {
Any(): { # 1, 2, 3, ...
Optional('codes'): str,
'prefix': str,
Optional('adv_rtr_id'): str,
Optional('area_id'): str,
Optional('type'): str,
Optional('algo'): int
}
}
},
'total_entries': int
}
}
}
}
class ShowIpOspfSegmentRoutingSidDatabase(ShowIpOspfSegmentRoutingSidDatabaseSchema):
""" Parser for commands:
* show ip ospf segment-routing sid-database
"""
cli_command = ['show ip ospf segment-routing sid-database']
def cli(self, output=None):
if output is None:
out = self.device.execute(self.cli_command[0])
else:
out = output
# OSPF Router with ID (10.4.1.1) (Process ID 65109)
p1 = re.compile(r'^OSPF +Router +with +ID +\((?P<router_id>[\d+\.]+)\) +'
'\(Process +ID +(?P<pid>\d+)\)$')
# 1 (L) 10.4.1.1/32 10.4.1.1 8 Intra 0
# 2 10.16.2.2/32 10.16.2.2 8 Intra 0
# 10.16.2.3/32 10.16.2.2 8 Intra 0
# 3 (M) 10.16.2.3/32 Unknown 0
# 10.36.3.3/32 10.16.2.10 0
p2 = re.compile(r'(?:(?P<sid>\d+) +)?(?:\((?P<codes>[LNM,]+)\) +)?(?P<prefix>[\d\.\/]+)'
r'( +(?P<adv_rtr_id>[\d\.]+))?( +(?P<area_id>\d+))?(?: +(?P<type>\w+))?'
r'(?: +(?P<algo>\d+))?')
ret_dict = {}
sid_entries = 0
for line in out.splitlines():
line = line.strip()
# OSPF Router with ID (10.4.1.1) (Process ID 65109)
m = p1.match(line)
if m:
group = m.groupdict()
process_dict = ret_dict.setdefault('process_id', {}).setdefault(int(group['pid']), {})
process_dict.update({'router_id': group['router_id']})
continue
# 1 (L) 10.4.1.1/32 10.4.1.1 8 Intra 0
# 2 10.16.2.2/32 10.16.2.2 8 Intra 0
# 10.16.2.3/32 10.16.2.2 8 Intra 0
# 3 (M) 10.16.2.3/32 Unknown 0
# 10.36.3.3/32 10.16.2.10 0
m = p2.match(line)
if m:
group = m.groupdict()
sid_entries += 1
sids_dict = process_dict.setdefault('sids', {})
sids_dict.update({'total_entries': sid_entries})
if group.get('sid'):
index = 1
sid_dict = sids_dict.setdefault(int(group['sid']), {})
else:
# No sid found. Using previous sid.
index += 1
index_dict = sid_dict.setdefault('index', {}).setdefault(index, {})
index_dict.update({'prefix': group['prefix']})
if group.get('codes'):
index_dict.update({'codes': group['codes']})
if group.get('adv_rtr_id'):
index_dict.update({'adv_rtr_id': group['adv_rtr_id']})
if group.get('area_id'):
index_dict.update({'area_id': str(IPAddress(group['area_id']))})
if group.get('type'):
index_dict.update({'type': group['type']})
if group.get('algo'):
index_dict.update({'algo': int(group['algo'])})
continue
return ret_dict
# =====================================================
# Schema for:
# * 'show ip ospf {pid} segment-routing global-block'
# =====================================================
class ShowIpOspfSegmentRoutingGlobalBlockSchema(MetaParser):
""" Schema for commands:
* show ip ospf {pid} segment-routing global-block
"""
schema = {
'process_id': {
Any(): {
'router_id': str,
'area': int,
'routers': {
Any(): {
'router_id': str,
'sr_capable': str,
Optional('sr_algorithm'): str,
Optional('srgb_base'): int,
Optional('srgb_range'): int,
Optional('sid_label'): str
}
}
}
}
}
# =====================================================
# Parser for:
# * 'show ip ospf {pid} segment-routing global-block'
# =====================================================
class ShowIpOspfSegmentRoutingGlobalBlock(ShowIpOspfSegmentRoutingGlobalBlockSchema):
""" Parser for commands:
* show ip ospf {pid} segment-routing global-block
"""
cli_command = ['show ip ospf segment-routing global-block',
'show ip ospf {process_id} segment-routing global-block']
def cli(self, process_id=None, output=None):
if not output:
if not process_id:
cmd = self.cli_command[0]
else:
cmd = self.cli_command[1].format(process_id=process_id)
out = self.device.execute(cmd)
else:
out = output
# OSPF Router with ID (10.4.1.1) (Process ID 1234)
p1 = re.compile(r'^OSPF +Router +with +ID +\((?P<router_id>[\d+\.]+)\) +'
'\(Process +ID +(?P<pid>\d+)\)$')
# OSPF Segment Routing Global Blocks in Area 3
p2 = re.compile(r'^OSPF +Segment +Routing +Global +Blocks +in +Area (?P<area>\d+)$')
# *10.4.1.1 Yes SPF,StrictSPF 16000 8000 Label
# 10.16.2.2 Yes SPF,StrictSPF 16000 8000 Label
# *10.4.1.1 No
# 10.16.2.2 No
p3 = re.compile(r'^\*?(?P<router_id>[\d\.]+) +(?P<sr_capable>\w+)'
'(?: +(?P<sr_algorithm>[\w,]+) +(?P<srgb_base>\d+) +'
'(?P<srgb_range>\d+) +(?P<sid_label>\w+))?$')
ret_dict = {}
for line in out.splitlines():
line = line.strip()
# OSPF Router with ID (10.4.1.1) (Process ID 1234)
m = p1.match(line)
if m:
group = m.groupdict()
router_dict = ret_dict.setdefault('process_id', {}).setdefault(int(group['pid']), {})
router_dict.update({'router_id': group['router_id']})
continue
# OSPF Segment Routing Global Blocks in Area 3
m = p2.match(line)
if m:
group = m.groupdict()
router_dict.update({'area': int(group['area'])})
continue
# *10.4.1.1 Yes SPF,StrictSPF 16000 8000 Label
# 10.16.2.2 Yes SPF,StrictSPF 16000 8000 Label
m = p3.match(line)
if m:
group = m.groupdict()
router_entry_dict = router_dict.setdefault('routers', {}).setdefault(group['router_id'], {})
router_entry_dict.update({'router_id': group['router_id']})
router_entry_dict.update({'sr_capable': group['sr_capable']})
if group['sr_algorithm']:
router_entry_dict.update({'sr_algorithm': group['sr_algorithm']})
if group['srgb_base']:
router_entry_dict.update({'srgb_base': int(group['srgb_base'])})
if group['srgb_range']:
router_entry_dict.update({'srgb_range': int(group['srgb_range'])})
if group['sid_label']:
router_entry_dict.update({'sid_label': group['sid_label']})
continue
return ret_dict
# ==========================================
# Parser for 'show ip ospf segment-routing'
# ==========================================
class ShowIpOspfSegmentRoutingSchema(MetaParser):
''' Schema for show ip ospf segment-routing
'''
schema = {
'process_id': {
Any(): {
'router_id': str,
Optional('global_segment_routing_state'): str,
Optional('segment_routing_enabled'): {
'area': {
Any(): {
'topology_name': str,
'forwarding': str,
'strict_spf': str
}
}
},
'sr_attributes': {
'sr_label_preferred': bool,
'advertise_explicit_null': bool,
},
Optional('global_block_srgb'): {
'range': {
'start': int,
'end': int
},
'state': str,
},
Optional('local_block_srlb'): {
'range': {
'start': int,
'end': int
},
'state': str,
},
Optional('registered_with'): {
Any(): {
Optional('client_handle'): int,
Optional('sr_algo'): {
Any(): {
Any(): {
'handle': str,
'bit_mask': str,
}
}
},
Optional('client_id'): int,
}
},
Optional('max_labels'): {
'platform': int,
'available': int,
'pushed_by_ospf': {
'uloop_tunnels': int,
'ti_lfa_tunnels': int
}
},
'mfi_label_reservation_ack_pending': bool,
'bind_retry_timer_running': bool,
Optional('bind_retry_timer_left'): str,
Optional('adj_label_bind_retry_timer_running'): bool,
Optional('adj_label_bind_retry_timer_left'): str,
Optional('srp_app_locks_requested'): {
'srgb': int,
'srlb': int
},
Optional('teapp'): {
'te_router_id': str
}
}
}
}
class ShowIpOspfSegmentRouting(ShowIpOspfSegmentRoutingSchema):
''' Parser for show ip ospf segment-routing
'''
cli_command = 'show ip ospf segment-routing'
def cli(self, output=None):
if output is None:
out = self.device.execute(self.cli_command)
else:
out = output
# OSPF Router with ID (10.16.2.2) (Process ID 65109)
p1 = re.compile(r'^OSPF +Router +with +ID +\((?P<router_id>\S+)\) +\('
'Process +ID +(?P<process_id>\d+)\)$')
# Global segment-routing state: Enabled
p2 = re.compile(r'^Global +segment\-routing +state: +'
'(?P<global_segment_routing_state>\S+)$')
# Prefer non-SR (LDP) Labels
p3 = re.compile(r'^Prefer +non\-SR +\(LDP\) +Labels$')
# Do not advertise Explicit Null
p4 = re.compile(r'^Do +not +advertise +Explicit +Null$')
# Global Block (SRGB):
p5 = re.compile(r'^Global +Block +\(SRGB\):$')
# Range: 16000 - 23999
p6 = re.compile(r'^Range: +(?P<start>\d+) +\- +(?P<end>\d+)$')
# State: Created
p7 = re.compile(r'^State: +(?P<state>\S+)$')
# Local Block (SRLB):
p8 = re.compile(r'^Local +Block +\(SRLB\):$')
# Registered with SR App, client handle: 2
p9 = re.compile(r'^Registered +with +(?P<app_name>[\S\s]+), +'
'client +handle: +(?P<client_handle>\d+)$')
# SR algo 0 Connected map notifications active (handle 0x0), bitmask 0x1
p10 = re.compile(r'^SR +algo +(?P<algo>\d+) +(?P<notifications>[\S\s]+) +\('
'handle +(?P<handle>\w+)\), +bitmask +(?P<bitmask>\w+)$')
# Registered with MPLS, client-id: 100
p12 = re.compile(r'^Registered +with +(?P<app_name>[\S\s]+), +client\-id: +'
'(?P<client_id>\d+)$')
# Max labels: platform 16, available 13
p13 = re.compile(r'^Max +labels: +platform +(?P<platform>\d+), available +(?P<available>\d+)$')
# Max labels pushed by OSPF: uloop tunnels 10, TI-LFA tunnels 10
p14 = re.compile(r'^Max +labels +pushed +by +OSPF: +uloop +tunnels +(?P<uloop_tunnels>\d+)'
', +TI\-LFA +tunnels +(?P<ti_lfa_tunnels>\d+)$')
# mfi label reservation ack not pending
p15 = re.compile(r'^mfi +label +reservation +ack +not +pending$')
# Bind Retry timer not running
p16 = re.compile(r'^Bind +Retry +timer +not +running$')
# Bind Retry timer running, left ???
p16_1 = re.compile(r'^Bind +Retry +timer +running, +left +(?P<bind_retry_timer_left>\S+)$')
# Adj Label Bind Retry timer not running
p17 = re.compile(r'^Adj +Label +Bind +Retry +timer +not +running$')
# Adj Label Bind Retry timer running, left ???
p17_1 = re.compile(r'^Adj +Label +Bind +Retry +timer +running, +left +(?P<adj_label_bind_retry_timer_left>\S+)$')
# sr-app locks requested: srgb 0, srlb 0
p18 = re.compile(r'^sr\-app +locks +requested: +srgb +(?P<srgb>\d+), +srlb +(?P<srlb>\d+)$')
# TE Router ID 10.16.2.2
p19 = re.compile(r'^TE +Router +ID +(?P<te_router_id>\S+)$')
# Area Topology name Forwarding Strict SPF
p20 = re.compile(r'^Area +Topology +name +Forwarding +Strict +SPF$')
# 8 Base MPLS Capable
# AS external Base MPLS Not applicable
p21 = re.compile(r'^(?P<area>(\d+|(\w+ +\w+))) +(?P<topology_name>\w+)'
' +(?P<forwarding>\w+) +(?P<strict_spf>\w+( +\w+)?)$')
# initial variables
ret_dict = {}
for line in out.splitlines():
line = line.strip()
# OSPF Router with ID (10.16.2.2) (Process ID 65109)
m = p1.match(line)
if m:
group = m.groupdict()
router_id = group['router_id']
process_id = int(group['process_id'])
process_id_dict = ret_dict.setdefault('process_id', {}). \
setdefault(process_id, {})
process_id_dict.update({'router_id': router_id})
sr_attributes_dict = process_id_dict.setdefault('sr_attributes', {})
sr_attributes_dict.update({'sr_label_preferred': True})
sr_attributes_dict.update({'advertise_explicit_null': True})
process_id_dict.update({'mfi_label_reservation_ack_pending': True})
process_id_dict.update({'bind_retry_timer_running': True})
process_id_dict.update({'adj_label_bind_retry_timer_running': True})
continue
# Global segment-routing state: Enabled
m = p2.match(line)
if m:
group = m.groupdict()
global_segment_routing_state = group['global_segment_routing_state']
process_id_dict.update({'global_segment_routing_state': global_segment_routing_state})
continue
# Prefer non-SR (LDP) Labels
m = p3.match(line)
if m:
group = m.groupdict()
sr_attributes_dict = process_id_dict.setdefault('sr_attributes', {})
sr_attributes_dict.update({'sr_label_preferred': False})
continue
# Do not advertise Explicit Null
m = p4.match(line)
if m:
group = m.groupdict()
sr_attributes_dict = process_id_dict.setdefault('sr_attributes', {})
sr_attributes_dict.update({'advertise_explicit_null': False})
continue
# Global Block (SRGB):
m = p5.match(line)
if m:
group = m.groupdict()
block_dict = process_id_dict.setdefault('global_block_srgb', {})
continue
# Range: 16000 - 23999
m = p6.match(line)
if m:
group = m.groupdict()
range_dict = block_dict.setdefault('range', {})
range_dict.update({'start': int(group['start'])})
range_dict.update({'end': int(group['end'])})
continue
# State: Created
m = p7.match(line)
if m:
group = m.groupdict()
state = group['state']
block_dict.update({'state': state})
continue
# Local Block (SRLB):
m = p8.match(line)
if m:
group = m.groupdict()
block_dict = process_id_dict.setdefault('local_block_srlb', {})
continue
# Registered with SR App, client handle: 2
m = p9.match(line)
if m:
group = m.groupdict()
app_name = group['app_name']
client_handle = int(group['client_handle'])
registered_with_sr_app_dict = process_id_dict.setdefault('registered_with', {}). \
setdefault(app_name, {})
registered_with_sr_app_dict.update({'client_handle': client_handle})
continue
# SR algo 0 Connected map notifications active (handle 0x0), bitmask 0x1
# SR algo 0 Active policy map notifications active (handle 0x2), bitmask 0xC
m = p10.match(line)
if m:
group = m.groupdict()
algo = int(group['algo'])
notifications = group['notifications'].lower().replace(' ', '_')
handle = group['handle']
bitmask = group['bitmask']
sr_algo_dict = registered_with_sr_app_dict.setdefault('sr_algo', {}). \
setdefault(algo, {}). \
setdefault(notifications, {})
sr_algo_dict.update({'handle': handle})
sr_algo_dict.update({'bit_mask': bitmask})
continue
# Registered with MPLS, client-id: 100
m = p12.match(line)
if m:
group = m.groupdict()
app_name = group['app_name']
client_id = int(group['client_id'])
registered_with_mpls_dict = process_id_dict.setdefault('registered_with', {}). \
setdefault(app_name, {})
registered_with_mpls_dict.update({'client_id': client_id})
continue
# Max labels: platform 16, available 13
m = p13.match(line)
if m:
group = m.groupdict()
platform = int(group['platform'])
available = int(group['available'])
match_labels_dict = process_id_dict.setdefault('max_labels', {})
match_labels_dict.update({'platform': platform})
match_labels_dict.update({'available': available})
continue
# Max labels pushed by OSPF: uloop tunnels 10, TI-LFA tunnels 10
m = p14.match(line)
if m:
group = m.groupdict()
uloop_tunnels = int(group['uloop_tunnels'])
ti_lfa_tunnels = int(group['ti_lfa_tunnels'])
match_labels_dict = process_id_dict.setdefault('max_labels', {})
pushed_by_ospf_dict = match_labels_dict.setdefault('pushed_by_ospf', {})
pushed_by_ospf_dict.update({'uloop_tunnels': uloop_tunnels})
pushed_by_ospf_dict.update({'ti_lfa_tunnels': ti_lfa_tunnels})
continue
# mfi label reservation ack not pending
m = p15.match(line)
if m:
process_id_dict.update({'mfi_label_reservation_ack_pending': False})
continue
# Bind Retry timer not running
m = p16.match(line)
if m:
process_id_dict.update({'bind_retry_timer_running': False})
continue
# Bind Retry timer running, left ???
m = p16_1.match(line)
if m:
group = m.groupdict()
bind_retry_timer_left = group['bind_retry_timer_left']
process_id_dict.update({'bind_retry_timer_left': bind_retry_timer_left})
continue
# Adj Label Bind Retry timer not running
m = p17.match(line)
if m:
process_id_dict.update({'adj_label_bind_retry_timer_running': False})
continue
# Adj Label Bind Retry timer running, left ???
m = p17_1.match(line)
if m:
group = m.groupdict()
adj_label_bind_retry_timer_left = group['adj_label_bind_retry_timer_left']
process_id_dict.update({'adj_label_bind_retry_timer_left': adj_label_bind_retry_timer_left})
continue
# sr-app locks requested: srgb 0, srlb 0
m = p18.match(line)
if m:
group = m.groupdict()
srgb = int(group['srgb'])
srlb = int(group['srlb'])
srp_app_locks_requested_dict = process_id_dict.setdefault('srp_app_locks_requested', {})
srp_app_locks_requested_dict.update({'srgb': srgb})
srp_app_locks_requested_dict.update({'srlb': srlb})
continue
# TE Router ID 10.16.2.2
m = p19.match(line)
if m:
group = m.groupdict()
te_router_id = group['te_router_id']
process_id_dict.setdefault('teapp', {}). \
update({'te_router_id': te_router_id})
continue
# Area Topology name Forwarding Strict SPF
m = p20.match(line)
if m:
segment_routing_enabled_dict = process_id_dict.setdefault('segment_routing_enabled', {})
continue
# 8 Base MPLS Capable
# AS external Base MPLS Not applicable
m = p21.match(line)
if m:
group = m.groupdict()
area = group['area']
if area.isdigit():
area = str(IPAddress(str(area)))
topology_name = group['topology_name']
forwarding = group['forwarding']
strict_spf = group['strict_spf']
area_dict = segment_routing_enabled_dict.setdefault('area', {}). \
setdefault(area, {})
area_dict.update({'topology_name' : topology_name})
area_dict.update({'forwarding' : forwarding})
area_dict.update({'strict_spf' : strict_spf})
continue
return ret_dict
class ShowIpOspfDatabaseOpaqueAreaSelfOriginate(ShowIpOspfDatabaseOpaqueAreaSchema, ShowIpOspfDatabaseTypeParser):
''' Parser for:
* 'show ip ospf database opaque-area self-originate'
'''
cli_command = ['show ip ospf database opaque-area {lsa_id} self-originate',
'show ip ospf database opaque-area self-originate']
def cli(self, lsa_id=None, output=None):
if output is None:
if lsa_id:
output = self.device.execute(self.cli_command[0].format(lsa_id=lsa_id))
else:
output = self.device.execute(self.cli_command[1])
return super().cli(db_type='opaque', out=output)
class ShowIpOspfDatabaseOpaqueAreaAdvRouter(ShowIpOspfDatabaseOpaqueAreaSchema, ShowIpOspfDatabaseTypeParser):
''' Parser for:
* 'show ip ospf database opaque-area adv-router {address}'
'''
cli_command = 'show ip ospf database opaque-area adv-router {address}'
def cli(self, address, output=None):
if not output:
output = self.device.execute(self.cli_command.format(address=address))
return super().cli(db_type='opaque', out=output)
class ShowIpOspfDatabaseOpaqueAreaTypeExtLink(ShowIpOspfDatabaseOpaqueAreaSchema, ShowIpOspfDatabaseTypeParser):
""" Parser for:
* show ip ospf database opaque-area type ext-link
"""
cli_command = 'show ip ospf database opaque-area type ext-link'
def cli(self, output=None):
if not output:
output = self.device.execute(self.cli_command)
return super().cli(db_type='opaque', out=output)
class ShowIpOspfDatabaseOpaqueAreaTypeExtLinkSelfOriginate(ShowIpOspfDatabaseOpaqueAreaSchema, ShowIpOspfDatabaseTypeParser):
""" Parser for:
* show ip ospf database opaque-area type ext-link self-originate
"""
cli_command = 'show ip ospf database opaque-area type ext-link self-originate'
def cli(self, output=None):
if not output:
output = self.device.execute(self.cli_command)
return super().cli(db_type='opaque', out=output)
class ShowIpOspfDatabaseOpaqueAreaTypeExtLinkAdvRouter(ShowIpOspfDatabaseOpaqueAreaSchema, ShowIpOspfDatabaseTypeParser):
""" Parser for:
* show ip ospf database opaque-area type ext-link adv-router {address}
"""
cli_command = 'show ip ospf database opaque-area type ext-link adv-router {address}'
def cli(self, address, output=None):
if not output:
output = self.device.execute(self.cli_command.format(address=address))
return super().cli(db_type='opaque', out=output)
|
the-stack_106_27907 | import os
import pickle
import random
import numpy as np
class ReplayMemory:
def __init__(self, capacity, seed):
random.seed(seed)
self.capacity = capacity
self.buffer = []
self.position = 0
def push(self, state, action, reward, next_state, done):
if len(self.buffer) < self.capacity:
self.buffer.append(None)
self.buffer[self.position] = (state, action, reward, next_state, done)
self.position = (self.position + 1) % self.capacity
def sample(self, batch_size):
batch = random.sample(self.buffer, batch_size)
state, action, reward, next_state, done = map(np.stack, zip(*batch))
return state, action, reward, next_state, done
def __len__(self):
return len(self.buffer)
def save_buffer(self, logs_dir, return_path=False):
save_path = os.path.join(logs_dir, "replay_buffer.pkl")
print('Saving buffer to {}'.format(save_path))
with open(save_path, 'wb') as f:
pickle.dump(self.buffer, f)
if return_path:
return save_path
def load_buffer(self, save_path):
print('Loading buffer from {}'.format(save_path))
with open(save_path, "rb") as f:
self.buffer = pickle.load(f)
self.position = len(self.buffer) % self.capacity
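# ------------------------------------------------------------------------------
# Usage sketch (illustrative; not part of the original module). It exercises the
# buffer with random transitions purely to show the push/sample/save round trip;
# the state and action sizes below are arbitrary assumptions.
if __name__ == "__main__":
    import tempfile

    memory = ReplayMemory(capacity=1000, seed=0)
    for _ in range(64):
        state = np.random.randn(4).astype(np.float32)
        action = np.random.randn(2).astype(np.float32)
        next_state = np.random.randn(4).astype(np.float32)
        # reward and done are plain scalars; np.stack in sample() handles them
        memory.push(state, action, 1.0, next_state, False)

    states, actions, rewards, next_states, dones = memory.sample(batch_size=32)
    print(states.shape, actions.shape, rewards.shape)   # (32, 4) (32, 2) (32,)

    with tempfile.TemporaryDirectory() as logs_dir:
        saved_path = memory.save_buffer(logs_dir, return_path=True)
        memory.load_buffer(saved_path)
        print(len(memory))                               # 64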
|
the-stack_106_27908 | """
Loading results, formatting and adding columns
result is the raw result metric computed from predictions at the end of the benchmark. For classification problems, it is usually auc for binomial classification and logloss for multinomial classification.
score ensures a standard comparison between tasks: higher is always better.
norm_score is a normalization of score on a [0, 1] scale, with {{zero_one_refs[0]}} score as 0 and {{zero_one_refs[1]}} score as 1.
imp_result and imp_score for imputed results/scores. Given a task and a framework:
if all folds' results/scores are missing, then no imputation occurs, and the result is nan for each fold.
if only some folds' results/scores are missing, then the missing result is imputed by the {{impute_missing_with}} result for this fold.
"""
import numpy as np
import pandas as pd
import report.config as config
from .metadata import load_dataset_metadata
from .util import Namespace, display
def load_results(files):
return pd.concat([pd.read_csv(file) for file in files], ignore_index=True)
def task_prop(row, metadata, prop):
return getattr(metadata.get(row.task), prop)
def impute_result(row, results, res_col='result',
imp_framework=None, imp_results=None,
imp_value=None, aggregation=None):
if pd.notna(row[res_col]):
return row[res_col]
# if all folds are failed or missing, don't impute
if pd.isna(results.loc[(results.task == row.task)
& (results.framework == row.framework)][res_col]).all():
return np.nan
if imp_framework is not None:
# impute with ref framework corresponding value
imp_results = results if imp_results is None else imp_results
return (imp_results.loc[(imp_results.framework == imp_framework)
& (imp_results.task == row.task)]
[res_col]
.agg(aggregation) if aggregation
else imp_results.loc[(imp_results.framework == imp_framework)
& (imp_results.task == row.task)
& (imp_results.fold == row.fold)]
[res_col]
.item())
return imp_value
def imputed(row):
return pd.isna(row.result) and pd.notna(row.imp_result)
fit_metrics = ['auc', 'acc', 'r2']
def metric_type(row, res_col='result'):
return 'fit' if any([row[res_col] == getattr(row, m, None) for m in fit_metrics]) else 'loss'
def score(row, res_col='result'):
return (row[res_col] if row['metric_type'] == 'fit'
else - row[res_col])
def norm_score(row, score_col='score',
zero_one_refs=None, ref_results=None,
aggregation=None):
if zero_one_refs is None:
return row[score_col]
def get_val(ref, default):
try:
if isinstance(ref, str):
return (ref_results.loc[(ref_results.framework == ref)
& (ref_results.task == row.task)]
[score_col]
.agg(aggregation) if aggregation
else ref_results.loc[(ref_results.framework == ref)
& (ref_results.task == row.task)
& (ref_results.fold == row.fold)]
[score_col]
.item())
else:
return ref
except Exception:
raise
# return default
zero, one = (get_val(ref, i) for i, ref in enumerate(zero_one_refs))
rel_score = (row[score_col] - zero) / (one - zero)
return (- rel_score if row['metric_type'] == 'loss' and one < 0 <= zero
else rel_score)
def sorted_ints(arr):
return sorted(list(map(int, arr[~np.isnan(arr)])))
def remove_duplicates(df, handling='fail'):
if not df.index.is_unique:
print("Duplicate entries:")
display(df[df.index.duplicated(keep=False)].sort_values(by=df.index.names),
pretty=False)
assert df.index.is_unique or handling != 'fail'
duplicated = (df.index.duplicated(keep='first') if handling == 'keep_first'
else df.index.duplicated(keep='last') if handling == 'keep_last'
else df.index.duplicated(keep=False) if handling == 'keep_none'
else np.full((len(df), 1), False))
return df[~duplicated]
def prepare_results(results_files,
renamings=None,
exclusions=None,
imputation=None,
normalization=None,
ref_results=None,
duplicates_handling='fail' # other options are 'keep_first', 'keep_last', 'keep_none'
):
if not results_files:
return None
results = load_results(results_files)
if renamings:
results.replace(renamings, inplace=True)
if exclusions:
results = results.loc[~results.framework.isin(exclusions)]
results.task = results.task.str.lower()
results.framework = results.framework.str.lower()
results.fold = results.fold.apply(int)
frameworks = results.framework.unique()
frameworks.sort()
tasks = results.task.unique()
tasks.sort()
folds = results.fold.unique()
metadata = load_dataset_metadata(results)
done = results.set_index(['task', 'fold', 'framework'])
done = remove_duplicates(done, handling=duplicates_handling)
missing = (pd.DataFrame([(task, fold, framework, 'missing')
for task in tasks
for fold in range(config.nfolds)
for framework in frameworks
if (task, fold, framework) not in done.index],
columns=[*done.index.names, 'info'])
.set_index(done.index.names))
missing = remove_duplicates(missing, handling=duplicates_handling)
failed = (results.loc[pd.notna(results['info'])]
.set_index(done.index.names))
failed = remove_duplicates(failed, handling=duplicates_handling)
# extending the data frame
results = results.append(missing.reset_index())
results['type'] = [task_prop(row, metadata, 'type') for _, row in results.iterrows()]
results['metric_type'] = [metric_type(row) for _, row in results.iterrows()]
results['score'] = [score(row) for _, row in results.iterrows()]
if ref_results is None:
ref_results = results
if imputation is not None:
imp_fr = imp_val = aggr = None
if isinstance(imputation, tuple):
imp_fr, aggr = imputation
elif isinstance(imputation, str):
imp_fr = imputation
else:
imp_val = imputation
results['imp_result'] = [impute_result(row, results,
imp_framework=imp_fr, imp_results=ref_results,
imp_value=imp_val, aggregation=aggr)
for _, row in results.iterrows()]
results['imp_score'] = [impute_result(row, results, 'score',
imp_framework=imp_fr, imp_results=ref_results,
imp_value=imp_val, aggregation=aggr)
for _, row in results.iterrows()]
if normalization is not None:
score_col = 'imp_score' if imputation is not None else 'score'
zero_one = normalization[0:2]
aggr = normalization[2] if len(normalization) > 2 else None
results['norm_score'] = [norm_score(row, score_col,
zero_one_refs=zero_one, ref_results=ref_results, aggregation=aggr)
for _, row in results.iterrows()]
return Namespace(
results=results,
frameworks=frameworks,
tasks=tasks,
folds=folds,
metadata=metadata,
done=done,
missing=missing,
failed=failed
)
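# Illustrative usage sketch (the file names, framework labels and reference frameworks
# below are assumptions, not part of this module):
#
#   res = prepare_results(
#       ["results_run1.csv", "results_run2.csv"],
#       renamings={"RandomForest": "randomforest"},
#       exclusions=["h2o_nightly"],
#       imputation=("constantpredictor", "mean"),
#       normalization=("constantpredictor", "tunedrandomforest", "mean"),
#       duplicates_handling="keep_last",
#   )
#   display(res.results.head())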
|
the-stack_106_27909 | # -*- encoding:utf-8 -*-
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 8
_modified_time = 1350123761.8453729
_enable_loop = True
_template_filename = '/home/smita/cyberweb/cyberweb/templates/gcem/gcem_sim_details.mako'
_template_uri = '/gcem/gcem_sim_details.mako'
_source_encoding = 'utf-8'
from webhelpers.html import escape
_exports = ['col1main']
def _mako_get_namespace(context, name):
try:
return context.namespaces[(__name__, name)]
except KeyError:
_mako_generate_namespaces(context)
return context.namespaces[(__name__, name)]
def _mako_generate_namespaces(context):
pass
def _mako_inherit(template, context):
_mako_generate_namespaces(context)
return runtime._inherit_from(context, u'/1col.mako', _template_uri)
def render_body(context,**pageargs):
__M_caller = context.caller_stack._push_frame()
try:
__M_locals = __M_dict_builtin(pageargs=pageargs)
__M_writer = context.writer()
# SOURCE LINE 1
__M_writer(u'\n\n')
# SOURCE LINE 54
__M_writer(u'\n\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_col1main(context):
__M_caller = context.caller_stack._push_frame()
try:
c = context.get('c', UNDEFINED)
__M_writer = context.writer()
# SOURCE LINE 3
__M_writer(u'\n<style type="text/css">\n table, td, th\n {\n width:600px;\n border:1px solid black;\n }\n td\n {\n height:400px;\n vertical-align:top;\n }\n\n #jobs table, #jobs th, #jobs td\n {\n width:600px;\n border:1px solid black;\n }\n #jobs th\n {\n height:200px;\n vertical-align:top;\n }\n #jobs td\n {\n height:200px;\n vertical-align:top;\n }\n</style>\n\n<h3>')
# SOURCE LINE 33
__M_writer(escape(c.title))
__M_writer(u'</h3>\n<p>\n\n\n<blockquote>\n <table id="jobs">\n <tr><td>\n list all jobs in top panel job status/progress\n </td></tr>\n </table>\n</blockquote>\n<blockquote>\n <table style="vertical:600px">\n <tr><td>\n <p> on selection of job in panel above, display job details\n <p>interact with job (cancel)\n <p>view interim results --> redirect to analyze \n </td></tr>\n </table>\n</blockquote>\n\n')
return ''
finally:
context.caller_stack._pop_frame()
|
the-stack_106_27910 | # -*- coding: utf-8 -*-
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from matplotlib.colors import LinearSegmentedColormap
import matplotlib.pyplot as plt
import numpy as np
import bbobbenchmarks
#########User-configured Parameters:
#Numbered from 1 (Sphere Function) to 24 (Lunacek bi-Rastrigin Function)
#as they occur in http://coco.lri.fr/downloads/download15.03/bbobdocfunctions.pdf
ProblemID=21
#Range for X,Y to display
xLimitLower = -5.05
xLimitUpper = 5.05
yLimitLower = -5.05
yLimitUpper = 5.05
#Samplepoints per dimension (remember the total number of points is samplepoints²)
samplepoints = 101
#Range below/above the optimal function value - keep in mind this is minimization!
zLimitBelow = 10 # "empty" space below opt f-value
zLimitAbove = 100 # added range which is shown of the function above the opt f-value
#If you don't care and want automatically determined values for the given X/Y-rectangle
autoZ = True
#########SCRIPT#########
problem, optimalFunValue = bbobbenchmarks.instantiate(ProblemID,1)
#one eval is needed so xopt exists
problem._evalfull(np.array([0,0]))
print('Problem: ' + str(ProblemID))
print('Optimal Solution Vector: ' + str(problem.xopt))
print('Optimal Function Value: ' + str(optimalFunValue))
@np.vectorize
def func(x, y):
coord = np.array([x-xopt,y-yopt])
_, funValue = problem._evalfull(coord)
return funValue
#This return is much better for some problems
#return np.log10(funValue - optimalFunValue)
#Generating the global optimum somewhere inside [-4,4]
xopt = np.random.uniform(-4,4)
yopt = np.random.uniform(-4,4)
fig = plt.figure()
ax = fig.gca(projection='3d')
#Defining the grid of probing points, and how many of them
X = np.linspace(-5, 5, samplepoints)
Y = np.linspace(-5, 5, samplepoints)
Z = func(X[:,None], Y[None,:])
X, Y = np.meshgrid(X, Y) #needed for getting everything plotted
#Plot itself
surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.jet, linewidth=0, antialiased=False)
#Defining the "Viewport"
ax.set_xlim(-5.01,5.01)
ax.set_ylim(-5.01,5.01)
if(autoZ):
print('automatic z-limits by matplotlib')
else:
ax.set_zlim(optimalFunValue - zLimitBelow, optimalFunValue + zLimitAbove)
#Axis Labels
ax.set_xlabel('x1')
ax.set_ylabel('x2')
ax.set_zlabel('f')
#Label for the logarithmic return value
#ax.set_zlabel('$log_{10}(f-f_{opt})$')
#Inverting the zaxis makes for better images
plt.gca().invert_zaxis()
plt.show()
#For future use, changes format of the axis labeling
#x.zaxis.set_major_locator(LinearLocator(10))
#ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
#If one wants to see the colorbar
#fig.colorbar(surf, shrink=0.5, aspect=5) |
the-stack_106_27914 | import json
import pytest
from buildtrigger.test.githubmock import get_github_trigger
from buildtrigger.triggerutil import (
SkipRequestException,
ValidationRequestException,
InvalidPayloadException,
)
from endpoints.building import PreparedBuild
from util.morecollections import AttrDict
@pytest.fixture
def github_trigger():
return get_github_trigger()
@pytest.mark.parametrize(
"payload, expected_error, expected_message",
[
('{"zen": true}', SkipRequestException, ""),
("{}", InvalidPayloadException, "Missing 'repository' on request"),
('{"repository": "foo"}', InvalidPayloadException, "Missing 'owner' on repository"),
# Valid payload:
(
"""{
"repository": {
"owner": {
"name": "someguy"
},
"name": "somerepo",
"ssh_url": "someurl"
},
"ref": "refs/tags/foo",
"head_commit": {
"id": "11d6fbc",
"url": "http://some/url",
"message": "some message",
"timestamp": "NOW"
}
}""",
None,
None,
),
# Skip message:
(
"""{
"repository": {
"owner": {
"name": "someguy"
},
"name": "somerepo",
"ssh_url": "someurl"
},
"ref": "refs/tags/foo",
"head_commit": {
"id": "11d6fbc",
"url": "http://some/url",
"message": "[skip build]",
"timestamp": "NOW"
}
}""",
SkipRequestException,
"",
),
],
)
def test_handle_trigger_request(github_trigger, payload, expected_error, expected_message):
def get_payload():
return json.loads(payload)
request = AttrDict(dict(get_json=get_payload))
if expected_error is not None:
with pytest.raises(expected_error) as ipe:
github_trigger.handle_trigger_request(request)
assert str(ipe.value) == expected_message
else:
assert isinstance(github_trigger.handle_trigger_request(request), PreparedBuild)
@pytest.mark.parametrize(
"dockerfile_path, contents",
[
("/Dockerfile", "hello world"),
("somesubdir/Dockerfile", "hi universe"),
("unknownpath", None),
],
)
def test_load_dockerfile_contents(dockerfile_path, contents):
trigger = get_github_trigger(dockerfile_path)
assert trigger.load_dockerfile_contents() == contents
@pytest.mark.parametrize(
"username, expected_response",
[
("unknownuser", None),
("knownuser", {"html_url": "https://bitbucket.org/knownuser", "avatar_url": "avatarurl"}),
],
)
def test_lookup_user(username, expected_response, github_trigger):
assert github_trigger.lookup_user(username) == expected_response
def test_list_build_subdirs(github_trigger):
assert github_trigger.list_build_subdirs() == ["Dockerfile", "somesubdir/Dockerfile"]
def test_list_build_source_namespaces(github_trigger):
namespaces_expected = [
{
"personal": True,
"score": 1,
"avatar_url": "avatarurl",
"id": "knownuser",
"title": "knownuser",
"url": "https://bitbucket.org/knownuser",
},
{
"score": 0,
"title": "someorg",
"personal": False,
"url": "",
"avatar_url": "avatarurl",
"id": "someorg",
},
]
found = github_trigger.list_build_source_namespaces()
sorted(found, key=lambda d: sorted(d.items()))
sorted(namespaces_expected, key=lambda d: sorted(d.items()))
assert found == namespaces_expected
|
the-stack_106_27915 | # G changed into E: the G blocks are reworked into E blocks, mirroring the network in reverse order and recovering the style latents at the same positions
# Compared with v0: residual connections added; the two (conv/linear) outputs w1 and w2 of each layer are merged into a single w
# Compared with v1: learnable bias_1 and bias_2 added; the layer order differs from v1 (more symmetric)
# Compared with v2: usable with both StyleGAN v1 and StyleGAN v2; the Conv with equalized learning rate is no longer used (this change has since been dropped), nor is the blur op in the second layer of each block
# Upsampling changed: no longer done inside the conv
# IN changed: now has learnable parameters
# Residual changed to match standard ResNets; the skip path gains a conv1 to adjust channels plus learnable IN parameters
# Testing showed that parameter layers without Eq (equalized learning rate) do not learn well
# This version is compatible with PGGAN and BigGAN: the main change is the last layer, where FC layers are added
# PGGAN: add one fc, similar to the original D
# BigGAN: add two fc layers of 128 channels each; one of them handles the label, mapping 128 -> 1000
# BigGAN improvement idea 1: replace CBN with IN (implemented in this file)
# BigGAN improvement idea 2: add w to G, symmetric with E's w (not implemented)
import math
import torch
import torch.nn as nn
from torch.nn import init
from torch.nn.parameter import Parameter
import sys
sys.path.append('../')
from torch.nn import functional as F
import model.utils.lreq as ln
def snlinear(eps=1e-12, **kwargs):
return nn.utils.spectral_norm(nn.Linear(**kwargs), eps=eps)
class BigGANBatchNorm(nn.Module):
""" This is a batch norm module that can handle conditional input and can be provided with pre-computed
activation means and variances for various truncation parameters.
We cannot just rely on torch.batch_norm since it cannot handle
        batched weights (pytorch 1.0.1). We compute batch_norm ourselves without updating running means and variances.
If you want to train this model you should add running means and variance computation logic.
"""
def __init__(self, num_features, condition_vector_dim=None, n_stats=51, eps=1e-4, conditional=True):
super(BigGANBatchNorm, self).__init__()
self.num_features = num_features
self.eps = eps
self.conditional = conditional
# We use pre-computed statistics for n_stats values of truncation between 0 and 1
self.register_buffer('running_means', torch.zeros(n_stats, num_features))
self.register_buffer('running_vars', torch.ones(n_stats, num_features))
self.step_size = 1.0 / (n_stats - 1)
if conditional:
assert condition_vector_dim is not None
self.scale = snlinear(in_features=condition_vector_dim, out_features=num_features, bias=False, eps=eps)
self.offset = snlinear(in_features=condition_vector_dim, out_features=num_features, bias=False, eps=eps)
else:
self.weight = torch.nn.Parameter(torch.Tensor(num_features))
self.bias = torch.nn.Parameter(torch.Tensor(num_features))
def forward(self, x, truncation, condition_vector=None):
        # Retrieve pre-computed statistics associated with this truncation
coef, start_idx = math.modf(truncation / self.step_size)
start_idx = int(start_idx)
if coef != 0.0: # Interpolate
running_mean = self.running_means[start_idx] * coef + self.running_means[start_idx + 1] * (1 - coef)
running_var = self.running_vars[start_idx] * coef + self.running_vars[start_idx + 1] * (1 - coef)
else:
running_mean = self.running_means[start_idx]
running_var = self.running_vars[start_idx]
if self.conditional:
running_mean = running_mean.unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
running_var = running_var.unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
weight = 1 + self.scale(condition_vector).unsqueeze(-1).unsqueeze(-1)
bias = self.offset(condition_vector).unsqueeze(-1).unsqueeze(-1)
out = (x - running_mean) / torch.sqrt(running_var + self.eps) * weight + bias
else:
out = F.batch_norm(x, running_mean, running_var, self.weight, self.bias,
training=False, momentum=0.0, eps=self.eps)
return out
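# Worked example of the statistics lookup in BigGANBatchNorm.forward (illustrative): with the
# default n_stats=51 the step_size is 1/50 = 0.02, so truncation=0.4 gives start_idx=20 and
# coef=0.0 and the stored statistics for that slot are used directly, while truncation=0.41
# gives coef=0.5 and blends the slots for 0.40 and 0.42.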
class FromRGB(nn.Module):
def __init__(self, channels, outputs):
super(FromRGB, self).__init__()
self.from_rgb = torch.nn.Conv2d(channels, outputs, 1, 1, 0)
def forward(self, x):
x = self.from_rgb(x)
x = F.leaky_relu(x, 0.2)
return x
class BEBlock(nn.Module):
    def __init__(self, inputs, outputs, latent_size, has_second_conv=True, fused_scale=True):  # use fused_scale for resolutions above 128, i.e. the conv itself does the rescaling
super().__init__()
self.has_second_conv = has_second_conv
self.noise_weight_1 = nn.Parameter(torch.Tensor(1, inputs, 1, 1))
self.noise_weight_1.data.zero_()
self.bias_1 = nn.Parameter(torch.Tensor(1, inputs, 1, 1))
#self.instance_norm_1 = nn.InstanceNorm2d(inputs, affine=True, eps=1e-8)
self.batch_norm_1 = BigGANBatchNorm(inputs, condition_vector_dim=256, n_stats=51, eps=1e-12, conditional=True)
#self.inver_mod1 = ln.Linear(2 * inputs, latent_size) # [n, 2c] -> [n,512]
self.conv_1 = ln.Conv2d(inputs, inputs, 3, 1, 1, bias=False)
self.noise_weight_2 = nn.Parameter(torch.Tensor(1, outputs, 1, 1))
self.noise_weight_2.data.zero_()
self.bias_2 = nn.Parameter(torch.Tensor(1, outputs, 1, 1))
#self.instance_norm_2 = nn.InstanceNorm2d(outputs, affine=True, eps=1e-8)
self.batch_norm_2 = BigGANBatchNorm(inputs, condition_vector_dim=256, n_stats=51, eps=1e-12, conditional=True)
#self.inver_mod2 = ln.Linear(2 * inputs, latent_size)
if has_second_conv:
if fused_scale:
self.conv_2 = ln.Conv2d(inputs, outputs, 3, 2, 1, bias=False)
else:
self.conv_2 = ln.Conv2d(inputs, outputs, 3, 1, 1, bias=False)
self.fused_scale = fused_scale
self.inputs = inputs
self.outputs = outputs
if self.inputs != self.outputs:
self.batch_norm_3 = BigGANBatchNorm(inputs, condition_vector_dim=256, n_stats=51, eps=1e-12, conditional=True)
self.conv_3 = ln.Conv2d(inputs, outputs, 1, 1, 0)
#self.instance_norm_3 = nn.InstanceNorm2d(outputs, affine=True, eps=1e-8)
with torch.no_grad():
self.bias_1.zero_()
self.bias_2.zero_()
def forward(self, x, cond_vector, truncation=0.4):
residual = x
# mean1 = torch.mean(x, dim=[2, 3], keepdim=True) # [b, c, 1, 1]
# std1 = torch.sqrt(torch.mean((x - mean1) ** 2, dim=[2, 3], keepdim=True)) # [b, c, 1, 1]
# style1 = torch.cat((mean1, std1), dim=1) # [b,2c,1,1]
# w1 = self.inver_mod1(style1.view(style1.shape[0],style1.shape[1])) # [b,2c]->[b,512]
w1=0
x = self.batch_norm_1(x, truncation, cond_vector)
#x = F.leaky_relu(x, 0.2)
x = self.conv_1(x)
#x = self.instance_norm_1(x)
x = torch.addcmul(x, value=1.0, tensor1=self.noise_weight_1, tensor2=torch.randn([x.shape[0], 1, x.shape[2], x.shape[3]]).to(x.device))
x = x + self.bias_1
x = F.leaky_relu(x, 0.2)
# mean2 = torch.mean(x, dim=[2, 3], keepdim=True) # [b, c, 1, 1]
# std2 = torch.sqrt(torch.mean((x - mean2) ** 2, dim=[2, 3], keepdim=True)) # [b, c, 1, 1]
# style2 = torch.cat((mean2, std2), dim=1) # [b,2c,1,1]
        # w2 = self.inver_mod2(style2.view(style2.shape[0],style2.shape[1])) # [b,512]; note: style2.view had repeatedly been mistyped as style1.view here
w2=0
if self.has_second_conv:
x = self.batch_norm_2(x, truncation, cond_vector)
#x = F.leaky_relu(x, 0.2)
x = self.conv_2(x)
#x = self.instance_norm_2(x)
x = torch.addcmul(x, value=1.0, tensor1=self.noise_weight_2, tensor2=torch.randn([x.shape[0], 1, x.shape[2], x.shape[3]]).to(x.device))
x = x + self.bias_2
x = F.leaky_relu(x, 0.2)
if self.inputs != self.outputs:
residual = self.batch_norm_3(residual, truncation, cond_vector)
#x = F.leaky_relu(x, 0.2)
residual = self.conv_3(residual)
x = F.leaky_relu(x, 0.2)
x = x + residual
        if not self.fused_scale:  # rescale here (avg pool) when the conv did not fuse the scaling
x = F.avg_pool2d(x, 2, 2)
        #x = 0.111*x+0.889*residual  # lowering x's share shrinks the const loss!! with 0.7*residual: 10-11 >> 7, while the c_s loss grows to 3 and the ws jitter starts earlier -- works better
return x, w1, w2
class BE(nn.Module):
def __init__(self, startf=16, maxf=512, layer_count=9, latent_size=512, channels=3, pggan=False, biggan=False):
super().__init__()
self.maxf = maxf
self.startf = startf
self.latent_size = latent_size
#self.layer_to_resolution = [0 for _ in range(layer_count)]
self.decode_block = nn.ModuleList()
self.layer_count = layer_count
inputs = startf # 16
outputs = startf*2
#resolution = 1024
# from_RGB = nn.ModuleList()
fused_scale = False
self.FromRGB = FromRGB(channels, inputs)
for i in range(layer_count):
            has_second_conv = i+1 != layer_count  # in a plain D the second layer of the last block is mini_batch_std
            #fused_scale = resolution >= 128  # at the start of a new layer fused_scale = False, rescaling happens outside the conv
#from_RGB.append(FromRGB(channels, inputs))
block = BEBlock(inputs, outputs, latent_size, has_second_conv, fused_scale=fused_scale)
inputs = inputs*2
outputs = outputs*2
inputs = min(maxf, inputs)
outputs = min(maxf, outputs)
#self.layer_to_resolution[i] = resolution
#resolution /=2
self.decode_block.append(block)
#self.FromRGB = from_RGB
self.biggan = biggan
if biggan:
self.new_final_1 = ln.Linear(8192, 256, gain=1) # 8192 = 512 * 16
self.new_final_2 = ln.Linear(256, 128, gain=1)
#self.new_final_3 = ln.Linear(256, 1000, gain=1) #
    # reverse w so its order matches the w in G; block_num controls progressive growing and is ignored by other networks
def forward(self, x, cond_vector, block_num=9):
        #x = self.FromRGB[9-block_num](x)  # one per block
x = self.FromRGB(x)
#w = torch.tensor(0)
for i in range(9-block_num,self.layer_count):
x,w1,w2 = self.decode_block[i](x, cond_vector, truncation=0.4)
#w_ = torch.cat((w2.view(x.shape[0],1,512),w1.view(x.shape[0],1,512)),dim=1) # [b,2,512]
            # if i == (9-block_num):  # last layer
# w = w_ # [b,n,512]
# else:
# w = torch.cat((w_,w),dim=1)
if self.biggan:
c_v = self.new_final_1(x.view(x.shape[0],-1)) #[n, 256], cond_vector
z = self.new_final_2(c_v) # [n, 128]
#w_ = self.new_final_3(x) # [n, 1000]
return c_v, z
#test
# E = BE(startf=64, maxf=512, layer_count=7, latent_size=512, channels=3)
# imgs1 = torch.randn(2,3,256,256)
# const2,w2 = E(imgs1)
# print(const2.shape)
# print(w2.shape)
# print(E)
|
the-stack_106_27916 | import math
from machine import PWM, Pin
from .pca9685 import PCA9685
def map_angle(x, in_min, in_max, out_min, out_max):
return int((x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min)
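# Illustrative example of the linear remap above: map_angle(90, 0, 180, 0, 1024) -> 512.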
class Servo:
"""
    An abstract base class for controlling hobby servos.
    Subclasses must implement the duty method.
Args:
freq (int): The frequency of the signal, in hertz.
min_pulse_us (int microseconds): The minimum signal length supported by the servo.
max_pulse_us (int microseconds): The maximum signal length supported by the servo.
actuation_range (int): The range between the minimum and maximum positions.
"""
def __init__(
self,
freq=50,
min_pulse_us: int = 500,
max_pulse_us: int = 2000,
actuation_range: int = 180,
):
self.freq = freq
self.set_pulse_width_range(min_pulse_us, max_pulse_us)
"""The physical range of motion of the servo in degrees."""
self.actuation_range = actuation_range
def _us2duty(self, value):
period = 1000000 / self.freq
print(f"period: {period}")
return int(1024 * value / period)
def set_pulse_width_range(self, min_pulse: int = 750, max_pulse: int = 2250):
"""Change min and max pulse widths."""
# self._min_duty = int((min_pulse * self.freq) / 1000000 * 0xFFFF)
self._min_duty = self._us2duty(min_pulse)
print(f"min duty: {self._min_duty}")
# max_duty = (max_pulse * self.freq) / 1000000 * 0xFFFF
max_duty = self._us2duty(max_pulse)
print(f"max duty: {max_duty}")
self._duty_range = int(max_duty - self._min_duty)
print(f"duty range: {self._duty_range}")
@property
def fraction(self):
"""Pulse width expressed as fraction between 0.0 (`min_pulse`) and 1.0 (`max_pulse`).
For conventional servos, corresponds to the servo position as a fraction
        of the actuation range. Is None when the servo is disabled (pulse width of 0 ms).
"""
if self.duty() == 0: # Special case for disabled servos
return None
return (self.duty() - self._min_duty) / self._duty_range
@fraction.setter
def fraction(self, value: float = None):
if value is None:
self.duty(0) # disable the motor
return
if not 0.0 <= value <= 1.0:
raise ValueError("Must be 0.0 to 1.0")
duty_cycle = self._min_duty + int(value * self._duty_range)
# print(f"duty: {duty_cycle}")
self.duty(duty_cycle)
@property
def angle(self):
"""The servo angle in degrees. Must be in the range ``0`` to ``actuation_range``.
Is None when servo is disabled."""
if self.fraction is None: # special case for disabled servos
return None
return self.actuation_range * self.fraction
@angle.setter
def angle(self, new_angle: int = None):
if new_angle is None: # disable the servo by sending 0 signal
self.fraction = None
return
if new_angle < 0 or new_angle > self.actuation_range:
raise ValueError("Angle out of range")
self.fraction = new_angle / self.actuation_range
def duty(self, duty: int = None):
raise Exception("duty function must be implemented in parent")
class DirectServo(Servo):
def __init__(
self,
pin: Pin,
freq=50,
min_pulse_us=400,
max_pulse_us=2400,
actuation_range=180,
):
self.pin = pin
self.pwm = PWM(pin, freq=freq, duty=0)
super().__init__(freq, min_pulse_us, max_pulse_us, actuation_range)
    def duty(self, duty: int = None):
        if duty is None:  # 0 is a valid duty cycle (used to disable the servo), so test for None explicitly
            return self.pwm.duty()
        return self.pwm.duty(duty)
class PCAServo(Servo):
def __init__(
self,
pca9685: PCA9685,
channel: int,
freq=50,
min_pulse_us=600,
max_pulse_us=2700,
actuation_range=180,
):
self.pca9685 = pca9685
self.pca9685.freq(freq)
self.channel = channel
super().__init__(freq, min_pulse_us, max_pulse_us, actuation_range)
def _us2duty(self, value):
period = 1000000 / self.freq
print(f"period: {period}")
# TODO: Work out why servos on the pca need 4095
return int(4095 * value / period)
    def duty(self, duty: int = None):
        if duty is None:  # 0 is a valid duty cycle (used to disable the servo), so test for None explicitly
            return self.pca9685.duty(self.channel)
        return self.pca9685.duty(self.channel, duty)
def release(self):
self.pca9685.duty(self.channel, 0)
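# Example usage sketch (the pin numbers, I2C wiring and PCA9685 constructor arguments below
# are assumptions, not part of this module):
#
#   from machine import Pin, SoftI2C
#   servo = DirectServo(Pin(13))
#   servo.angle = 90          # move to the middle of the actuation range
#   servo.fraction = None     # duty 0 disables the signal
#
#   pca = PCA9685(SoftI2C(scl=Pin(22), sda=Pin(21)))
#   pan = PCAServo(pca, channel=0)
#   pan.angle = 45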
|
the-stack_106_27918 | # Given a binary tree, find the second largest
# node in it
class Node:
def __init__(self, data):
self.data = data
self.left = None
self.right = None
def find_largest(root):
    # the largest value in a BST is the right-most node
    current = root
    while current.right is not None:
        current = current.right
    return current.data
def find_second_largest(root):
if root is None or (root.left is None and root.right is None):
raise ValueError("Tree must atleast have 2 nodes")
current = root
while current is not None:
if(current.left is not None and current.right is None):
return find_largest(current.left)
if(current.right is not None and current.right.left is None and current.right.right is None):
return current.data
current = current.right
node = Node(10)
node.left = Node(5)
node.left.left = Node(1)
node.right = Node(50)
node.right.left = Node(45)
node.right.right = Node(100)
result = find_second_largest(node)
print(result) # prints 50
|
the-stack_106_27922 | import numpy as np
from ann import NeuralNetwork
# input
x = np.array([
[30, 40, 50],
[40, 50, 20],
[50, 20, 15],
[20, 15, 60],
[15, 60, 70],
[60, 70, 50]
], dtype=np.float64)
# Expected output
y = np.array([20, 15, 60, 70, 50, 40], dtype=np.float64)
def main():
size_of_learn_sample = int(len(x)*0.9)
print(size_of_learn_sample)
NN = NeuralNetwork(x, y, 0.5)
# NN.print_matrices()
NN.train()
NN.print_matrices()
if __name__ == "__main__":
main()
|
the-stack_106_27924 | # Authors: Manoj Kumar
# Thomas Unterthiner
# License: BSD 3 clause
import scipy.sparse as sp
import numpy as np
from .fixes import sparse_min_max, bincount
from .sparsefuncs_fast import csr_mean_variance_axis0 as _csr_mean_var_axis0
from .sparsefuncs_fast import csc_mean_variance_axis0 as _csc_mean_var_axis0
def _raise_typeerror(X):
"""Raises a TypeError if X is not a CSR or CSC matrix"""
input_type = X.format if sp.issparse(X) else type(X)
err = "Expected a CSR or CSC sparse matrix, got %s." % input_type
raise TypeError(err)
def inplace_csr_column_scale(X, scale):
"""Inplace column scaling of a CSR matrix.
Scale each feature of the data matrix by multiplying with specific scale
provided by the caller assuming a (n_samples, n_features) shape.
Parameters
----------
X : CSR matrix with shape (n_samples, n_features)
Matrix to normalize using the variance of the features.
scale : float array with shape (n_features,)
Array of precomputed feature-wise values to use for scaling.
"""
assert scale.shape[0] == X.shape[1]
X.data *= scale.take(X.indices, mode='clip')
def inplace_csr_row_scale(X, scale):
""" Inplace row scaling of a CSR matrix.
Scale each sample of the data matrix by multiplying with specific scale
provided by the caller assuming a (n_samples, n_features) shape.
Parameters
----------
X : CSR sparse matrix, shape (n_samples, n_features)
Matrix to be scaled.
scale : float array with shape (n_samples,)
Array of precomputed sample-wise values to use for scaling.
"""
assert scale.shape[0] == X.shape[0]
X.data *= np.repeat(scale, np.diff(X.indptr))
def mean_variance_axis(X, axis):
"""Compute mean and variance along axis 0 on a CSR or CSC matrix
Parameters
----------
X: CSR or CSC sparse matrix, shape (n_samples, n_features)
Input data.
axis: int (either 0 or 1)
Axis along which the axis should be computed.
Returns
-------
means: float array with shape (n_features,)
Feature-wise means
variances: float array with shape (n_features,)
Feature-wise variances
"""
if axis not in (0, 1):
raise ValueError(
"Unknown axis value: %d. Use 0 for rows, or 1 for columns" % axis)
if isinstance(X, sp.csr_matrix):
if axis == 0:
return _csr_mean_var_axis0(X)
else:
return _csc_mean_var_axis0(X.T)
elif isinstance(X, sp.csc_matrix):
if axis == 0:
return _csc_mean_var_axis0(X)
else:
return _csr_mean_var_axis0(X.T)
else:
_raise_typeerror(X)
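# Illustrative example (not part of the library):
#
#   X = sp.csr_matrix(np.array([[0., 2.], [4., 6.]]))
#   means, variances = mean_variance_axis(X, axis=0)
#   # means -> array([2., 4.]), variances -> array([4., 4.])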
def inplace_column_scale(X, scale):
"""Inplace column scaling of a CSC/CSR matrix.
Scale each feature of the data matrix by multiplying with specific scale
provided by the caller assuming a (n_samples, n_features) shape.
Parameters
----------
X: CSC or CSR matrix with shape (n_samples, n_features)
Matrix to normalize using the variance of the features.
scale: float array with shape (n_features,)
Array of precomputed feature-wise values to use for scaling.
"""
if isinstance(X, sp.csc_matrix):
inplace_csr_row_scale(X.T, scale)
elif isinstance(X, sp.csr_matrix):
inplace_csr_column_scale(X, scale)
else:
_raise_typeerror(X)
def inplace_row_scale(X, scale):
""" Inplace row scaling of a CSR or CSC matrix.
Scale each row of the data matrix by multiplying with specific scale
provided by the caller assuming a (n_samples, n_features) shape.
Parameters
----------
X : CSR or CSC sparse matrix, shape (n_samples, n_features)
Matrix to be scaled.
    scale : float array with shape (n_samples,)
Array of precomputed sample-wise values to use for scaling.
"""
if isinstance(X, sp.csc_matrix):
inplace_csr_column_scale(X.T, scale)
elif isinstance(X, sp.csr_matrix):
inplace_csr_row_scale(X, scale)
else:
_raise_typeerror(X)
def inplace_swap_row_csc(X, m, n):
"""
Swaps two rows of a CSC matrix in-place.
Parameters
----------
X: scipy.sparse.csc_matrix, shape=(n_samples, n_features)
Matrix whose two rows are to be swapped.
m: int
Index of the row of X to be swapped.
n: int
Index of the row of X to be swapped.
"""
for t in [m, n]:
if isinstance(t, np.ndarray):
raise TypeError("m and n should be valid integers")
if m < 0:
m += X.shape[0]
if n < 0:
n += X.shape[0]
m_mask = X.indices == m
X.indices[X.indices == n] = m
X.indices[m_mask] = n
def inplace_swap_row_csr(X, m, n):
"""
Swaps two rows of a CSR matrix in-place.
Parameters
----------
X: scipy.sparse.csr_matrix, shape=(n_samples, n_features)
Matrix whose two rows are to be swapped.
m: int
Index of the row of X to be swapped.
n: int
Index of the row of X to be swapped.
"""
for t in [m, n]:
if isinstance(t, np.ndarray):
raise TypeError("m and n should be valid integers")
if m < 0:
m += X.shape[0]
if n < 0:
n += X.shape[0]
# The following swapping makes life easier since m is assumed to be the
# smaller integer below.
if m > n:
m, n = n, m
indptr = X.indptr
m_start = indptr[m]
m_stop = indptr[m + 1]
n_start = indptr[n]
n_stop = indptr[n + 1]
nz_m = m_stop - m_start
nz_n = n_stop - n_start
if nz_m != nz_n:
# Modify indptr first
X.indptr[m + 2:n] += nz_n - nz_m
X.indptr[m + 1] = m_start + nz_n
X.indptr[n] = n_stop - nz_m
X.indices = np.concatenate([X.indices[:m_start],
X.indices[n_start:n_stop],
X.indices[m_stop:n_start],
X.indices[m_start:m_stop],
X.indices[n_stop:]])
X.data = np.concatenate([X.data[:m_start],
X.data[n_start:n_stop],
X.data[m_stop:n_start],
X.data[m_start:m_stop],
X.data[n_stop:]])
def inplace_swap_row(X, m, n):
"""
Swaps two rows of a CSC/CSR matrix in-place.
Parameters
----------
X : CSR or CSC sparse matrix, shape=(n_samples, n_features)
Matrix whose two rows are to be swapped.
m: int
Index of the row of X to be swapped.
n: int
Index of the row of X to be swapped.
"""
if isinstance(X, sp.csc_matrix):
return inplace_swap_row_csc(X, m, n)
elif isinstance(X, sp.csr_matrix):
return inplace_swap_row_csr(X, m, n)
else:
_raise_typeerror(X)
def inplace_swap_column(X, m, n):
"""
Swaps two columns of a CSC/CSR matrix in-place.
Parameters
----------
X : CSR or CSC sparse matrix, shape=(n_samples, n_features)
Matrix whose two columns are to be swapped.
m: int
Index of the column of X to be swapped.
n : int
Index of the column of X to be swapped.
"""
if m < 0:
m += X.shape[1]
if n < 0:
n += X.shape[1]
if isinstance(X, sp.csc_matrix):
return inplace_swap_row_csr(X, m, n)
elif isinstance(X, sp.csr_matrix):
return inplace_swap_row_csc(X, m, n)
else:
_raise_typeerror(X)
def min_max_axis(X, axis):
"""Compute minimum and maximum along an axis on a CSR or CSC matrix
Parameters
----------
X : CSR or CSC sparse matrix, shape (n_samples, n_features)
Input data.
axis: int (either 0 or 1)
Axis along which the axis should be computed.
Returns
-------
mins: float array with shape (n_features,)
Feature-wise minima
maxs: float array with shape (n_features,)
Feature-wise maxima
"""
if isinstance(X, sp.csr_matrix) or isinstance(X, sp.csc_matrix):
return sparse_min_max(X, axis=axis)
else:
_raise_typeerror(X)
def count_nonzero(X, axis=None, sample_weight=None):
"""A variant of X.getnnz() with extension to weighting on axis 0
Useful in efficiently calculating multilabel metrics.
Parameters
----------
X : CSR sparse matrix, shape = (n_samples, n_labels)
Input data.
axis : None, 0 or 1
The axis on which the data is aggregated.
sample_weight : array, shape = (n_samples,), optional
Weight for each row of X.
"""
if axis == -1:
axis = 1
elif axis == -2:
axis = 0
elif X.format != 'csr':
raise TypeError('Expected CSR sparse format, got {0}'.format(X.format))
# We rely here on the fact that np.diff(Y.indptr) for a CSR
# will return the number of nonzero entries in each row.
# A bincount over Y.indices will return the number of nonzeros
# in each column. See ``csr_matrix.getnnz`` in scipy >= 0.14.
if axis is None:
if sample_weight is None:
return X.nnz
else:
return np.dot(np.diff(X.indptr), sample_weight)
elif axis == 1:
out = np.diff(X.indptr)
if sample_weight is None:
return out
return out * sample_weight
elif axis == 0:
if sample_weight is None:
return bincount(X.indices, minlength=X.shape[1])
else:
weights = np.repeat(sample_weight, np.diff(X.indptr))
return bincount(X.indices, minlength=X.shape[1],
weights=weights)
else:
raise ValueError('Unsupported axis: {0}'.format(axis))
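# Illustrative example (not part of the library): per-column nonzero counts, optionally
# weighted per row.
#
#   X = sp.csr_matrix(np.array([[1., 0.], [3., 4.]]))
#   count_nonzero(X, axis=0)                                     # -> array([2, 1])
#   count_nonzero(X, axis=0, sample_weight=np.array([0.5, 2.]))  # -> array([2.5, 2. ])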
def _get_median(data, n_zeros):
"""Compute the median of data with n_zeros additional zeros.
This function is used to support sparse matrices; it modifies data in-place
"""
n_elems = len(data) + n_zeros
if not n_elems:
return np.nan
n_negative = np.count_nonzero(data < 0)
middle, is_odd = divmod(n_elems, 2)
data.sort()
if is_odd:
return _get_elem_at_rank(middle, data, n_negative, n_zeros)
return (_get_elem_at_rank(middle - 1, data, n_negative, n_zeros) +
_get_elem_at_rank(middle, data, n_negative, n_zeros)) / 2.
def _get_elem_at_rank(rank, data, n_negative, n_zeros):
"""Find the value in data augmented with n_zeros for the given rank"""
if rank < n_negative:
return data[rank]
if rank - n_negative < n_zeros:
return 0
return data[rank - n_zeros]
def csc_median_axis_0(X):
"""Find the median across axis 0 of a CSC matrix.
It is equivalent to doing np.median(X, axis=0).
Parameters
----------
X : CSC sparse matrix, shape (n_samples, n_features)
Input data.
Returns
-------
median : ndarray, shape (n_features,)
Median.
"""
if not isinstance(X, sp.csc_matrix):
raise TypeError("Expected matrix of CSC format, got %s" % X.format)
indptr = X.indptr
n_samples, n_features = X.shape
median = np.zeros(n_features)
for f_ind, (start, end) in enumerate(zip(indptr[:-1], indptr[1:])):
# Prevent modifying X in place
data = np.copy(X.data[start: end])
nz = n_samples - data.size
median[f_ind] = _get_median(data, nz)
return median
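# Illustrative example (not part of the library): implicit zeros count towards the median.
#
#   X = sp.csc_matrix(np.array([[0., 1.], [0., 3.], [0., 5.]]))
#   csc_median_axis_0(X)   # -> array([0., 3.])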
|
the-stack_106_27925 | # -*- coding: utf-8 -*-
'''
Created on 2018. 9. 24.
@author: jason96
Apply Gini Impurity
'''
import pandas as pd
from graphviz import Digraph
import os
import operator
import numpy as np
raw_data = {
'name': ["Kang", "Kim", "Choi", "Park", "Yoon"],
'짱절미': [True, False, False, False, False],
'셀스타그램': [False, False, True, False, False],
'like': [True, False, True, True, False]
}
pd_data = pd.DataFrame(raw_data)
pd_data = pd_data.set_index("name")
label_name = "like"
feature_names = pd_data.columns.difference([label_name])
def display_node(dot, key, node):
if node["leaf"] is True:
proba = node['proba']
proba = round(proba, 4)
proba = str(proba)
dot.node(key, proba)
else:
desc = node['desc']
dot.node(key, desc)
if "left" in node:
left_key = key + "L"
display_node(dot, left_key, node['left'])
dot.edge(key, left_key)
if "right" in node:
right_key = key + "R"
display_node(dot, right_key, node['right'])
dot.edge(key, right_key)
dot.render('graphviz-files/dstree.gv', view=True)
def display_tree(tree):
dot = Digraph(comment='Decision Tree')
display_node(dot, "Root", tree)
def predict(data, node):
if node['leaf']:
proba = node["proba"]
result = dict(zip(data.index, len(data) * [proba]))
else:
rule = node['rule']
left_data = data[rule(data)]
left_result = predict(left_data, node['left'])
right_data = data[~rule(data)]
right_result = predict(right_data, node['right'])
return {**left_result, **right_result}
return result
def binary_rule(data, feature_name, value):
return data[feature_name] == value
def make_rule(method, feature_name, value):
def call_condition(data):
return method(data, feature_name, value)
return call_condition
def make_rules(feature_names):
rules = {}
feature_names = list(feature_names)
for feature_name in feature_names:
rules[feature_name] = make_rule(binary_rule, feature_name, True)
return rules
def get_best_rule(data, rules):
gini_indexes = {}
for feature_name, rule in rules.items():
true_data = data[rule(data)]
true_proba = true_data[label_name].mean()
false_proba = 1 - true_proba
gini_index = true_proba*(1-false_proba) - false_proba*(1-true_proba)
gini_indexes[feature_name] = gini_index
sorted_x = sorted(gini_indexes.items(), key=operator.itemgetter(1))
for k, v in sorted_x: # @UnusedVariable
return k, rules[k]
def make_node(data, rules):
if len(rules) > 0:
feature_name, rule = get_best_rule(data, rules)
left_data = data[rule(data)]
right_data = data[~rule(data)]
if len(left_data) > 0 and len(right_data) > 0:
del rules[feature_name]
node = {'leaf': False, 'desc': feature_name, 'rule': rule}
node['left'] = make_tree(left_data, rules.copy())
node['right'] = make_tree(right_data, rules.copy())
return node
proba = data[label_name].mean()
node = {'leaf': True, 'proba': proba}
return node
def make_tree(data, feature_names):
rules = make_rules(feature_names)
return make_node(data, rules)
def display_predict(predict):
for k, v in predict.items():
print(k, v)
def bootstrap(data, feature_names, label_name):
feature_data = data[feature_names]
num_rows, num_cols = feature_data.shape
index = np.random.choice(feature_data.index, size=num_rows, replace=True)
if max_feature == None: # @IgnorePep8
num_cols = np.sqrt(num_cols)
else:
num_cols = num_cols * max_feature
num_cols = int(num_cols)
columns = np.random.choice(feature_data.columns, size=num_cols,
replace=False)
# If index and columns are specified,
# a new table is created based on the values.
result = feature_data.loc[index, columns]
result[label_name] = data[label_name]
return result
def make_forest(data):
forest = []
for _ in range(n_estimators):
bootstrapped_data = bootstrap(data, feature_names, label_name)
bs_feature_names = bootstrapped_data.columns.difference([label_name])
tree = make_tree(bootstrapped_data, bs_feature_names)
forest.append(tree)
return forest
def predict_forest(data, forest):
prediction_total = []
for tree in forest:
prediction = predict(data, tree)
prediction = pd.Series(prediction)
prediction_total.append(prediction)
prediction_total = pd.concat(prediction_total, axis=1, sort=False)
prediction_total = prediction_total.mean(axis=1)
return prediction_total
if __name__ == '__main__':
max_feature = None
n_estimators = 10
os.environ["PATH"] += os.pathsep + '/usr/local/bin'
forest = make_forest(pd_data)
display_predict(predict_forest(pd_data, forest))
# tree = make_tree(pd_data, rules)
# display_tree(tree)
# display_predict(predict(pd_data, tree))
|
the-stack_106_27926 | from django.db import models
from core.utils import validate_slug
from labour.models import ObsoleteSignupExtraBaseV1
SHIRT_SIZES = [
('NO_SHIRT', 'Ei paitaa'),
('XS', 'XS Unisex'),
('S', 'S Unisex'),
('M', 'M Unisex'),
('L', 'L Unisex'),
('XL', 'XL Unisex'),
('XXL', 'XXL Unisex'),
('3XL', '3XL Unisex'),
('4XL', '4XL Unisex'),
('5XL', '5XL Unisex'),
('LF_XS', 'XS Ladyfit'),
('LF_S', 'S Ladyfit'),
('LF_M', 'M Ladyfit'),
('LF_L', 'L Ladyfit'),
('LF_XL', 'XL Ladyfit'),
]
SHIFT_TYPE_CHOICES = [
('yksipitka', 'Yksi pitkä vuoro'),
('montalyhytta', 'Monta lyhyempää vuoroa'),
('kaikkikay', 'Kumpi tahansa käy'),
]
TOTAL_WORK_CHOICES = [
('8h', 'Minimi - 8 tuntia (1 lämmin ateria)'),
('12h', '12 tuntia (2 lämmintä ateriaa)'),
('yli12h', 'Työn Sankari! Yli 12 tuntia! (2 lämmintä ateriaa)'),
]
class SimpleChoice(models.Model):
name = models.CharField(max_length=63)
def __str__(self):
return self.name
class Meta:
abstract = True
class SpecialDiet(SimpleChoice):
pass
class Night(SimpleChoice):
pass
class SignupExtra(ObsoleteSignupExtraBaseV1):
shift_type = models.CharField(max_length=15,
verbose_name='Toivottu työvuoron pituus',
help_text='Haluatko tehdä yhden pitkän työvuoron vaiko monta lyhyempää vuoroa?',
choices=SHIFT_TYPE_CHOICES,
)
total_work = models.CharField(max_length=15,
verbose_name='Toivottu kokonaistyömäärä',
help_text='Kuinka paljon haluat tehdä töitä yhteensä tapahtuman aikana? Useimmissa tehtävistä minimi on kahdeksan tuntia, mutta joissain tehtävissä se voi olla myös vähemmän (esim. majoitusvalvonta 6 h).',
choices=TOTAL_WORK_CHOICES,
)
overseer = models.BooleanField(
default=False,
verbose_name='Olen kiinnostunut vuorovastaavan tehtävistä',
help_text='Vuorovastaavat ovat kokeneempia conityöläisiä, jotka toimivat oman tehtäväalueensa tiiminvetäjänä.',
)
want_certificate = models.BooleanField(
default=False,
verbose_name='Haluan todistuksen työskentelystäni Traconissa',
)
certificate_delivery_address = models.TextField(
blank=True,
verbose_name='Työtodistuksen toimitusosoite',
help_text='Jos haluat työtodistuksen, täytä tähän kenttään postiosoite (katuosoite, '
'postinumero ja postitoimipaikka) johon haluat todistuksen toimitettavan.',
)
shirt_size = models.CharField(
max_length=8,
choices=SHIRT_SIZES,
verbose_name='Paidan koko',
help_text='Ajoissa ilmoittautuneet vänkärit saavat maksuttoman työvoimapaidan. '
'Kokotaulukot: <a href="http://www.bc-collection.eu/uploads/sizes/TU004.jpg" '
'target="_blank">unisex-paita</a>, <a href="http://www.bc-collection.eu/uploads/sizes/TW040.jpg" '
'target="_blank">ladyfit-paita</a>',
)
special_diet = models.ManyToManyField(
SpecialDiet,
blank=True,
verbose_name='Erikoisruokavalio'
)
special_diet_other = models.TextField(
blank=True,
verbose_name='Muu erikoisruokavalio',
help_text='Jos noudatat erikoisruokavaliota, jota ei ole yllä olevassa listassa, '
'ilmoita se tässä. Tapahtuman järjestäjä pyrkii ottamaan erikoisruokavaliot '
'huomioon, mutta kaikkia erikoisruokavalioita ei välttämättä pystytä järjestämään.'
)
lodging_needs = models.ManyToManyField(Night,
blank=True,
verbose_name='Tarvitsen lattiamajoitusta',
help_text='Ruksaa ne yöt, joille tarvitset lattiamajoitusta. Lattiamajoitus sijaitsee '
'kävelymatkan päässä tapahtumapaikalta.',
)
prior_experience = models.TextField(
blank=True,
verbose_name='Työkokemus',
help_text='Kerro tässä kentässä, jos sinulla on aiempaa kokemusta vastaavista '
'tehtävistä tai muuta sellaista työkokemusta, josta arvioit olevan hyötyä '
'hakemassasi tehtävässä.'
)
free_text = models.TextField(
blank=True,
verbose_name='Vapaa alue',
help_text='Jos haluat sanoa hakemuksesi käsittelijöille jotain sellaista, jolle ei ole '
'omaa kenttää yllä, käytä tätä kenttää.'
)
email_alias = models.CharField(
blank=True,
default='',
max_length=32,
verbose_name='Sähköpostialias',
help_text='Coniitit saavat käyttöönsä [email protected] sähköpostialiaksen, joka '
'ohjataan coniitin omaan sähköpostilaatikkoon. Tässä voit toivoa haluamaasi sähköpostialiaksen alkuosaa eli sitä, joka tulee ennen @tracon.fi:tä. '
'Sallittuja merkkejä ovat pienet kirjaimet a-z, numerot 0-9 sekä väliviiva.',
validators=[validate_slug]
)
@classmethod
def get_form_class(cls):
from .forms import SignupExtraForm
return SignupExtraForm
@property
def formatted_lodging_needs(self):
return "\n".join("{night}: {need}".format(
night=night.name,
need='Tarvitsee lattiamajoitusta' if self.lodging_needs.filter(pk=night.pk).exists() else 'Ei tarvetta lattiamajoitukselle',
) for night in Night.objects.all())
|
the-stack_106_27929 | # coding: utf-8
# /*##########################################################################
# Copyright (C) 2016 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ############################################################################*/
"""
Python h5 module and octave h5 module have different ways to deal with
h5 files.
This module is used to make the link between octave and python using such files.
(python is using a dictionary and octave a struct )
This module provides tool to set HDF5 file for fasttomo input.
Here is an example of a simple read and write :
.. code-block:: python
:emphasize-lines: 3,5
# writing a structure
myStruct = {'MKEEP_MASK': 0.0, 'UNSHARP_SIGMA': 0.80000000000000004 }
writer = Octaveh5().open("my_h5file", 'a')
writer.write('mt_struct_name', myStruct)
# reading a h5 file
reader = Octaveh5().open("my_h5file")
strucDict = reader.get('mt_struct_name')
.. note:: These functions depend on the `h5py <http://www.h5py.org/>`_
library, which is not a mandatory dependency for `silx`.
"""
import logging
logger = logging.getLogger(__name__)
import numpy as np
import h5py
__authors__ = ["C. Nemoz", "H. Payno"]
__license__ = "MIT"
__date__ = "05/10/2016"
class Octaveh5(object):
"""This class allows communication between octave and python using hdf5 format.
"""
def __init__(self, octave_targetted_version=3.8):
"""Constructor
:param octave_targetted_version: the version of Octave for which we want to write this hdf5 file.
This is needed because for old Octave version we need to had a hack(adding one extra character)
"""
self.file = None
self.octave_targetted_version = octave_targetted_version
def open(self, h5file, mode='r'):
"""Open the h5 file which has been write by octave
:param h5file: The path of the file to read
:param mode: the opening mode of the file :'r', 'w'...
"""
try:
self.file = h5py.File(h5file, mode)
return self
except IOError as e:
if mode == 'a':
reason = "\n %s: Can t find or create " % h5file
else:
reason = "\n %s: File not found" % h5file
self.file = None
logger.info(reason)
raise e
def get(self, struct_name):
"""Read octave equivalent structures in hdf5 file
:param struct_name: the identification of the top level identity we want to get from an hdf5 structure
        :return: the dictionary of the requested struct. None if it can't be found
"""
if self.file is None:
info = "No file currently open"
logger.info(info)
return None
data_dict = {}
grr = (list(self.file[struct_name].items())[1])[1]
try:
gr_level2 = grr.items()
except AttributeError:
reason = "no gr_level2"
logger.info(reason)
return None
for key, val in iter(dict(gr_level2).items()):
data_dict[str(key)] = list(val.items())[1][1][()]
if list(val.items())[0][1][()] != np.string_('sq_string'):
data_dict[str(key)] = float(data_dict[str(key)])
else:
if list(val.items())[0][1][()] == np.string_('sq_string'):
# in the case the string has been stored as an nd-array of char
if type(data_dict[str(key)]) is np.ndarray:
data_dict[str(key)] = "".join(chr(item) for item in data_dict[str(key)])
else:
data_dict[str(key)] = data_dict[str(key)].decode('UTF-8')
# In the case Octave have added an extra character at the end
if self.octave_targetted_version < 3.8:
data_dict[str(key)] = data_dict[str(key)][:-1]
return data_dict
def write(self, struct_name, data_dict):
"""write data_dict under the group struct_name in the open hdf5 file
        :param struct_name: the identification of the structure to write in the hdf5
        :param data_dict: The python dictionary containing the information to write
"""
if self.file is None:
info = "No file currently open"
logger.info(info)
return
group_l1 = self.file.create_group(struct_name)
group_l1.attrs['OCTAVE_GLOBAL'] = np.uint8(1)
group_l1.attrs['OCTAVE_NEW_FORMAT'] = np.uint8(1)
group_l1.create_dataset("type", data=np.string_('scalar struct'), dtype="|S14")
group_l2 = group_l1.create_group('value')
for ftparams in data_dict:
group_l3 = group_l2.create_group(ftparams)
group_l3.attrs['OCTAVE_NEW_FORMAT'] = np.uint8(1)
if type(data_dict[ftparams]) == str:
group_l3.create_dataset("type", (), data=np.string_('sq_string'), dtype="|S10")
if self.octave_targetted_version < 3.8:
group_l3.create_dataset("value", data=np.string_(data_dict[ftparams] + '0'))
else:
group_l3.create_dataset("value", data=np.string_(data_dict[ftparams]))
else:
group_l3.create_dataset("type", (), data=np.string_('scalar'), dtype="|S7")
group_l3.create_dataset("value", data=data_dict[ftparams])
def close(self):
"""Close the file after calling read function
"""
if self.file:
self.file.close()
def __del__(self):
"""Destructor
"""
self.close()
|
the-stack_106_27932 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: char-rnn.py
# Author: Yuxin Wu
import argparse
import numpy as np
import operator
import os
import sys
from collections import Counter
import tensorflow as tf
from tensorpack import *
from tensorpack.tfutils import optimizer, summary
from tensorpack.tfutils.gradproc import GlobalNormClip
rnn = tf.contrib.rnn
class _NS: pass # noqa
param = _NS()
# some model hyperparams to set
param.batch_size = 128
param.rnn_size = 256
param.num_rnn_layer = 2
param.seq_len = 50
param.grad_clip = 5.
param.vocab_size = None
param.softmax_temprature = 1
param.corpus = None
class CharRNNData(RNGDataFlow):
def __init__(self, input_file, size):
self.seq_length = param.seq_len
self._size = size
logger.info("Loading corpus...")
# preprocess data
with open(input_file, 'rb') as f:
data = f.read()
data = [chr(c) for c in data if c < 128]
counter = Counter(data)
char_cnt = sorted(counter.items(), key=operator.itemgetter(1), reverse=True)
self.chars = [x[0] for x in char_cnt]
print(sorted(self.chars))
self.vocab_size = len(self.chars)
param.vocab_size = self.vocab_size
self.char2idx = {c: i for i, c in enumerate(self.chars)}
self.whole_seq = np.array([self.char2idx[c] for c in data], dtype='int32')
logger.info("Corpus loaded. Vocab size: {}".format(self.vocab_size))
def __len__(self):
return self._size
def __iter__(self):
random_starts = self.rng.randint(
0, self.whole_seq.shape[0] - self.seq_length - 1, (self._size,))
for st in random_starts:
seq = self.whole_seq[st:st + self.seq_length + 1]
yield [seq[:-1], seq[1:]]
class Model(ModelDesc):
def inputs(self):
return [tf.TensorSpec((None, param.seq_len), tf.int32, 'input'),
tf.TensorSpec((None, param.seq_len), tf.int32, 'nextinput')]
def build_graph(self, input, nextinput):
cell = rnn.MultiRNNCell([rnn.LSTMBlockCell(num_units=param.rnn_size)
for _ in range(param.num_rnn_layer)])
def get_v(n):
ret = tf.get_variable(n + '_unused', [param.batch_size, param.rnn_size],
trainable=False,
initializer=tf.constant_initializer())
ret = tf.placeholder_with_default(ret, shape=[None, param.rnn_size], name=n)
return ret
initial = (rnn.LSTMStateTuple(get_v('c0'), get_v('h0')),
rnn.LSTMStateTuple(get_v('c1'), get_v('h1')))
embeddingW = tf.get_variable('embedding', [param.vocab_size, param.rnn_size])
input_feature = tf.nn.embedding_lookup(embeddingW, input) # B x seqlen x rnnsize
input_list = tf.unstack(input_feature, axis=1) # seqlen x (Bxrnnsize)
outputs, last_state = rnn.static_rnn(cell, input_list, initial, scope='rnnlm')
last_state = tf.identity(last_state, 'last_state')
# seqlen x (Bxrnnsize)
output = tf.reshape(tf.concat(outputs, 1), [-1, param.rnn_size]) # (Bxseqlen) x rnnsize
logits = FullyConnected('fc', output, param.vocab_size, activation=tf.identity)
tf.nn.softmax(logits / param.softmax_temprature, name='prob')
xent_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=tf.reshape(nextinput, [-1]))
cost = tf.reduce_mean(xent_loss, name='cost')
summary.add_param_summary(('.*/W', ['histogram'])) # monitor histogram of all W
summary.add_moving_summary(cost)
return cost
def optimizer(self):
lr = tf.get_variable('learning_rate', initializer=2e-3, trainable=False)
opt = tf.train.AdamOptimizer(lr)
return optimizer.apply_grad_processors(opt, [GlobalNormClip(5)])
def get_config():
logger.auto_set_dir()
ds = CharRNNData(param.corpus, 100000)
ds = BatchData(ds, param.batch_size)
return TrainConfig(
data=QueueInput(ds),
callbacks=[
ModelSaver(),
ScheduledHyperParamSetter('learning_rate', [(25, 2e-4)])
],
model=Model(),
max_epoch=50,
)
def sample(path, start, length):
"""
:param path: path to the model
:param start: a `str`. the starting characters
:param length: a `int`. the length of text to generate
"""
# initialize vocabulary and sequence length
param.seq_len = 1
ds = CharRNNData(param.corpus, 100000)
pred = OfflinePredictor(PredictConfig(
model=Model(),
session_init=SmartInit(path),
input_names=['input', 'c0', 'h0', 'c1', 'h1'],
output_names=['prob', 'last_state']))
# feed the starting sentence
initial = np.zeros((1, param.rnn_size))
for c in start[:-1]:
x = np.array([[ds.char2idx[c]]], dtype='int32')
_, state = pred(x, initial, initial, initial, initial)
def pick(prob):
t = np.cumsum(prob)
s = np.sum(prob)
return(int(np.searchsorted(t, np.random.rand(1) * s)))
# generate more
ret = start
c = start[-1]
for _ in range(length):
x = np.array([[ds.char2idx[c]]], dtype='int32')
prob, state = pred(x, state[0, 0], state[0, 1], state[1, 0], state[1, 1])
c = ds.chars[pick(prob[0])]
ret += c
print(ret)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
parser.add_argument('--load', help='load model')
subparsers = parser.add_subparsers(title='command', dest='command')
parser_sample = subparsers.add_parser('sample', help='sample a trained model')
parser_sample.add_argument('-n', '--num', type=int,
default=300, help='length of text to generate')
parser_sample.add_argument('-s', '--start',
default='The ', help='initial text sequence')
parser_sample.add_argument('-t', '--temperature', type=float,
default=1, help='softmax temperature')
parser_train = subparsers.add_parser('train', help='train')
parser_train.add_argument('--corpus', help='corpus file', default='input.txt')
args = parser.parse_args()
if args.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
if args.command == 'sample':
param.softmax_temprature = args.temperature
assert args.load is not None, "Load your model by argument --load"
sample(args.load, args.start, args.num)
sys.exit()
else:
param.corpus = args.corpus
config = get_config()
config.session_init = SmartInit(args.load)
launch_train_with_config(config, SimpleTrainer())
|
the-stack_106_27935 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# Author: i2cy([email protected])
# Filename: remote_controller
# Created on: 2020/9/17
"""
WARNING: INTERNAL NETWORK USE ONLY
WARNING: INTERNAL NETWORK USE ONLY
WARNING: INTERNAL NETWORK USE ONLY
"""
import socket
import time
import os
import threading
LISTENING_PORT = 10430
KEY = "__BasiCABCKey."
LIVE = True
class timeKey: # 64-Bits Live key generator/matcher
def __init__(self,key):
if type(key) != type(""):
raise Exception("key must be a string")
self.key = key
def keygen(self,mt=0): # 64-Bits Live key generator
dt = int(str(int(time.time()))[:-2]) + mt
sub_key_unit = str(int(str(4*dt**8 + 8*dt**4 + 2*dt**2 + 4*dt + 1024)[::-1]) + 3*dt**4 + 2*dt**3 + 3*dt**2 + 2*dt)
final_key = b""
n = 0
n2 = 0
for i in range(64):
if n == len(sub_key_unit):
n = 0
if n2 == len(self.key):
n2 = 0
final_key_unit = ord(self.key[n2]) + ord(sub_key_unit[n])
if final_key_unit >= 255:
final_key_unit -= 256
final_key += bytes((final_key_unit,))
n += 1
n2 += 1
return final_key
def keymatch(self,key): # Live key matcher
lock_1 = self.keygen(-1)
lock_2 = self.keygen(0)
lock_3 = self.keygen(1)
lock = [lock_1,lock_2,lock_3]
if key in lock:
return True
else:
return False
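# Illustrative note on the rolling-key handshake above: keygen drops the last two digits of
# the Unix timestamp, so keys change roughly every 100 seconds, and keymatch accepts the
# previous, current and next window, tolerating about one window of clock skew between hosts.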
def executer(cmd):
try:
pipe = os.popen(cmd)
time.sleep(0.5)
res = pipe.read()
except Exception as err:
res = str(err)
print("execution result:", res)
def handler(con):
try:
con.settimeout(3)
tk = timeKey(KEY)
match = tk.keymatch(con.recv(1024))
if not match:
return
else:
con.sendall(b"OK")
cmd = con.recv(2048).decode()
thr = threading.Thread(target=executer, args=(cmd,))
thr.start()
con.sendall(b"OK")
con.close()
except Exception as err:
try:
con.sendall(str(err).encode())
con.close()
except:
pass
print("error while communicating with client,", err)
def listening_loop():
global LIVE
try:
srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
srv.bind(("0.0.0.0", LISTENING_PORT))
print("server bind at 0.0.0.0:{}".format(LISTENING_PORT))
srv.settimeout(5)
srv.listen(10)
except Exception as err:
print("failed to initialize server, {}, exiting".format(err))
LIVE = False
return
while LIVE:
try:
con, addr = srv.accept()
print("connection from {} in coming".format(str(addr)))
handler_thread = threading.Thread(target=handler, args=(con,))
handler_thread.start()
except:
continue
srv.close()
def main():
global LIVE
print("initializing...")
lis_thread = threading.Thread(target=listening_loop)
lis_thread.start()
print('(use Ctrl+C to exit)\n')
while LIVE:
try:
time.sleep(2)
except KeyboardInterrupt:
LIVE = False
exit(0)
if __name__ == "__main__":
main() |
the-stack_106_27936 | import copy
import itertools
import warnings
import torch
import torch.nn as nn
import torch.nn.quantized as nnq
from torch.nn.intrinsic import _FusedModule
from .quantization_mappings import (
get_default_dynamic_quant_module_mappings,
get_default_static_quant_module_mappings,
get_default_qat_module_mappings,
get_default_qconfig_propagation_list,
no_observer_set,
_has_special_act_post_process,
_get_special_act_post_process,
)
from .stubs import DeQuantStub, QuantWrapper
from .qconfig import default_dynamic_qconfig, float16_dynamic_qconfig, float_qparams_weight_only_qconfig
def is_activation_post_process(module):
return (isinstance(module, torch.quantization.ObserverBase) or
isinstance(module, torch.quantization.FakeQuantizeBase) or
isinstance(module, torch.quantization.Logger)) # type: ignore
def _propagate_qconfig_helper(module, qconfig_dict, allow_list=None,
qconfig_parent=None, prefix=''):
r"""This is a helper function for `propagate_qconfig_`
Args:
module: input module
qconfig_dict: dictionary that maps from name of submodule to quantization
configuration
allow_list: list of quantizable modules
qconfig_parent: quantization config of parent module, we will fallback to
this config when there is no specified config for current
module
prefix: corresponding prefix of the current module, used as key in
qconfig_dict
Return:
None, module is modified inplace with qconfig attached
"""
# TODO: Add test
if allow_list is None:
allow_list = get_default_qconfig_propagation_list()
module_qconfig = qconfig_dict.get(type(module), qconfig_parent)
module_qconfig = qconfig_dict.get(prefix, module_qconfig)
module_qconfig = getattr(module, 'qconfig', module_qconfig)
torch.quantization.qconfig.assert_valid_qconfig(module_qconfig, module)
module.qconfig = module_qconfig
for name, child in module.named_children():
module_prefix = prefix + '.' + name if prefix else name
_propagate_qconfig_helper(child, qconfig_dict, allow_list,
module_qconfig, module_prefix)
# TODO(jerryzh): expose allow_list
def propagate_qconfig_(module, qconfig_dict=None, allow_list=None):
r"""Propagate qconfig through the module hierarchy and assign `qconfig`
attribute on each leaf module
Args:
module: input module
qconfig_dict: dictionary that maps from name or type of submodule to
quantization configuration, qconfig applies to all submodules of a
given module unless qconfig for the submodules are specified (when
the submodule already has qconfig attribute)
Return:
None, module is modified inplace with qconfig attached
"""
if qconfig_dict is None:
qconfig_dict = {}
_propagate_qconfig_helper(module, qconfig_dict, allow_list)
def _observer_forward_hook(self, input, output):
r"""Forward hook that calls observer on the output
"""
return self.activation_post_process(output)
def register_activation_post_process_hook(module):
assert hasattr(module, 'activation_post_process'), \
        'Expect activation_post_process attribute already attached to the module'
return module.register_forward_hook(_observer_forward_hook)
def add_observer_(module, qconfig_propagation_list=None, non_leaf_module_list=None, device=None, custom_module_class_mapping=None):
r"""Add observer for the leaf child of the module.
This function insert observer module to all leaf child module that
has a valid qconfig attribute.
Args:
module: input module with qconfig attributes for all the leaf modules that we want to quantize
device: parent device, if any
non_leaf_module_list: list of non-leaf modules we want to add observer
Return:
None, module is modified inplace with added observer modules and forward_hooks
"""
if qconfig_propagation_list is None:
qconfig_propagation_list = get_default_qconfig_propagation_list()
if custom_module_class_mapping is None:
custom_module_class_mapping = {}
# respect device affinity when adding observers
if device is None:
devices = get_unique_devices_(module)
assert len(devices) <= 1, (
"add_observer_ only works with cpu or single-device CUDA modules, "
"but got devices {}".format(devices)
)
device = next(iter(devices)) if len(devices) > 0 else None
def get_activation_post_process(qconfig, device, special_act_post_process=None):
activation = qconfig.activation() if special_act_post_process is None else special_act_post_process()
if device is not None:
activation.to(device)
return activation
def needs_observation(m):
return hasattr(m, 'qconfig') and m.qconfig is not None
def insert_activation_post_process(m, special_act_post_process=None):
""" Adds an activation post process module and register
a post hook that calls the module
"""
# We don't insert observer/fake_quantize for DeQuantStub
if needs_observation(m) and not isinstance(m, DeQuantStub):
# observer and hook will be gone after we swap the module
m.add_module('activation_post_process', get_activation_post_process(m.qconfig, device, special_act_post_process))
# Register observer as the first entry in the hook list
# All post forward hooks are preserved and will be executed after the observer before convert
handle = register_activation_post_process_hook(m)
m._forward_hooks.move_to_end(handle.id, last=False)
for name, child in module.named_children():
if type(child) in [nnq.FloatFunctional, nnq.QFunctional]:
if needs_observation(child):
child.activation_post_process = get_activation_post_process(child.qconfig, device)
elif isinstance(child, _FusedModule):
            # activation_post_process is now added directly to nn.Sequential/_FusedModule
if needs_observation(child):
insert_activation_post_process(child)
elif _has_special_act_post_process(child):
special_act_post_process = _get_special_act_post_process(child)
insert_activation_post_process(child, special_act_post_process)
elif non_leaf_module_list is not None and type(child) in non_leaf_module_list:
if needs_observation(child):
insert_activation_post_process(child)
elif needs_observation(child) and type(child) in custom_module_class_mapping:
observed_child = custom_module_class_mapping[type(child)].from_float(child)
setattr(module, name, observed_child)
# TODO: These are the modules that cannot be observed
# Once there are more, we should move them to a separate list
if custom_module_class_mapping[type(child)] not in no_observer_set():
insert_activation_post_process(observed_child)
else:
add_observer_(child, qconfig_propagation_list, non_leaf_module_list, device, custom_module_class_mapping)
# Insert observers only for leaf nodes, note that this observer is for
# the output of the module, for input QuantStub will observe them
if len(module._modules) == 0 and not isinstance(module, torch.nn.Sequential) \
and type(module) in qconfig_propagation_list:
insert_activation_post_process(module)
def get_unique_devices_(module):
return {p.device for p in module.parameters()} | \
{p.device for p in module.buffers()}
def add_quant_dequant(module):
r"""Wrap the leaf child module in QuantWrapper if it has a valid qconfig
Note that this function will modify the children of module inplace and it
can return a new module which wraps the input module as well.
Args:
module: input module with qconfig attributes for all the leaf modules
that we want to quantize
Return:
Either the inplace modified module with submodules wrapped in
`QuantWrapper` based on qconfig or a new `QuantWrapper` module which
wraps the input module, the latter case only happens when the input
module is a leaf module and we want to quantize it.
"""
if len(module._modules) == 0 and hasattr(module, 'qconfig') and module.qconfig:
return QuantWrapper(module)
for name, child in module.named_children():
module._modules[name] = add_quant_dequant(child)
return module
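# Illustrative sketch (placeholder layer): a leaf module with a qconfig is returned
# wrapped in QuantWrapper, which inserts the QuantStub/DeQuantStub pair around it.
#
#   layer = nn.Linear(4, 4)
#   layer.qconfig = torch.quantization.get_default_qconfig('fbgemm')
#   wrapped = add_quant_dequant(layer)   # QuantWrapper(Linear(...))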
def prepare(model, inplace=False, allow_list=None,
observer_non_leaf_module_list=None,
prepare_custom_config_dict=None):
r"""Prepares a copy of the model for quantization calibration or quantization-aware training.
Quantization configuration should be assigned preemptively
to individual submodules in `.qconfig` attribute.
The model will be attached with observer or fake quant modules, and qconfig
will be propagated.
Args:
`model`: input model to be modified in-place
`inplace`: carry out model transformations in-place, the original module is mutated
`allow_list`: list of quantizable modules
`observer_non_leaf_module_list`: list of non-leaf modules we want to add observer
`prepare_custom_config_dict`: customization configuration dictionary for prepare function
.. code-block:: python
# Example of prepare_custom_config_dict:
prepare_custom_config_dict = {
# user will manually define the corresponding observed
# module class which has a from_float class method that converts
# float custom module to observed custom module
"float_to_observed_custom_module_class": {
CustomModule: ObservedCustomModule
}
}
"""
torch._C._log_api_usage_once("quantization_api.quantize.prepare")
if prepare_custom_config_dict is None:
prepare_custom_config_dict = {}
custom_module_class_mapping = prepare_custom_config_dict.get("float_to_observed_custom_module_class", {})
if not inplace:
model = copy.deepcopy(model)
# TODO: remove allow_list
qconfig_propagation_list = allow_list
if qconfig_propagation_list is None:
qconfig_propagation_list = get_default_qconfig_propagation_list()
propagate_qconfig_(model, qconfig_dict=None)
# sanity check common API misusage
if not any(hasattr(m, 'qconfig') and m.qconfig for m in model.modules()):
warnings.warn("None of the submodule got qconfig applied. Make sure you "
"passed correct configuration through `qconfig_dict` or "
"by assigning the `.qconfig` attribute directly on submodules")
add_observer_(
model, qconfig_propagation_list, observer_non_leaf_module_list,
custom_module_class_mapping=custom_module_class_mapping)
return model
def _remove_activation_post_process(module):
# TODO: maybe we should change activation_post_process to _activation_post_process
# to prevent it from being used by user
if hasattr(module, 'activation_post_process') and \
is_activation_post_process(module.activation_post_process):
delattr(module, 'activation_post_process')
    # remove activation_post_process hook
handle_ids_to_remove = set()
for handle_id, hook_fn in module._forward_hooks.items():
if hook_fn is _observer_forward_hook:
handle_ids_to_remove.add(handle_id)
for handle_id in handle_ids_to_remove:
module._forward_hooks.pop(handle_id)
# TODO: rename to something more general
def _remove_qconfig(module):
r"""Clean up the qconfig left in the module so that new qconfig can be
propagated.
Args:
module: module to be cleaned up
"""
for child in module.children():
_remove_qconfig(child)
if hasattr(module, "qconfig"):
del module.qconfig
_remove_activation_post_process(module)
def quantize(model, run_fn, run_args, mapping=None, inplace=False):
r"""Quantize the input float model with post training static quantization.
First it will prepare the model for calibration, then it calls
`run_fn` which will run the calibration step, after that we will
convert the model to a quantized model.
Args:
model: input float model
run_fn: a calibration function for calibrating the prepared model
run_args: positional arguments for `run_fn`
inplace: carry out model transformations in-place, the original module is mutated
mapping: correspondence between original module types and quantized counterparts
Return:
Quantized model.
"""
torch._C._log_api_usage_once("quantization_api.quantize.quantize")
if mapping is None:
mapping = get_default_static_quant_module_mappings()
if not inplace:
model = copy.deepcopy(model)
model.eval()
prepare(model, inplace=True)
run_fn(model, *run_args)
convert(model, mapping, inplace=True)
return model
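# Minimal usage sketch (illustrative; `MyModel` and `calib_loader` are placeholders, and
# the model is assumed to wrap its forward pass in QuantStub/DeQuantStub so the
# converted modules receive quantized tensors):
#
#   float_model = MyModel().eval()
#   float_model.qconfig = torch.quantization.get_default_qconfig('fbgemm')
#   def calibrate(model, loader):
#       with torch.no_grad():
#           for batch in loader:
#               model(batch)
#   quantized_model = quantize(float_model, calibrate, [calib_loader])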
def quantize_dynamic(model, qconfig_spec=None, dtype=torch.qint8,
mapping=None, inplace=False):
r"""Converts a float model to dynamic (i.e. weights-only) quantized model.
Replaces specified modules with dynamic weight-only quantized versions and output the quantized model.
For simplest usage provide `dtype` argument that can be float16 or qint8. Weight-only quantization
by default is performed for layers with large weights size - i.e. Linear and RNN variants.
Fine grained control is possible with `qconfig` and `mapping` that act similarly to `quantize()`.
If `qconfig` is provided, the `dtype` argument is ignored.
Args:
model: input model
qconfig_spec: Either:
- A dictionary that maps from name or type of submodule to quantization
configuration, qconfig applies to all submodules of a given
module unless qconfig for the submodules are specified (when the
submodule already has qconfig attribute). Entries in the dictionary
need to be QConfigDynamic instances.
- A set of types and/or submodule names to apply dynamic quantization to,
in which case the `dtype` argument is used to specify the bit-width
inplace: carry out model transformations in-place, the original module is mutated
mapping: maps type of a submodule to a type of corresponding dynamically quantized version
with which the submodule needs to be replaced
"""
torch._C._log_api_usage_once("quantization_api.quantize.quantize_dynamic")
if qconfig_spec is None:
if dtype == torch.qint8:
qconfig_spec = {
nn.Linear : default_dynamic_qconfig,
nn.LSTM : default_dynamic_qconfig,
nn.GRU : default_dynamic_qconfig,
nn.LSTMCell : default_dynamic_qconfig,
nn.RNNCell : default_dynamic_qconfig,
nn.GRUCell : default_dynamic_qconfig,
}
elif dtype == torch.float16:
qconfig_spec = {
nn.Linear : float16_dynamic_qconfig,
nn.LSTM : float16_dynamic_qconfig,
nn.GRU : float16_dynamic_qconfig,
nn.LSTMCell : float16_dynamic_qconfig,
nn.RNNCell : float16_dynamic_qconfig,
nn.GRUCell : float16_dynamic_qconfig,
}
elif dtype == torch.quint8:
qconfig_spec = {
nn.EmbeddingBag : float_qparams_weight_only_qconfig,
}
else:
raise ValueError(
"Don't know how to quantize with default settings for {}. Provide full qconfig please".format(dtype))
elif isinstance(qconfig_spec, set):
if dtype is torch.qint8:
default_qconfig = default_dynamic_qconfig
elif dtype is torch.float16:
default_qconfig = float16_dynamic_qconfig
elif dtype is torch.quint8:
default_qconfig = float_qparams_weight_only_qconfig
else:
raise RuntimeError('Unknown dtype specified for quantize_dynamic: ', str(dtype))
qconfig_spec = dict(zip(qconfig_spec, itertools.repeat(default_qconfig)))
if mapping is None:
mapping = get_default_dynamic_quant_module_mappings()
if not inplace:
model = copy.deepcopy(model)
model.eval()
propagate_qconfig_(model, qconfig_spec)
convert(model, mapping, inplace=True)
return model
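# Minimal usage sketch (illustrative; `MyModel` is a placeholder): weight-only int8
# quantization of just the Linear layers.
#
#   model_fp32 = MyModel().eval()
#   model_int8 = quantize_dynamic(model_fp32, {nn.Linear}, dtype=torch.qint8)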
def prepare_qat(model, mapping=None, inplace=False):
r"""
Prepares a copy of the model for quantization calibration or
quantization-aware training and converts it to quantized version.
Quantization configuration should be assigned preemptively
to individual submodules in `.qconfig` attribute.
Args:
model: input model to be modified in-place
mapping: dictionary that maps float modules to quantized modules to be
replaced.
inplace: carry out model transformations in-place, the original module
is mutated
"""
torch._C._log_api_usage_once("quantization_api.quantize.prepare_qat")
if mapping is None:
mapping = get_default_qat_module_mappings()
if not inplace:
model = copy.deepcopy(model)
propagate_qconfig_(model, qconfig_dict=None)
convert(model, mapping=mapping, inplace=True, remove_qconfig=False)
prepare(model, observer_non_leaf_module_list=set(mapping.values()), inplace=True)
return model
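# Minimal QAT sketch (illustrative; the model and `train_one_epoch` are placeholders):
#
#   model.qconfig = torch.quantization.get_default_qat_qconfig('fbgemm')
#   model_prepared = prepare_qat(model)
#   train_one_epoch(model_prepared)            # fine-tune with fake-quant inserted
#   model_quantized = convert(model_prepared.eval())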
def quantize_qat(model, run_fn, run_args, inplace=False):
r"""Do quantization aware training and output a quantized model
Args:
model: input model
run_fn: a function for evaluating the prepared model, can be a
function that simply runs the prepared model or a training
loop
run_args: positional arguments for `run_fn`
Return:
Quantized model.
"""
torch._C._log_api_usage_once("quantization_api.quantize.quantize_qat")
if not inplace:
model = copy.deepcopy(model)
model.train()
prepare_qat(model, inplace=True)
run_fn(model, *run_args)
convert(model, inplace=True)
return model
def convert(
module, mapping=None, inplace=False, remove_qconfig=True,
convert_custom_config_dict=None):
r"""Converts submodules in input module to a different module according to `mapping`
by calling `from_float` method on the target module class. And remove qconfig at the
end if remove_qconfig is set to True.
Args:
`module`: prepared and calibrated module
`mapping`: a dictionary that maps from source module type to target
module type, can be overwritten to allow swapping user defined
Modules
`inplace`: carry out model transformations in-place, the original module
is mutated
`convert_custom_config_dict`: custom configuration dictionary for convert function
.. code-block:: python
# Example of convert_custom_config_dict:
convert_custom_config_dict = {
# user will manually define the corresponding quantized
# module class which has a from_observed class method that converts
# observed custom module to quantized custom module
"observed_to_quantized_custom_module_class": {
ObservedCustomModule: QuantizedCustomModule
}
}
"""
torch._C._log_api_usage_once("quantization_api.quantize.convert")
if not inplace:
module = copy.deepcopy(module)
_convert(
module, mapping, inplace=True,
convert_custom_config_dict=convert_custom_config_dict)
if remove_qconfig:
_remove_qconfig(module)
return module
def _convert(
module, mapping=None, inplace=False,
convert_custom_config_dict=None):
r"""Converts submodules in input module to a different module according to `mapping`
by calling `from_float` method on the target module class
Args:
module: input module
mapping: a dictionary that maps from source module type to target
module type, can be overwritten to allow swapping user defined
Modules
inplace: carry out model transformations in-place, the original module
is mutated
"""
if mapping is None:
mapping = get_default_static_quant_module_mappings()
if convert_custom_config_dict is None:
convert_custom_config_dict = {}
custom_module_class_mapping = convert_custom_config_dict.get("observed_to_quantized_custom_module_class", {})
if not inplace:
module = copy.deepcopy(module)
reassign = {}
for name, mod in module.named_children():
# both fused modules and observed custom modules are
# swapped as one unit
if not isinstance(mod, _FusedModule) and \
type(mod) not in custom_module_class_mapping:
            _convert(mod, mapping, True,  # inplace
                     convert_custom_config_dict=convert_custom_config_dict)
reassign[name] = swap_module(mod, mapping, custom_module_class_mapping)
for key, value in reassign.items():
module._modules[key] = value
return module
def swap_module(mod, mapping, custom_module_class_mapping):
r"""Swaps the module if it has a quantized counterpart and it has an
`observer` attached.
Args:
mod: input module
mapping: a dictionary that maps from nn module to nnq module
Return:
The corresponding quantized module of `mod`
"""
new_mod = mod
if hasattr(mod, 'qconfig') and mod.qconfig is not None:
swapped = False
if type(mod) in custom_module_class_mapping:
new_mod = custom_module_class_mapping[type(mod)].from_observed(mod)
swapped = True
elif type(mod) in mapping:
new_mod = mapping[type(mod)].from_float(mod)
swapped = True
if swapped:
# Preserve module's pre forward hooks. They'll be called on quantized input
for pre_hook_fn in mod._forward_pre_hooks.values():
new_mod.register_forward_pre_hook(pre_hook_fn)
# Preserve module's post forward hooks except _observer_forward_hook
# After convert they'll work with quantized output
for hook_fn in mod._forward_hooks.values():
if hook_fn is not _observer_forward_hook:
new_mod.register_forward_hook(hook_fn)
# respect device affinity when swapping modules
devices = get_unique_devices_(mod)
assert len(devices) <= 1, (
"swap_module only works with cpu or single-device CUDA modules, "
"but got devices {}".format(devices)
)
device = next(iter(devices)) if len(devices) > 0 else None
if device:
new_mod.to(device)
return new_mod
def get_observer_dict(mod, target_dict, prefix=""):
r"""Traverse the modules and save all observers into dict.
This is mainly used for quantization accuracy debug
Args:
mod: the top module we want to save all observers
prefix: the prefix for the current module
target_dict: the dictionary used to save all the observers
"""
def get_prefix(prefix):
return prefix if prefix == "" else prefix + '.'
if hasattr(mod, 'activation_post_process'):
target_dict[get_prefix(prefix) + 'activation_post_process'] = mod.activation_post_process
for name, child in mod.named_children():
module_prefix = get_prefix(prefix) + name if prefix else name
get_observer_dict(child, target_dict, module_prefix)
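# Illustrative debug usage (placeholder model): collect every attached observer so the
# recorded ranges can be inspected after calibration.
#
#   observers = {}
#   get_observer_dict(prepared_model, observers)
#   # observers maps "<module path>.activation_post_process" -> observer module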
|
the-stack_106_27939 | # Short-term MFCC Cepstrum Distance
# pr6_5_3
from MFCC import *
from Noisy import *
from Universal import *
from VAD import *
if __name__ == '__main__':
# Set_I
    IS = 0.25 # unvoiced segment length
wlen = 200 # frame length 25ms
inc = 80 # frame shift
filename = 'bluesky1.wav'
SNR = 10
# PART_I
speech = Speech()
xx, fs = speech.audioread(filename, 8000)
xx = xx - np.mean(xx) # DC
x = xx / np.max(xx) # normalized
N = len(x)
time = np.arange(N) / fs
noisy = Noisy()
signal, _ = noisy.Gnoisegen(x, SNR) # add noise
wnd = np.hamming(wlen) # window function
overlap = wlen - inc
    NIS = int((IS * fs - wlen) / inc + 1) # unvoiced segment frame number
y = speech.enframe(signal, list(wnd), inc).T
fn = y.shape[1] # frame number
frameTime = speech.FrameTime(fn, wlen, inc, fs) # frame to time
Mfcc = MFCC()
ccc = Mfcc.mfcc(signal, fs, 16, wlen, inc) # MFCC
fn1 = ccc.shape[0] # frame number
frameTime1 = frameTime[2 : fn - 2]
Ccep = ccc[:, 0 : 16] # MFCC coefficient
C0 = np.mean(Ccep[0 : 5, :], axis = 0) # calculate approximate average noise MFCC coefficient
Dcep = np.zeros(fn)
for i in range(5, fn1):
Cn = Ccep[i, :] # one frame MFCC cepstrum coefficient
Dstu = 0
for k in range(16): # calculate the MFCC cepstrum distance
Dstu += (Cn[k] - C0[k]) ** 2 # between each frame and noise
Dcep[i] = np.sqrt(Dstu)
Dcep[0 : 5] = Dcep[5]
Vad = VAD()
Dstm = Vad.multimidfilter(Dcep, 10) # smoothing
dth = np.max(Dstm[0: NIS])
T1 = dth
T2 = 1.5 * dth
[voiceseg, vsl, SF, NF] = Vad.vad_param1D(Dstm, T1, T2)
# figure
plt.figure(figsize=(9, 16))
plt.subplot(3, 1, 1)
plt.plot(time, x)
for k in range(vsl):
nx1 = voiceseg['begin'][k]
nx2 = voiceseg['end'][k]
print('{}, begin = {}, end = {}'.format(k + 1, nx1, nx2))
plt.plot(np.array([frameTime[nx1], frameTime[nx1]]), np.array([-1, 1]), 'k', linewidth=1)
plt.plot(np.array([frameTime[nx2], frameTime[nx2]]), np.array([-1, 1]), 'k--', linewidth=1)
plt.axis([0, np.max(time), -1, 1])
plt.xlabel('Time [s]')
plt.ylabel('Amplitude')
plt.title('Clean Speech Signal')
plt.subplot(3, 1, 2)
plt.plot(time, signal)
plt.axis([0, np.max(time), np.min(signal), np.max(signal)])
plt.xlabel('Time [s]')
plt.ylabel('Amplitude')
plt.title('Noisy Speech Signal SNR = {}dB'.format(SNR))
plt.subplot(3, 1, 3)
plt.plot(frameTime, Dstm)
plt.axis([0, np.max(time), 0, 1.2 * np.max(Dstm)])
plt.xlabel('Time [s]')
plt.ylabel('Amplitude')
plt.title('Short-term MFCC Cepstrum Distance')
for k in range(vsl):
nx1 = voiceseg['begin'][k]
nx2 = voiceseg['end'][k]
plt.plot(np.array([frameTime[nx1], frameTime[nx1]]), np.array([0, 1.2 * np.max(Dstm)]), 'k', linewidth=1)
plt.plot(np.array([frameTime[nx2], frameTime[nx2]]), np.array([0, 1.2 * np.max(Dstm)]), 'k--', linewidth=1)
plt.plot(np.array([0, np.max(time)]), np.array([T1, T1]), 'b', linewidth=1)
plt.plot(np.array([0, np.max(time)]), np.array([T2, T2]), 'r--', linewidth=1)
plt.savefig('images/vad_mfcc_cepstrum_distance.png', bbox_inches='tight', dpi=600)
plt.show()
|
the-stack_106_27941 | #!/usr/bin/python
import os
import re
import sys
import csv
import json
import pandas
import installLib
requiredConfigParameters = ["dataFolder"]
# Load the configuration file.
config = json.loads(installLib.readFile("config/config.json"))
for requiredConfigParameter in requiredConfigParameters:
if not requiredConfigParameter in config.keys():
print("Error - required value " + requiredConfigParameter + " not set in config.json.")
sys.exit(1)
# Read the existing basic staff details. Headings:
# GUID,UserCode,Title,GivenName,FamilyName,DateOfBirth,Username,Identifier,Form,Role,JobTitle,TelephoneNumber
staff = pandas.read_csv(config["dataFolder"] + os.sep + "staff.csv", header=0)
# Tell Pandas that the (currently empty) JobTitle and TelephoneNumber columns are actually meant to be String, not Float.
staff["JobTitle"] = staff["JobTitle"].astype(str)
staff["TelephoneNumber"] = staff["TelephoneNumber"].astype(str)
# Staff job titles: not recorded by iSAMS, but instead set manually in GSuite for each signature. Therefore, for each user,
# retrieve the existing signature and extract the "job title" value, updating the "staff" records read from CSV above. Use the job
# Role if no other value is present, then write out the updated "staff.csv" again with the added values.
outputString = ""
for staffIndex, staffMember in staff.iterrows():
if staff.at[staffIndex, "JobTitle"] == "nan":
staff.at[staffIndex, "JobTitle"] = ""
if staff.at[staffIndex, "TelephoneNumber"] == "nan":
staff.at[staffIndex, "TelephoneNumber"] = ""
staffName = ""
staffJobTitle = ""
staffUsername = ""
staffTelephone = ""
for sigLine in installLib.runCommand("gam user " + staffMember["Username"] + " show signature"):
sigLine = sigLine.replace(u'\xc2\xa0', u' ').strip()
matchResult = re.match(".*bold..(.*)..span. . (.*)..div..*?", sigLine)
if not matchResult == None:
staffName = matchResult[1].strip()
            staffJobTitle = matchResult[2].split("<")[0].strip().replace("&amp;", "&")
matchResult = re.match(".*blank..(.*)@knightsbridgeschool.com./a..*", sigLine)
if not matchResult == None:
staffUsername = matchResult[1]
matchResult = re.match("([ \d]*)</div>$", sigLine)
if not matchResult == None:
if not matchResult[1] == "":
staffTelephone = matchResult[1]
if staffUsername == "":
staffUsername = staffMember["Username"]
if staffTelephone == "":
staffTelephone = "020 7590 9000"
if staffJobTitle == "":
staffJobTitle = staffMember["Role"]
if not staffMember["Username"] == staffUsername:
print("Username mismatch: " + staffMember["Username"] + " not equal to " + staffUsername)
else:
print("Adding details for staff member " + staffMember["GivenName"] + " " + staffMember["FamilyName"] + " - JobTitle: " + str(staffJobTitle) + ", staffTelephone: " + str(staffTelephone))
staff.at[staffIndex, "JobTitle"] = staffJobTitle
staff.at[staffIndex, "TelephoneNumber"] = staffTelephone
installLib.writeFile(config["dataFolder"] + os.sep + "staff.csv", staff.to_csv(index=False))
|
the-stack_106_27942 | # -*- coding: utf-8 -*-
import argparse
from multiprocessing import cpu_count
from ncc import LOGGER
try:
from dataset.codesearchnet_feng.proj_oriented import (
LANGUAGES, MODES,
RAW_DATA_DIR,
RAW_PROJ_DATA_DIR, LIBS_DIR, FLATTEN_PROJ_DATA_DIR,
)
except ImportError:
from . import (
LANGUAGES, MODES,
RAW_DATA_DIR,
RAW_PROJ_DATA_DIR, LIBS_DIR, FLATTEN_PROJ_DATA_DIR,
)
if __name__ == '__main__':
"""
    This script flattens the attributes of the code_search_net dataset.
Examples: 'code', 'code_tokens', 'docstring', 'docstring_tokens', 'func_name', 'original_string', 'index',
"""
parser = argparse.ArgumentParser(description="Download CodeSearchNet dataset(s) or Tree-Sitter Library(ies)")
    parser.add_argument(
        "--language", "-l", default=LANGUAGES, type=str, nargs='+', help="languages to process, from [{}]".format(LANGUAGES),
    )
parser.add_argument(
"--dataset_dir", "-d", default=RAW_PROJ_DATA_DIR, type=str, help="raw dataset download directory",
)
parser.add_argument(
"--flatten_dir", "-f", default=FLATTEN_PROJ_DATA_DIR, type=str,
help="data directory of flatten attribute",
)
parser.add_argument(
"--attrs", "-a",
default=['code', 'code_tokens', 'docstring', 'docstring_tokens'],
type=str, nargs='+',
help="attrs: code, code_tokens, docstring, docstring_tokens, func_name, original_string, index",
)
parser.add_argument(
"--cores", "-c", default=cpu_count(), type=int, help="cpu cores for flatten raw data attributes",
)
args = parser.parse_args()
LOGGER.info(args)
|
the-stack_106_27943 | """Support for Xiaomi aqara binary sensors."""
import logging
from homeassistant.components.binary_sensor import (
BinarySensorDeviceClass,
BinarySensorEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.event import async_call_later
from homeassistant.helpers.restore_state import RestoreEntity
from . import XiaomiDevice
from .const import DOMAIN, GATEWAYS_KEY
_LOGGER = logging.getLogger(__name__)
NO_CLOSE = "no_close"
ATTR_OPEN_SINCE = "Open since"
MOTION = "motion"
NO_MOTION = "no_motion"
ATTR_LAST_ACTION = "last_action"
ATTR_NO_MOTION_SINCE = "No motion since"
DENSITY = "density"
ATTR_DENSITY = "Density"
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Perform the setup for Xiaomi devices."""
entities: list[XiaomiBinarySensor] = []
gateway = hass.data[DOMAIN][GATEWAYS_KEY][config_entry.entry_id]
for entity in gateway.devices["binary_sensor"]:
model = entity["model"]
if model in ("motion", "sensor_motion", "sensor_motion.aq2"):
entities.append(XiaomiMotionSensor(entity, hass, gateway, config_entry))
elif model in ("magnet", "sensor_magnet", "sensor_magnet.aq2"):
entities.append(XiaomiDoorSensor(entity, gateway, config_entry))
elif model == "sensor_wleak.aq1":
entities.append(XiaomiWaterLeakSensor(entity, gateway, config_entry))
elif model in ("smoke", "sensor_smoke"):
entities.append(XiaomiSmokeSensor(entity, gateway, config_entry))
elif model in ("natgas", "sensor_natgas"):
entities.append(XiaomiNatgasSensor(entity, gateway, config_entry))
elif model in (
"switch",
"sensor_switch",
"sensor_switch.aq2",
"sensor_switch.aq3",
"remote.b1acn01",
):
if "proto" not in entity or int(entity["proto"][0:1]) == 1:
data_key = "status"
else:
data_key = "button_0"
entities.append(
XiaomiButton(entity, "Switch", data_key, hass, gateway, config_entry)
)
elif model in (
"86sw1",
"sensor_86sw1",
"sensor_86sw1.aq1",
"remote.b186acn01",
"remote.b186acn02",
):
if "proto" not in entity or int(entity["proto"][0:1]) == 1:
data_key = "channel_0"
else:
data_key = "button_0"
entities.append(
XiaomiButton(
entity, "Wall Switch", data_key, hass, gateway, config_entry
)
)
elif model in (
"86sw2",
"sensor_86sw2",
"sensor_86sw2.aq1",
"remote.b286acn01",
"remote.b286acn02",
):
if "proto" not in entity or int(entity["proto"][0:1]) == 1:
data_key_left = "channel_0"
data_key_right = "channel_1"
else:
data_key_left = "button_0"
data_key_right = "button_1"
entities.append(
XiaomiButton(
entity,
"Wall Switch (Left)",
data_key_left,
hass,
gateway,
config_entry,
)
)
entities.append(
XiaomiButton(
entity,
"Wall Switch (Right)",
data_key_right,
hass,
gateway,
config_entry,
)
)
entities.append(
XiaomiButton(
entity,
"Wall Switch (Both)",
"dual_channel",
hass,
gateway,
config_entry,
)
)
elif model in ("cube", "sensor_cube", "sensor_cube.aqgl01"):
entities.append(XiaomiCube(entity, hass, gateway, config_entry))
elif model in ("vibration", "vibration.aq1"):
entities.append(
XiaomiVibration(entity, "Vibration", "status", gateway, config_entry)
)
else:
_LOGGER.warning("Unmapped Device Model %s", model)
async_add_entities(entities)
class XiaomiBinarySensor(XiaomiDevice, BinarySensorEntity):
"""Representation of a base XiaomiBinarySensor."""
    def __init__(self, device, name, xiaomi_hub, data_key, device_class, config_entry):
        """Initialize the XiaomiBinarySensor."""
self._data_key = data_key
self._device_class = device_class
self._should_poll = False
self._density = 0
super().__init__(device, name, xiaomi_hub, config_entry)
@property
def should_poll(self):
"""Return True if entity has to be polled for state."""
return self._should_poll
@property
def is_on(self):
"""Return true if sensor is on."""
return self._state
@property
def device_class(self):
"""Return the class of binary sensor."""
return self._device_class
def update(self):
"""Update the sensor state."""
_LOGGER.debug("Updating xiaomi sensor (%s) by polling", self._sid)
self._get_from_hub(self._sid)
class XiaomiNatgasSensor(XiaomiBinarySensor):
"""Representation of a XiaomiNatgasSensor."""
    def __init__(self, device, xiaomi_hub, config_entry):
        """Initialize the XiaomiNatgasSensor."""
self._density = None
super().__init__(
device, "Natgas Sensor", xiaomi_hub, "alarm", "gas", config_entry
)
@property
def extra_state_attributes(self):
"""Return the state attributes."""
attrs = {ATTR_DENSITY: self._density}
attrs.update(super().extra_state_attributes)
return attrs
async def async_added_to_hass(self) -> None:
"""Handle entity which will be added."""
await super().async_added_to_hass()
self._state = False
def parse_data(self, data, raw_data):
"""Parse data sent by gateway."""
if DENSITY in data:
self._density = int(data.get(DENSITY))
value = data.get(self._data_key)
if value is None:
return False
if value in ("1", "2"):
if self._state:
return False
self._state = True
return True
if value == "0":
if self._state:
self._state = False
return True
return False
class XiaomiMotionSensor(XiaomiBinarySensor):
"""Representation of a XiaomiMotionSensor."""
def __init__(self, device, hass, xiaomi_hub, config_entry):
"""Initialize the XiaomiMotionSensor."""
self._hass = hass
self._no_motion_since = 0
self._unsub_set_no_motion = None
if "proto" not in device or int(device["proto"][0:1]) == 1:
data_key = "status"
else:
data_key = "motion_status"
super().__init__(
device, "Motion Sensor", xiaomi_hub, data_key, "motion", config_entry
)
@property
def extra_state_attributes(self):
"""Return the state attributes."""
attrs = {ATTR_NO_MOTION_SINCE: self._no_motion_since}
attrs.update(super().extra_state_attributes)
return attrs
@callback
def _async_set_no_motion(self, now):
"""Set state to False."""
self._unsub_set_no_motion = None
self._state = False
self.async_write_ha_state()
async def async_added_to_hass(self) -> None:
"""Handle entity which will be added."""
await super().async_added_to_hass()
self._state = False
def parse_data(self, data, raw_data):
"""Parse data sent by gateway.
Polling (proto v1, firmware version 1.4.1_159.0143)
>> { "cmd":"read","sid":"158..."}
<< {'model': 'motion', 'sid': '158...', 'short_id': 26331,
'cmd': 'read_ack', 'data': '{"voltage":3005}'}
Multicast messages (proto v1, firmware version 1.4.1_159.0143)
<< {'model': 'motion', 'sid': '158...', 'short_id': 26331,
'cmd': 'report', 'data': '{"status":"motion"}'}
<< {'model': 'motion', 'sid': '158...', 'short_id': 26331,
'cmd': 'report', 'data': '{"no_motion":"120"}'}
<< {'model': 'motion', 'sid': '158...', 'short_id': 26331,
'cmd': 'report', 'data': '{"no_motion":"180"}'}
<< {'model': 'motion', 'sid': '158...', 'short_id': 26331,
'cmd': 'report', 'data': '{"no_motion":"300"}'}
<< {'model': 'motion', 'sid': '158...', 'short_id': 26331,
'cmd': 'heartbeat', 'data': '{"voltage":3005}'}
"""
if raw_data["cmd"] == "heartbeat":
_LOGGER.debug(
"Skipping heartbeat of the motion sensor. "
"It can introduce an incorrect state because of a firmware "
"bug (https://github.com/home-assistant/core/pull/"
"11631#issuecomment-357507744)"
)
return
if NO_MOTION in data:
self._no_motion_since = data[NO_MOTION]
self._state = False
return True
value = data.get(self._data_key)
if value is None:
return False
if value == MOTION:
if self._data_key == "motion_status":
if self._unsub_set_no_motion:
self._unsub_set_no_motion()
self._unsub_set_no_motion = async_call_later(
self._hass, 120, self._async_set_no_motion
)
if self.entity_id is not None:
self._hass.bus.fire(
"xiaomi_aqara.motion", {"entity_id": self.entity_id}
)
self._no_motion_since = 0
if self._state:
return False
self._state = True
return True
class XiaomiDoorSensor(XiaomiBinarySensor, RestoreEntity):
"""Representation of a XiaomiDoorSensor."""
def __init__(self, device, xiaomi_hub, config_entry):
"""Initialize the XiaomiDoorSensor."""
self._open_since = 0
if "proto" not in device or int(device["proto"][0:1]) == 1:
data_key = "status"
else:
data_key = "window_status"
super().__init__(
device,
"Door Window Sensor",
xiaomi_hub,
data_key,
BinarySensorDeviceClass.OPENING,
config_entry,
)
@property
def extra_state_attributes(self):
"""Return the state attributes."""
attrs = {ATTR_OPEN_SINCE: self._open_since}
attrs.update(super().extra_state_attributes)
return attrs
async def async_added_to_hass(self) -> None:
"""Handle entity which will be added."""
await super().async_added_to_hass()
if (state := await self.async_get_last_state()) is None:
return
self._state = state.state == "on"
def parse_data(self, data, raw_data):
"""Parse data sent by gateway."""
self._should_poll = False
if NO_CLOSE in data: # handle push from the hub
self._open_since = data[NO_CLOSE]
return True
value = data.get(self._data_key)
if value is None:
return False
if value == "open":
self._should_poll = True
if self._state:
return False
self._state = True
return True
if value == "close":
self._open_since = 0
if self._state:
self._state = False
return True
return False
class XiaomiWaterLeakSensor(XiaomiBinarySensor):
"""Representation of a XiaomiWaterLeakSensor."""
def __init__(self, device, xiaomi_hub, config_entry):
"""Initialize the XiaomiWaterLeakSensor."""
if "proto" not in device or int(device["proto"][0:1]) == 1:
data_key = "status"
else:
data_key = "wleak_status"
super().__init__(
device,
"Water Leak Sensor",
xiaomi_hub,
data_key,
BinarySensorDeviceClass.MOISTURE,
config_entry,
)
async def async_added_to_hass(self) -> None:
"""Handle entity which will be added."""
await super().async_added_to_hass()
self._state = False
def parse_data(self, data, raw_data):
"""Parse data sent by gateway."""
self._should_poll = False
value = data.get(self._data_key)
if value is None:
return False
if value == "leak":
self._should_poll = True
if self._state:
return False
self._state = True
return True
if value == "no_leak":
if self._state:
self._state = False
return True
return False
class XiaomiSmokeSensor(XiaomiBinarySensor):
"""Representation of a XiaomiSmokeSensor."""
def __init__(self, device, xiaomi_hub, config_entry):
"""Initialize the XiaomiSmokeSensor."""
self._density = 0
super().__init__(
device, "Smoke Sensor", xiaomi_hub, "alarm", "smoke", config_entry
)
@property
def extra_state_attributes(self):
"""Return the state attributes."""
attrs = {ATTR_DENSITY: self._density}
attrs.update(super().extra_state_attributes)
return attrs
async def async_added_to_hass(self) -> None:
"""Handle entity which will be added."""
await super().async_added_to_hass()
self._state = False
def parse_data(self, data, raw_data):
"""Parse data sent by gateway."""
if DENSITY in data:
self._density = int(data.get(DENSITY))
value = data.get(self._data_key)
if value is None:
return False
if value in ("1", "2"):
if self._state:
return False
self._state = True
return True
if value == "0":
if self._state:
self._state = False
return True
return False
class XiaomiVibration(XiaomiBinarySensor):
"""Representation of a Xiaomi Vibration Sensor."""
def __init__(self, device, name, data_key, xiaomi_hub, config_entry):
"""Initialize the XiaomiVibration."""
self._last_action = None
super().__init__(device, name, xiaomi_hub, data_key, None, config_entry)
@property
def extra_state_attributes(self):
"""Return the state attributes."""
attrs = {ATTR_LAST_ACTION: self._last_action}
attrs.update(super().extra_state_attributes)
return attrs
async def async_added_to_hass(self) -> None:
"""Handle entity which will be added."""
await super().async_added_to_hass()
self._state = False
def parse_data(self, data, raw_data):
"""Parse data sent by gateway."""
value = data.get(self._data_key)
if value is None:
return False
if value not in ("vibrate", "tilt", "free_fall", "actively"):
_LOGGER.warning("Unsupported movement_type detected: %s", value)
return False
self.hass.bus.fire(
"xiaomi_aqara.movement",
{"entity_id": self.entity_id, "movement_type": value},
)
self._last_action = value
return True
class XiaomiButton(XiaomiBinarySensor):
"""Representation of a Xiaomi Button."""
def __init__(self, device, name, data_key, hass, xiaomi_hub, config_entry):
"""Initialize the XiaomiButton."""
self._hass = hass
self._last_action = None
super().__init__(device, name, xiaomi_hub, data_key, None, config_entry)
@property
def extra_state_attributes(self):
"""Return the state attributes."""
attrs = {ATTR_LAST_ACTION: self._last_action}
attrs.update(super().extra_state_attributes)
return attrs
async def async_added_to_hass(self) -> None:
"""Handle entity which will be added."""
await super().async_added_to_hass()
self._state = False
def parse_data(self, data, raw_data):
"""Parse data sent by gateway."""
value = data.get(self._data_key)
if value is None:
return False
if value == "long_click_press":
self._state = True
click_type = "long_click_press"
elif value == "long_click_release":
self._state = False
click_type = "hold"
elif value == "click":
click_type = "single"
elif value == "double_click":
click_type = "double"
elif value == "both_click":
click_type = "both"
elif value == "double_both_click":
click_type = "double_both"
elif value == "shake":
click_type = "shake"
elif value == "long_click":
click_type = "long"
elif value == "long_both_click":
click_type = "long_both"
else:
_LOGGER.warning("Unsupported click_type detected: %s", value)
return False
self._hass.bus.fire(
"xiaomi_aqara.click",
{"entity_id": self.entity_id, "click_type": click_type},
)
self._last_action = click_type
return True
class XiaomiCube(XiaomiBinarySensor):
"""Representation of a Xiaomi Cube."""
def __init__(self, device, hass, xiaomi_hub, config_entry):
"""Initialize the Xiaomi Cube."""
self._hass = hass
self._last_action = None
if "proto" not in device or int(device["proto"][0:1]) == 1:
data_key = "status"
else:
data_key = "cube_status"
super().__init__(device, "Cube", xiaomi_hub, data_key, None, config_entry)
@property
def extra_state_attributes(self):
"""Return the state attributes."""
attrs = {ATTR_LAST_ACTION: self._last_action}
attrs.update(super().extra_state_attributes)
return attrs
async def async_added_to_hass(self) -> None:
"""Handle entity which will be added."""
await super().async_added_to_hass()
self._state = False
def parse_data(self, data, raw_data):
"""Parse data sent by gateway."""
if self._data_key in data:
self._hass.bus.fire(
"xiaomi_aqara.cube_action",
{"entity_id": self.entity_id, "action_type": data[self._data_key]},
)
self._last_action = data[self._data_key]
if "rotate" in data:
action_value = float(
data["rotate"]
if isinstance(data["rotate"], int)
else data["rotate"].replace(",", ".")
)
self._hass.bus.fire(
"xiaomi_aqara.cube_action",
{
"entity_id": self.entity_id,
"action_type": "rotate",
"action_value": action_value,
},
)
self._last_action = "rotate"
if "rotate_degree" in data:
action_value = float(
data["rotate_degree"]
if isinstance(data["rotate_degree"], int)
else data["rotate_degree"].replace(",", ".")
)
self._hass.bus.fire(
"xiaomi_aqara.cube_action",
{
"entity_id": self.entity_id,
"action_type": "rotate",
"action_value": action_value,
},
)
self._last_action = "rotate"
return True
|
the-stack_106_27944 | from floppy.graph import Graph
from floppy.floppyUi import Painter2D, MainWindow
import sys
from PyQt5.QtWidgets import QApplication
import argparse
import logging
logger = logging.getLogger('Floppy')
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler('floppy.log')
fh.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)
def run():
logger.info('Starting Floppy Application with '+' '.join(sys.argv))
app = QApplication(sys.argv)
painter = initializePainter()
startUI(app, painter)
def initializePainter():
painter = Painter2D()
Graph(painter=painter)
return painter
def startUI(app, painter):
win = MainWindow(painter=painter)
win.setArgs(parseArgv())
win.show()
logger.debug('Startup successful. Handing main thread control to Qt main loop.')
qtReturnValue = app.exec_()
override, value = win.getFloppyReturnValue()
if override:
sys.exit(value)
sys.exit(qtReturnValue)
# try:
# sys.exit(app.exec_())
# except KeyboardInterrupt:
# print('Keyboard Interrupt. Shutting down gracefully.')
# win.killRunner()
def parseArgv():
parser = argparse.ArgumentParser()
parser.add_argument('-i', action='store_true', required=False)
parser.add_argument('--test', nargs=1, required=False, default=False)
args = parser.parse_args()
return args
|
the-stack_106_27946 | __version__ = "1.0.0"
__author__ = "Akhier Dragonheart"
__license__ = "MIT"
import json
import pygame
def get_spritesheet(jsonpath):
"""Get a spritesheet through data stored in a json file
This function expects all sprites to be the same size. Along with that it
needs your json file to be formated like so:
{
"file": "examples/examplesheet.png",
"colorkey": false,
"sprite_width":50,
"sprite_height":50,
"sprites": [
{
"row": [
{"id": "yellow"},
{"id": "blue"},
{"id": "purple"}
]
},
{
"row": [
{"id": "green"},
{"id": "red"}
]
}
]
}
:param jsonpath: This is were the json file detailing your
spritesheet is located
:return: Dictionary containing pairs of (sprite_name, sprite_image)
"""
with open(jsonpath) as json_data:
sheet_data = json.load(json_data)
try:
full_image = pygame.image.load(sheet_data['file']).convert()
    except FileNotFoundError:
        print(sheet_data['file'] + " not found")
        raise
colorkey = sheet_data['colorkey']
sprite_width = sheet_data['sprite_width']
sprite_height = sheet_data['sprite_height']
sprites = {}
y = 0
for row in sheet_data['sprites']:
x = 0
for sprite_id in row['row']:
sprite_name = sprite_id['id']
rect = pygame.Rect(x, y, sprite_width, sprite_height)
sprite = pygame.Surface(rect.size).convert()
sprite.blit(full_image, (0, 0), rect)
if colorkey:
                if colorkey == -1:
colorkey = sprite.get_at((0, 0))
sprite.set_colorkey(colorkey, pygame.RLEACCEL)
sprites[sprite_name] = sprite
x += sprite_width
y += sprite_height
return sprites
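# Example usage (illustrative; the paths and sprite ids come from the docstring above,
# and a display mode must be set first because the loader calls Surface.convert()):
#
#   pygame.init()
#   screen = pygame.display.set_mode((800, 600))
#   sprites = get_spritesheet("examples/examplesheet.json")
#   screen.blit(sprites["yellow"], (0, 0))
#   pygame.display.flip()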
|
the-stack_106_27947 | from django.conf.urls import include, patterns, url
from django.conf import settings
from django.contrib import admin
from django.contrib.auth.decorators import login_required
from django.views.i18n import javascript_catalog
from django.views.decorators.cache import cache_page
from django.views.generic.base import RedirectView
import authority
import badger
from adminplus.sites import AdminSitePlus
from waffle.views import wafflejs
admin.site = AdminSitePlus()
admin.autodiscover()
admin.site.login = login_required(admin.site.login)
authority.autodiscover()
badger.autodiscover()
urlpatterns = patterns(
'',
(r'^search', include('kitsune.search.urls')),
(r'^forums', include('kitsune.forums.urls')),
(r'^questions', include('kitsune.questions.urls')),
(r'^flagged', include('kitsune.flagit.urls')),
(r'^upload', include('kitsune.upload.urls')),
(r'^kb', include('kitsune.wiki.urls')),
(r'^gallery', include('kitsune.gallery.urls')),
(r'^army-of-awesome', include('kitsune.customercare.urls')),
(r'^chat', RedirectView.as_view(url='questions/new')),
(r'^messages', include('kitsune.messages.urls')),
(r'^1', include('kitsune.inproduct.urls')),
(r'^postcrash', include('kitsune.postcrash.urls')),
(r'^groups', include('kitsune.groups.urls')),
(r'^karma', include('kitsune.karma.urls')),
(r'^kpi/', include('kitsune.kpi.urls')),
(r'^products', include('kitsune.products.urls')),
(r'^announcements', include('kitsune.announcements.urls')),
(r'^community', include('kitsune.community.urls')),
(r'^badges/', include('kitsune.kbadge.urls')),
(r'^offline', include('kitsune.offline.urls')),
# Kitsune admin (not Django admin).
(r'^admin/', include(admin.site.urls)),
# Javascript translations.
url(r'^jsi18n/.*$', cache_page(60 * 60 * 24 * 365)(javascript_catalog),
{'domain': 'javascript', 'packages': ['kitsune']}, name='jsi18n'),
# JavaScript Waffle.
url(r'^wafflejs$', wafflejs, name='wafflejs'),
(r'^', include('kitsune.dashboards.urls')),
(r'^', include('kitsune.landings.urls')),
(r'^', include('tidings.urls')), # Keep short for email wrapping.
(r'^', include('kitsune.kpi.urls_api')),
# Users
('', include('kitsune.users.urls')),
# Services and sundry.
(r'', include('kitsune.sumo.urls')),
# APIs
(r'^api/1/kb/', include('kitsune.wiki.urls_api')),
(r'^api/1/products/', include('kitsune.products.urls_api')),
)
# Handle 404 and 500 errors
handler404 = 'kitsune.sumo.views.handle404'
handler500 = 'kitsune.sumo.views.handle500'
if settings.DEBUG:
media_url = settings.MEDIA_URL.lstrip('/').rstrip('/')
urlpatterns += patterns(
'',
(r'^%s/(?P<path>.*)$' % media_url, 'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT}),
)
|
the-stack_106_27948 | """
Utility for generating grids of plots.
"""
import os
import math
import itertools
from typing import Any, Callable, Optional
import matplotlib.pyplot as plt # type: ignore
from experiment_utils.plotting.defaults import DEFAULT_SETTINGS
def try_cell_row_col(
value_dict: dict, row: Any, col: Any, default_value: Optional[bool] = None
) -> Any:
"""Helper for finding the dictionary value associated with a cell in the plot.
The dictionary is first index by cell using `(row, col)`, then by row using `row`,
and finally by column using `col`. `default_value` is returned if nothing is found.
    :param value_dict: dictionary to index into, e.g. y-axis labels for each cell.
:param row: key for the plot row.
:param col: key for the plot column.
:param default_value: value to return if nothing is found. Optional. Defaults to `None`.
:returns: value_dict[(row, col)], value_dict[row], value_dict[col] or `default_value`.
"""
return value_dict.get(
(row, col), value_dict.get(row, value_dict.get(col, default_value))
)
def plot_grid(
plot_fn: Callable,
results: dict,
figure_labels: dict,
line_kwargs: dict,
limits: dict = {},
ticks: dict = {},
log_scale: dict = {},
settings: dict = DEFAULT_SETTINGS,
base_dir: Optional[str] = None,
):
"""Helper function for generating a len(rows) x len(cols) grid of plots. In the following, cell refers to a (row, col) key-pair.
:param plot_fn: function for plotting each cell in the grid.
:param results: nested dictionary of results. The first level of is defines the rows of the plot,
the second the columns and the third the lines in each cell. Note that the number of columns must be
the same for each row, but the number of lines may differ for each cell.
:param figure_labels: dict of dicts containing labels for the plots. The top-level dict should contain keys
'y_labels', 'x_labels', 'col_titles', 'row_titles'. Each sub-dict can be indexed by cell, row, or column.
:param line_kwargs: dict of key-word arguments for each key in 'lines'.
:param limits: dict of tuples (x_lim, y_lim), where x_lim are the desired limits for the x-axis and
y_lim are the desired limits for the y-axis. Can be indexed by cell, row, or column.
    :param ticks: dict of tuples (x_ticks, y_ticks), where x_ticks are the desired ticks for the x-axis and
y_ticks are the desired ticks for the y-axis. Can be indexed by cell, row, or column.
:param log_scale: dict of strings indicating whether or not to plot a cell, row, or column as a log-log,
log-linear, or linear-linear plot. Defaults to linear-linear.
:param settings: dict with the plot configuration. See 'defaults.DEFAULT_SETTINGS' above.
:param base_dir: location to save plot. Defaults to 'None', in which case the plot is not saved.
:returns: figure and dictionary of axis objects indexed by the rows and columns.
"""
rows = list(results.keys())
cols = list(results[rows[0]].keys())
fig = plt.figure(
figsize=(settings["fig_width"] * len(cols), len(rows) * settings["fig_height"])
)
# grid spec.
spec = fig.add_gridspec(ncols=len(cols), nrows=len(rows))
# title the plot if a title is given.
if "title" in figure_labels:
fig.suptitle(
figure_labels["title"], fontsize=settings.get("titles_fs", 18), y=1
)
# unpack label arguments:
y_labels, x_labels, col_titles, row_titles = (
figure_labels.get("y_labels", {}),
figure_labels.get("x_labels", {}),
figure_labels.get("col_titles", {}),
figure_labels.get("row_titles", {}),
)
axes = {}
for i, (row, col) in enumerate(itertools.product(rows, cols)):
ax = fig.add_subplot(spec[math.floor(i / len(cols)), i % len(cols)])
# dict of axes objects
axes[(row, col)] = ax
ax.yaxis.offsetText.set_fontsize(settings["offest_text_fs"])
# in the top row
if settings.get("col_titles", False) and i < len(cols):
ax.set_title(col_titles.get(col, ""), fontsize=settings["subtitle_fs"])
if settings.get("row_titles", False) and i % len(cols) == 0:
ax.annotate(
row_titles.get(row, ""),
xy=(0, 0.5),
xytext=(-ax.yaxis.labelpad - settings["row_title_pad"], 0),
xycoords=ax.yaxis.label,
textcoords="offset points",
fontsize=settings["subtitle_fs"],
ha="right",
va="center",
rotation=90,
)
# start of a new row
if settings.get("y_labels", False) == "left_col" and i % len(cols) == 0:
ax.set_ylabel(y_labels.get(row, ""), fontsize=settings["axis_labels_fs"])
elif settings.get("y_labels", False) == "every_col":
ax.set_ylabel(
try_cell_row_col(y_labels, row, col, ""),
fontsize=settings["axis_labels_fs"],
)
# in the bottom row
if (
settings.get("x_labels", False) == "bottom_row"
and len(cols) * (len(rows) - 1) <= i
):
ax.set_xlabel(x_labels.get(col, ""), fontsize=settings["axis_labels_fs"])
elif settings.get("x_labels", False) == "every_row":
ax.set_xlabel(
try_cell_row_col(x_labels, row, col, ""),
fontsize=settings["axis_labels_fs"],
)
ax.ticklabel_format(
axis="y",
style=settings.get("ticklabel_format", "scientific"),
scilimits=(0, 0),
)
# ticks
ax.tick_params(labelsize=settings["tick_fs"])
if try_cell_row_col(ticks, row, col, None) is not None:
x_ticks, y_ticks = try_cell_row_col(ticks, row, col, None)
if x_ticks is not None and len(x_ticks) > 0:
                ax.set_xticks(x_ticks)
if y_ticks is not None and len(y_ticks) > 0:
                ax.set_yticks(y_ticks)
# plot the cell
plot_fn(ax, results[row][col], line_kwargs, settings)
# log-scale:
if try_cell_row_col(log_scale, row, col, None) is not None:
log_type = try_cell_row_col(log_scale, row, col, None)
if log_type == "log-linear":
ax.set_yscale("log")
elif log_type == "log-log":
ax.set_yscale("log")
ax.set_xscale("log")
elif log_type == "linear-log":
ax.set_xscale("log")
# limits: needs to be done after plotting the data
if try_cell_row_col(limits, row, col, None) is not None:
x_limits, y_limits = try_cell_row_col(limits, row, col, None)
if x_limits is not None and len(x_limits) > 0:
ax.set_xlim(*x_limits)
if y_limits is not None and len(y_limits) > 0:
ax.set_ylim(*y_limits)
# Put only one shared legend to avoid clutter
handles, labels = ax.get_legend_handles_labels()
final_handles, final_labels = [], []
for i, label in enumerate(labels):
final_handles.append(handles[i])
final_labels.append(labels[i])
ncol = settings["legend_cols"]
if settings["show_legend"]:
legend = fig.legend(
final_handles,
final_labels,
loc="lower center",
borderaxespad=0.1,
fancybox=False,
shadow=False,
frameon=False,
ncol=ncol,
fontsize=settings["legend_fs"],
)
for line in legend.get_lines():
line.set_linewidth(4.0)
bottom_margin = settings["bottom_margin"] / len(rows)
plt.tight_layout()
fig.subplots_adjust(
wspace=settings.get("wspace", 0.2),
hspace=settings.get("vspace", 0.2),
bottom=bottom_margin,
)
if base_dir is not None:
head, _ = os.path.split(base_dir)
os.makedirs(head, exist_ok=True)
plt.savefig(base_dir)
plt.close()
return fig, axes
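# Minimal usage sketch (illustrative; `plot_line` and the nested `results` dict are
# caller-supplied placeholders, and DEFAULT_SETTINGS is assumed to provide the font-size
# and margin keys accessed above):
#
#   def plot_line(ax, cell_results, line_kwargs, settings):
#       for key, (x, y) in cell_results.items():
#           ax.plot(x, y, label=key, **line_kwargs.get(key, {}))
#
#   fig, axes = plot_grid(
#       plot_fn=plot_line,
#       results={"row_a": {"col_1": {"line_1": ([0, 1, 2], [0.0, 0.5, 1.0])}}},
#       figure_labels={"title": "Example grid"},
#       line_kwargs={"line_1": {"color": "tab:blue"}},
#   )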
|
the-stack_106_27954 |
from pytest import raises
from mutmut import (
partition_node_list,
name_mutation,
Context,
mutate)
def test_partition_node_list_no_nodes():
with raises(AssertionError):
partition_node_list([], None)
def test_name_mutation_simple_mutants():
assert name_mutation(None, 'True') == 'False'
def test_context_exclude_line():
source = "__import__('pkg_resources').declare_namespace(__name__)\n"
assert mutate(Context(source=source)) == (source, 0)
source = "__all__ = ['hi']\n"
assert mutate(Context(source=source)) == (source, 0)
|
the-stack_106_27956 | from __future__ import unicode_literals
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.db import models
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
from .conf import settings
from .signals import user_linked_to_response
try:
from django.urls import reverse
except ImportError:
from django.core.urlresolvers import reverse
AUTH_USER_MODEL = getattr(settings, "AUTH_USER_MODEL", "auth.User")
@python_2_unicode_compatible
class Referral(models.Model):
user = models.ForeignKey(
AUTH_USER_MODEL,
on_delete=models.SET_NULL,
related_name="referral_codes",
null=True
)
label = models.CharField(max_length=100, blank=True)
code = models.CharField(max_length=40, unique=True)
expired_at = models.DateTimeField(null=True, blank=True)
redirect_to = models.CharField(max_length=512)
target_content_type = models.ForeignKey(ContentType, null=True, blank=True, on_delete=models.SET_NULL)
target_object_id = models.PositiveIntegerField(null=True, blank=True)
target = GenericForeignKey(
ct_field="target_content_type",
fk_field="target_object_id"
)
created_at = models.DateTimeField(default=timezone.now)
def __str__(self):
if self.user:
return "{} ({})".format(self.user, self.code)
else:
return self.code
@classmethod
def for_request(cls, request):
cookie = request.COOKIES.get("pinax-referral")
if cookie:
code, session_key = cookie.split(":")
try:
return Referral.objects.get(code=code)
except Referral.DoesNotExist:
pass
@property
def url(self):
path = reverse("pinax_referrals:process_referral", kwargs={"code": self.code})
domain = Site.objects.get_current().domain
protocol = "https" if settings.PINAX_REFERRALS_SECURE_URLS else "http"
return "{}://{}{}".format(protocol, domain, path)
@property
def response_count(self):
return self.responses.filter(action="RESPONDED").count()
def save(self, *args, **kwargs):
if not self.code:
self.code = settings.PINAX_REFERRALS_CODE_GENERATOR_CALLBACK(Referral)
return super(Referral, self).save(*args, **kwargs)
@classmethod
def create(cls, redirect_to, user=None, label="", target=None):
if target:
obj, _ = cls.objects.get_or_create(
user=user,
redirect_to=redirect_to,
label=label,
target_content_type=ContentType.objects.get_for_model(target),
target_object_id=target.pk
)
else:
obj, _ = cls.objects.get_or_create(
user=user,
label=label,
redirect_to=redirect_to,
)
return obj
@classmethod
def record_response(cls, request, action_string, target=None):
referral = cls.referral_for_request(request)
if referral:
return referral.respond(request, action_string, target=target)
@classmethod
def referral_for_request(cls, request):
if request.user.is_authenticated:
qs = ReferralResponse.objects.filter(user=request.user)
else:
qs = ReferralResponse.objects.filter(session_key=request.session.session_key)
try:
return qs.order_by("-created_at")[0].referral
except IndexError:
pass
def link_responses_to_user(self, user, session_key):
for response in self.responses.filter(session_key=session_key, user__isnull=True):
response.user = user
response.save()
user_linked_to_response.send(sender=self, response=response)
def respond(self, request, action_string, user=None, target=None):
if user is None:
if request.user.is_authenticated:
user = request.user
else:
user = None
ip_address = request.META.get(
settings.PINAX_REFERRALS_IP_ADDRESS_META_FIELD,
""
)
kwargs = dict(
referral=self,
session_key=request.session.session_key,
ip_address=ip_address,
action=action_string,
user=user
)
if target:
kwargs.update({"target": target})
return ReferralResponse.objects.create(**kwargs)
def filtered_responses(self):
return settings.PINAX_REFERRALS_RESPONSES_FILTER_CALLBACK(
referral=self
)
class ReferralResponse(models.Model):
referral = models.ForeignKey(Referral, related_name="responses", on_delete=models.CASCADE)
session_key = models.CharField(max_length=40)
user = models.ForeignKey(AUTH_USER_MODEL, null=True, on_delete=models.SET_NULL)
ip_address = models.CharField(max_length=45)
action = models.CharField(max_length=128)
target_content_type = models.ForeignKey(ContentType, null=True, on_delete=models.SET_NULL)
target_object_id = models.PositiveIntegerField(null=True)
target = GenericForeignKey(
ct_field="target_content_type",
fk_field="target_object_id"
)
created_at = models.DateTimeField(default=timezone.now)
|
the-stack_106_27959 | # -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
import time
import re
import json
import config
import utils
# if you want to make this work on a system with no GUI, install Xvfb first and then uncomment these lines.
# from pyvirtualdisplay import Display
# display = Display(visible=0, size=(1366, 768))
# display.start()
# print('Display start')
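# A hedged sketch (not from the original author): gate the virtual display on an
# environment variable so the same script runs both on desktops and on GUI-less servers.
# HEADLESS is an illustrative variable name; Xvfb and pyvirtualdisplay must be installed.
# import os
# if os.environ.get('HEADLESS') == '1':
#     from pyvirtualdisplay import Display
#     display = Display(visible=0, size=(1366, 768))
#     display.start()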
browser = webdriver.Firefox()
class WechatScraper():
def __init__(self, **kwargs):
self.config = config
"""
query: keyword
    page: the page you want to scrape, useful when you use a keyword to scrape many articles
"""
def get_article_list_by_keyword(self, query, page=1):
query = 'query=' + query
page = 'page=' + str(page)
built_url = self._build_url(self.config.article_search_url, ['query', 'page'], [query, page])
article_list = []
browser.get(built_url)
times = self._withdraw_time(browser.page_source)
news_list = browser.find_elements_by_css_selector('.news-list li')
for i in range(len(news_list)):
news = news_list[i]
title = news.find_element_by_tag_name('h3').text
url = news.find_element_by_css_selector('h3 a').get_attribute('href')
description = news.find_element_by_css_selector('.txt-box>p').text
gzh = news.find_element_by_css_selector('.account').text
gzh_url = news.find_element_by_css_selector('.account').get_attribute('href')
# time_re = re.compile(r'\d{10}')
# bs_obj = BeautifulSoup(browser.page_source, 'html.parser')
# time = re.search(time_re, news.find_element_by_tag_name('script').text).group(0)
            # materialise the generator so the image urls survive beyond this iteration
            imgs = [self._withdraw_image(img) for img in news.find_elements_by_tag_name('img')]
news_unit = {
'title': title,
'url': url,
'gzh': gzh,
'gzh_url': gzh_url,
'description': description,
'updateTime': times[i],
'poster': imgs
}
article_list.append(news_unit)
return article_list
def get_article_by_url(self, url):
browser.get(url)
avatar_re = re.compile(r'ori_head_img_url[^;]+;')
raw_avatar = avatar_re.findall(browser.page_source)
avatar = ''
if(raw_avatar):
avatar = re.sub(re.compile(r'[^"]+"'), '', raw_avatar[0], 1).replace('";', '')
page_content = browser.find_element_by_id('img-content')
ems = page_content.find_elements_by_css_selector('.rich_media_meta_list>em')
author = ''
if(len(ems)>1):
author = ems[1].text
content = page_content.find_element_by_id('js_content').get_attribute('innerHTML')
article = {
'authorName': author,
'authorAvatar': avatar,
'content': content
}
return article
    # scrape the gzh (official account) list on the page
def search_gzh_by_keyword(self, query, **kwargs):
page = kwargs.get('page', 1)
query = 'query=' + query
page = 'page=' + str(page)
built_url = self._build_url(self.config.gzh_search_url, ['query', 'page'], [query, page])
browser.get(built_url)
gzh_list = browser.find_elements_by_css_selector('.news-list2 li')
for i in range(len(gzh_list)):
gzh = gzh_list[i]
avatar = gzh.find_element_by_css_selector('.img-box img').get_attribute('src')
title = gzh.find_element_by_class_name('tit').text
wechatid = gzh.find_element_by_name('em_weixinhao').text
qrcode = gzh.find_element_by_css_selector('.ew-pop .pop img').get_attribute('src')
gzh_info = {
'title': title,
'wechatid': wechatid,
'avatar': avatar,
'qrcode': qrcode
}
dls = gzh.find_elements_by_tag_name('dl')
for k in range(len(dls)):
dl = dls[k]
                if(dl.text[0:4] == u'功能介绍'[0:4]):  # '功能介绍' = "feature introduction"
gzh_info['introduction'] = dl.find_element_by_tag_name('dd').text
                if(dl.text[0:4] == u'微信认证'[0:4]):  # '微信认证' = "WeChat verification"
gzh_info['verification'] = dl.find_element_by_tag_name('dd').text
gzh_list[i] = gzh_info
return gzh_list
# get gzh message by wechatid
def get_gzh_message(self, wechatid):
query = 'query=' + str(wechatid)
page = 'page=' + str(1)
built_url = self._build_url(self.config.gzh_search_url, ['query', 'page'], [query, page])
browser.get(built_url)
gzh_list = browser.find_elements_by_css_selector('.news-list2 li')
gzh_url = gzh_list[0].find_element_by_css_selector('.img-box a').get_attribute('href')
browser.get(gzh_url)
# get msg within the script
source_re = re.compile(r'{"list":.+}}]}')
msg_list_string = source_re.findall(browser.page_source)[0]
msg_list = json.loads(msg_list_string.encode('utf-8'))['list']
for i in range(len(msg_list)):
msg = msg_list[i]
f = msg['app_msg_ext_info']
s = msg['comm_msg_info']
msg_list[i] = {
'title': f['title'],
'url': 'http://mp.weixin.qq.com' + f['content_url'],
'poster': f['cover'],
'authorName': f['author'],
'description': f['digest'],
'updateTime': s['datetime']
}
return msg_list
# get msg list through DOM tree
# msg_list = browser.find_elements_by_class_name('weui_msg_card')
# for i in range(len(msg_list)):
# msg = msg_list[i]
# html = msg.find_element_by_css_selector('.weui_media_box').get_attribute('innerHTML')
# img_re = re.compile(r'http:.*wx_fmt=[a-zA-Z0-9]+')
# poster = img_re.findall(html)
# title = msg.find_element_by_tag_name('h4').text.strip()
# url = 'http://mp.weixin.qq.com' + msg.find_element_by_tag_name('h4').get_attribute('hrefs')
# time = msg.find_element_by_class_name('weui_media_extra_info').text
# msg_list[i] = {
# 'title': title,
# 'poster': poster,
# 'url': url,
# 'time': time
# }
# print(msg_list)
# return msg_list
"""
below here are some common functions
"""
# replace url parameters
def _build_url(self, base, oldVal, newVal):
if(type(oldVal) == str):
base = base.replace(oldVal, newVal)
if(type(oldVal) == list):
for i in range(len(oldVal)):
base = base.replace(oldVal[i], str(newVal[i]))
return base
# withdraw image url through image element
def _withdraw_image(self, element):
return element.get_attribute('src') or element.get_attribute('data-src')
# withdraw time within script, page source needed
def _withdraw_time(self, source):
raw_re = re.compile(r'document\.write\(timeConvert\(\'\d{10}\'\)\)')
raw_times = raw_re.findall(source)
exact_re = re.compile(r'\d{10}')
return [exact_re.findall(time)[0] for time in raw_times]
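# A hedged usage sketch (not part of the original file): it only exercises the public
# methods defined above. It assumes Firefox/geckodriver are installed and that the Sogou
# Weixin markup still matches the selectors in this class; the keyword 'python' is
# purely illustrative.
if __name__ == '__main__':
    scraper = WechatScraper()
    articles = scraper.get_article_list_by_keyword('python', page=1)
    if articles:
        print(scraper.get_article_by_url(articles[0]['url']))
    browser.quit()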
|
the-stack_106_27962 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
import socket
import sys
sys.path.append('..')
from common import send_msg, recv_msg
PORT = 9090
with socket.socket() as sock:
sock.bind(('', PORT))
sock.listen()
print(f'Server: {sock.getsockname()}')
while True:
conn, addr = sock.accept()
print('Connected:', addr)
while True:
data = recv_msg(conn)
if not data:
break
print(f'Receiving ({len(data)}): {data}')
print('Sending')
data = data.decode('utf-8').upper().encode('utf-8')
send_msg(conn, data)
print('Close\n')
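# A hedged client-side sketch (not in the original file), kept as a comment because the
# server loop above never returns: it reuses the same send_msg/recv_msg framing helpers
# from the `common` module the server imports, whatever wire format they implement.
# with socket.socket() as client:
#     client.connect(('localhost', PORT))
#     send_msg(client, 'hello'.encode('utf-8'))
#     print(f'Echoed back: {recv_msg(client)}')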
|
the-stack_106_27964 | #https://www.youtube.com/watch?v=jO6qQDNa2UY&ab_channel=TechWithTim
#The mp3 files don't work, so the sound-effect lines are commented out below
import pygame
#OS to help find the path to the assets
import os
pygame.font.init()
pygame.mixer.init()
#surface is like a Window in pygame
WIDTH, HEIGHT = 900, 500
WIN = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption("First Game!")
#Editing windows properties
HEALTH_FONT = pygame.font.SysFont('comicsans', 40)
WINNER_FONT = pygame.font.SysFont('comicsans', 100)
WHITE = (255, 255, 255)
BORDER_COLOR = (0, 0, 0)
YELLOW_BULLET_COLOR = (255, 255, 0)
RED_BULLET_COLOR = (255, 0, 0)
BORDER = pygame.Rect((WIDTH/2) - 5, 0, 10, HEIGHT)
#BULLET_HIT_SOUND = pygame.mixer.Sound(os.path.join('Assets', 'Grenade+1.mp3'))
#BULLET_FIRE_SOUND = pygame.mixer.Sound(os.path.join('Assets', 'Gun+Silencer.mp3'))
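# A hedged sketch (not part of the original tutorial): since the mp3 assets noted at the
# top don't load, the sounds can be loaded defensively so the game still runs without them.
# The 'Assets' directory and file names are assumptions copied from the commented lines above.
def _try_load_sound(*path_parts):
    # returns a Sound object, or None when the file is missing or cannot be decoded
    try:
        return pygame.mixer.Sound(os.path.join(*path_parts))
    except (pygame.error, FileNotFoundError):
        return None
BULLET_HIT_SOUND = _try_load_sound('Assets', 'Grenade+1.mp3')
BULLET_FIRE_SOUND = _try_load_sound('Assets', 'Gun+Silencer.mp3')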
FPS = 60
VEL = 5
BULLET_VEL = 7
MAX_BULLETS = 3
SPACESHIP_WIDTH, SPACESHIP_HEIGHT = 55, 40
YELLOW_HIT = pygame.USEREVENT + 1
RED_HIT = pygame.USEREVENT + 2
YELLOW_SPACESHIP_IMAGE = pygame.image.load(
os.path.join('Tutorials/Assets', 'spaceship_yellow.png'))
YELLOW_SPACESHIP_SCALE = pygame.transform.scale(
YELLOW_SPACESHIP_IMAGE, (SPACESHIP_WIDTH, SPACESHIP_HEIGHT))
YELLOW_SPACESHIP = pygame.transform.rotate(YELLOW_SPACESHIP_SCALE, 90)
RED_SPACESHIP_IMAGE = pygame.image.load(
os.path.join('Tutorials/Assets', 'spaceship_red.png'))
RED_SPACESHIP_SCALE = pygame.transform.scale(
RED_SPACESHIP_IMAGE, (SPACESHIP_WIDTH, SPACESHIP_HEIGHT))
RED_SPACESHIP = pygame.transform.rotate(RED_SPACESHIP_SCALE, -90)
SPACE = pygame.transform.scale(pygame.image.load(
os.path.join('Tutorials/Assets', 'space.png')), (WIDTH, HEIGHT))
def draw_window(yellow, red, yellow_bullets, red_bullets, yellow_health, red_health):
    #From now on, order matters when putting things on screen
#Background color to WIN
#Colors are in RGB in Pygame
#WIN.fill((BACKGROUND))
WIN.blit(SPACE, (0, 0))
#Draw the border
pygame.draw.rect(WIN, BORDER_COLOR, BORDER)
#Blit to draw a surface over the screen
#Images are loaded as surfaces in Pygame
#0,0 position is top left in Pygame
#WIN.blit(YELLOW_SPACESHIP_IMAGE, (300, 200))
yellow_health_text = HEALTH_FONT.render("Health: " + str(yellow_health), 1, WHITE)
red_health_text = HEALTH_FONT.render("Health: " + str(red_health), 1, WHITE)
WIN.blit(yellow_health_text, (10, 10))
WIN.blit(red_health_text, (WIDTH - red_health_text.get_width() - 10, 10))
#Now using the one variable of the spaceship that is scaled, not the actual image
WIN.blit(YELLOW_SPACESHIP, (yellow.x, yellow.y))
WIN.blit(RED_SPACESHIP, (red.x, red.y))
for bullet in yellow_bullets:
pygame.draw.rect(WIN, YELLOW_BULLET_COLOR, bullet)
for bullet in red_bullets:
pygame.draw.rect(WIN, RED_BULLET_COLOR, bullet)
#Now, update windows with the information written so far
pygame.display.update()
def yellow_handle_movement(key_pressed, yellow):
if key_pressed[pygame.K_a] and yellow.x - VEL > 0:
yellow.x -= VEL
if key_pressed[pygame.K_d] and yellow.x + VEL + yellow.width < BORDER.x:
yellow.x += VEL
    if key_pressed[pygame.K_w] and yellow.y - VEL > 0:
yellow.y -= VEL
if key_pressed[pygame.K_s] and yellow.y + VEL + yellow.height < HEIGHT:
yellow.y += VEL
def red_handle_movement(key_pressed, red):
if key_pressed[pygame.K_LEFT] and red.x - VEL > BORDER.x + BORDER.width:
red.x -= VEL
if key_pressed[pygame.K_RIGHT] and red.x + VEL + red.width < WIDTH:
red.x += VEL
    if key_pressed[pygame.K_UP] and red.y - VEL > 0:
red.y -= VEL
if key_pressed[pygame.K_DOWN] and red.y + VEL + red.height < HEIGHT:
red.y += VEL
def handle_bullets(yellow_bullets, red_bullets, yellow, red):
for bullet in yellow_bullets:
bullet.x += BULLET_VEL
if red.colliderect(bullet):
pygame.event.post(pygame.event.Event(RED_HIT))
yellow_bullets.remove(bullet)
elif bullet.x > WIDTH:
yellow_bullets.remove(bullet)
for bullet in red_bullets:
bullet.x -= BULLET_VEL
if yellow.colliderect(bullet):
pygame.event.post(pygame.event.Event(YELLOW_HIT))
red_bullets.remove(bullet)
elif bullet.x < 0:
red_bullets.remove(bullet)
def draw_winner(winner_text):
draw_text = WINNER_FONT.render(winner_text, 1, WHITE)
WIN.blit(draw_text, (WIDTH/2 - draw_text.get_width()/2, HEIGHT/2 - draw_text.get_height()/2))
pygame.display.update()
pygame.time.delay(5000)
def main():
yellow = pygame.Rect(100, 300, SPACESHIP_WIDTH, SPACESHIP_HEIGHT)
red = pygame.Rect(700, 300, SPACESHIP_WIDTH, SPACESHIP_HEIGHT)
yellow_bullets = []
red_bullets = []
yellow_health = 10
red_health = 10
clock = pygame.time.Clock()
#Event loop for redrawing the surface, checking collision, updating score
run = True
while run:
clock.tick(FPS)
for event in pygame.event.get():
            if event.type == pygame.QUIT:
                run = False
                pygame.quit()
                return  # stop right away; pygame calls are invalid after quit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LSHIFT and len(yellow_bullets) < MAX_BULLETS:
bullet = pygame.Rect(yellow.x + yellow.width, yellow.y + yellow.height/2, 10, 5)
yellow_bullets.append(bullet)
#BULLET_FIRE_SOUND.play()
if event.key == pygame.K_RSHIFT and len(red_bullets) < MAX_BULLETS:
bullet = pygame.Rect(red.x, red.y + red.height/2, 10, 5)
red_bullets.append(bullet)
#BULLET_FIRE_SOUND.play()
if event.type == YELLOW_HIT:
yellow_health -= 1
#BULLET_HIT_SOUND.play()
if event.type == RED_HIT:
red_health -= 1
#BULLET_HIT_SOUND.play()
winner_text = ""
if yellow_health <= 0:
winner_text = "Red Wins!"
if red_health <= 0:
winner_text = "Yellow Wins!"
if winner_text != "":
draw_winner(winner_text)
break
print(yellow_health, red_health)
key_pressed = pygame.key.get_pressed()
yellow_handle_movement(key_pressed, yellow)
red_handle_movement(key_pressed, red)
handle_bullets(yellow_bullets, red_bullets, yellow, red)
draw_window(yellow, red, yellow_bullets, red_bullets, yellow_health, red_health)
    main()  # restart the game after a round ends, as in the original tutorial
if __name__ == "__main__":
#__name__ of the file and __main__ is the main file that was run
#This means main() will be excecuted when running this file
main()
|
the-stack_106_27965 | # Copyright (C) 2014 Christine Dodrill <[email protected]> All rights reserved.
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
#
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
#
# 3. This notice may not be removed or altered from any source
# distribution.
#
from flask import Blueprint, flash, render_template, redirect
from unimatrix.forms import ServerForm
from unimatrix.models import Server
from unimatrix import db
bp = Blueprint("admin", __name__)
@bp.route("/")
def index():
return render_template("admin/index.html")
@bp.route("/addserver", methods=['GET', 'POST'])
def addserver():
form = ServerForm()
if form.validate_on_submit():
server = Server(form.name.data, form.description.data, form.ipaddress.data)
server.linksumm.append("genesect.yolo-swag.com")
db.keys["servers"].append(server.__dict__)
db.commit()
flash("Success! Added %s" % repr(server))
return redirect("/")
return render_template("admin/addserver.html", **locals())
|