"""
Player
The player class is an extension of the default Django user class,
and is customized for the needs of Evennia.
We use the Player to store a more mud-friendly style of permission
system as well as to allow the admin more flexibility by storing
attributes on the Player. Within the game we should normally use the
Player manager's methods to create users so that permissions are set
correctly.
To make the Player model more flexible for your own game, it can also
persistently store attributes of its own. This is ideal for extra
account info and OOC account configuration variables etc.
"""
from django.conf import settings
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.utils.encoding import smart_str
from src.players import manager
from src.scripts.models import ScriptDB
from src.typeclasses.models import (TypedObject, TagHandler, NickHandler,
AliasHandler, AttributeHandler)
from src.commands.cmdsethandler import CmdSetHandler
from src.commands import cmdhandler
from src.utils import utils, logger
from src.utils.utils import to_str, make_iter
from django.utils.translation import ugettext as _
__all__ = ("PlayerDB",)
_ME = _("me")
_SELF = _("self")
_SESSIONS = None
_AT_SEARCH_RESULT = utils.variable_from_module(*settings.SEARCH_AT_RESULT.rsplit('.', 1))
_MULTISESSION_MODE = settings.MULTISESSION_MODE
_GA = object.__getattribute__
_SA = object.__setattr__
_DA = object.__delattr__
_TYPECLASS = None
#------------------------------------------------------------
#
# PlayerDB
#
#------------------------------------------------------------
class PlayerDB(TypedObject, AbstractUser):
"""
This is a special model using Django's 'profile' functionality
and extends the default Django User model. It is defined as such
by use of the variable AUTH_PROFILE_MODULE in the settings.
Fields and methods are accessed directly on this model. We try to use
this model as much as possible rather than the default User, since we
can customize it to our liking.
The TypedObject supplies the following (inherited) properties:
key - main name
typeclass_path - the path to the decorating typeclass
typeclass - auto-linked typeclass
date_created - time stamp of object creation
permissions - perm strings
dbref - #id of object
db - persistent attribute storage
ndb - non-persistent attribute storage
The PlayerDB adds the following properties:
user - connected User object. Django field; needs to be save()'d.
name - alias for user.username
sessions - sessions connected to this player
is_superuser - bool if this player is a superuser
"""
#
# PlayerDB Database model setup
#
# inherited fields (from TypedObject):
# db_key, db_typeclass_path, db_date_created, db_permissions
# store a connected flag here too, not just in sessionhandler.
# This makes it easier to track from various out-of-process locations
db_is_connected = models.BooleanField(default=False,
verbose_name="is_connected",
help_text="If player is connected to game or not")
# database storage of persistent cmdsets.
db_cmdset_storage = models.CharField('cmdset', max_length=255, null=True,
help_text="optional python path to a cmdset class. If creating a Character, this will default to settings.CMDSET_CHARACTER.")
# Database manager
objects = manager.PlayerManager()
# caches for quick lookups
_typeclass_paths = settings.PLAYER_TYPECLASS_PATHS
_default_typeclass_path = settings.BASE_PLAYER_TYPECLASS or "src.players.player.Player"
class Meta:
"Define Django meta options"
app_label = 'players'
verbose_name = 'Player'
verbose_name_plural = 'Players'
def __init__(self, *args, **kwargs):
"Parent must be initiated first"
TypedObject.__init__(self, *args, **kwargs)
# handlers
_SA(self, "cmdset", CmdSetHandler(self))
_GA(self, "cmdset").update(init_mode=True)
_SA(self, "attributes", AttributeHandler(self))
_SA(self, "tags", TagHandler(self, category_prefix="player_"))
_SA(self, "aliases", AliasHandler(self, category_prefix="player_"))
_SA(self, "nicks", NickHandler(self))
# alias to the objs property
def __characters_get(self):
return self.objs
def __characters_set(self, value):
self.objs = value
def __characters_del(self):
raise Exception("Cannot delete name")
characters = property(__characters_get, __characters_set, __characters_del)
# cmdset_storage property
# This seems very sensitive to caching, so leaving it be for now /Griatch
#@property
def cmdset_storage_get(self):
"""
Getter. Allows for value = self.cmdset_storage. Returns a list of cmdset paths.
"""
storage = _GA(self, "db_cmdset_storage")
# we need to check that storage is not None
return [path.strip() for path in storage.split(',')] if storage else []
#@cmdset_storage.setter
def cmdset_storage_set(self, value):
"""
Setter. Allows for self.cmdset_storage = value. Stores as a comma-separated
string.
"""
_SA(self, "db_cmdset_storage", ",".join(str(val).strip() for val in make_iter(value)))
_GA(self, "save")()
#@cmdset_storage.deleter
def cmdset_storage_del(self):
"Deleter. Allows for del self.name"
_SA(self, "db_cmdset_storage", None)
_GA(self, "save")()
cmdset_storage = property(cmdset_storage_get, cmdset_storage_set, cmdset_storage_del)
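# Illustration of the round trip handled by the property above (the cmdset
# path is hypothetical): assigning a list stores a comma-separated string in
# db_cmdset_storage, and reading returns it as a list again.
#
#     player.cmdset_storage = ["commands.default.PlayerCmdSet"]
#     player.db_cmdset_storage    # -> "commands.default.PlayerCmdSet"
#     player.cmdset_storage       # -> ["commands.default.PlayerCmdSet"]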
#
# PlayerDB main class properties and methods
#
def __str__(self):
return smart_str("%s(player %s)" % (_GA(self, "name"), _GA(self, "dbid")))
def __unicode__(self):
return u"%s(player#%s)" % (_GA(self, "name"), _GA(self, "dbid"))
#@property
def __username_get(self):
return _GA(self, "username")
def __username_set(self, value):
_SA(self, "username", value)
def __username_del(self):
_DA(self, "username")
# aliases
name = property(__username_get, __username_set, __username_del)
key = property(__username_get, __username_set, __username_del)
#@property
def __uid_get(self):
"Getter. Retrieves the user id"
return self.id
def __uid_set(self, value):
raise Exception("User id cannot be set!")
def __uid_del(self):
raise Exception("User id cannot be deleted!")
uid = property(__uid_get, __uid_set, __uid_del)
#@property
#def __is_superuser_get(self):
# "Superusers have all permissions."
# return self.db_is_superuser
# #is_suser = get_prop_cache(self, "_is_superuser")
# #if is_suser == None:
# # is_suser = _GA(self, "user").is_superuser
# # set_prop_cache(self, "_is_superuser", is_suser)
# #return is_suser
#is_superuser = property(__is_superuser_get)
#
# PlayerDB class access methods
#
def msg(self, text=None, from_obj=None, sessid=None, **kwargs):
"""
Evennia -> User
This is the main route for sending data back to the user from the
server.
text (string) - text data to send
from_obj (Object/Player) - source object of message to send. Its
at_msg_send() hook will be called.
sessid - the session id of the session to send to. If not given, send
to all sessions connected to this player. This is usually only
relevant when using msg() directly from a player-command (from
a command on a Character, the character automatically stores
and handles the sessid).
kwargs (dict) - All other keywords are parsed as extra data.
"""
if "data" in kwargs:
# deprecation warning
logger.log_depmsg("PlayerDB:msg() 'data'-dict keyword is deprecated. Use **kwargs instead.")
data = kwargs.pop("data")
if isinstance(data, dict):
kwargs.update(data)
text = to_str(text, force_string=True) if text else ""
if from_obj:
# call hook
try:
_GA(from_obj, "at_msg_send")(text=text, to_obj=self, **kwargs)
except Exception:
pass
session = _MULTISESSION_MODE == 2 and sessid and _GA(self, "get_session")(sessid) or None
if session:
obj = session.puppet
if obj and not obj.at_msg_receive(text=text, **kwargs):
# if hook returns false, cancel send
return
session.msg(text=text, **kwargs)
else:
# if no session was specified, send to them all
for sess in _GA(self, 'get_all_sessions')():
sess.msg(text=text, **kwargs)
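# Hedged examples of calling msg() (the variables are hypothetical; sessid
# would normally come from the command or session that triggered the send):
#
#     player.msg("You have new mail.")              # goes to all sessions
#     player.msg("Welcome back!", sessid=sessid)    # one session only, in
#                                                   # MULTISESSION_MODE 2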
# session-related methods
def get_session(self, sessid):
"""
Return session with given sessid connected to this player.
"""
global _SESSIONS
if not _SESSIONS:
from src.server.sessionhandler import SESSIONS as _SESSIONS
return _SESSIONS.session_from_player(self, sessid)
def get_all_sessions(self):
"Return all sessions connected to this player"
global _SESSIONS
if not _SESSIONS:
from src.server.sessionhandler import SESSIONS as _SESSIONS
return _SESSIONS.sessions_from_player(self)
sessions = property(get_all_sessions) # alias shortcut
def disconnect_session_from_player(self, sessid):
"""
Access method for disconnecting a given session from the player
(connection happens automatically in the sessionhandler)
"""
# this should only be one value, loop just to make sure to
# clean everything
sessions = (session for session in self.get_all_sessions()
if session.sessid == sessid)
for session in sessions:
# this will also trigger unpuppeting
session.sessionhandler.disconnect(session)
# puppeting operations
def puppet_object(self, sessid, obj, normal_mode=True):
"""
Use the given session to control (puppet) the given object (usually
a Character type). Note that we make no puppet checks here; those must
have been done before calling this method.
sessid - session id of session to connect
obj - the object to connect to
normal_mode - trigger hooks and extra checks - this is turned off when
the server reloads, to quickly re-connect puppets.
returns True if successful, False otherwise
"""
session = self.get_session(sessid)
if not session:
return False
if normal_mode and session.puppet:
# cleanly unpuppet any previous object puppeted by this session
self.unpuppet_object(sessid)
if obj.player and obj.player.is_connected and obj.player != self:
# we don't allow to puppet an object already controlled by an active
# player. To kick a player, call unpuppet_object on them explicitly.
return
# if we get to this point the character is ready to puppet or it
# was left with a lingering player/sessid reference from an unclean
# server kill or similar
if normal_mode:
_GA(obj.typeclass, "at_pre_puppet")(self.typeclass, sessid=sessid)
# do the connection
obj.sessid = sessid
obj.player = self
session.puid = obj.id
session.puppet = obj
# validate/start persistent scripts on object
ScriptDB.objects.validate(obj=obj)
if normal_mode:
_GA(obj.typeclass, "at_post_puppet")()
return True
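# Sketch of the puppet/unpuppet flow as seen from a command (hypothetical
# variables; access/permission checks are assumed to already have been done
# by the caller, as noted in the docstring above):
#
#     if player.puppet_object(sessid, char):
#         player.msg("You become %s." % char.key, sessid=sessid)
#     ...
#     player.unpuppet_object(sessid)    # later, to go back to OOC mode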
def unpuppet_object(self, sessid):
"""
Disengage control over an object
sessid - the session id to disengage
returns True if successful
"""
session = self.get_session(sessid)
if not session:
return False
obj = hasattr(session, "puppet") and session.puppet or None
if not obj:
return False
# do the disconnect
_GA(obj.typeclass, "at_pre_unpuppet")()
del obj.dbobj.sessid
del obj.dbobj.player
session.puppet = None
session.puid = None
_GA(obj.typeclass, "at_post_unpuppet")(self.typeclass, sessid=sessid)
return True
def unpuppet_all(self):
"""
Disconnect all puppets. This is called by server
before a reset/shutdown.
"""
for session in self.get_all_sessions():
self.unpuppet_object(session.sessid)
def get_puppet(self, sessid, return_dbobj=False):
"""
Get an object puppeted by this session through this player. This is
the main method for retrieving the puppeted object from the
player's end.
sessid - return the character connected to this session id, if any.
return_dbobj - if True, return the raw database object rather than
its typeclass.
Returns the puppeted character, or None if none is connected.
"""
session = self.get_session(sessid)
if not session:
return None
if return_dbobj:
return session.puppet
return session.puppet and session.puppet.typeclass or None
def get_all_puppets(self, return_dbobj=False):
"""
Get all currently puppeted objects as a list
"""
puppets = [session.puppet for session in self.get_all_sessions()
if session.puppet]
if return_dbobj:
return puppets
return [puppet.typeclass for puppet in puppets]
def __get_single_puppet(self):
"""
This is a legacy convenience link for users of
MULTISESSION_MODE 0 or 1. It will return
only the first puppet. For mode 2, this returns
a list of all characters.
"""
puppets = self.get_all_puppets()
if _MULTISESSION_MODE in (0, 1):
return puppets and puppets[0] or None
return puppets
character = property(__get_single_puppet)
# utility methods
def delete(self, *args, **kwargs):
"""
Deletes the player permanently.
"""
for session in self.get_all_sessions():
# unpuppeting all objects and disconnecting the user, if any
# sessions remain (should usually be handled from the
# deleting command)
self.unpuppet_object(session.sessid)
session.sessionhandler.disconnect(session, reason=_("Player being deleted."))
super(PlayerDB, self).delete(*args, **kwargs)
def execute_cmd(self, raw_string, sessid=None):
"""
Do something as this player. This method is never called normally,
but only when the player object itself is supposed to execute the
command. It does not take nicks on eventual puppets into account.
raw_string - raw command input coming from the command line.
"""
# nick replacement - we require full-word matching.
raw_string = utils.to_unicode(raw_string)
raw_list = raw_string.split(None)
raw_list = [" ".join(raw_list[:i + 1]) for i in range(len(raw_list)) if raw_list[:i + 1]]
# get the nick replacement data directly from the database to be
# able to use db_category__in
nicks = self.db_attributes.filter(db_category__in=("nick_inputline", "nick_channel"))
for nick in nicks:
if nick.db_key in raw_list:
raw_string = raw_string.replace(nick.db_key, nick.db_strvalue, 1)
break
if not sessid and _MULTISESSION_MODE in (0, 1):
# in this case, we should either have only one sessid, or the sessid
# should not matter (since the return goes to all of them we can
# just use the first one as the source)
sessid = self.get_all_sessions()[0].sessid
return cmdhandler.cmdhandler(self.typeclass, raw_string,
callertype="player", sessid=sessid)
def search(self, ostring, return_puppet=False,
return_character=False, **kwargs):
"""
This is similar to the ObjectDB search method but will search for
Players only. Errors will be echoed, and None returned if no Player
is found.
return_character - will try to return the character the player controls
instead of the Player object itself. If no
Character exists (since Player is OOC), None will
be returned.
Extra keywords are ignored, but are allowed in the call in order to
make the API more consistent with objects.models.TypedObject.search.
"""
if return_character:
logger.log_depmsg("Player.search's 'return_character' keyword is deprecated. Use the return_puppet keyword instead.")
#return_puppet = return_character
# handle me, self
if ostring in (_ME, _SELF, '*' + _ME, '*' + _SELF):
return self
matches = _GA(self, "__class__").objects.player_search(ostring)
matches = _AT_SEARCH_RESULT(self, ostring, matches, global_search=True)
if matches and return_character:
try:
return _GA(matches, "character")
except Exception:
pass
return matches
from __future__ import unicode_literals
import os
import warnings
from unittest import skipUnless
from django.apps import AppConfig, apps
from django.apps.registry import Apps
from django.contrib.admin.models import LogEntry
from django.core.exceptions import AppRegistryNotReady, ImproperlyConfigured
from django.db import models
from django.test import SimpleTestCase, override_settings
from django.test.utils import extend_sys_path
from django.utils import six
from django.utils._os import upath
from .default_config_app.apps import CustomConfig
from .models import SoAlternative, TotallyNormal, new_apps
# Small list with a variety of cases for tests that iterate on installed apps.
# Intentionally not in alphabetical order to check if the order is preserved.
SOME_INSTALLED_APPS = [
'apps.apps.MyAdmin',
'apps.apps.MyAuth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
SOME_INSTALLED_APPS_NAMES = [
'django.contrib.admin',
'django.contrib.auth',
] + SOME_INSTALLED_APPS[2:]
HERE = os.path.dirname(upath(__file__))
class AppsTests(SimpleTestCase):
def test_singleton_master(self):
"""
Ensures that only one master registry can exist.
"""
with self.assertRaises(RuntimeError):
Apps(installed_apps=None)
def test_ready(self):
"""
Tests the ready property of the master registry.
"""
# The master app registry is always ready when the tests run.
self.assertTrue(apps.ready)
# Non-master app registries are populated in __init__.
self.assertTrue(Apps().ready)
def test_bad_app_config(self):
"""
Tests when INSTALLED_APPS contains an incorrect app config.
"""
with self.assertRaises(ImproperlyConfigured):
with self.settings(INSTALLED_APPS=['apps.apps.BadConfig']):
pass
def test_not_an_app_config(self):
"""
Tests when INSTALLED_APPS contains a class that isn't an app config.
"""
with self.assertRaises(ImproperlyConfigured):
with self.settings(INSTALLED_APPS=['apps.apps.NotAConfig']):
pass
def test_no_such_app(self):
"""
Tests when INSTALLED_APPS contains an app that doesn't exist, either
directly or via an app config.
"""
with self.assertRaises(ImportError):
with self.settings(INSTALLED_APPS=['there is no such app']):
pass
with self.assertRaises(ImportError):
with self.settings(INSTALLED_APPS=['apps.apps.NoSuchApp']):
pass
def test_no_such_app_config(self):
"""
Tests when INSTALLED_APPS contains an entry that doesn't exist.
"""
with self.assertRaises(ImportError):
with self.settings(INSTALLED_APPS=['apps.apps.NoSuchConfig']):
pass
def test_default_app_config(self):
with self.settings(INSTALLED_APPS=['apps.default_config_app']):
config = apps.get_app_config('default_config_app')
self.assertIsInstance(config, CustomConfig)
@override_settings(INSTALLED_APPS=SOME_INSTALLED_APPS)
def test_get_app_configs(self):
"""
Tests apps.get_app_configs().
"""
app_configs = apps.get_app_configs()
self.assertListEqual(
[app_config.name for app_config in app_configs],
SOME_INSTALLED_APPS_NAMES)
@override_settings(INSTALLED_APPS=SOME_INSTALLED_APPS)
def test_get_app_config(self):
"""
Tests apps.get_app_config().
"""
app_config = apps.get_app_config('admin')
self.assertEqual(app_config.name, 'django.contrib.admin')
app_config = apps.get_app_config('staticfiles')
self.assertEqual(app_config.name, 'django.contrib.staticfiles')
with self.assertRaises(LookupError):
apps.get_app_config('webdesign')
msg = "No installed app with label 'django.contrib.auth'. Did you mean 'myauth'"
with self.assertRaisesMessage(LookupError, msg):
apps.get_app_config('django.contrib.auth')
@override_settings(INSTALLED_APPS=SOME_INSTALLED_APPS)
def test_is_installed(self):
"""
Tests apps.is_installed().
"""
self.assertTrue(apps.is_installed('django.contrib.admin'))
self.assertTrue(apps.is_installed('django.contrib.auth'))
self.assertTrue(apps.is_installed('django.contrib.staticfiles'))
self.assertFalse(apps.is_installed('django.contrib.webdesign'))
@override_settings(INSTALLED_APPS=SOME_INSTALLED_APPS)
def test_get_model(self):
"""
Tests apps.get_model().
"""
self.assertEqual(apps.get_model('admin', 'LogEntry'), LogEntry)
with self.assertRaises(LookupError):
apps.get_model('admin', 'LogExit')
# App label is case-sensitive, Model name is case-insensitive.
self.assertEqual(apps.get_model('admin', 'loGentrY'), LogEntry)
with self.assertRaises(LookupError):
apps.get_model('Admin', 'LogEntry')
# A single argument is accepted.
self.assertEqual(apps.get_model('admin.LogEntry'), LogEntry)
with self.assertRaises(LookupError):
apps.get_model('admin.LogExit')
with self.assertRaises(ValueError):
apps.get_model('admin_LogEntry')
@override_settings(INSTALLED_APPS=['apps.apps.RelabeledAppsConfig'])
def test_relabeling(self):
self.assertEqual(apps.get_app_config('relabeled').name, 'apps')
def test_duplicate_labels(self):
with six.assertRaisesRegex(self, ImproperlyConfigured, "Application labels aren't unique"):
with self.settings(INSTALLED_APPS=['apps.apps.PlainAppsConfig', 'apps']):
pass
def test_duplicate_names(self):
with six.assertRaisesRegex(self, ImproperlyConfigured, "Application names aren't unique"):
with self.settings(INSTALLED_APPS=['apps.apps.RelabeledAppsConfig', 'apps']):
pass
def test_import_exception_is_not_masked(self):
"""
App discovery should preserve stack traces. Regression test for #22920.
"""
with six.assertRaisesRegex(self, ImportError, "Oops"):
with self.settings(INSTALLED_APPS=['import_error_package']):
pass
def test_models_py(self):
"""
Tests that the models in the models.py file were loaded correctly.
"""
self.assertEqual(apps.get_model("apps", "TotallyNormal"), TotallyNormal)
with self.assertRaises(LookupError):
apps.get_model("apps", "SoAlternative")
with self.assertRaises(LookupError):
new_apps.get_model("apps", "TotallyNormal")
self.assertEqual(new_apps.get_model("apps", "SoAlternative"), SoAlternative)
def test_dynamic_load(self):
"""
Makes a new model at runtime and ensures it goes into the right place.
"""
old_models = list(apps.get_app_config("apps").get_models())
# Construct a new model in a new app registry
body = {}
new_apps = Apps(["apps"])
meta_contents = {
'app_label': "apps",
'apps': new_apps,
}
meta = type(str("Meta"), tuple(), meta_contents)
body['Meta'] = meta
body['__module__'] = TotallyNormal.__module__
temp_model = type(str("SouthPonies"), (models.Model,), body)
# Make sure it appeared in the right place!
self.assertListEqual(list(apps.get_app_config("apps").get_models()), old_models)
with self.assertRaises(LookupError):
apps.get_model("apps", "SouthPonies")
self.assertEqual(new_apps.get_model("apps", "SouthPonies"), temp_model)
def test_model_clash(self):
"""
Test for behavior when two models clash in the app registry.
"""
new_apps = Apps(["apps"])
meta_contents = {
'app_label': "apps",
'apps': new_apps,
}
body = {}
body['Meta'] = type(str("Meta"), tuple(), meta_contents)
body['__module__'] = TotallyNormal.__module__
type(str("SouthPonies"), (models.Model,), body)
# When __name__ and __module__ match we assume the module
# was reloaded and issue a warning. This use-case is
# useful for REPL. Refs #23621.
body = {}
body['Meta'] = type(str("Meta"), tuple(), meta_contents)
body['__module__'] = TotallyNormal.__module__
with warnings.catch_warnings(record=True) as w:
type(str("SouthPonies"), (models.Model,), body)
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[-1].category, RuntimeWarning))
self.assertEqual(str(w[-1].message),
"Model 'apps.southponies' was already registered. "
"Reloading models is not advised as it can lead to inconsistencies, "
"most notably with related models.")
# If it doesn't appear to be a reloaded module then we expect
# a RuntimeError.
body = {}
body['Meta'] = type(str("Meta"), tuple(), meta_contents)
body['__module__'] = TotallyNormal.__module__ + '.whatever'
with six.assertRaisesRegex(self, RuntimeError,
"Conflicting 'southponies' models in application 'apps':.*"):
type(str("SouthPonies"), (models.Model,), body)
def test_get_containing_app_config_apps_not_ready(self):
"""
apps.get_containing_app_config() should raise an exception if
apps.apps_ready isn't True.
"""
apps.apps_ready = False
try:
with self.assertRaisesMessage(AppRegistryNotReady, "Apps aren't loaded yet"):
apps.get_containing_app_config('foo')
finally:
apps.apps_ready = True
def test_lazy_model_operation(self):
"""
Tests apps.lazy_model_operation().
"""
model_classes = []
initial_pending = set(apps._pending_operations)
def test_func(*models):
model_classes[:] = models
class LazyA(models.Model):
pass
# Test models appearing twice, and models appearing consecutively
model_keys = [('apps', model_name) for model_name in ['lazya', 'lazyb', 'lazyb', 'lazyc', 'lazya']]
apps.lazy_model_operation(test_func, *model_keys)
# LazyA shouldn't be waited on since it's already registered,
# and LazyC shouldn't be waited on until LazyB exists.
self.assertSetEqual(set(apps._pending_operations) - initial_pending, {('apps', 'lazyb')})
# Test that multiple operations can wait on the same model
apps.lazy_model_operation(test_func, ('apps', 'lazyb'))
class LazyB(models.Model):
pass
self.assertListEqual(model_classes, [LazyB])
# Now we are just waiting on LazyC.
self.assertSetEqual(set(apps._pending_operations) - initial_pending, {('apps', 'lazyc')})
class LazyC(models.Model):
pass
# Everything should be loaded - make sure the callback was executed properly.
self.assertListEqual(model_classes, [LazyA, LazyB, LazyB, LazyC, LazyA])
class Stub(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
class AppConfigTests(SimpleTestCase):
"""Unit tests for AppConfig class."""
def test_path_set_explicitly(self):
"""If subclass sets path as class attr, no module attributes needed."""
class MyAppConfig(AppConfig):
path = 'foo'
ac = MyAppConfig('label', Stub())
self.assertEqual(ac.path, 'foo')
def test_explicit_path_overrides(self):
"""If path set as class attr, overrides __path__ and __file__."""
class MyAppConfig(AppConfig):
path = 'foo'
ac = MyAppConfig('label', Stub(__path__=['a'], __file__='b/__init__.py'))
self.assertEqual(ac.path, 'foo')
def test_dunder_path(self):
"""If single element in __path__, use it (in preference to __file__)."""
ac = AppConfig('label', Stub(__path__=['a'], __file__='b/__init__.py'))
self.assertEqual(ac.path, 'a')
def test_no_dunder_path_fallback_to_dunder_file(self):
"""If there is no __path__ attr, use __file__."""
ac = AppConfig('label', Stub(__file__='b/__init__.py'))
self.assertEqual(ac.path, 'b')
def test_empty_dunder_path_fallback_to_dunder_file(self):
"""If the __path__ attr is empty, use __file__ if set."""
ac = AppConfig('label', Stub(__path__=[], __file__='b/__init__.py'))
self.assertEqual(ac.path, 'b')
def test_multiple_dunder_path_fallback_to_dunder_file(self):
"""If the __path__ attr is length>1, use __file__ if set."""
ac = AppConfig('label', Stub(__path__=['a', 'b'], __file__='c/__init__.py'))
self.assertEqual(ac.path, 'c')
def test_no_dunder_path_or_dunder_file(self):
"""If there is no __path__ or __file__, raise ImproperlyConfigured."""
with self.assertRaises(ImproperlyConfigured):
AppConfig('label', Stub())
def test_empty_dunder_path_no_dunder_file(self):
"""If the __path__ attr is empty and there is no __file__, raise."""
with self.assertRaises(ImproperlyConfigured):
AppConfig('label', Stub(__path__=[]))
def test_multiple_dunder_path_no_dunder_file(self):
"""If the __path__ attr is length>1 and there is no __file__, raise."""
with self.assertRaises(ImproperlyConfigured):
AppConfig('label', Stub(__path__=['a', 'b']))
def test_duplicate_dunder_path_no_dunder_file(self):
"""
If the __path__ attr contains duplicate paths and there is no
__file__, the duplicates should be deduplicated (#25246).
"""
ac = AppConfig('label', Stub(__path__=['a', 'a']))
self.assertEqual(ac.path, 'a')
@skipUnless(six.PY3, "Namespace packages sans __init__.py were added in Python 3.3")
class NamespacePackageAppTests(SimpleTestCase):
# We need nsapp to be top-level so our multiple-paths tests can add another
# location for it (if it's inside a normal package with an __init__.py that
# isn't possible). In order to avoid cluttering the already-full tests/ dir
# (which is on sys.path), we add these new entries to sys.path temporarily.
base_location = os.path.join(HERE, 'namespace_package_base')
other_location = os.path.join(HERE, 'namespace_package_other_base')
app_path = os.path.join(base_location, 'nsapp')
def test_single_path(self):
"""
A Py3.3+ namespace package can be an app if it has only one path.
"""
with extend_sys_path(self.base_location):
with self.settings(INSTALLED_APPS=['nsapp']):
app_config = apps.get_app_config('nsapp')
self.assertEqual(app_config.path, upath(self.app_path))
def test_multiple_paths(self):
"""
A Py3.3+ namespace package with multiple locations cannot be an app.
(Because then we wouldn't know where to load its templates, static
assets, etc from.)
"""
# Temporarily add two directories to sys.path that both contain
# components of the "nsapp" package.
with extend_sys_path(self.base_location, self.other_location):
with self.assertRaises(ImproperlyConfigured):
with self.settings(INSTALLED_APPS=['nsapp']):
pass
def test_multiple_paths_explicit_path(self):
"""
Multiple locations are ok only if app-config has explicit path.
"""
# Temporarily add two directories to sys.path that both contain
# components of the "nsapp" package.
with extend_sys_path(self.base_location, self.other_location):
with self.settings(INSTALLED_APPS=['nsapp.apps.NSAppConfig']):
app_config = apps.get_app_config('nsapp')
self.assertEqual(app_config.path, upath(self.app_path))
"""Defines the interface for executing a job"""
from __future__ import unicode_literals
import logging
import re
from jsonschema import validate
from jsonschema.exceptions import ValidationError
from job.configuration.interface import job_interface_1_1 as previous_interface
from job.configuration.interface.exceptions import InvalidInterfaceDefinition
from job.execution.configuration.exceptions import MissingSetting
logger = logging.getLogger(__name__)
SCHEMA_VERSION = '1.2'
JOB_INTERFACE_SCHEMA = {
'type': 'object',
'required': ['command', 'command_arguments'],
'additionalProperties': False,
'properties': {
'version': {
'description': 'version of the job_interface schema',
"default": SCHEMA_VERSION,
"type": "string"
},
'command': {
'description': 'The command that will be called. Uses variable replacement',
'type': 'string',
},
'command_arguments': {
'description': 'The arguments that are passed to the command',
'type': 'string',
},
'env_vars': {
'description': 'Environment variables that will be made available at runtime',
'type': 'array',
'items': {
'$ref': '#/definitions/env_var',
},
},
'settings': {
'description': 'Job settings that will be in command call',
'type': 'array',
'items': {
'$ref': '#/definitions/setting',
},
},
'input_data': {
'type': 'array',
'items': {
'$ref': '#/definitions/input_data_item',
},
},
'output_data': {
'type': 'array',
'items': {
'$ref': '#/definitions/output_data_item',
},
},
'shared_resources': {
'type': 'array',
'items': {
'$ref': '#/definitions/shared_resource',
},
},
},
'definitions': {
'env_var': {
'type': 'object',
'required': ['name', 'value'],
'additionalProperties': False,
'properties': {
'name': {
'type': 'string',
},
'value': {
'type': 'string',
},
},
},
'setting': {
'type': 'object',
'required': ['name'],
'additionalProperties': False,
'properties': {
'name': {
'type': 'string',
},
'required': {
'type': 'boolean',
},
},
},
'input_data_item': {
'type': 'object',
'required': ['name', 'type'],
'additionalProperties': False,
'properties': {
'name': {
'type': 'string',
'pattern': '^[a-zA-Z0-9\\-_ ]{1,255}$',
},
'type': {
'type': 'string',
'enum': ['file', 'files', 'property'],
},
'required': {
'type': 'boolean',
},
'partial': {
'description': 'file/files type only flag indicating input may be mounted vs downloaded',
'type': 'boolean'
},
'media_types': {
'type': 'array',
},
},
},
'output_data_item': {
'type': 'object',
'required': ['name', 'type'],
'additionalProperties': False,
'properties': {
'name': {
'type': 'string',
'pattern': '^[a-zA-Z0-9\\-_ ]{1,255}$',
},
'type': {
'type': 'string',
'enum': ['file', 'files'],
},
'required': {
'type': 'boolean',
},
'media_type': {
'type': 'string',
},
},
},
'shared_resource': {
'type': 'object',
'required': ['name', 'type'],
'additionalProperties': False,
'properties': {
'name': {
'type': 'string',
},
'type': {
'type': 'string',
},
'required': {
'type': 'boolean',
},
},
},
},
}
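# A minimal interface definition that would validate against
# JOB_INTERFACE_SCHEMA above (all names and values are illustrative only):
#
#     {
#         'version': '1.2',
#         'command': 'python',
#         'command_arguments': 'run.py ${input_file} ${output_dir}',
#         'env_vars': [{'name': 'DB_HOST', 'value': '${DB_HOST}'}],
#         'settings': [{'name': 'DB_HOST', 'required': True}],
#         'input_data': [{'name': 'input_file', 'type': 'file'}],
#         'output_data': [{'name': 'results', 'type': 'file'}],
#     }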
class JobInterface(previous_interface.JobInterface):
"""Represents the interface for executing a job"""
def __init__(self, definition):
"""Creates a job interface from the given definition. If the definition is invalid, a
:class:`job.configuration.interface.exceptions.InvalidInterfaceDefinition` exception will be thrown.
:param definition: The interface definition
:type definition: dict
"""
self.definition = definition
self._param_names = set()
# Tuples used for validation with other classes
self._property_validation_dict = {} # str->bool
self._input_file_validation_dict = {} # str->tuple
self._output_file_validation_list = []
self._output_file_manifest_dict = {} # str->bool
if 'version' not in self.definition:
self.definition['version'] = SCHEMA_VERSION
if self.definition['version'] != SCHEMA_VERSION:
self.convert_interface(definition)
try:
validate(definition, JOB_INTERFACE_SCHEMA)
except ValidationError as validation_error:
raise InvalidInterfaceDefinition(validation_error)
self._populate_default_values()
self._populate_settings_defaults()
self._populate_env_vars_defaults()
self._check_param_name_uniqueness()
self._check_setting_name_uniqueness()
self._check_env_var_uniqueness()
self._validate_command_arguments()
self._create_validation_dicts()
@staticmethod
def convert_interface(interface):
"""Convert the previous Job interface schema to the 1.2 schema
:param interface: The previous interface
:type interface: dict
:return: converted interface
:rtype: dict
"""
previous = previous_interface.JobInterface(interface)
converted = previous.get_dict()
converted['version'] = SCHEMA_VERSION
if 'env_vars' not in converted:
converted['env_vars'] = []
if 'settings' not in converted:
converted['settings'] = []
return converted
def _populate_settings_defaults(self):
"""populates the default values for any missing settings values"""
if 'settings' not in self.definition:
self.definition['settings'] = []
for setting in self.definition['settings']:
if 'required' not in setting:
setting['required'] = True
def _populate_env_vars_defaults(self):
"""populates the default values for any missing environment variable values"""
if 'env_vars' not in self.definition:
self.definition['env_vars'] = []
for env_var in self.definition['env_vars']:
if 'value' not in env_var:
env_var['value'] = ""
def populate_command_argument_settings(self, command_arguments, job_configuration):
"""Return the command arguments string,
populated with the settings from the job_configuration.
:param command_arguments: The command_arguments that you want to perform the replacement on
:type command_arguments: string
:param job_configuration: The job configuration
:type job_configuration: :class:`job.execution.configuration.json.exe_config.ExecutionConfiguration`
:return: command arguments with the settings populated
:rtype: str
"""
config_settings = job_configuration.get_dict()
interface_settings = self.definition['settings']
param_replacements = self._get_settings_values(interface_settings,
config_settings)
command_arguments = self._replace_command_parameters(command_arguments, param_replacements)
return command_arguments
def populate_env_vars_arguments(self, job_configuration):
"""Populates the environment variables with the requested values.
:param job_configuration: The job configuration
:type job_configuration: :class:`job.execution.configuration.json.exe_config.ExecutionConfiguration`
:return: env_vars populated with values
:rtype: dict
"""
env_vars = self.definition['env_vars']
config_settings = job_configuration.get_dict()
interface_settings = self.definition['settings']
param_replacements = self._get_settings_values(interface_settings,
config_settings)
env_vars = self._replace_env_var_parameters(env_vars, param_replacements)
return env_vars
def _get_settings_values(self, settings, config_settings):
"""
:param settings: The settings defined in the job interface
:type settings: list
:param config_settings: The job configuration, as a dict (from get_dict())
:type config_settings: dict
:return: setting names mapped to the values to replace them with
:rtype: dict
"""
param_replacements = {}
# Isolate the job_type settings and convert to list
config_settings = config_settings['job_task']['settings']
config_settings_dict = {setting['name']: setting['value'] for setting in config_settings}
for setting in settings:
setting_name = setting['name']
setting_required = setting['required']
if setting_name in config_settings_dict:
param_replacements[setting_name] = config_settings_dict[setting_name]
else:
param_replacements[setting_name] = ''
return param_replacements
def _replace_env_var_parameters(self, env_vars, param_replacements):
"""find all occurrences of a parameter with a given name in the environment
variable strings and replace them with the param values. If the parameter
replacement string in the variable uses a custom output ( ${-f :foo}).
The parameter will be replaced with the string preceding the colon and the
given param value will be appended.
:param env_vars: The environment variables that you want to perform replacement on
:type env_vars: list
:param param_replacements: The parameter you are searching for
:type param_replacements: dict
:return: The string with all replacements made
:rtype: str
"""
for env_var in env_vars:
ret_str = env_var['value']
for param_name, param_value in param_replacements.iteritems():
param_pattern = r'\$\{([^\}]*\:)?' + re.escape(param_name) + r'\}'
pattern_prog = re.compile(param_pattern)
match_obj = pattern_prog.search(ret_str)
if match_obj:
ret_str = param_value
break
if ret_str == env_var['value']:
env_var['value'] = ''
else:
env_var['value'] = ret_str
return env_vars
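# Illustration of the replacement above (hypothetical values): for an env var
# {'name': 'DB_HOST', 'value': '${DB_HOST}'} with param_replacements
# {'DB_HOST': 'db.example.com'}, the ${DB_HOST} pattern matches and the
# variable's value becomes 'db.example.com'. If no ${...} pattern in the
# value matches a known parameter, the value is cleared to ''.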
def _check_setting_name_uniqueness(self):
"""Ensures all the settings names are unique, and throws a
:class:`job.configuration.interface.exceptions.InvalidInterfaceDefinition` if they are not unique
"""
for setting in self.definition['settings']:
if setting['name'] in self._param_names:
raise InvalidInterfaceDefinition('Setting names must be unique')
self._param_names.add(setting['name'])
def _check_env_var_uniqueness(self):
"""Ensures all the enviornmental variable names are unique, and throws a
:class:`job.configuration.interface.exceptions.InvalidInterfaceDefinition` if they are not unique
"""
env_vars = [env_var['name'] for env_var in self.definition['env_vars']]
if len(env_vars) != len(set(env_vars)):
raise InvalidInterfaceDefinition('Environment variable names must be unique')
def validate_populated_settings(self, job_exe, job_configuration):
"""Ensures that all required settings are defined in the job_configuration
:param job_exe: The job execution model with related job and job_type fields
:type job_exe: :class:`job.models.JobExecution`
:param job_configuration: The job configuration
:type job_configuration: :class:`job.execution.configuration.json.exe_config.ExecutionConfiguration`
"""
interface_settings = self.definition['settings']
config_setting_names = [setting.name for setting in job_configuration.get_job_task_settings()]
for setting in interface_settings:
setting_name = setting['name']
setting_required = setting['required']
if setting_required:
if setting_name not in config_setting_names:
raise MissingSetting('Required setting %s was not provided' % setting_name)
import urwid
import blinker
import textwrap
import pprint
from typing import Optional, Sequence
from mitmproxy import exceptions
from mitmproxy import optmanager
from mitmproxy.tools.console import layoutwidget
from mitmproxy.tools.console import signals
from mitmproxy.tools.console import overlay
HELP_HEIGHT = 5
def can_edit_inplace(opt):
if opt.choices:
return False
if opt.typespec in [str, int, Optional[str], Optional[int]]:
return True
def fcol(s, width, attr):
s = str(s)
return (
"fixed",
width,
urwid.Text((attr, s))
)
option_focus_change = blinker.Signal()
class OptionItem(urwid.WidgetWrap):
def __init__(self, walker, opt, focused, namewidth, editing):
self.walker, self.opt, self.focused = walker, opt, focused
self.namewidth = namewidth
self.editing = editing
super().__init__(None)
self._w = self.get_widget()
def get_widget(self):
val = self.opt.current()
if self.opt.typespec == bool:
displayval = "true" if val else "false"
elif not val:
displayval = ""
elif self.opt.typespec == Sequence[str]:
displayval = pprint.pformat(val, indent=1)
else:
displayval = str(val)
changed = self.walker.master.options.has_changed(self.opt.name)
if self.focused:
valstyle = "option_active_selected" if changed else "option_selected"
else:
valstyle = "option_active" if changed else "text"
if self.editing:
valw = urwid.Edit(edit_text=displayval)
else:
valw = urwid.AttrMap(
urwid.Padding(
urwid.Text([(valstyle, displayval)])
),
valstyle
)
return urwid.Columns(
[
(
self.namewidth,
urwid.Text([("title", self.opt.name.ljust(self.namewidth))])
),
valw
],
dividechars=2,
focus_column=1
)
def get_edit_text(self):
return self._w[1].get_edit_text()
def selectable(self):
return True
def keypress(self, size, key):
if self.editing:
self._w[1].keypress(size, key)
return
return key
class OptionListWalker(urwid.ListWalker):
def __init__(self, master):
self.master = master
self.index = 0
self.focusobj = None
self.opts = sorted(master.options.keys())
self.maxlen = max(len(i) for i in self.opts)
self.editing = False
self.set_focus(0)
self.master.options.changed.connect(self.sig_mod)
def sig_mod(self, *args, **kwargs):
self.opts = sorted(self.master.options.keys())
self.maxlen = max(len(i) for i in self.opts)
self._modified()
self.set_focus(self.index)
def start_editing(self):
self.editing = True
self.focus_obj = self._get(self.index, True)
self._modified()
def stop_editing(self):
self.editing = False
self.focus_obj = self._get(self.index, False)
self.set_focus(self.index)
self._modified()
def get_edit_text(self):
return self.focus_obj.get_edit_text()
def _get(self, pos, editing):
name = self.opts[pos]
opt = self.master.options._options[name]
return OptionItem(
self, opt, pos == self.index, self.maxlen, editing
)
def get_focus(self):
return self.focus_obj, self.index
def set_focus(self, index):
self.editing = False
name = self.opts[index]
opt = self.master.options._options[name]
self.index = index
self.focus_obj = self._get(self.index, self.editing)
option_focus_change.send(opt.help)
def get_next(self, pos):
if pos >= len(self.opts) - 1:
return None, None
pos = pos + 1
return self._get(pos, False), pos
def get_prev(self, pos):
pos = pos - 1
if pos < 0:
return None, None
return self._get(pos, False), pos
class OptionsList(urwid.ListBox):
def __init__(self, master):
self.master = master
self.walker = OptionListWalker(master)
super().__init__(self.walker)
def save_config(self, path):
try:
optmanager.save(self.master.options, path)
except exceptions.OptionsError as e:
signals.status_message.send(message=str(e))
def keypress(self, size, key):
if self.walker.editing:
if key == "enter":
foc, idx = self.get_focus()
v = self.walker.get_edit_text()
try:
d = self.master.options.parse_setval(foc.opt.name, v)
self.master.options.update(**{foc.opt.name: d})
except exceptions.OptionsError as v:
signals.status_message.send(message=str(v))
self.walker.stop_editing()
return None
elif key == "esc":
self.walker.stop_editing()
return None
else:
if key == "m_start":
self.set_focus(0)
self.walker._modified()
elif key == "m_end":
self.set_focus(len(self.walker.opts) - 1)
self.walker._modified()
elif key == "m_select":
foc, idx = self.get_focus()
if foc.opt.typespec == bool:
self.master.options.toggler(foc.opt.name)()
# Bust the focus widget cache
self.set_focus(self.walker.index)
elif can_edit_inplace(foc.opt):
self.walker.start_editing()
self.walker._modified()
elif foc.opt.choices:
self.master.overlay(
overlay.Chooser(
self.master,
foc.opt.name,
foc.opt.choices,
foc.opt.current(),
self.master.options.setter(foc.opt.name)
)
)
elif foc.opt.typespec == Sequence[str]:
self.master.overlay(
overlay.OptionsOverlay(
self.master,
foc.opt.name,
foc.opt.current(),
HELP_HEIGHT + 5
),
valign="top"
)
else:
raise NotImplementedError()
return super().keypress(size, key)
class OptionHelp(urwid.Frame):
def __init__(self, master):
self.master = master
super().__init__(self.widget(""))
self.set_active(False)
option_focus_change.connect(self.sig_mod)
def set_active(self, val):
h = urwid.Text("Option Help")
style = "heading" if val else "heading_inactive"
self.header = urwid.AttrWrap(h, style)
def widget(self, txt):
cols, _ = self.master.ui.get_cols_rows()
return urwid.ListBox(
[urwid.Text(i) for i in textwrap.wrap(txt, cols)]
)
def sig_mod(self, txt):
self.set_body(self.widget(txt))
class Options(urwid.Pile, layoutwidget.LayoutWidget):
title = "Options"
keyctx = "options"
def __init__(self, master):
oh = OptionHelp(master)
self.optionslist = OptionsList(master)
super().__init__(
[
self.optionslist,
(HELP_HEIGHT, oh),
]
)
self.master = master
def current_name(self):
foc, idx = self.optionslist.get_focus()
return foc.opt.name
def keypress(self, size, key):
if key == "m_next":
self.focus_position = (
self.focus_position + 1
) % len(self.widget_list)
self.widget_list[1].set_active(self.focus_position == 1)
key = None
# This is essentially a copypasta from urwid.Pile's keypress handler.
# So much for "closed for modification, but open for extension".
item_rows = None
if len(size) == 2:
item_rows = self.get_item_rows(size, focus = True)
i = self.widget_list.index(self.focus_item)
tsize = self.get_item_size(size, i, True, item_rows)
return self.focus_item.keypress(tsize, key)
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class ApplicationOperations(object):
"""ApplicationOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: The API version to be used with the HTTP request. Constant value: "2017-09-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2017-09-01"
self.config = config
def create(
self, resource_group_name, account_name, application_id, allow_updates=None, display_name=None, custom_headers=None, raw=False, **operation_config):
"""Adds an application to the specified Batch account.
:param resource_group_name: The name of the resource group that
contains the Batch account.
:type resource_group_name: str
:param account_name: The name of the Batch account.
:type account_name: str
:param application_id: The ID of the application.
:type application_id: str
:param allow_updates: A value indicating whether packages within the
application may be overwritten using the same version string.
:type allow_updates: bool
:param display_name: The display name for the application.
:type display_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: Application or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.batch.models.Application or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
parameters = None
if allow_updates is not None or display_name is not None:
parameters = models.ApplicationCreateParameters(allow_updates=allow_updates, display_name=display_name)
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationId}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3, pattern=r'^[-\w\._]+$'),
'applicationId': self._serialize.url("application_id", application_id, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
if parameters is not None:
body_content = self._serialize.body(parameters, 'ApplicationCreateParameters')
else:
body_content = None
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 201:
deserialized = self._deserialize('Application', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def delete(
self, resource_group_name, account_name, application_id, custom_headers=None, raw=False, **operation_config):
"""Deletes an application.
:param resource_group_name: The name of the resource group that
contains the Batch account.
:type resource_group_name: str
:param account_name: The name of the Batch account.
:type account_name: str
:param application_id: The ID of the application.
:type application_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationId}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3, pattern=r'^[-\w\._]+$'),
'applicationId': self._serialize.url("application_id", application_id, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get(
self, resource_group_name, account_name, application_id, custom_headers=None, raw=False, **operation_config):
"""Gets information about the specified application.
:param resource_group_name: The name of the resource group that
contains the Batch account.
:type resource_group_name: str
:param account_name: The name of the Batch account.
:type account_name: str
:param application_id: The ID of the application.
:type application_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: Application or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.batch.models.Application or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationId}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3, pattern=r'^[-\w\._]+$'),
'applicationId': self._serialize.url("application_id", application_id, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Application', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def update(
self, resource_group_name, account_name, application_id, parameters, custom_headers=None, raw=False, **operation_config):
"""Updates settings for the specified application.
:param resource_group_name: The name of the resource group that
contains the Batch account.
:type resource_group_name: str
:param account_name: The name of the Batch account.
:type account_name: str
:param application_id: The ID of the application.
:type application_id: str
:param parameters: The parameters for the request.
:type parameters: ~azure.mgmt.batch.models.ApplicationUpdateParameters
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications/{applicationId}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3, pattern=r'^[-\w\._]+$'),
'applicationId': self._serialize.url("application_id", application_id, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'ApplicationUpdateParameters')
# Construct and send request
request = self._client.patch(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def list(
self, resource_group_name, account_name, maxresults=None, custom_headers=None, raw=False, **operation_config):
"""Lists all of the applications in the specified account.
:param resource_group_name: The name of the resource group that
contains the Batch account.
:type resource_group_name: str
:param account_name: The name of the Batch account.
:type account_name: str
:param maxresults: The maximum number of items to return in the
response.
:type maxresults: int
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of Application
:rtype:
~azure.mgmt.batch.models.ApplicationPaged[~azure.mgmt.batch.models.Application]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Batch/batchAccounts/{accountName}/applications'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3, pattern=r'^[-\w\._]+$'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if maxresults is not None:
query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.ApplicationPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.ApplicationPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
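# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the generated client). It assumes the
# operations class above is exposed on the Batch management client as an
# ``application`` operations group and that ``credentials`` and
# ``subscription_id`` are obtained elsewhere; names and model fields may
# differ between SDK versions.
#
#     from azure.mgmt.batch import BatchManagementClient
#     from azure.mgmt.batch.models import ApplicationUpdateParameters
#
#     client = BatchManagementClient(credentials, subscription_id)
#     client.application.update(
#         'my-resource-group', 'mybatchaccount', 'my-app',
#         ApplicationUpdateParameters(allow_updates=True))
#     for app in client.application.list('my-resource-group', 'mybatchaccount'):
#         print(app.id)
# ---------------------------------------------------------------------------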
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core.client_options import ClientOptions
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.cloud.privatecatalog_v1beta1.services.private_catalog import pagers
from google.cloud.privatecatalog_v1beta1.types import private_catalog
from .transports.base import PrivateCatalogTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import PrivateCatalogGrpcAsyncIOTransport
from .client import PrivateCatalogClient
class PrivateCatalogAsyncClient:
"""``PrivateCatalog`` allows catalog consumers to retrieve ``Catalog``,
``Product`` and ``Version`` resources under a target resource
context.
``Catalog`` is computed based on the [Association][]s linked to the
target resource and its ancestors. Each association's
[google.cloud.privatecatalogproducer.v1beta.Catalog][] is
transformed into a ``Catalog``. If multiple associations have the
same parent [google.cloud.privatecatalogproducer.v1beta.Catalog][],
they are de-duplicated into one ``Catalog``. Users must have
``cloudprivatecatalog.catalogTargets.get`` IAM permission on the
resource context in order to access catalogs. ``Catalog`` contains
the resource name and a subset of data of the original
[google.cloud.privatecatalogproducer.v1beta.Catalog][].
    ``Product`` is a child resource of the catalog. A ``Product`` contains
the resource name and a subset of the data of the original
[google.cloud.privatecatalogproducer.v1beta.Product][].
    ``Version`` is a child resource of the product. A ``Version`` contains
the resource name and a subset of the data of the original
[google.cloud.privatecatalogproducer.v1beta.Version][].
"""
_client: PrivateCatalogClient
DEFAULT_ENDPOINT = PrivateCatalogClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = PrivateCatalogClient.DEFAULT_MTLS_ENDPOINT
catalog_path = staticmethod(PrivateCatalogClient.catalog_path)
parse_catalog_path = staticmethod(PrivateCatalogClient.parse_catalog_path)
product_path = staticmethod(PrivateCatalogClient.product_path)
parse_product_path = staticmethod(PrivateCatalogClient.parse_product_path)
version_path = staticmethod(PrivateCatalogClient.version_path)
parse_version_path = staticmethod(PrivateCatalogClient.parse_version_path)
common_billing_account_path = staticmethod(
PrivateCatalogClient.common_billing_account_path
)
parse_common_billing_account_path = staticmethod(
PrivateCatalogClient.parse_common_billing_account_path
)
common_folder_path = staticmethod(PrivateCatalogClient.common_folder_path)
parse_common_folder_path = staticmethod(
PrivateCatalogClient.parse_common_folder_path
)
common_organization_path = staticmethod(
PrivateCatalogClient.common_organization_path
)
parse_common_organization_path = staticmethod(
PrivateCatalogClient.parse_common_organization_path
)
common_project_path = staticmethod(PrivateCatalogClient.common_project_path)
parse_common_project_path = staticmethod(
PrivateCatalogClient.parse_common_project_path
)
common_location_path = staticmethod(PrivateCatalogClient.common_location_path)
parse_common_location_path = staticmethod(
PrivateCatalogClient.parse_common_location_path
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
PrivateCatalogAsyncClient: The constructed client.
"""
return PrivateCatalogClient.from_service_account_info.__func__(PrivateCatalogAsyncClient, info, *args, **kwargs) # type: ignore
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
PrivateCatalogAsyncClient: The constructed client.
"""
return PrivateCatalogClient.from_service_account_file.__func__(PrivateCatalogAsyncClient, filename, *args, **kwargs) # type: ignore
from_service_account_json = from_service_account_file
@classmethod
def get_mtls_endpoint_and_cert_source(
cls, client_options: Optional[ClientOptions] = None
):
"""Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
client cert source is None.
(2) if `client_options.client_cert_source` is provided, use the provided one; if the
default client cert source exists, use the default one; otherwise the client cert
source is None.
The API endpoint is determined in the following order:
        (1) if `client_options.api_endpoint` is provided, use the provided one.
        (2) if the `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
        default mTLS endpoint; if the environment variable is "never", use the default API
endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
use the default API endpoint.
More details can be found at https://google.aip.dev/auth/4114.
Args:
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. Only the `api_endpoint` and `client_cert_source` properties may be used
in this method.
Returns:
Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
client cert source to use.
Raises:
google.auth.exceptions.MutualTLSChannelError: If any errors happen.
"""
return PrivateCatalogClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore
@property
def transport(self) -> PrivateCatalogTransport:
"""Returns the transport used by the client instance.
Returns:
PrivateCatalogTransport: The transport used by the client instance.
"""
return self._client.transport
get_transport_class = functools.partial(
type(PrivateCatalogClient).get_transport_class, type(PrivateCatalogClient)
)
def __init__(
self,
*,
credentials: ga_credentials.Credentials = None,
transport: Union[str, PrivateCatalogTransport] = "grpc_asyncio",
client_options: ClientOptions = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the private catalog client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.PrivateCatalogTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._client = PrivateCatalogClient(
credentials=credentials,
transport=transport,
client_options=client_options,
client_info=client_info,
)
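    # Illustrative sketch: constructing the client with a custom endpoint via
    # ``client_options`` (the endpoint value below is a placeholder, not a
    # real service endpoint).
    #
    #     options = ClientOptions(api_endpoint="private-catalog.example.googleapis.com")
    #     client = PrivateCatalogAsyncClient(client_options=options)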
async def search_catalogs(
self,
request: Union[private_catalog.SearchCatalogsRequest, dict] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.SearchCatalogsAsyncPager:
r"""Search [Catalog][google.cloud.privatecatalog.v1beta1.Catalog]
resources that consumers have access to, within the scope of the
consumer cloud resource hierarchy context.
.. code-block:: python
from google.cloud import privatecatalog_v1beta1
def sample_search_catalogs():
# Create a client
client = privatecatalog_v1beta1.PrivateCatalogClient()
# Initialize request argument(s)
request = privatecatalog_v1beta1.SearchCatalogsRequest(
resource="resource_value",
)
# Make the request
page_result = client.search_catalogs(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.cloud.privatecatalog_v1beta1.types.SearchCatalogsRequest, dict]):
The request object. Request message for
[PrivateCatalog.SearchCatalogs][google.cloud.privatecatalog.v1beta1.PrivateCatalog.SearchCatalogs].
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.privatecatalog_v1beta1.services.private_catalog.pagers.SearchCatalogsAsyncPager:
Response message for
[PrivateCatalog.SearchCatalogs][google.cloud.privatecatalog.v1beta1.PrivateCatalog.SearchCatalogs].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
request = private_catalog.SearchCatalogsRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.search_catalogs,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.SearchCatalogsAsyncPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
async def search_products(
self,
request: Union[private_catalog.SearchProductsRequest, dict] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.SearchProductsAsyncPager:
r"""Search [Product][google.cloud.privatecatalog.v1beta1.Product]
resources that consumers have access to, within the scope of the
consumer cloud resource hierarchy context.
.. code-block:: python
from google.cloud import privatecatalog_v1beta1
def sample_search_products():
# Create a client
client = privatecatalog_v1beta1.PrivateCatalogClient()
# Initialize request argument(s)
request = privatecatalog_v1beta1.SearchProductsRequest(
resource="resource_value",
)
# Make the request
page_result = client.search_products(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.cloud.privatecatalog_v1beta1.types.SearchProductsRequest, dict]):
The request object. Request message for
[PrivateCatalog.SearchProducts][google.cloud.privatecatalog.v1beta1.PrivateCatalog.SearchProducts].
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.privatecatalog_v1beta1.services.private_catalog.pagers.SearchProductsAsyncPager:
Response message for
[PrivateCatalog.SearchProducts][google.cloud.privatecatalog.v1beta1.PrivateCatalog.SearchProducts].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
request = private_catalog.SearchProductsRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.search_products,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.SearchProductsAsyncPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
async def search_versions(
self,
request: Union[private_catalog.SearchVersionsRequest, dict] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.SearchVersionsAsyncPager:
r"""Search [Version][google.cloud.privatecatalog.v1beta1.Version]
resources that consumers have access to, within the scope of the
consumer cloud resource hierarchy context.
.. code-block:: python
from google.cloud import privatecatalog_v1beta1
def sample_search_versions():
# Create a client
client = privatecatalog_v1beta1.PrivateCatalogClient()
# Initialize request argument(s)
request = privatecatalog_v1beta1.SearchVersionsRequest(
resource="resource_value",
query="query_value",
)
# Make the request
page_result = client.search_versions(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.cloud.privatecatalog_v1beta1.types.SearchVersionsRequest, dict]):
The request object. Request message for
[PrivateCatalog.SearchVersions][google.cloud.privatecatalog.v1beta1.PrivateCatalog.SearchVersions].
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.privatecatalog_v1beta1.services.private_catalog.pagers.SearchVersionsAsyncPager:
Response message for
[PrivateCatalog.SearchVersions][google.cloud.privatecatalog.v1beta1.PrivateCatalog.SearchVersions].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
request = private_catalog.SearchVersionsRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.search_versions,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.SearchVersionsAsyncPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc, tb):
await self.transport.close()
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-private-catalog",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("PrivateCatalogAsyncClient",)
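# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the generated module): searching
# catalogs with the async client under an asyncio event loop. The ``resource``
# value is a placeholder.
#
#     import asyncio
#     from google.cloud import privatecatalog_v1beta1
#
#     async def main():
#         client = privatecatalog_v1beta1.PrivateCatalogAsyncClient()
#         request = privatecatalog_v1beta1.SearchCatalogsRequest(
#             resource="projects/my-project",
#         )
#         pager = await client.search_catalogs(request=request)
#         async for catalog in pager:
#             print(catalog.name)
#
#     asyncio.run(main())
# ---------------------------------------------------------------------------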
|
|
#!/usr/bin/env python
#
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Assorted utilities shared between parts of apitools."""
import os
import random
import six
from six.moves import http_client
import six.moves.urllib.error as urllib_error
import six.moves.urllib.parse as urllib_parse
import six.moves.urllib.request as urllib_request
from apitools.base.protorpclite import messages
from apitools.base.py import encoding_helper as encoding
from apitools.base.py import exceptions
if six.PY3:
from collections.abc import Iterable
else:
from collections import Iterable
__all__ = [
'DetectGae',
'DetectGce',
]
_RESERVED_URI_CHARS = r":/?#[]@!$&'()*+,;="
def DetectGae():
"""Determine whether or not we're running on GAE.
This is based on:
https://developers.google.com/appengine/docs/python/#The_Environment
Returns:
True iff we're running on GAE.
"""
server_software = os.environ.get('SERVER_SOFTWARE', '')
return (server_software.startswith('Development/') or
server_software.startswith('Google App Engine/'))
def DetectGce():
"""Determine whether or not we're running on GCE.
This is based on:
https://cloud.google.com/compute/docs/metadata#runninggce
Returns:
True iff we're running on a GCE instance.
"""
metadata_url = 'http://{}'.format(
os.environ.get('GCE_METADATA_ROOT', 'metadata.google.internal'))
try:
o = urllib_request.build_opener(urllib_request.ProxyHandler({})).open(
urllib_request.Request(
metadata_url, headers={'Metadata-Flavor': 'Google'}))
except urllib_error.URLError:
return False
return (o.getcode() == http_client.OK and
o.headers.get('metadata-flavor') == 'Google')
def NormalizeScopes(scope_spec):
"""Normalize scope_spec to a set of strings."""
if isinstance(scope_spec, six.string_types):
scope_spec = six.ensure_str(scope_spec)
return set(scope_spec.split(' '))
elif isinstance(scope_spec, Iterable):
scope_spec = [six.ensure_str(x) for x in scope_spec]
return set(scope_spec)
raise exceptions.TypecheckError(
'NormalizeScopes expected string or iterable, found %s' % (
type(scope_spec),))
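# Example (illustrative): NormalizeScopes('email profile') and
# NormalizeScopes(['email', 'profile']) both return {'email', 'profile'}.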
def Typecheck(arg, arg_type, msg=None):
if not isinstance(arg, arg_type):
if msg is None:
if isinstance(arg_type, tuple):
msg = 'Type of arg is "%s", not one of %r' % (
type(arg), arg_type)
else:
msg = 'Type of arg is "%s", not "%s"' % (type(arg), arg_type)
raise exceptions.TypecheckError(msg)
return arg
def ExpandRelativePath(method_config, params, relative_path=None):
"""Determine the relative path for request."""
path = relative_path or method_config.relative_path or ''
for param in method_config.path_params:
param_template = '{%s}' % param
# For more details about "reserved word expansion", see:
# http://tools.ietf.org/html/rfc6570#section-3.2.2
reserved_chars = ''
reserved_template = '{+%s}' % param
if reserved_template in path:
reserved_chars = _RESERVED_URI_CHARS
path = path.replace(reserved_template, param_template)
if param_template not in path:
raise exceptions.InvalidUserInputError(
'Missing path parameter %s' % param)
try:
# TODO(craigcitro): Do we want to support some sophisticated
# mapping here?
value = params[param]
except KeyError:
raise exceptions.InvalidUserInputError(
'Request missing required parameter %s' % param)
if value is None:
raise exceptions.InvalidUserInputError(
'Request missing required parameter %s' % param)
try:
if not isinstance(value, six.string_types):
value = str(value)
path = path.replace(param_template,
urllib_parse.quote(value.encode('utf_8'),
reserved_chars))
except TypeError as e:
raise exceptions.InvalidUserInputError(
'Error setting required parameter %s to value %s: %s' % (
param, value, e))
return path
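# Example (illustrative, with a hypothetical path template): for
# relative_path 'b/{bucket}/o/{+object}' and params
# {'bucket': 'my-bucket', 'object': 'a/b.txt'}, the '{+object}' template uses
# reserved expansion, so the result is 'b/my-bucket/o/a/b.txt' ('/' is left
# unescaped); a plain '{object}' template would yield 'b/my-bucket/o/a%2Fb.txt'.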
def CalculateWaitForRetry(retry_attempt, max_wait=60):
"""Calculates amount of time to wait before a retry attempt.
Wait time grows exponentially with the number of attempts. A
random amount of jitter is added to spread out retry attempts from
different clients.
Args:
retry_attempt: Retry attempt counter.
max_wait: Upper bound for wait time [seconds].
Returns:
Number of seconds to wait before retrying request.
"""
wait_time = 2 ** retry_attempt
max_jitter = wait_time / 4.0
wait_time += random.uniform(-max_jitter, max_jitter)
return max(1, min(wait_time, max_wait))
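# Worked example (illustrative): for retry_attempt=3 the base wait is
# 2 ** 3 = 8 seconds and the jitter is drawn uniformly from [-2, 2], so the
# returned value falls in [6, 10]; the result is always clamped to
# [1, max_wait].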
def AcceptableMimeType(accept_patterns, mime_type):
"""Return True iff mime_type is acceptable for one of accept_patterns.
Note that this function assumes that all patterns in accept_patterns
will be simple types of the form "type/subtype", where one or both
of these can be "*". We do not support parameters (i.e. "; q=") in
patterns.
Args:
accept_patterns: list of acceptable MIME types.
mime_type: the mime type we would like to match.
Returns:
Whether or not mime_type matches (at least) one of these patterns.
"""
if '/' not in mime_type:
raise exceptions.InvalidUserInputError(
'Invalid MIME type: "%s"' % mime_type)
unsupported_patterns = [p for p in accept_patterns if ';' in p]
if unsupported_patterns:
raise exceptions.GeneratedClientError(
'MIME patterns with parameter unsupported: "%s"' % ', '.join(
unsupported_patterns))
def MimeTypeMatches(pattern, mime_type):
"""Return True iff mime_type is acceptable for pattern."""
# Some systems use a single '*' instead of '*/*'.
if pattern == '*':
pattern = '*/*'
return all(accept in ('*', provided) for accept, provided
in zip(pattern.split('/'), mime_type.split('/')))
return any(MimeTypeMatches(pattern, mime_type)
for pattern in accept_patterns)
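# Examples (illustrative): AcceptableMimeType(['image/*'], 'image/png') is
# True, AcceptableMimeType(['image/*'], 'text/plain') is False, and a bare
# '*' pattern is treated the same as '*/*'.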
def MapParamNames(params, request_type):
"""Reverse parameter remappings for URL construction."""
return [encoding.GetCustomJsonFieldMapping(request_type, json_name=p) or p
for p in params]
def MapRequestParams(params, request_type):
"""Perform any renames/remappings needed for URL construction.
Currently, we have several ways to customize JSON encoding, in
particular of field names and enums. This works fine for JSON
bodies, but also needs to be applied for path and query parameters
in the URL.
This function takes a dictionary from param names to values, and
performs any registered mappings. We also need the request type (to
look up the mappings).
Args:
params: (dict) Map from param names to values
request_type: (protorpc.messages.Message) request type for this API call
Returns:
A new dict of the same size, with all registered mappings applied.
"""
new_params = dict(params)
for param_name, value in params.items():
field_remapping = encoding.GetCustomJsonFieldMapping(
request_type, python_name=param_name)
if field_remapping is not None:
new_params[field_remapping] = new_params.pop(param_name)
param_name = field_remapping
if isinstance(value, messages.Enum):
new_params[param_name] = encoding.GetCustomJsonEnumMapping(
type(value), python_name=str(value)) or str(value)
return new_params
|
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mox
from oslo_concurrency import processutils
from oslo_log import log as logging
from cinder.brick import exception
from cinder.brick.local_dev import lvm as brick
from cinder import test
from cinder.volume import configuration as conf
LOG = logging.getLogger(__name__)
def create_configuration():
configuration = mox.MockObject(conf.Configuration)
configuration.append_config_values(mox.IgnoreArg())
return configuration
class BrickLvmTestCase(test.TestCase):
def setUp(self):
self._mox = mox.Mox()
self.configuration = mox.MockObject(conf.Configuration)
self.configuration.volume_group_name = 'fake-vg'
super(BrickLvmTestCase, self).setUp()
# Stub processutils.execute for static methods
self.stubs.Set(processutils, 'execute',
self.fake_execute)
self.vg = brick.LVM(self.configuration.volume_group_name,
'sudo',
False, None,
'default',
self.fake_execute)
def failed_fake_execute(obj, *cmd, **kwargs):
return ("\n", "fake-error")
def fake_pretend_lvm_version(obj, *cmd, **kwargs):
return (" LVM version: 2.03.00 (2012-03-06)\n", "")
def fake_old_lvm_version(obj, *cmd, **kwargs):
# Does not support thin prov or snap activation
return (" LVM version: 2.02.65(2) (2012-03-06)\n", "")
def fake_customised_lvm_version(obj, *cmd, **kwargs):
return (" LVM version: 2.02.100(2)-RHEL6 (2013-09-12)\n", "")
def fake_execute(obj, *cmd, **kwargs):
cmd_string = ', '.join(cmd)
data = "\n"
if ('env, LC_ALL=C, vgs, --noheadings, --unit=g, -o, name' ==
cmd_string):
data = " fake-vg\n"
data += " some-other-vg\n"
elif ('env, LC_ALL=C, vgs, --noheadings, -o, name, fake-vg' ==
cmd_string):
data = " fake-vg\n"
elif 'env, LC_ALL=C, vgs, --version' in cmd_string:
data = " LVM version: 2.02.95(2) (2012-03-06)\n"
elif ('env, LC_ALL=C, vgs, --noheadings, -o, uuid, fake-vg' in
cmd_string):
data = " kVxztV-dKpG-Rz7E-xtKY-jeju-QsYU-SLG6Z1\n"
elif 'env, LC_ALL=C, vgs, --noheadings, --unit=g, ' \
'-o, name,size,free,lv_count,uuid, ' \
'--separator, :, --nosuffix' in cmd_string:
data = (" test-prov-cap-vg-unit:10.00:10.00:0:"
"mXzbuX-dKpG-Rz7E-xtKY-jeju-QsYU-SLG8Z4\n")
if 'test-prov-cap-vg-unit' in cmd_string:
return (data, "")
data = (" test-prov-cap-vg-no-unit:10.00:10.00:0:"
"mXzbuX-dKpG-Rz7E-xtKY-jeju-QsYU-SLG8Z4\n")
if 'test-prov-cap-vg-no-unit' in cmd_string:
return (data, "")
data = " fake-vg:10.00:10.00:0:"\
"kVxztV-dKpG-Rz7E-xtKY-jeju-QsYU-SLG6Z1\n"
if 'fake-vg' in cmd_string:
return (data, "")
data += " fake-vg-2:10.00:10.00:0:"\
"lWyauW-dKpG-Rz7E-xtKY-jeju-QsYU-SLG7Z2\n"
data += " fake-vg-3:10.00:10.00:0:"\
"mXzbuX-dKpG-Rz7E-xtKY-jeju-QsYU-SLG8Z3\n"
elif ('env, LC_ALL=C, lvs, --noheadings, '
'--unit=g, -o, vg_name,name,size, --nosuffix, '
'fake-vg/lv-nothere' in cmd_string):
raise processutils.ProcessExecutionError(
stderr="One or more specified logical volume(s) not found.")
elif ('env, LC_ALL=C, lvs, --noheadings, '
'--unit=g, -o, vg_name,name,size, --nosuffix, '
'fake-vg/lv-newerror' in cmd_string):
raise processutils.ProcessExecutionError(
stderr="Failed to find logical volume \"fake-vg/lv-newerror\"")
elif ('env, LC_ALL=C, lvs, --noheadings, '
'--unit=g, -o, vg_name,name,size' in cmd_string):
if 'fake-unknown' in cmd_string:
raise processutils.ProcessExecutionError(
stderr="One or more volume(s) not found."
)
if 'test-prov-cap-vg-unit' in cmd_string:
data = " fake-vg test-prov-cap-pool-unit 9.50g\n"
data += " fake-vg fake-volume-1 1.00g\n"
data += " fake-vg fake-volume-2 2.00g\n"
elif 'test-prov-cap-vg-no-unit' in cmd_string:
data = " fake-vg test-prov-cap-pool-no-unit 9.50\n"
data += " fake-vg fake-volume-1 1.00\n"
data += " fake-vg fake-volume-2 2.00\n"
elif 'test-found-lv-name' in cmd_string:
data = " fake-vg test-found-lv-name 9.50\n"
else:
data = " fake-vg fake-1 1.00g\n"
data += " fake-vg fake-2 1.00g\n"
elif ('env, LC_ALL=C, lvdisplay, --noheading, -C, -o, Attr' in
cmd_string):
if 'test-volumes' in cmd_string:
data = ' wi-a-'
else:
data = ' owi-a-'
elif 'env, LC_ALL=C, pvs, --noheadings' in cmd_string:
data = " fake-vg|/dev/sda|10.00|1.00\n"
data += " fake-vg|/dev/sdb|10.00|1.00\n"
data += " fake-vg|/dev/sdc|10.00|8.99\n"
data += " fake-vg-2|/dev/sdd|10.00|9.99\n"
elif 'env, LC_ALL=C, lvs, --noheadings, --unit=g' \
', -o, size,data_percent, --separator, :' in cmd_string:
if 'test-prov-cap-pool' in cmd_string:
data = " 9.5:20\n"
else:
data = " 9:12\n"
elif 'lvcreate, -T, -L, ' in cmd_string:
pass
elif 'lvcreate, -T, -V, ' in cmd_string:
pass
elif 'lvcreate, --name, ' in cmd_string:
pass
else:
raise AssertionError('unexpected command called: %s' % cmd_string)
return (data, "")
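    # NOTE: fake_execute dispatches on the comma-joined command string above,
    # so any new LVM invocation exercised by these tests needs a matching
    # branch, or the trailing AssertionError('unexpected command called')
    # will be raised.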
def test_create_lv_snapshot(self):
self.assertEqual(self.vg.create_lv_snapshot('snapshot-1', 'fake-1'),
None)
self._mox.StubOutWithMock(self.vg, 'get_volume')
self.vg.get_volume('fake-non-existent').AndReturn(None)
self._mox.ReplayAll()
try:
self.vg.create_lv_snapshot('snapshot-1', 'fake-non-existent')
except exception.VolumeDeviceNotFound as e:
self.assertEqual(e.kwargs['device'], 'fake-non-existent')
else:
self.fail("Exception not raised")
def test_vg_exists(self):
self.assertEqual(self.vg._vg_exists(), True)
def test_get_vg_uuid(self):
self.assertEqual(self.vg._get_vg_uuid()[0],
'kVxztV-dKpG-Rz7E-xtKY-jeju-QsYU-SLG6Z1')
def test_get_all_volumes(self):
out = self.vg.get_volumes()
self.assertEqual(out[0]['name'], 'fake-1')
self.assertEqual(out[0]['size'], '1.00g')
self.assertEqual(out[0]['vg'], 'fake-vg')
def test_get_volume(self):
self.assertEqual(self.vg.get_volume('fake-1')['name'], 'fake-1')
def test_get_volume_none(self):
self.assertEqual(self.vg.get_volume('fake-unknown'), None)
def test_get_lv_info_notfound(self):
# lv-nothere will raise lvm < 2.102.112 exception
self.assertEqual(
[],
self.vg.get_lv_info(
'sudo', vg_name='fake-vg', lv_name='lv-nothere')
)
# lv-newerror will raise lvm > 2.102.112 exception
self.assertEqual(
[],
self.vg.get_lv_info(
'sudo', vg_name='fake-vg', lv_name='lv-newerror')
)
def test_get_lv_info_found(self):
lv_info = [{'size': '9.50', 'name': 'test-found-lv-name',
'vg': 'fake-vg'}]
self.assertEqual(
lv_info,
self.vg.get_lv_info(
'sudo', vg_name='fake-vg',
lv_name='test-found-lv-name')
)
def test_get_lv_info_no_lv_name(self):
lv_info = [{'name': 'fake-1', 'size': '1.00g', 'vg': 'fake-vg'},
{'name': 'fake-2', 'size': '1.00g', 'vg': 'fake-vg'}]
self.assertEqual(
lv_info,
self.vg.get_lv_info(
'sudo', vg_name='fake-vg')
)
def test_get_all_physical_volumes(self):
# Filtered VG version
pvs = self.vg.get_all_physical_volumes('sudo', 'fake-vg')
self.assertEqual(len(pvs), 3)
# Non-Filtered, all VG's
pvs = self.vg.get_all_physical_volumes('sudo')
self.assertEqual(len(pvs), 4)
def test_get_physical_volumes(self):
pvs = self.vg.get_physical_volumes()
self.assertEqual(len(pvs), 3)
def test_get_volume_groups(self):
self.assertEqual(len(self.vg.get_all_volume_groups('sudo')), 3)
self.assertEqual(len(self.vg.get_all_volume_groups('sudo',
'fake-vg')), 1)
def test_thin_support(self):
# lvm.supports_thin() is a static method and doesn't
# use the self._executor fake we pass in on init
# so we need to stub processutils.execute appropriately
self.stubs.Set(processutils, 'execute', self.fake_execute)
self.assertTrue(self.vg.supports_thin_provisioning('sudo'))
self.stubs.Set(processutils, 'execute', self.fake_pretend_lvm_version)
self.assertTrue(self.vg.supports_thin_provisioning('sudo'))
self.stubs.Set(processutils, 'execute', self.fake_old_lvm_version)
self.assertFalse(self.vg.supports_thin_provisioning('sudo'))
self.stubs.Set(processutils,
'execute',
self.fake_customised_lvm_version)
self.assertTrue(self.vg.supports_thin_provisioning('sudo'))
def test_snapshot_lv_activate_support(self):
self.vg._supports_snapshot_lv_activation = None
self.stubs.Set(processutils, 'execute', self.fake_execute)
self.assertTrue(self.vg.supports_snapshot_lv_activation)
self.vg._supports_snapshot_lv_activation = None
self.stubs.Set(processutils, 'execute', self.fake_old_lvm_version)
self.assertFalse(self.vg.supports_snapshot_lv_activation)
self.vg._supports_snapshot_lv_activation = None
def test_lvchange_ignskipact_support_yes(self):
"""Tests if lvchange -K is available via a lvm2 version check."""
self.vg._supports_lvchange_ignoreskipactivation = None
self.stubs.Set(processutils, 'execute', self.fake_pretend_lvm_version)
self.assertTrue(self.vg.supports_lvchange_ignoreskipactivation)
self.vg._supports_lvchange_ignoreskipactivation = None
self.stubs.Set(processutils, 'execute', self.fake_old_lvm_version)
self.assertFalse(self.vg.supports_lvchange_ignoreskipactivation)
self.vg._supports_lvchange_ignoreskipactivation = None
def test_thin_pool_creation(self):
# The size of fake-vg volume group is 10g, so the calculated thin
# pool size should be 9.5g (95% of 10g).
self.assertEqual("9.5g", self.vg.create_thin_pool())
# Passing a size parameter should result in a thin pool of that exact
# size.
for size in ("1g", "1.2g", "1.75g"):
self.assertEqual(size, self.vg.create_thin_pool(size_str=size))
def test_thin_pool_provisioned_capacity(self):
self.vg.vg_thin_pool = "test-prov-cap-pool-unit"
self.vg.vg_name = 'test-prov-cap-vg-unit'
self.assertEqual(
"9.5g",
self.vg.create_thin_pool(name=self.vg.vg_thin_pool))
self.assertEqual("9.50", self.vg.vg_thin_pool_size)
self.assertEqual(7.6, self.vg.vg_thin_pool_free_space)
self.assertEqual(3.0, self.vg.vg_provisioned_capacity)
self.vg.vg_thin_pool = "test-prov-cap-pool-no-unit"
self.vg.vg_name = 'test-prov-cap-vg-no-unit'
self.assertEqual(
"9.5g",
self.vg.create_thin_pool(name=self.vg.vg_thin_pool))
self.assertEqual("9.50", self.vg.vg_thin_pool_size)
self.assertEqual(7.6, self.vg.vg_thin_pool_free_space)
self.assertEqual(3.0, self.vg.vg_provisioned_capacity)
def test_thin_pool_free_space(self):
# The size of fake-vg-pool is 9g and the allocated data sums up to
# 12% so the calculated free space should be 7.92
self.assertEqual(float("7.92"),
self.vg._get_thin_pool_free_space("fake-vg",
"fake-vg-pool"))
def test_volume_create_after_thin_creation(self):
"""Test self.vg.vg_thin_pool is set to pool_name
See bug #1220286 for more info.
"""
vg_name = "vg-name"
pool_name = vg_name + "-pool"
pool_path = "%s/%s" % (vg_name, pool_name)
def executor(obj, *cmd, **kwargs):
self.assertEqual(pool_path, cmd[-1])
self.vg._executor = executor
self.vg.create_thin_pool(pool_name, "1G")
self.vg.create_volume("test", "1G", lv_type='thin')
self.assertEqual(self.vg.vg_thin_pool, pool_name)
def test_lv_has_snapshot(self):
self.assertTrue(self.vg.lv_has_snapshot('fake-vg'))
self.assertFalse(self.vg.lv_has_snapshot('test-volumes'))
def test_activate_lv(self):
self._mox.StubOutWithMock(self.vg, '_execute')
self.vg._supports_lvchange_ignoreskipactivation = True
self.vg._execute('lvchange', '-a', 'y', '--yes', '-K',
'fake-vg/my-lv',
root_helper='sudo', run_as_root=True)
self._mox.ReplayAll()
self.vg.activate_lv('my-lv')
self._mox.VerifyAll()
def test_get_mirrored_available_capacity(self):
self.assertEqual(self.vg.vg_mirror_free_space(1), 2.0)
|
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pre-commit script for Oppia.
This script lints Python and JavaScript code, and prints a
list of lint errors to the terminal. If the directory path is passed,
it will lint all Python and JavaScript files in that directory; otherwise,
it will only lint files that have been touched in this commit.
This script ignores all filepaths contained within the excludeFiles
argument in .jscsrc. Note that, as a side-effect, these filepaths will also
prevent Python files in those paths from being linted.
IMPORTANT NOTES:
1. Before running this script, you must install third-party dependencies by
running
bash scripts/start.sh
at least once.
=====================
CUSTOMIZATION OPTIONS
=====================
1. To lint only files that have been touched in this commit
python scripts/pre_commit_linter.py
2. To lint all files in the folder or to lint just a specific file
python scripts/pre_commit_linter.py --path filepath
3. To lint a specific list of files (*.js/*.py only). Separate files by spaces
python scripts/pre_commit_linter.py --files file_1 file_2 ... file_n
Note that the root folder MUST be named 'oppia'.
"""
# Pylint has issues with the import order of argparse.
# pylint: disable=wrong-import-order
import argparse
import fnmatch
import multiprocessing
import os
import json
import subprocess
import sys
import time
# pylint: enable=wrong-import-order
_PARSER = argparse.ArgumentParser()
_EXCLUSIVE_GROUP = _PARSER.add_mutually_exclusive_group()
_EXCLUSIVE_GROUP.add_argument(
'--path',
help='path to the directory with files to be linted',
action='store')
_EXCLUSIVE_GROUP.add_argument(
'--files',
nargs='+',
help='specific files to be linted. Space separated list',
action='store')
BAD_PATTERNS = {
'__author__': {
'message': 'Please remove author tags from this file.',
'excluded_files': ()},
'datetime.datetime.now()': {
        'message': 'Please use datetime.datetime.utcnow() instead of '
                   'datetime.datetime.now().',
'excluded_files': ()},
'\t': {
'message': 'Please use spaces instead of tabs.',
'excluded_files': ()},
'\r': {
'message': 'Please make sure all files only have LF endings (no CRLF).',
'excluded_files': ()},
'glyphicon': {
'message': 'Please use equivalent material-icons '
'instead of glyphicons.',
'excluded_files': ()}
}
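# Adding a new repository-wide check is a matter of appending an entry to
# BAD_PATTERNS above, e.g. (illustrative sketch only):
#     'console.log(': {
#         'message': 'Please remove console.log statements before committing.',
#         'excluded_files': ()},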
BAD_PATTERNS_JS = {
' == ': {
'message': 'Please replace == with === in this file.',
'excluded_files': (
'core/templates/dev/head/expressions/parserSpec.js',
'core/templates/dev/head/expressions/evaluatorSpec.js',
'core/templates/dev/head/expressions/typeParserSpec.js')},
' != ': {
'message': 'Please replace != with !== in this file.',
'excluded_files': (
'core/templates/dev/head/expressions/parserSpec.js',
'core/templates/dev/head/expressions/evaluatorSpec.js',
'core/templates/dev/head/expressions/typeParserSpec.js')}
}
BAD_PATTERNS_APP_YAML = {
'MINIFICATION: true': {
        'message': 'Please set the MINIFICATION env variable in app.yaml '
                   'to False before committing.',
'excluded_files': ()}
}
EXCLUDED_PATHS = (
'third_party/*', 'build/*', '.git/*', '*.pyc', 'CHANGELOG',
'scripts/pre_commit_linter.py', 'integrations/*',
'integrations_dev/*', '*.svg', '*.png', '*.zip', '*.ico', '*.jpg',
'*.min.js', 'assets/scripts/*')
if not os.getcwd().endswith('oppia'):
    print ''
    print 'ERROR Please run this script from the oppia root directory.'
    sys.exit(1)
_PARENT_DIR = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
_PYLINT_PATH = os.path.join(_PARENT_DIR, 'oppia_tools', 'pylint-1.5.2')
if not os.path.exists(_PYLINT_PATH):
print ''
print 'ERROR Please run start.sh first to install pylint '
print ' and its dependencies.'
sys.exit(1)
_PATHS_TO_INSERT = [
_PYLINT_PATH,
os.getcwd(),
os.path.join(
_PARENT_DIR, 'oppia_tools', 'google_appengine_1.9.19',
'google_appengine', 'lib', 'webapp2-2.3'),
os.path.join(
_PARENT_DIR, 'oppia_tools', 'google_appengine_1.9.19',
'google_appengine', 'lib', 'yaml-3.10'),
os.path.join(
_PARENT_DIR, 'oppia_tools', 'google_appengine_1.9.19',
'google_appengine', 'lib', 'jinja2-2.6'),
os.path.join(
_PARENT_DIR, 'oppia_tools', 'google_appengine_1.9.19',
'google_appengine'),
os.path.join(_PARENT_DIR, 'oppia_tools', 'webtest-1.4.2'),
os.path.join(_PARENT_DIR, 'oppia_tools', 'numpy-1.6.1'),
os.path.join(_PARENT_DIR, 'oppia_tools', 'browsermob-proxy-0.7.1'),
os.path.join(_PARENT_DIR, 'oppia_tools', 'selenium-2.53.2'),
os.path.join('third_party', 'gae-pipeline-1.9.17.0'),
os.path.join('third_party', 'bleach-1.2.2'),
os.path.join('third_party', 'gae-mapreduce-1.9.17.0'),
]
for path in _PATHS_TO_INSERT:
sys.path.insert(0, path)
from pylint import lint # pylint: disable=wrong-import-position
_MESSAGE_TYPE_SUCCESS = 'SUCCESS'
_MESSAGE_TYPE_FAILED = 'FAILED'
def _get_changed_filenames():
"""Returns a list of modified files (both staged and unstaged)
Returns:
a list of filenames of modified files
"""
unstaged_files = subprocess.check_output([
'git', 'diff', '--name-only']).splitlines()
staged_files = subprocess.check_output([
'git', 'diff', '--cached', '--name-only',
'--diff-filter=ACM']).splitlines()
return unstaged_files + staged_files
def _get_glob_patterns_excluded_from_jscsrc(config_jscsrc):
"""Collects excludeFiles from jscsrc file.
Args:
- config_jscsrc: str. Path to .jscsrc file.
Returns:
a list of files in excludeFiles.
"""
with open(config_jscsrc) as f:
f.readline() # First three lines are comments
f.readline()
f.readline()
json_data = json.loads(f.read())
return json_data['excludeFiles']
def _get_all_files_in_directory(dir_path, excluded_glob_patterns):
"""Recursively collects all files in directory and
subdirectories of specified path.
Args:
- dir_path: str. Path to the folder to be linted.
- excluded_glob_patterns: set. Set of all files to be excluded.
Returns:
a list of files in directory and subdirectories without excluded files.
"""
files_in_directory = []
for _dir, _, files in os.walk(dir_path):
for file_name in files:
filename = os.path.relpath(
os.path.join(_dir, file_name), os.getcwd())
if not any([fnmatch.fnmatch(filename, gp) for gp in
excluded_glob_patterns]):
files_in_directory.append(filename)
return files_in_directory
def _lint_js_files(node_path, jscs_path, config_jscsrc, files_to_lint, stdout,
result):
"""Prints a list of lint errors in the given list of JavaScript files.
Args:
- node_path: str. Path to the node binary.
- jscs_path: str. Path to the JSCS binary.
- config_jscsrc: str. Configuration args for the call to the JSCS binary.
- files_to_lint: list of str. A list of filepaths to lint.
- stdout: multiprocessing.Queue. A queue to store JSCS outputs
- result: multiprocessing.Queue. A queue to put results of test
Returns:
None
"""
start_time = time.time()
num_files_with_errors = 0
num_js_files = len(files_to_lint)
if not files_to_lint:
result.put('')
print 'There are no JavaScript files to lint.'
return
print 'Total js files: ', num_js_files
jscs_cmd_args = [node_path, jscs_path, config_jscsrc]
for _, filename in enumerate(files_to_lint):
print 'Linting: ', filename
proc_args = jscs_cmd_args + [filename]
proc = subprocess.Popen(
proc_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
linter_stdout, linter_stderr = proc.communicate()
if linter_stderr:
print 'LINTER FAILED'
print linter_stderr
sys.exit(1)
if linter_stdout:
num_files_with_errors += 1
stdout.put(linter_stdout)
if num_files_with_errors:
result.put('%s %s JavaScript files' % (
_MESSAGE_TYPE_FAILED, num_files_with_errors))
else:
result.put('%s %s JavaScript files linted (%.1f secs)' % (
_MESSAGE_TYPE_SUCCESS, num_js_files, time.time() - start_time))
print 'Js linting finished.'
def _lint_py_files(config_pylint, files_to_lint, result):
"""Prints a list of lint errors in the given list of Python files.
Args:
- config_pylint: str. Path to the .pylintrc file.
- files_to_lint: list of str. A list of filepaths to lint.
- result: multiprocessing.Queue. A queue to put results of test
Returns:
None
"""
start_time = time.time()
are_there_errors = False
num_py_files = len(files_to_lint)
if not files_to_lint:
result.put('')
print 'There are no Python files to lint.'
return
print 'Linting %s Python files' % num_py_files
_BATCH_SIZE = 50
current_batch_start_index = 0
while current_batch_start_index < len(files_to_lint):
# Note that this index is an exclusive upper bound -- i.e., the current
# batch of files ranges from 'start_index' to 'end_index - 1'.
current_batch_end_index = min(
current_batch_start_index + _BATCH_SIZE, len(files_to_lint))
current_files_to_lint = files_to_lint[
current_batch_start_index : current_batch_end_index]
print 'Linting Python files %s to %s...' % (
current_batch_start_index + 1, current_batch_end_index)
try:
# This prints output to the console.
lint.Run(current_files_to_lint + [config_pylint])
except SystemExit as e:
if str(e) != '0':
are_there_errors = True
current_batch_start_index = current_batch_end_index
if are_there_errors:
result.put('%s Python linting failed' % _MESSAGE_TYPE_FAILED)
else:
result.put('%s %s Python files linted (%.1f secs)' % (
_MESSAGE_TYPE_SUCCESS, num_py_files, time.time() - start_time))
print 'Python linting finished.'
def _get_all_files():
    """Returns a list of all the files to be considered for linting and
    pattern checks, based on the --path/--files arguments (defaulting to
    the files changed in this commit).
    """
jscsrc_path = os.path.join(os.getcwd(), '.jscsrc')
parsed_args = _PARSER.parse_args()
if parsed_args.path:
input_path = os.path.join(os.getcwd(), parsed_args.path)
if not os.path.exists(input_path):
print 'Could not locate file or directory %s. Exiting.' % input_path
print '----------------------------------------'
sys.exit(1)
if os.path.isfile(input_path):
all_files = [input_path]
else:
excluded_glob_patterns = _get_glob_patterns_excluded_from_jscsrc(
jscsrc_path)
all_files = _get_all_files_in_directory(
input_path, excluded_glob_patterns)
elif parsed_args.files:
valid_filepaths = []
invalid_filepaths = []
for f in parsed_args.files:
if os.path.isfile(f):
valid_filepaths.append(f)
else:
invalid_filepaths.append(f)
if invalid_filepaths:
print ('The following file(s) do not exist: %s\n'
'Exiting.' % invalid_filepaths)
sys.exit(1)
all_files = valid_filepaths
else:
all_files = _get_changed_filenames()
return all_files
def _pre_commit_linter(all_files):
    """Checks that the node-jscs dependencies are installed, then runs the
    JavaScript and Python linters in parallel and returns their summary
    messages.
    """
print 'Starting linter...'
jscsrc_path = os.path.join(os.getcwd(), '.jscsrc')
pylintrc_path = os.path.join(os.getcwd(), '.pylintrc')
config_jscsrc = '--config=%s' % jscsrc_path
config_pylint = '--rcfile=%s' % pylintrc_path
parent_dir = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
node_path = os.path.join(
parent_dir, 'oppia_tools', 'node-4.2.1', 'bin', 'node')
jscs_path = os.path.join(
parent_dir, 'node_modules', 'jscs', 'bin', 'jscs')
if not os.path.exists(jscs_path):
print ''
print 'ERROR Please run start.sh first to install node-jscs '
print ' and its dependencies.'
sys.exit(1)
js_files_to_lint = [
filename for filename in all_files if filename.endswith('.js')]
py_files_to_lint = [
filename for filename in all_files if filename.endswith('.py')]
js_result = multiprocessing.Queue()
linting_processes = []
js_stdout = multiprocessing.Queue()
linting_processes.append(multiprocessing.Process(
target=_lint_js_files, args=(node_path, jscs_path, config_jscsrc,
js_files_to_lint, js_stdout, js_result)))
py_result = multiprocessing.Queue()
linting_processes.append(multiprocessing.Process(
target=_lint_py_files,
args=(config_pylint, py_files_to_lint, py_result)))
print 'Starting Javascript and Python Linting'
print '----------------------------------------'
for process in linting_processes:
process.start()
for process in linting_processes:
        # Require a timeout parameter to prevent endless waiting for the
        # linting function to return.
process.join(timeout=600)
js_messages = []
while not js_stdout.empty():
js_messages.append(js_stdout.get())
print ''
print '\n'.join(js_messages)
print '----------------------------------------'
summary_messages = []
# Require block = False to prevent unnecessary waiting for the process
# output.
summary_messages.append(js_result.get(block=False))
summary_messages.append(py_result.get(block=False))
print '\n'.join(summary_messages)
print ''
return summary_messages
def _check_bad_patterns(all_files):
"""This function is used for detecting bad patterns.
"""
print 'Starting Pattern Checks'
print '----------------------------------------'
total_files_checked = 0
total_error_count = 0
summary_messages = []
all_files = [
filename for filename in all_files if not
any(fnmatch.fnmatch(filename, pattern) for pattern in EXCLUDED_PATHS)]
all_js_files = [
filename for filename in all_files if filename.endswith('.js')]
failed = False
for filename in all_files:
with open(filename) as f:
content = f.read()
total_files_checked += 1
for pattern in BAD_PATTERNS:
if pattern in content and filename not in (
BAD_PATTERNS[pattern]['excluded_files']):
failed = True
print '%s --> %s' % (
filename, BAD_PATTERNS[pattern]['message'])
total_error_count += 1
if filename in all_js_files:
for pattern in BAD_PATTERNS_JS:
if filename not in (
BAD_PATTERNS_JS[pattern]['excluded_files']):
if pattern in content:
failed = True
print '%s --> %s' % (
filename,
BAD_PATTERNS_JS[pattern]['message'])
total_error_count += 1
if filename == 'app.yaml':
for pattern in BAD_PATTERNS_APP_YAML:
if pattern in content:
failed = True
print '%s --> %s' % (
filename,
BAD_PATTERNS_APP_YAML[pattern]['message'])
total_error_count += 1
if failed:
summary_message = '%s Pattern checks failed' % _MESSAGE_TYPE_FAILED
summary_messages.append(summary_message)
else:
summary_message = '%s Pattern checks passed' % _MESSAGE_TYPE_SUCCESS
summary_messages.append(summary_message)
print ''
print '----------------------------------------'
print ''
if total_files_checked == 0:
print "There are no files to be checked."
else:
print '(%s files checked, %s errors found)' % (
total_files_checked, total_error_count)
print summary_message
return summary_messages
def main():
all_files = _get_all_files()
linter_messages = _pre_commit_linter(all_files)
pattern_messages = _check_bad_patterns(all_files)
all_messages = linter_messages + pattern_messages
if any([message.startswith(_MESSAGE_TYPE_FAILED) for message in
all_messages]):
sys.exit(1)
if __name__ == '__main__':
main()
|
|
import json
import os.path
import re
from django.conf import settings
from django.utils.translation import ugettext_noop
from casexml.apps.case.models import CommCareCase
from corehq.apps.reports.api import ReportDataSource
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn
from corehq.apps.reports.generic import GenericReportView, GenericTabularReport, GetParamsMixin
from corehq.apps.reports.standard import ProjectReport, ProjectReportParametersMixin
from corehq.apps.reports.standard.cases.basic import CaseListMixin, CaseListReport
from dimagi.utils.modules import to_function
from django.template.loader import render_to_string
class GenericMapReport(ProjectReport, ProjectReportParametersMixin):
"""instances must set:
data_source -- config about backend data source
display_config -- configure the front-end display of data
consult docs/maps.html for instructions
"""
report_partial_path = "reports/partials/maps.html"
flush_layout = True
#asynchronous = False
def _get_data(self):
adapter = self.data_source['adapter']
geo_col = self.data_source.get('geo_column', 'geo')
try:
loader = getattr(self, '_get_data_%s' % adapter)
except AttributeError:
raise RuntimeError('unknown adapter [%s]' % adapter)
data = loader(self.data_source, dict(self.request.GET.iteritems()))
return self._to_geojson(data, geo_col)
def _to_geojson(self, data, geo_col):
def _parse_geopoint(raw):
try:
latlon = [float(k) for k in re.split(' *,? *', raw)[:2]]
return [latlon[1], latlon[0]] # geojson is lon, lat
except ValueError:
return None
metadata = {}
def points():
for row in data:
if '_meta' in row:
# not a real data row
metadata.update(row['_meta'])
continue
geo = row[geo_col]
if geo is None:
continue
e = geo
depth = 0
while hasattr(e, '__iter__'):
e = e[0]
depth += 1
if depth < 2:
if depth == 0:
geo = _parse_geopoint(geo)
if geo is None:
continue
feature_type = 'Point'
else:
if depth == 2:
geo = [geo]
depth += 1
feature_type = 'MultiPolygon' if depth == 4 else 'Polygon'
properties = dict((k, v) for k, v in row.iteritems() if k != geo_col)
# handle 'display value / raw value' fields (for backwards compatibility with
# existing data sources)
# note: this is not ideal for the maps report, as we have no idea how to properly
# format legends; it's better to use a formatter function in the maps report config
display_props = {}
for k, v in properties.iteritems():
if isinstance(v, dict) and set(v.keys()) == set(('html', 'sort_key')):
properties[k] = v['sort_key']
display_props['__disp_%s' % k] = v['html']
properties.update(display_props)
yield {
'type': 'Feature',
'geometry': {
'type': feature_type,
'coordinates': geo,
},
'properties': properties,
}
features = list(points())
return {
'type': 'FeatureCollection',
'features': features,
'metadata': metadata,
}
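    # The geometry handling above keys off the nesting depth of the geo value
    # (illustrative examples):
    #     "42.36, -71.06"            -> depth 0 (string, parsed) -> Point
    #     [-71.06, 42.36]            -> depth 1                  -> Point
    #     [[[x, y], ...]]            -> depth 3                  -> Polygon
    #     [[[[x, y], ...], ...]]     -> depth 4                  -> MultiPolygon
    # A depth-2 ring is first wrapped in a list, so it also becomes a Polygon.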
def _get_data_report(self, params, filters):
# this ordering is important!
# in the reverse order you could view a different domain's data just by setting the url param!
config = dict(filters)
config.update(params.get('report_params', {}))
config['domain'] = self.domain
config['request'] = self.request
DataSource = to_function(params['report'])
assert issubclass(DataSource, ReportDataSource), '[%s] does not implement the ReportDataSource API!' % params['report']
assert not issubclass(DataSource, GenericReportView), '[%s] cannot be a ReportView (even if it is also a ReportDataSource)! You must separate your code into a class of each type, or use the "legacyreport" adapter.' % params['report']
return DataSource(config).get_data()
def _get_data_legacyreport(self, params, filters):
Report = to_function(params['report'])
assert issubclass(Report, GenericTabularReport), '[%s] must be a GenericTabularReport!' % params['report']
# TODO it would be nice to indicate to the report that it is being used in a map context
# (so that it could add a geo column), but it does not seem like reports can be arbitrarily
# parameterized in this way
report = Report(request=self.request, domain=self.domain, **params.get('report_params', {}))
def _headers(e, root=[]):
if hasattr(e, '__iter__'):
if hasattr(e, 'html'):
root = list(root) + [e.html]
for sub in e:
for k in _headers(sub, root):
yield k
else:
yield root + [e.html]
headers = ['::'.join(k) for k in _headers(report.headers)]
for row in report.rows:
yield dict(zip(headers, row))
def _get_data_case(self, params, filters):
MAX_RESULTS = 200 # TODO vary by domain (cc-plus gets a higher limit?)
# bleh
_get = self.request.GET.copy()
_get['iDisplayStart'] = '0'
_get['iDisplayLength'] = str(MAX_RESULTS)
self.request.GET = _get
source = CaseListReport(self.request, domain=self.domain)
total_count = source.es_results['hits']['total']
if total_count > MAX_RESULTS:
# can't really think of a better way to return out-of-band
# metadata from a generator
yield {'_meta': {
'total_rows': total_count,
'capped_rows': MAX_RESULTS,
}}
# TODO ideally we'd want access to all the data shown on the
# case detail report. certain case types can override this via
# case.to_full_dict(). however, there is currently no efficient
# way to call this over a large block of cases. so now we (via the
# CaseListReport/DataSource) limit ourselves only to that which
# can be queried in bulk
for data in source.get_data():
case = CommCareCase.wrap(data['_case']).get_json()
del data['_case']
data['num_forms'] = len(case['xform_ids'])
standard_props = (
'case_name',
'case_type',
'date_opened',
'external_id',
'owner_id',
)
data.update(('prop_%s' % k, v) for k, v in case['properties'].iteritems() if k not in standard_props)
GEO_DEFAULT = 'gps' # case property
geo = None
geo_directive = params['geo_fetch'].get(data['case_type'], GEO_DEFAULT)
if geo_directive.startswith('link:'):
# TODO use linked case
pass
elif geo_directive == '_random':
# for testing -- just map the case to a random point
import random
import math
geo = '%s %s' % (math.degrees(math.asin(random.uniform(-1, 1))), random.uniform(-180, 180))
elif geo_directive:
# case property
geo = data.get('prop_%s' % geo_directive)
if geo:
data['geo'] = geo
yield data
def _get_data_csv(self, params, filters):
import csv
with open(params['path']) as f:
return list(csv.DictReader(f))
def _get_data_geojson(self, params, filters):
with open(params['path']) as f:
data = json.load(f)
for feature in data['features']:
item = dict(feature['properties'])
item['geo'] = feature['geometry']['coordinates']
yield item
@property
def report_context(self):
layers = getattr(settings, 'MAPS_LAYERS', None)
if not layers:
layers = {'Default': {'family': 'fallback'}}
data = self._get_data()
display = self.dynamic_config(self.display_config, data['features'])
context = {
'data': data,
'config': display,
'layers': layers,
}
return dict(
context=context,
)
def dynamic_config(self, static_config, data):
"""override to customize the display configuration based on the
resultant data
static_config -- contents of 'display_config' property
data -- report data as a list of geojson Feature records
"""
return static_config
@classmethod
def show_in_navigation(cls, domain=None, project=None, user=None):
return True
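# Illustrative sketch (not part of the original module): wiring a map to the
# 'report' adapter, whose contract is enforced by _get_data_report() above.
# MyMapDataSource and the dotted path below are hypothetical names used only
# for this example; a real data source must subclass ReportDataSource (and
# must not also be a GenericReportView) and should expose the geo column
# named in data_source['geo_column'].
#
#   class MyMapDataSource(ReportDataSource):
#       def get_data(self):
#           return [{'name': 'Example clinic', 'gps': '42.37, -71.11'}]
#
#   class ExampleReportBackedMap(GenericMapReport):
#       name = "Maps: Example"
#       slug = "maps_example"
#       data_source = {
#           "adapter": "report",
#           "geo_column": "gps",
#           "report": "corehq.apps.reports.example.MyMapDataSource",
#       }
#       display_config = {
#           "name_column": "name",
#           "detail_columns": ["name"],
#       }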
class ElasticSearchMapReport(GetParamsMixin, GenericTabularReport, GenericMapReport):
report_template_path = "reports/async/maps.html"
report_partial_path = "reports/async/partials/maps.html"
ajax_pagination = True
asynchronous = True
flush_layout = True
def get_report(self):
Report = to_function(self.data_source['report'])
assert issubclass(Report, GenericTabularReport), '[%s] must be a GenericTabularReport!' % self.data_source['report']
report = Report(request=self.request, domain=self.domain, **self.data_source.get('report_params', {}))
return report
@property
def total_records(self):
report = self.get_report()
return report.total_records
@property
def json_dict(self):
ret = super(ElasticSearchMapReport, self).json_dict
layers = getattr(settings, 'MAPS_LAYERS', None)
if not layers:
layers = {'Default': {'family': 'fallback'}}
data = self._get_data()
display = self.dynamic_config(self.display_config, data['features'])
context = {
'data': data,
'config': display,
'layers': layers,
}
ret.update(dict(context=context))
return ret
class DemoMapReport(GenericMapReport):
"""this report is a demonstration of the maps report's capabilities
it uses a static dataset
"""
name = ugettext_noop("Maps: Highest Mountains")
slug = "maps_demo"
data_source = {
"adapter": "csv",
"geo_column": "geo",
"path": os.path.join(os.path.dirname(os.path.dirname(__file__)), 'tests/maps_demo/mountains.csv'),
}
display_config = {
"name_column": "name",
"detail_columns": [
"rank",
"height",
"prominence",
"country",
"range",
"first_ascent",
"num_ascents",
"num_deaths",
"death_rate"
],
"column_titles": {
"name": "Mountain",
"country": "Country",
"height": "Elevation",
"prominence": "Topographic Prominence",
"range": "Range",
"first_ascent": "First Ascent",
"rank": "Ranking",
"num_ascents": "# Ascents",
"num_deaths": "# Deaths",
"death_rate": "Death Rate"
},
"enum_captions": {
"first_ascent": {
"_null": "Unclimbed"
},
"rank": {
"-": "Top 10"
}
},
"numeric_format": {
"rank": "return '#' + x",
"height": "return x + ' m | ' + Math.round(x / .3048) + ' ft'",
"prominence": "return x + ' m | ' + Math.round(x / .3048) + ' ft'",
"death_rate": "return (100. * x).toFixed(2) + '%'"
},
"metrics": [
{
"color": {
"column": "rank",
"thresholds": [
11,
25,
50
]
}
},
{
"color": {
"column": "height",
"colorstops": [
[
7200,
"rgba(20,20,20,.8)"
],
[
8848,
"rgba(255,120,20,.8)"
]
]
}
},
{
"size": {
"column": "prominence"
},
"color": {
"column": "prominence",
"thresholds": [
1500,
3000,
4000
],
"categories": {
"1500": "rgba(255, 255, 60, .8)",
"3000": "rgba(255, 128, 0, .8)",
"4000": "rgba(255, 0, 0, .8)",
"-": "rgba(150, 150, 150, .8)"
}
}
},
{
"icon": {
"column": "country",
"categories": {
"Pakistan": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABcAAAAPCAIAAACAzcMBAAAABmJLR0QA/wD/AP+gvaeTAAAAw0lEQVQ4y2P4jwr2nz/G6ChDKmIYUqZIBhvGtRewuyoiCcqSZopylNXhyyc53JTgIlHN2UZpHqSZsuPUgcpZ7XCuXV7Qm4/vyma0kGCKVIjRv3//oltykDWE1KdJhxiTYIpphhdQpHpOJ0WhC3HL7Sf3Od2V0bQxO8mRFi5AwfWHd/B7a8AFgYZ6lMWQFkdP37wAir98/7pz+bSKWW1dK6av2L8ROdaITS+T1s178vr5n79/rty/WTq9GTXtDL0cQAwCAFS5mrmuqFgRAAAAAElFTkSuQmCC",
"China/Pakistan": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABcAAAAPCAYAAAAPr1RWAAAAAXNSR0IArs4c6QAAAAZiS0dEAP8A/wD/oL2nkwAAAAlwSFlzAAALEwAACxMBAJqcGAAAAAd0SU1FB90IHwcYMYz0J78AAAFiSURBVDjLY3g/tfM/Oj5Xm/M/z1Tsf565GIQmEzMgG/phbgeK4eVpQv/zralk+PfDrf8/LW2HG54PdHm+jdj/Ii8R8g3/MLvj/8/zLf//v2/8//914/+rCzPhCgqAhhf7QQyvslP8PzfS8X+huSSaQeL4Xf5pSfv/Pw+a/v++1YwIc5gFtmL/651U/1+oz/tfZIFq8Oxwu//tHjr4Df+4CBjeMyD0+QaE4YWuov9LI4X/n67K/L86yQdFc6+P4f+nfQ3/VyZ6Ew5z9NRSFg+J0Gp7xf/vpnb8nxNuj2HAjFAboLwS6YYXOIqC6U5PXbD4mmRf8lMLRjqHYpjL73RUAsNcCqtB+Wbi5BkOwqAwB8kdK0/9X2olgyIHsnBSgBn5hoNSy6OeWrD8k976/5szQ/6vAkbwFiB9uDQRIxWRZDgsne/Iifj/sLv2/9sp7f9vtpYBU4oXlnRPhuEUZf8hZTgA8YnkUuk5wigAAAAASUVORK5CYII=",
"Bhutan/China": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABcAAAAPCAYAAAAPr1RWAAAABmJLR0QA/wD/AP+gvaeTAAAB5UlEQVQ4y63Q20uTARzGcf+ctTWsiAbFmstRGXXRwYIgKqGRwaib6krwKhdkB6I1h+AsOhlkrHVYh2lTslndaLVDc+/51Ls5urK7bxeBIFrtDS+eu9/z4eHX0kh5We3U73vRu9bQsmpgqpVGysv3Pg/Kdhey3+Uc10d9VF+F0XJnsZ8FfsNP1mM/3ot21I3sdy3GEW6n/Rj5PrTPaUy1yo9GDaE0jZW7hRyL8m335v/H65kQczNv0OQKplKkZhmIDxOIQzeQ9geXwI5x62k7+tcMlmUhvBhk7kCQQvwacjKGeOY4YsDjHLdyEex8D+Z4GG20C70wi5B/h/llFvHta+ofp1CvX3S+XMtHma+ZGMIMUqWI9X4CtVxGmZpEOt+N2OFbtrgp3EpvxSxlKb28jHKqA6X3HFKsH+HDNFK0B9nvQmxvXRH+J25nwwjlAuLIbbQ7g0g7NyHu2UIpfgX90V2se0OoyTjVZvFaaiNm9hjaRILyWIbi8ADV4QGkxFWUg6ElZT15k58LC0i7fE3g6Q3Y4xFqpU8IqRHUyBGkE50Iz9Mo4UPLykpoHcK+tubeYsS3YVw4jRT0Lh5Uwp2Yk2NUug//EfkrPv/Ai3HSveKB1ObBvNSLHHA7x+3+tag7XI6LzeQXCpSkKvvyoHIAAAAASUVORK5CYII=",
"China/India": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABcAAAAPCAYAAAAPr1RWAAAAAXNSR0IArs4c6QAAAAZiS0dEAP8A/wD/oL2nkwAAAAlwSFlzAAALEwAACxMBAJqcGAAAAAd0SU1FB90IHwcUGGLz8N8AAADoSURBVDjLY3ifpPQfJ07GI0cEZkDmfMhURJH8MVn2/4d0ReoY/n2C3P9PhQooLv+Qofj/c5kC+YaDXPdztuz//7ul/v/fIfX/a5s8im8+VytQ5vJPBQr//6yR/v97uQyGIvTgIt7wqZ3/Qfjjoo7/72dA6Zkd/2Hin5a2//+6s+3/leqa/3uSisA0TI4QZsAn+X1/6/8Pszv+X6qo/n+mqPL/qYKK/6eB9KXKasoN/7gQ4oOdCYVgg5d4Z4LpnfGFlBsOwyCXnoa5vLCCOi5HxqCw3g0M86vVtcSHeXkk939a4VHD6W84AMcMSEsYuXzSAAAAAElFTkSuQmCC",
"India/Nepal": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABcAAAAPCAYAAAAPr1RWAAAAAXNSR0IArs4c6QAAAAZiS0dEAP8A/wD/oL2nkwAAAAlwSFlzAAALEwAACxMBAJqcGAAAAAd0SU1FB90IHwcVISTtSZYAAAHKSURBVDjLrZDRS5NRGId3111Q/4H3GULpkopUUihoKgjbhcoCY9GNIZP2OflK55wOdblE3EWtGmWCxpTpZwgqJTnTIFQUJQmT2EVOcSip0J7mAWOsVX6yFx5e3pfD8zvnaPCcI55eSzGRDi2J++OgSVwEjDpGjdeYri8g2pViuWK8QViv5avhIq9NRay1XU69/JCPpVcZla6z33k+9fIDQoZs+isKWXLmqpQnlPIowMZDH5GeYX5Mz7PlGxDz3F03z0rduNvHiOzscZRKKt8eec/+ypqYdxdWWOgc4IOpjeCtVoImF4+lbqZmvxGNRtXLN1zP2Vv6ws+tbXY/LTKsdwixL0cWXSlp5HPrC/p8E4TWd9TJw84ngnWnF3/HEM0NQzjuD2KXA7EewNWk4H81ib8nyEtlkeXVTfXycIuX77GAXu844+9W8XhmmZkJcdTSnG46QTxX9FlU69KolfKRHUVY7+Vhjs1nyrI5ZTtJ4vl/kVRuNefQ4K/C8bYOW/cdpNtZIiBdZcCfcoMW2a4T4gMax2RqrQXiNeZCdQFJb15TeYm6pxXY30g88JQjmTKFXG3AX//ccjMDa3UuFmPGb3F8wNmyC/8N+AVYXqHDIJue6wAAAABJRU5ErkJggg==",
"Nepal": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAA4AAAARCAYAAADtyJ2fAAAABmJLR0QA/wD/AP+gvaeTAAABnElEQVQoz2NgMe9fzWA9WYqBVJCpnvW/WjXpt4RpVwkDQwMT0Rrz1DL+3xGx+X9M3OWft3bJcwbzSRYkaYThmXLhf3SMGucx6HVzk6QRhC+IOf6L1Cz4yGA5xQevxnsKbv+fBhX8/zB95f/HrilgPsiAlTKBf0wM6w9iDTyQxldlPf+/7j7+HwQ+rdv9/0VaA9z2GyJ2/wvV0n7ymvUWoQQe2EY5l/9fNh/4//vJy/8fF2z4f1fCHsP5O6W8/rvoVDxgsJpqgOHHWyK2/xM1cv5b6tU8dNSrvIKO7fWqLrOZT9zJYD7FCzVwxO2ATrP976lT9prBcro0UaH6Mrv1/9v2OWD/3QRq9tEpfYtXM0jji4Tq/79uPQQHzvcTF/8/8cwAa/bXxqM5Xz3z/9vmGf9h4N+Pn/9f5rVD/CwK0lyCXTPIxmdhxf+/7Dr6/8+b9/8/rd75/4l7GiLAcGmG+fGRVfT/F0m1/x9aRmNEBUhzgHYxqmZsSQ4bvgnUHKhV9JrBfII4WKOxQf3/dGDWIgbHa+b9ZzObcAOkGQDaD1JZd6jOSgAAAABJRU5ErkJggg==",
"India": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABcAAAAPCAYAAAAPr1RWAAAABmJLR0QA/wD/AP+gvaeTAAAAdElEQVQ4y2P4P9P4P60ww6jhA2A4keD06Wf/Z826BKaJBUQZfuzY4/+7dt3/v23b3f87d97/f/z4E+oZPnXqebDBOTn7wfSUKeepZzjIpSAXb9t27/+OHfeo63JYmM+ceen/mTPPiQ9zoQ72/7TCo4bT33AAzkG28NnasBMAAAAASUVORK5CYII=",
"China/Nepal": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABcAAAAPCAYAAAAPr1RWAAAAAXNSR0IArs4c6QAAAAZiS0dEAP8A/wD/oL2nkwAAAAlwSFlzAAALEwAACxMBAJqcGAAAAAd0SU1FB90IHwcYDjqSCoIAAAG4SURBVDjLY7inKfAfGU/Ts/p/WUscwtdBlSMVMyBzHlrw/1+iYfN/nbrF/0062v/fd7P/f2DCTx3D37dx/F9lbvX/rrza/0sKWv/n6tj+P24m+/9ZMA/5hoNc93ke2///9xj+b28w+f/ERRlsAQjv0zb6v87f4P8tTSHyXf7Enff/zwPM/zfnmcINhuGb6hr/F2ja/t+rrUKe4Y+d+f7f1xP4v9LE6v99Da3/D23t/z+PjwFaavH/qacG2JIjygb/5+tYIiKclDAH4eUa1v+f+Pv+f1mY9f/bvqb/LwtS/j92d4f74ra8+v+VGpb/NwAj/C45ht9T0fr/Mjf9/7uJVf+fp8T/v6eojhFUZ5R0/8/Wsv1/UluWNMNhBtwBYlBYT9O2/z9X2xornqhnB4x0ZdINv6ugDgwGtf+ztOyIDmeiDH/s5fX/aXgoOLxBPphNhgVYDX/s4vr/TUP5/3eT2/6/qsj//9DSGmzBHBItwGK4zf+nocFgg8G4v/n/Y29veByQYgFWlz+ydwQmxcz/bzvr/r/ITAGWOVYokUysBTjD/IGh6f/Hrm7/7xti5liQBXOByZCQBQC9TOVO1zHzuwAAAABJRU5ErkJggg==",
"China/Kyrgyzstan": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABcAAAAOCAYAAADE84fzAAAABmJLR0QA/wD/AP+gvaeTAAABFUlEQVQ4y82UsUoDQRCGv9m9XG4PQgjaiCSlwUJtrC31lXxJtRAjKChBi1glXtSce7c7FnkAj5wB/2aa4YP5Z/6R2eBI2ZIMW1RzuABW17WhkkZN4xK77zE7AYDqLqOeuPbwzvGK9GxJ52SFrgScYkbfGKf4q3xzuPQi6eknyUFJfZ9BHuDLkgw9elhSPXXRud3Mc7NbYUeeOE8w/YCfOMgjcWGx4xJJY4uFVoJ6IX5YwsLSv5xBKWiRIEEwvRbwuLSEaRe75wnPKdWto3rMMENPeO0Q3pLNPdd3i79xyCDQPS/QwpJdFNQPGf46R5e23bXUEwdRCC8pgqJBCNP1FL9Go3H8DWAUFAjydyFaLwCI8n9+yw+uh21xPR0lJAAAAABJRU5ErkJggg==",
"China": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABcAAAAPCAIAAACAzcMBAAAABmJLR0QA/wD/AP+gvaeTAAAAjUlEQVQ4y2O4pymABekIYBfHgRjgrIcW/HD2hx72Byb85Jjyvo3jiQcvhH1fR+CBGf+zYB4STAFa+3ke2/97DP+uM74u4oI6zZz/eQwPaW554s778wDz960syHLIfiTKlMfOfPf1BB458d03gOp84skLdxcJ4YKM3tVxkBm6yOiRAx+ZMU0JGjVl8JsCABF+frZhYhUiAAAAAElFTkSuQmCC",
"China/India/Nepal": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABcAAAAPCAYAAAAPr1RWAAAAAXNSR0IArs4c6QAAAAZiS0dEAP8A/wD/oL2nkwAAAAlwSFlzAAALEwAACxMBAJqcGAAAAAd0SU1FB90IHwcVCRFY4WwAAAH0SURBVDjLrZJPTxNRFMV/bzptp50ptilCaQsIQSVGcaFLEz+HWzd+BL+eKxdiXFDEuJEAIqX/S9vpMDPvXRdKLAH/DPHs3rvJOfeec1T/5bowg21/iUe5No6KQQGXpslgzT5UVnB84aBbYv+8iPM4QqXl/5Bn72tSecNaOKbci3j7ZoWedrDnzY3IbQCVFnJPYzJ3NLlpjP3Z4DThoT+gZfJ0HJc1GWArk3xziRTnH1PooUKPLeLmr4MWgoDaQcBuf5Fm7CXc/MkrAKQgjDqKUI9hc0Jq7hapShnTP8FmQqXhs99V7C4v8OyBh5NK4LkZKdBgAoVdq5DeWMZ9XsWur9DpFghDQzWj2Wg12X7f5suZQiRBoBeID5ugDeFRhASazt4RInBy4qNEKDfbFI9bfDiMGIbqz4FeZdcE7xoI8Clb5LhWQ8UGKQg9IF22CD0XC9jrK9bnYDEn/0h+0XtLsXk+QJfmKaVDeqcTqksu1epssL9vkHr9wr0kqfur3A3PsJcrWHkHM/aJjlvs5IrkCl+xVZSs51c+l26TubeK5eXR3SEyDdjqDdihnkjgmkAVlpfH8vIApEoFlOeigK3pgOmoTizpm5ILejgiPu0iYUT0rY2MJj9lkwlca4tu9ZBpgFVwMWcTzNifueuHQIM6zl8s+g5AOt+1kjl9KgAAAABJRU5ErkJggg==",
"Afghanistan/Pakistan": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABcAAAAPCAYAAAAPr1RWAAAABmJLR0QA/wD/AP+gvaeTAAAA50lEQVQ4y2NgYGD4jws7A/F+PNg5GahuJh48ajjZhh9gZf1/Sk/v/xlz8/+n9PX/H5WSoo7hR2Ul/p/2sPl/WFzo/+XgwP+HJUX+n3ex/n9YXpJyw8952/4/Zarz/2KA+/+Hc3r/X/Rz+3/SXPf/OU9ryg2/mhL0/0p67P9Lcf7/zwc4/T8f7g7mX00Jptzwi+Fe/8+62fy/lh3//97kxv+XYoP/n3W3/X8+wodyw4/pqv+/Upj4/6wH0NXu7v/PejoD+Qn/j+qqUSe1HFGV+f9iycL/T+fN+v9ixdL/R5SlRzPRIDUcAOepDzYPRuOVAAAAAElFTkSuQmCC",
"Tajikistan": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABcAAAAMCAYAAACJOyb4AAAABmJLR0QA/wD/AP+gvaeTAAAAkElEQVQ4y2M4w8Dwn1aYYega/nrmzP/E4jezp/wnRT3Df6LAv///vlz4/+/7bTAN4hMDiDIcYuD//39fzEXhU274v9///329DKR//f/zdjOYhvB/U8nl3278/wN09e+Xi/7/eTkHzCfK5TMPzfxPCM8+NPX/+sMl/5ccavu/4VAJmE+MPgaGNGCSoRUesoYDAFwH0YKibe8HAAAAAElFTkSuQmCC"
}
}
},
{
"color": {
"column": "range"
}
},
{
"color": {
"column": "first_ascent",
"thresholds": [
1940,
1955,
1970,
1985,
2000
],
"categories": {
"1940": "rgba(38, 75, 89, .8)",
"1955": "rgba(36, 114, 117, .8)",
"1970": "rgba(50, 153, 132, .8)",
"1985": "rgba(95, 193, 136, .8)",
"2000": "rgba(159, 230, 130, .8)",
"-": "rgba(33, 41, 54, .8)",
"_null": "rgba(255, 255, 0, .8)"
}
}
},
{
"size": {
"column": "num_ascents",
"baseline": 100
}
},
{
"size": {
"column": "num_deaths",
"baseline": 100
}
},
{
"color": {
"column": "death_rate",
"colorstops": [
[
0,
"rgba(20,20,20,.8)"
],
[
0.4,
"rgba(255,0,0,.8)"
]
]
}
},
{
"title": "Ascents vs. Death Rate",
"size": {
"column": "num_ascents",
"baseline": 200
},
"color": {
"column": "death_rate",
"colorstops": [
[
0,
"rgba(20,20,20,.8)"
],
[
0.4,
"rgba(255,0,0,.8)"
]
]
}
}
]
}
@classmethod
def show_in_navigation(cls, domain=None, project=None, user=None):
return user and user.is_previewer()
class DemoMapReport2(GenericMapReport):
"""this report is a demonstration of the maps report's capabilities
it uses a static dataset
"""
name = ugettext_noop("Maps: States of India")
slug = "maps_demo2"
data_source = {
"adapter": "geojson",
"geo_column": "geo",
"path": os.path.join(os.path.dirname(os.path.dirname(__file__)), 'tests/maps_demo/india.geojson'),
}
display_config = {
'name_column': 'name',
'detail_columns': ['iso', 'type', 'pop', 'area', 'pop_dens', 'lang', 'literacy', 'urbanity', 'sex_ratio'],
'column_titles': {
'name': 'State/Territory',
'iso': 'ISO 3166-2',
'type': 'Type',
'pop': 'Population',
'area': 'Area',
'pop_dens': 'Population Density',
'lang': 'Primary Official Language',
'literacy': 'Literacy Rate',
'urbanity': '% Urban',
'sex_ratio': 'Sex Ratio',
},
'numeric_format': {
'iso': "return 'IN-' + x",
'area': "return x + ' km^2'",
'pop': "return x.toString().replace(/\B(?=(?:\d{3})+(?!\d))/g, ',')",
'pop_dens': "return x + ' /km^2'",
'literacy': "return x + '%'",
'urbanity': "return x + '%'",
'sex_ratio': "return x/1000. + ' females per male'",
},
'metrics': [
{'color': {'column': 'pop'}},
{'color': {'column': 'pop_dens',
'colorstops': [
[0, 'rgba(20, 20, 20, .8)'],
[1200, 'rgba(255, 120, 0, .8)'],
]}},
{'color': {'column': 'area'}},
{'color': {'column': 'lang',
'categories': {
'Bengali': 'hsla(0, 100%, 50%, .8)',
'English': 'hsla(36, 100%, 50%, .8)',
'Gujarati': 'hsla(72, 100%, 50%, .8)',
'Hindi': 'hsla(108, 100%, 50%, .8)',
'Kannada': 'hsla(144, 100%, 50%, .8)',
'Nepali': 'hsla(180, 100%, 50%, .8)',
'Punjabi': 'hsla(216, 100%, 50%, .8)',
'Tamil': 'hsla(252, 100%, 50%, .8)',
'Telugu': 'hsla(288, 100%, 50%, .8)',
'Urdu': 'hsla(324, 100%, 50%, .8)',
'_other': 'hsla(0, 0%, 60%, .8)',
}
}},
{'color': {'column': 'literacy',
'colorstops': [
[60, 'rgba(20, 20, 20, .8)'],
[100, 'rgba(255, 120, 0, .8)'],
]}},
{'color': {'column': 'urbanity',
'colorstops': [
[10, 'rgba(20, 20, 20, .8)'],
[50, 'rgba(255, 120, 0, .8)'],
]}},
{'color': {'column': 'sex_ratio',
'colorstops': [
[850, 'rgba(20, 20, 255, .8)'],
[1050, 'rgba(255, 20, 20, .8)'],
]}},
],
}
@classmethod
def show_in_navigation(cls, domain=None, project=None, user=None):
return user and user.is_previewer()
class GenericCaseListMap(GenericMapReport):
fields = CaseListMixin.fields
# override to specify geo-properties of case types
# (beyond default 'gps' case property)
case_config = {}
@property
def data_source(self):
return {
"adapter": "case",
"geo_fetch": self.case_config,
}
@property
def display_config(self):
cfg = {
"name_column": "case_name",
"detail_columns": [
'external_id',
'owner_name',
'num_forms',
'is_closed',
'opened_on',
'modified_on',
'closed_on',
],
"column_titles": {
'case_name': 'Case Name',
'case_type': 'Case Type',
'external_id': 'ID #',
'owner_name': 'Owner',
'num_forms': '# Forms',
'is_closed': 'Status',
'opened_on': 'Date Opened',
'modified_on': 'Date Last Modified',
'closed_on': 'Date Closed',
},
"enum_captions": {
"is_closed": {'y': 'Closed', 'n': 'Open'},
},
}
cfg['detail_template'] = render_to_string('reports/partials/caselist_mapdetail.html', {})
return cfg
def dynamic_config(self, static_config, data):
all_cols = reduce(lambda a, b: a.union(b), (row['properties'] for row in data), set())
all_props = filter(lambda e: e.startswith('prop_'), all_cols)
# TODO feels like there should be a more authoritative source of property titles
def format_propname(prop):
name = prop[len('prop_'):]
name = reduce(lambda str, sep: ' '.join(str.split(sep)), ('-', '_'), name).title()
return name
static_config['column_titles'].update((prop, format_propname(prop)) for prop in all_props)
static_config['detail_columns'].extend(sorted(all_props))
metric_cols = [k for k in static_config['detail_columns'] if k not in ('external_id',)]
metric_cols.insert(0, 'case_type')
static_config['metrics'] = [{'auto': col} for col in metric_cols]
return static_config
class DemoMapCaseList(GenericCaseListMap):
name = ugettext_noop("Maps: Case List")
slug = "maps_demo_caselist"
case_config = {
"supply-point": "_random",
"supply-point-product": "_random",
}
@classmethod
def show_in_navigation(cls, domain=None, project=None, user=None):
return user and user.is_previewer()
"""
metrics:
want to allow customization
"""
|
|
# coding: utf-8
"""
OpenAPI spec version:
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class V1Container(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
operations = [
]
# The key is attribute name
# and the value is attribute type.
swagger_types = {
'name': 'str',
'image': 'str',
'command': 'list[str]',
'args': 'list[str]',
'working_dir': 'str',
'ports': 'list[V1ContainerPort]',
'env': 'list[V1EnvVar]',
'resources': 'V1ResourceRequirements',
'volume_mounts': 'list[V1VolumeMount]',
'liveness_probe': 'V1Probe',
'readiness_probe': 'V1Probe',
'lifecycle': 'V1Lifecycle',
'termination_message_path': 'str',
'image_pull_policy': 'str',
'security_context': 'V1SecurityContext',
'stdin': 'bool',
'stdin_once': 'bool',
'tty': 'bool'
}
# The key is attribute name
# and the value is json key in definition.
attribute_map = {
'name': 'name',
'image': 'image',
'command': 'command',
'args': 'args',
'working_dir': 'workingDir',
'ports': 'ports',
'env': 'env',
'resources': 'resources',
'volume_mounts': 'volumeMounts',
'liveness_probe': 'livenessProbe',
'readiness_probe': 'readinessProbe',
'lifecycle': 'lifecycle',
'termination_message_path': 'terminationMessagePath',
'image_pull_policy': 'imagePullPolicy',
'security_context': 'securityContext',
'stdin': 'stdin',
'stdin_once': 'stdinOnce',
'tty': 'tty'
}
def __init__(self, name=None, image=None, command=None, args=None, working_dir=None, ports=None, env=None, resources=None, volume_mounts=None, liveness_probe=None, readiness_probe=None, lifecycle=None, termination_message_path=None, image_pull_policy=None, security_context=None, stdin=None, stdin_once=None, tty=None):
"""
V1Container - a model defined in Swagger
"""
self._name = name
self._image = image
self._command = command
self._args = args
self._working_dir = working_dir
self._ports = ports
self._env = env
self._resources = resources
self._volume_mounts = volume_mounts
self._liveness_probe = liveness_probe
self._readiness_probe = readiness_probe
self._lifecycle = lifecycle
self._termination_message_path = termination_message_path
self._image_pull_policy = image_pull_policy
self._security_context = security_context
self._stdin = stdin
self._stdin_once = stdin_once
self._tty = tty
@property
def name(self):
"""
Gets the name of this V1Container.
Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.
:return: The name of this V1Container.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this V1Container.
Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.
:param name: The name of this V1Container.
:type: str
"""
self._name = name
@property
def image(self):
"""
Gets the image of this V1Container.
Docker image name. More info: http://releases.k8s.io/release-1.2/docs/user-guide/images.md
:return: The image of this V1Container.
:rtype: str
"""
return self._image
@image.setter
def image(self, image):
"""
Sets the image of this V1Container.
Docker image name. More info: http://releases.k8s.io/release-1.2/docs/user-guide/images.md
:param image: The image of this V1Container.
:type: str
"""
self._image = image
@property
def command(self):
"""
Gets the command of this V1Container.
Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: http://releases.k8s.io/release-1.2/docs/user-guide/containers.md#containers-and-commands
:return: The command of this V1Container.
:rtype: list[str]
"""
return self._command
@command.setter
def command(self, command):
"""
Sets the command of this V1Container.
Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: http://releases.k8s.io/release-1.2/docs/user-guide/containers.md#containers-and-commands
:param command: The command of this V1Container.
:type: list[str]
"""
self._command = command
@property
def args(self):
"""
Gets the args of this V1Container.
Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: http://releases.k8s.io/release-1.2/docs/user-guide/containers.md#containers-and-commands
:return: The args of this V1Container.
:rtype: list[str]
"""
return self._args
@args.setter
def args(self, args):
"""
Sets the args of this V1Container.
Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: http://releases.k8s.io/release-1.2/docs/user-guide/containers.md#containers-and-commands
:param args: The args of this V1Container.
:type: list[str]
"""
self._args = args
@property
def working_dir(self):
"""
Gets the working_dir of this V1Container.
Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.
:return: The working_dir of this V1Container.
:rtype: str
"""
return self._working_dir
@working_dir.setter
def working_dir(self, working_dir):
"""
Sets the working_dir of this V1Container.
Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.
:param working_dir: The working_dir of this V1Container.
:type: str
"""
self._working_dir = working_dir
@property
def ports(self):
"""
Gets the ports of this V1Container.
List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.
:return: The ports of this V1Container.
:rtype: list[V1ContainerPort]
"""
return self._ports
@ports.setter
def ports(self, ports):
"""
Sets the ports of this V1Container.
List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.
:param ports: The ports of this V1Container.
:type: list[V1ContainerPort]
"""
self._ports = ports
@property
def env(self):
"""
Gets the env of this V1Container.
List of environment variables to set in the container. Cannot be updated.
:return: The env of this V1Container.
:rtype: list[V1EnvVar]
"""
return self._env
@env.setter
def env(self, env):
"""
Sets the env of this V1Container.
List of environment variables to set in the container. Cannot be updated.
:param env: The env of this V1Container.
:type: list[V1EnvVar]
"""
self._env = env
@property
def resources(self):
"""
Gets the resources of this V1Container.
Compute Resources required by this container. Cannot be updated. More info: http://releases.k8s.io/release-1.2/docs/user-guide/persistent-volumes.md#resources
:return: The resources of this V1Container.
:rtype: V1ResourceRequirements
"""
return self._resources
@resources.setter
def resources(self, resources):
"""
Sets the resources of this V1Container.
Compute Resources required by this container. Cannot be updated. More info: http://releases.k8s.io/release-1.2/docs/user-guide/persistent-volumes.md#resources
:param resources: The resources of this V1Container.
:type: V1ResourceRequirements
"""
self._resources = resources
@property
def volume_mounts(self):
"""
Gets the volume_mounts of this V1Container.
Pod volumes to mount into the container's filesystem. Cannot be updated.
:return: The volume_mounts of this V1Container.
:rtype: list[V1VolumeMount]
"""
return self._volume_mounts
@volume_mounts.setter
def volume_mounts(self, volume_mounts):
"""
Sets the volume_mounts of this V1Container.
Pod volumes to mount into the container's filesystem. Cannot be updated.
:param volume_mounts: The volume_mounts of this V1Container.
:type: list[V1VolumeMount]
"""
self._volume_mounts = volume_mounts
@property
def liveness_probe(self):
"""
Gets the liveness_probe of this V1Container.
Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: http://releases.k8s.io/release-1.2/docs/user-guide/pod-states.md#container-probes
:return: The liveness_probe of this V1Container.
:rtype: V1Probe
"""
return self._liveness_probe
@liveness_probe.setter
def liveness_probe(self, liveness_probe):
"""
Sets the liveness_probe of this V1Container.
Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: http://releases.k8s.io/release-1.2/docs/user-guide/pod-states.md#container-probes
:param liveness_probe: The liveness_probe of this V1Container.
:type: V1Probe
"""
self._liveness_probe = liveness_probe
@property
def readiness_probe(self):
"""
Gets the readiness_probe of this V1Container.
Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: http://releases.k8s.io/release-1.2/docs/user-guide/pod-states.md#container-probes
:return: The readiness_probe of this V1Container.
:rtype: V1Probe
"""
return self._readiness_probe
@readiness_probe.setter
def readiness_probe(self, readiness_probe):
"""
Sets the readiness_probe of this V1Container.
Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: http://releases.k8s.io/release-1.2/docs/user-guide/pod-states.md#container-probes
:param readiness_probe: The readiness_probe of this V1Container.
:type: V1Probe
"""
self._readiness_probe = readiness_probe
@property
def lifecycle(self):
"""
Gets the lifecycle of this V1Container.
Actions that the management system should take in response to container lifecycle events. Cannot be updated.
:return: The lifecycle of this V1Container.
:rtype: V1Lifecycle
"""
return self._lifecycle
@lifecycle.setter
def lifecycle(self, lifecycle):
"""
Sets the lifecycle of this V1Container.
Actions that the management system should take in response to container lifecycle events. Cannot be updated.
:param lifecycle: The lifecycle of this V1Container.
:type: V1Lifecycle
"""
self._lifecycle = lifecycle
@property
def termination_message_path(self):
"""
Gets the termination_message_path of this V1Container.
Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Defaults to /dev/termination-log. Cannot be updated.
:return: The termination_message_path of this V1Container.
:rtype: str
"""
return self._termination_message_path
@termination_message_path.setter
def termination_message_path(self, termination_message_path):
"""
Sets the termination_message_path of this V1Container.
Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Defaults to /dev/termination-log. Cannot be updated.
:param termination_message_path: The termination_message_path of this V1Container.
:type: str
"""
self._termination_message_path = termination_message_path
@property
def image_pull_policy(self):
"""
Gets the image_pull_policy of this V1Container.
Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: http://releases.k8s.io/release-1.2/docs/user-guide/images.md#updating-images
:return: The image_pull_policy of this V1Container.
:rtype: str
"""
return self._image_pull_policy
@image_pull_policy.setter
def image_pull_policy(self, image_pull_policy):
"""
Sets the image_pull_policy of this V1Container.
Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: http://releases.k8s.io/release-1.2/docs/user-guide/images.md#updating-images
:param image_pull_policy: The image_pull_policy of this V1Container.
:type: str
"""
self._image_pull_policy = image_pull_policy
@property
def security_context(self):
"""
Gets the security_context of this V1Container.
Security options the pod should run with. More info: http://releases.k8s.io/release-1.2/docs/design/security_context.md
:return: The security_context of this V1Container.
:rtype: V1SecurityContext
"""
return self._security_context
@security_context.setter
def security_context(self, security_context):
"""
Sets the security_context of this V1Container.
Security options the pod should run with. More info: http://releases.k8s.io/release-1.2/docs/design/security_context.md
:param security_context: The security_context of this V1Container.
:type: V1SecurityContext
"""
self._security_context = security_context
@property
def stdin(self):
"""
Gets the stdin of this V1Container.
Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.
:return: The stdin of this V1Container.
:rtype: bool
"""
return self._stdin
@stdin.setter
def stdin(self, stdin):
"""
Sets the stdin of this V1Container.
Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.
:param stdin: The stdin of this V1Container.
:type: bool
"""
self._stdin = stdin
@property
def stdin_once(self):
"""
Gets the stdin_once of this V1Container.
Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true, the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container process that reads from stdin will never receive EOF. Default is false.
:return: The stdin_once of this V1Container.
:rtype: bool
"""
return self._stdin_once
@stdin_once.setter
def stdin_once(self, stdin_once):
"""
Sets the stdin_once of this V1Container.
Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true, the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container process that reads from stdin will never receive EOF. Default is false.
:param stdin_once: The stdin_once of this V1Container.
:type: bool
"""
self._stdin_once = stdin_once
@property
def tty(self):
"""
Gets the tty of this V1Container.
Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.
:return: The tty of this V1Container.
:rtype: bool
"""
return self._tty
@tty.setter
def tty(self, tty):
"""
Sets the tty of this V1Container.
Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.
:param tty: The tty of this V1Container.
:type: bool
"""
self._tty = tty
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(V1Container.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
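# Illustrative usage sketch (not emitted by swagger-codegen): construct a
# container spec via the generated setters and serialize it. The attribute
# values below are arbitrary examples.
if __name__ == "__main__":
    example = V1Container()
    example.name = "web"                        # DNS_LABEL name within the pod
    example.image = "nginx:1.9"                 # Docker image name
    example.image_pull_policy = "IfNotPresent"
    example.command = ["nginx", "-g", "daemon off;"]
    # to_dict() recursively converts nested swagger models; here it simply
    # returns a plain dict with None for the unset attributes
    print(example.to_dict())
    # to_str()/__repr__() pretty-print the same dict via pprint.pformat
    print(example.to_str())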
|
|
"""Tests for ZHA config flow."""
import os
from unittest.mock import AsyncMock, MagicMock, patch, sentinel
import pytest
import serial.tools.list_ports
import zigpy.config
from homeassistant import setup
from homeassistant.components.zha import config_flow
from homeassistant.components.zha.core.const import CONF_RADIO_TYPE, DOMAIN, RadioType
from homeassistant.config_entries import SOURCE_USER
from homeassistant.const import CONF_SOURCE
from homeassistant.data_entry_flow import RESULT_TYPE_CREATE_ENTRY, RESULT_TYPE_FORM
from tests.common import MockConfigEntry
def com_port():
"""Mock of a serial port."""
port = serial.tools.list_ports_common.ListPortInfo("/dev/ttyUSB1234")
port.serial_number = "1234"
port.manufacturer = "Virtual serial port"
port.device = "/dev/ttyUSB1234"
port.description = "Some serial port"
return port
@patch("homeassistant.components.zha.async_setup_entry", AsyncMock(return_value=True))
@patch("zigpy_znp.zigbee.application.ControllerApplication.probe", return_value=True)
async def test_discovery(detect_mock, hass):
"""Test zeroconf flow -- radio detected."""
service_info = {
"host": "192.168.1.200",
"port": 6053,
"hostname": "_tube_zb_gw._tcp.local.",
"properties": {"name": "tube_123456"},
}
flow = await hass.config_entries.flow.async_init(
"zha", context={"source": "zeroconf"}, data=service_info
)
result = await hass.config_entries.flow.async_configure(
flow["flow_id"], user_input={}
)
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "socket://192.168.1.200:6638"
assert result["data"] == {
"device": {
"baudrate": 115200,
"flow_control": None,
"path": "socket://192.168.1.200:6638",
},
CONF_RADIO_TYPE: "znp",
}
@patch("homeassistant.components.zha.async_setup_entry", AsyncMock(return_value=True))
@patch("zigpy_znp.zigbee.application.ControllerApplication.probe", return_value=True)
async def test_discovery_already_setup(detect_mock, hass):
"""Test zeroconf flow -- radio detected."""
service_info = {
"host": "192.168.1.200",
"port": 6053,
"hostname": "_tube_zb_gw._tcp.local.",
"properties": {"name": "tube_123456"},
}
await setup.async_setup_component(hass, "persistent_notification", {})
MockConfigEntry(domain=DOMAIN, data={"usb_path": "/dev/ttyUSB1"}).add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
"zha", context={"source": "zeroconf"}, data=service_info
)
await hass.async_block_till_done()
assert result["type"] == "abort"
assert result["reason"] == "single_instance_allowed"
@patch("serial.tools.list_ports.comports", MagicMock(return_value=[com_port()]))
@patch(
"homeassistant.components.zha.config_flow.detect_radios",
return_value={CONF_RADIO_TYPE: "test_radio"},
)
async def test_user_flow(detect_mock, hass):
"""Test user flow -- radio detected."""
port = com_port()
port_select = f"{port}, s/n: {port.serial_number} - {port.manufacturer}"
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: SOURCE_USER},
data={zigpy.config.CONF_DEVICE_PATH: port_select},
)
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert result["title"].startswith(port.description)
assert result["data"] == {CONF_RADIO_TYPE: "test_radio"}
assert detect_mock.await_count == 1
assert detect_mock.await_args[0][0] == port.device
@patch("serial.tools.list_ports.comports", MagicMock(return_value=[com_port()]))
@patch(
"homeassistant.components.zha.config_flow.detect_radios",
return_value=None,
)
async def test_user_flow_not_detected(detect_mock, hass):
"""Test user flow, radio not detected."""
port = com_port()
port_select = f"{port}, s/n: {port.serial_number} - {port.manufacturer}"
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: SOURCE_USER},
data={zigpy.config.CONF_DEVICE_PATH: port_select},
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "pick_radio"
assert detect_mock.await_count == 1
assert detect_mock.await_args[0][0] == port.device
@patch("serial.tools.list_ports.comports", MagicMock(return_value=[com_port()]))
async def test_user_flow_show_form(hass):
"""Test user step form."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: SOURCE_USER},
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "user"
@patch("serial.tools.list_ports.comports", MagicMock(return_value=[]))
async def test_user_flow_show_manual(hass):
"""Test user flow manual entry when no comport detected."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: SOURCE_USER},
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "pick_radio"
async def test_user_flow_manual(hass):
"""Test user flow manual entry."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: SOURCE_USER},
data={zigpy.config.CONF_DEVICE_PATH: config_flow.CONF_MANUAL_PATH},
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "pick_radio"
@pytest.mark.parametrize("radio_type", RadioType.list())
async def test_pick_radio_flow(hass, radio_type):
"""Test radio picker."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={CONF_SOURCE: "pick_radio"}, data={CONF_RADIO_TYPE: radio_type}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "port_config"
async def test_user_flow_existing_config_entry(hass):
"""Test if config entry already exists."""
MockConfigEntry(domain=DOMAIN, data={"usb_path": "/dev/ttyUSB1"}).add_to_hass(hass)
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={CONF_SOURCE: SOURCE_USER}
)
assert result["type"] == "abort"
@patch("zigpy_cc.zigbee.application.ControllerApplication.probe", return_value=False)
@patch(
"zigpy_deconz.zigbee.application.ControllerApplication.probe", return_value=False
)
@patch(
"zigpy_zigate.zigbee.application.ControllerApplication.probe", return_value=False
)
@patch("zigpy_xbee.zigbee.application.ControllerApplication.probe", return_value=False)
async def test_probe_radios(xbee_probe, zigate_probe, deconz_probe, cc_probe, hass):
"""Test detect radios."""
app_ctrl_cls = MagicMock()
app_ctrl_cls.SCHEMA_DEVICE = zigpy.config.SCHEMA_DEVICE
app_ctrl_cls.probe = AsyncMock(side_effect=(True, False))
p1 = patch(
"bellows.zigbee.application.ControllerApplication.probe",
side_effect=(True, False),
)
with p1 as probe_mock:
res = await config_flow.detect_radios("/dev/null")
assert probe_mock.await_count == 1
assert res[CONF_RADIO_TYPE] == "ezsp"
assert zigpy.config.CONF_DEVICE in res
assert (
res[zigpy.config.CONF_DEVICE][zigpy.config.CONF_DEVICE_PATH] == "/dev/null"
)
res = await config_flow.detect_radios("/dev/null")
assert res is None
assert xbee_probe.await_count == 1
assert zigate_probe.await_count == 1
assert deconz_probe.await_count == 1
assert cc_probe.await_count == 1
@patch("bellows.zigbee.application.ControllerApplication.probe", return_value=False)
async def test_user_port_config_fail(probe_mock, hass):
"""Test port config flow."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: "pick_radio"},
data={CONF_RADIO_TYPE: RadioType.ezsp.description},
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={zigpy.config.CONF_DEVICE_PATH: "/dev/ttyUSB33"},
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "port_config"
assert result["errors"]["base"] == "cannot_connect"
assert probe_mock.await_count == 1
@patch("homeassistant.components.zha.async_setup_entry", AsyncMock(return_value=True))
@patch("bellows.zigbee.application.ControllerApplication.probe", return_value=True)
async def test_user_port_config(probe_mock, hass):
"""Test port config."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: "pick_radio"},
data={CONF_RADIO_TYPE: RadioType.ezsp.description},
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={zigpy.config.CONF_DEVICE_PATH: "/dev/ttyUSB33"},
)
assert result["type"] == "create_entry"
assert result["title"].startswith("/dev/ttyUSB33")
assert (
result["data"][zigpy.config.CONF_DEVICE][zigpy.config.CONF_DEVICE_PATH]
== "/dev/ttyUSB33"
)
assert result["data"][CONF_RADIO_TYPE] == "ezsp"
assert probe_mock.await_count == 1
def test_get_serial_by_id_no_dir():
"""Test serial by id conversion if there's no /dev/serial/by-id."""
p1 = patch("os.path.isdir", MagicMock(return_value=False))
p2 = patch("os.scandir")
with p1 as is_dir_mock, p2 as scan_mock:
res = config_flow.get_serial_by_id(sentinel.path)
assert res is sentinel.path
assert is_dir_mock.call_count == 1
assert scan_mock.call_count == 0
def test_get_serial_by_id():
"""Test serial by id conversion."""
p1 = patch("os.path.isdir", MagicMock(return_value=True))
p2 = patch("os.scandir")
def _realpath(path):
if path is sentinel.matched_link:
return sentinel.path
return sentinel.serial_link_path
p3 = patch("os.path.realpath", side_effect=_realpath)
with p1 as is_dir_mock, p2 as scan_mock, p3:
res = config_flow.get_serial_by_id(sentinel.path)
assert res is sentinel.path
assert is_dir_mock.call_count == 1
assert scan_mock.call_count == 1
entry1 = MagicMock(spec_set=os.DirEntry)
entry1.is_symlink.return_value = True
entry1.path = sentinel.some_path
entry2 = MagicMock(spec_set=os.DirEntry)
entry2.is_symlink.return_value = False
entry2.path = sentinel.other_path
entry3 = MagicMock(spec_set=os.DirEntry)
entry3.is_symlink.return_value = True
entry3.path = sentinel.matched_link
scan_mock.return_value = [entry1, entry2, entry3]
res = config_flow.get_serial_by_id(sentinel.path)
assert res is sentinel.matched_link
assert is_dir_mock.call_count == 2
assert scan_mock.call_count == 2
|
|
#!/usr/bin/env python
# coding: utf-8
__author__ = 'toly'
"""
script for making per-page dictionaries according to a user's personal list of known words
"""
import re
import os
import sys
import argparse
from string import lower
import nltk
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.stem.snowball import EnglishStemmer
from nltk.tokenize import RegexpTokenizer
tokenizer = RegexpTokenizer(r'\w+')
stemmer = EnglishStemmer()
lemmatizer = WordNetLemmatizer()
NO_LETTER_REGEXP = re.compile(r'[^a-zA-Z]')
PERSONAL_USER_DIR = os.path.join(os.path.expanduser('~'), '.easy_english')
UNKNOWN_STEMS_FILE = 'unknown_stems.txt'
KNOWN_STEMS_FILE = 'known_stems.txt'
STUDY_DICT_FILE = 'dictionary.txt'
PERSONAL_FILES = [UNKNOWN_STEMS_FILE, KNOWN_STEMS_FILE, STUDY_DICT_FILE]
PERSONAL_FILES = map(lambda x: os.path.join(PERSONAL_USER_DIR, x), PERSONAL_FILES)
UNKNOWN_STEMS_FILE, KNOWN_STEMS_FILE, STUDY_DICT_FILE = PERSONAL_FILES
def main():
"""
main func - entry point
"""
# create the working directory for personal user word lists if it does not exist yet
if not os.path.exists(PERSONAL_USER_DIR):
os.mkdir(PERSONAL_USER_DIR)
# make arguments parser and parse arguments
arg_parser = make_arguments_parser()
args = arg_parser.parse_args()
# main loop over the pages of the input file
big_page = ''
for page_num, page in enumerate(file_pages(args.input_file)):
big_page += page
words = tokenizer.tokenize(big_page)
words = map(lower, words)
words = list(set(words))
words = filter_non_words(words)
tesaurus = Tesaurus()
tesaurus.determine_words(words)
def make_arguments_parser():
"""
make arguments parser and set options
"""
argument_parser = argparse.ArgumentParser()
argument_parser.add_argument('-i', '--input-file', type=str, required=True, help="input txt file")
argument_parser.add_argument('-o', '--output-file', type=str, help="output file (default: <input_file>_d.txt )")
return argument_parser
def filter_non_words(words):
return filter(lambda x: not NO_LETTER_REGEXP.findall(x), words)
class Tesaurus(object):
unknown_stems_file = None
known_stems_file = None
study_words_file = None
unknown_stems = None
known_stems = None
study_words = None
def __init__(self, unknown_stems_file=UNKNOWN_STEMS_FILE, known_stems_file=KNOWN_STEMS_FILE,
study_words_file=STUDY_DICT_FILE):
self.unknown_stems_file = unknown_stems_file
self.known_stems_file = known_stems_file
self.study_words_file = study_words_file
personal_files = (unknown_stems_file, known_stems_file, study_words_file)
self.unknown_stems, self.known_stems, self.study_words = map(get_user_words, personal_files)
def determine_words(self, words_list):
"""
Determine words - known or unknown - and append them to the study dictionary if needed
"""
# dict: lemma -> stem
dict_lemmas = {}
not_determined_words = []
total_words = len(words_list)
n = 0
for word, part_of_speech in nltk.pos_tag(words_list):
n += 1
lemma, stemm = get_base_forms(word, part_of_speech)
if stemm in self.known_stems or stemm in self.unknown_stems:
continue
not_determined_words.append(lemma)
dict_lemmas[lemma] = stemm
if len(not_determined_words) < 10:
continue
progress = 100 * float(n) / float(total_words)
print "Progress: %d/%d [%f %%]" % (n, total_words, progress)
known_words = input_known_words(not_determined_words)
unknown_words = set(not_determined_words) - set(known_words)
known_stems = map(lambda x: dict_lemmas[x], known_words)
unknown_stems = map(lambda x: dict_lemmas[x], unknown_words)
append_words(self.known_stems_file, known_stems)
append_words(self.unknown_stems_file, unknown_stems)
append_words(self.study_words_file, unknown_words)
self.known_stems += known_stems
self.unknown_stems += unknown_stems
not_determined_words = []
if not_determined_words:
known_words = input_known_words(not_determined_words)
unknown_words = set(not_determined_words) - set(known_words)
known_stems = map(lambda x: dict_lemmas[x], known_words)
unknown_stems = map(lambda x: dict_lemmas[x], unknown_words)
append_words(self.known_stems_file, known_stems)
append_words(self.unknown_stems_file, unknown_stems)
append_words(self.study_words_file, unknown_words)
def append_words(filename, words):
"""
append words to file
"""
lines = map(lambda x: '%s\n' % x, words)
with open(filename, 'a') as f:
f.writelines(lines)
def get_base_forms(word, part_of_speech):
"""
word, part_of_speech -> lemma, stemm
"""
try:
lemma = lemmatizer.lemmatize(word, lower(part_of_speech[0]))
except Exception:
lemma = lemmatizer.lemmatize(word)
stemm = stemmer.stem(lemma)
return lemma, stemm
def input_known_words(words):
"""
Ask the user which of the given words they know
list of words -> [known words]
"""
word_views = map(lambda item: '%d) %s' % item, enumerate(words))
prompt = '\n'.join(word_views) + "\nWhich words do you know? "
not_inputed = True
while not_inputed:
try:
words_positions = raw_input(prompt)
if not words_positions:
words_positions = []
break
words_positions = map(int, words_positions.split(','))
not_inputed = False
except (ValueError, ):
print "Input like a '0,3,8'"
known_words = []
for position in words_positions:
try:
known_words.append(words[position])
except IndexError:
pass
return known_words
def get_user_words(filename):
"""
get the list of user words from file <filename>,
creating the file if it does not exist
"""
if not os.path.exists(filename):
open(filename, 'a').close()
return []
def remove_end_of_line(line):
if '\n' in line:
return line.replace('\n', '')
return line
with open(filename, 'r') as f:
return map(remove_end_of_line, f.readlines())
def file_lines(filename):
"""read file line by line"""
with open(filename) as f:
for line in f:
yield line
def file_pages(filename, split_regexp=r'^===page #\d+$'):
"""read file page by page"""
page = ''
for line in file_lines(filename):
if re.match(split_regexp, line):
yield page
page = ''
continue
page += line
yield page
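# Example of the input layout that file_pages() expects (illustrative only):
# with the default split_regexp, page boundaries are lines like "===page #1":
#
#   ===page #1
#   Text of the first page...
#   ===page #2
#   Text of the second page...
#
# The generator first yields whatever precedes the first marker (an empty
# string if the file starts with a marker) and then yields each page's text;
# main() above simply concatenates the pages as it goes.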
if __name__ == "__main__":
sys.exit(main())
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Session Handling for SQLAlchemy backend.
Initializing:
* Call set_defaults with, at a minimum, the following kwargs:
sql_connection, sqlite_db
Example:
session.set_defaults(
sql_connection="sqlite:///var/lib/dragon/sqlite.db",
sqlite_db="/var/lib/dragon/sqlite.db")
Recommended ways to use sessions within this framework:
* Don't use them explicitly; this is like running with AUTOCOMMIT=1.
model_query() will implicitly use a session when called without one
supplied. This is the ideal situation because it will allow queries
to be automatically retried if the database connection is interrupted.
Note: Automatic retry will be enabled in a future patch.
It is generally fine to issue several queries in a row like this. Even though
they may be run in separate transactions and/or separate sessions, each one
will see the data from the prior calls. If needed, undo- or rollback-like
functionality should be handled at a logical level. For an example, look at
the code around quotas and reservation_rollback().
Examples:
def get_foo(context, foo):
return model_query(context, models.Foo).\
filter_by(foo=foo).\
first()
def update_foo(context, id, newfoo):
model_query(context, models.Foo).\
filter_by(id=id).\
update({'foo': newfoo})
def create_foo(context, values):
foo_ref = models.Foo()
foo_ref.update(values)
foo_ref.save()
return foo_ref
* Within the scope of a single method, keeping all the reads and writes within
the context managed by a single session. In this way, the session's __exit__
handler will take care of calling flush() and commit() for you.
If using this approach, you should not explicitly call flush() or commit().
Any error within the context of the session will cause the session to emit
a ROLLBACK. If the connection is dropped before this is possible, the
database will implicitly rollback the transaction.
Note: statements in the session scope will not be automatically retried.
If you create models within the session, they need to be added, but you
do not need to call model.save()
def create_many_foo(context, foos):
session = get_session()
with session.begin():
for foo in foos:
foo_ref = models.Foo()
foo_ref.update(foo)
session.add(foo_ref)
def update_bar(context, foo_id, newbar):
session = get_session()
with session.begin():
foo_ref = model_query(context, models.Foo, session).\
filter_by(id=foo_id).\
first()
model_query(context, models.Bar, session).\
filter_by(id=foo_ref['bar_id']).\
update({'bar': newbar})
Note: update_bar is a trivially simple example of using "with session.begin".
Whereas create_many_foo is a good example of when a transaction is needed,
it is always best to use as few queries as possible. The two queries in
update_bar can be better expressed using a single query which avoids
the need for an explicit transaction. It can be expressed like so:
def update_bar(context, foo_id, newbar):
subq = model_query(context, models.Foo.id).\
filter_by(id=foo_id).\
limit(1).\
subquery()
model_query(context, models.Bar).\
filter_by(id=subq.as_scalar()).\
update({'bar': newbar})
For reference, this emits approximately the following SQL statement:
UPDATE bar SET bar = ${newbar}
WHERE id=(SELECT bar_id FROM foo WHERE id = ${foo_id} LIMIT 1);
* Passing an active session between methods. Sessions should only be passed
to private methods. The private method must use a subtransaction; otherwise
SQLAlchemy will throw an error when you call session.begin() on an existing
transaction. Public methods should not accept a session parameter and should
not be involved in sessions within the caller's scope.
Note that this incurs more overhead in SQLAlchemy than the approaches above
due to nesting transactions, and it is not possible to implicitly retry
failed database operations when using this approach.
This also makes code somewhat more difficult to read and debug, because a
single database transaction spans more than one method. Error handling
becomes less clear in this situation. When this is needed for code clarity,
it should be clearly documented.
def myfunc(foo):
session = get_session()
with session.begin():
# do some database things
bar = _private_func(foo, session)
return bar
def _private_func(foo, session=None):
if not session:
session = get_session()
with session.begin(subtransaction=True):
# do some other database things
return bar
There are some things which it is best to avoid:
* Don't keep a transaction open any longer than necessary.
This means that your "with session.begin()" block should be as short
as possible, while still containing all the related calls for that
transaction.
* Avoid "with_lockmode('UPDATE')" when possible.
In MySQL/InnoDB, when a "SELECT ... FOR UPDATE" query does not match
any rows, it will take a gap-lock. This is a form of write-lock on the
"gap" where no rows exist, and prevents any other writes to that space.
This can effectively prevent any INSERT into a table by locking the gap
at the end of the index. Similar problems will occur if the SELECT FOR UPDATE
has an overly broad WHERE clause, or doesn't properly use an index.
One idea proposed at ODS Fall '12 was to use a normal SELECT to test the
number of rows matching a query, and if only one row is returned,
then issue the SELECT FOR UPDATE.
The better long-term solution is to use INSERT .. ON DUPLICATE KEY UPDATE.
However, this can not be done until the "deleted" columns are removed and
proper UNIQUE constraints are added to the tables.
Enabling soft deletes:
* To use/enable soft-deletes, the SoftDeleteMixin must be added
to your model class. For example:
class NovaBase(models.SoftDeleteMixin, models.ModelBase):
pass
Efficient use of soft deletes:
* There are two possible ways to mark a record as deleted:
model.soft_delete() and query.soft_delete().
The model.soft_delete() method works with a single, already-fetched entry.
query.soft_delete() issues only one db request for all entries that correspond
to the query.
* In almost all cases you should use query.soft_delete(). Some examples:
def soft_delete_bar():
count = model_query(BarModel).find(some_condition).soft_delete()
if count == 0:
raise Exception("0 entries were soft deleted")
def complex_soft_delete_with_synchronization_bar(session=None):
if session is None:
session = get_session()
with session.begin(subtransactions=True):
count = model_query(BarModel).\
find(some_condition).\
soft_delete(synchronize_session=True)
# Here synchronize_session is required, because we
# don't know what is going on in outer session.
if count == 0:
raise Exception("0 entries were soft deleted")
* There is only one situation where model.soft_delete() is appropriate: when
you fetch a single record, work with it, and mark it as deleted in the same
transaction.
def soft_delete_bar_model():
session = get_session()
with session.begin():
bar_ref = model_query(BarModel).find(some_condition).first()
# Work with bar_ref
bar_ref.soft_delete(session=session)
However, if you need to work with all entries that correspond to the query and
then soft delete them, you should use the query.soft_delete() method:
def soft_delete_multi_models():
session = get_session()
with session.begin():
query = model_query(BarModel, session=session).\
find(some_condition)
model_refs = query.all()
# Work with model_refs
query.soft_delete(synchronize_session=False)
# synchronize_session=False should be set if there is no outer
# session and these entries are not used after this.
When working with many rows, it is very important to use query.soft_delete,
which issues a single query. Using model.soft_delete(), as in the following
example, is very inefficient.
for bar_ref in bar_refs:
bar_ref.soft_delete(session=session)
# This will produce count(bar_refs) db requests.
"""
import functools
import os.path
import re
import time
from eventlet import greenthread
from oslo.config import cfg
import six
from sqlalchemy import exc as sqla_exc
import sqlalchemy.interfaces
from sqlalchemy.interfaces import PoolListener
import sqlalchemy.orm
from sqlalchemy.pool import NullPool, StaticPool
from sqlalchemy.sql.expression import literal_column
from dragon.openstack.common.db import exception
from dragon.openstack.common.gettextutils import _ # noqa
from dragon.openstack.common import log as logging
from dragon.openstack.common import timeutils
sqlite_db_opts = [
cfg.StrOpt('sqlite_db',
default='dragon.sqlite',
help='the filename to use with sqlite'),
cfg.BoolOpt('sqlite_synchronous',
default=True,
help='If true, use synchronous mode for sqlite'),
]
database_opts = [
cfg.StrOpt('connection',
default='sqlite:///' +
os.path.abspath(os.path.join(os.path.dirname(__file__),
'../', '$sqlite_db')),
help='The SQLAlchemy connection string used to connect to the '
'database',
secret=True,
deprecated_opts=[cfg.DeprecatedOpt('sql_connection',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_connection',
group='DATABASE'),
cfg.DeprecatedOpt('connection',
group='sql'), ]),
cfg.StrOpt('slave_connection',
default='',
help='The SQLAlchemy connection string used to connect to the '
'slave database',
secret=True),
cfg.IntOpt('idle_timeout',
default=3600,
deprecated_opts=[cfg.DeprecatedOpt('sql_idle_timeout',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_idle_timeout',
group='DATABASE'),
cfg.DeprecatedOpt('idle_timeout',
group='sql')],
help='timeout before idle sql connections are reaped'),
cfg.IntOpt('min_pool_size',
default=1,
deprecated_opts=[cfg.DeprecatedOpt('sql_min_pool_size',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_min_pool_size',
group='DATABASE')],
help='Minimum number of SQL connections to keep open in a '
'pool'),
cfg.IntOpt('max_pool_size',
default=None,
deprecated_opts=[cfg.DeprecatedOpt('sql_max_pool_size',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_max_pool_size',
group='DATABASE')],
help='Maximum number of SQL connections to keep open in a '
'pool'),
cfg.IntOpt('max_retries',
default=10,
deprecated_opts=[cfg.DeprecatedOpt('sql_max_retries',
group='DEFAULT'),
cfg.DeprecatedOpt('sql_max_retries',
group='DATABASE')],
help='maximum db connection retries during startup. '
'(setting -1 implies an infinite retry count)'),
cfg.IntOpt('retry_interval',
default=10,
deprecated_opts=[cfg.DeprecatedOpt('sql_retry_interval',
group='DEFAULT'),
cfg.DeprecatedOpt('reconnect_interval',
group='DATABASE')],
help='interval between retries of opening a sql connection'),
cfg.IntOpt('max_overflow',
default=None,
deprecated_opts=[cfg.DeprecatedOpt('sql_max_overflow',
group='DEFAULT'),
cfg.DeprecatedOpt('sqlalchemy_max_overflow',
group='DATABASE')],
help='If set, use this value for max_overflow with sqlalchemy'),
cfg.IntOpt('connection_debug',
default=0,
deprecated_opts=[cfg.DeprecatedOpt('sql_connection_debug',
group='DEFAULT')],
help='Verbosity of SQL debugging information. 0=None, '
'100=Everything'),
cfg.BoolOpt('connection_trace',
default=False,
deprecated_opts=[cfg.DeprecatedOpt('sql_connection_trace',
group='DEFAULT')],
help='Add python stack traces to SQL as comment strings'),
cfg.IntOpt('pool_timeout',
default=None,
deprecated_opts=[cfg.DeprecatedOpt('sqlalchemy_pool_timeout',
group='DATABASE')],
help='If set, use this value for pool_timeout with sqlalchemy'),
]
CONF = cfg.CONF
CONF.register_opts(sqlite_db_opts)
CONF.register_opts(database_opts, 'database')
LOG = logging.getLogger(__name__)
_ENGINE = None
_MAKER = None
_SLAVE_ENGINE = None
_SLAVE_MAKER = None
def set_defaults(sql_connection, sqlite_db, max_pool_size=None,
max_overflow=None, pool_timeout=None):
"""Set defaults for configuration variables."""
cfg.set_defaults(database_opts,
connection=sql_connection)
cfg.set_defaults(sqlite_db_opts,
sqlite_db=sqlite_db)
# Update the QueuePool defaults
if max_pool_size is not None:
cfg.set_defaults(database_opts,
max_pool_size=max_pool_size)
if max_overflow is not None:
cfg.set_defaults(database_opts,
max_overflow=max_overflow)
if pool_timeout is not None:
cfg.set_defaults(database_opts,
pool_timeout=pool_timeout)
def cleanup():
global _ENGINE, _MAKER
global _SLAVE_ENGINE, _SLAVE_MAKER
if _MAKER:
_MAKER.close_all()
_MAKER = None
if _ENGINE:
_ENGINE.dispose()
_ENGINE = None
if _SLAVE_MAKER:
_SLAVE_MAKER.close_all()
_SLAVE_MAKER = None
if _SLAVE_ENGINE:
_SLAVE_ENGINE.dispose()
_SLAVE_ENGINE = None
class SqliteForeignKeysListener(PoolListener):
"""Ensures that the foreign key constraints are enforced in SQLite.
Foreign key constraints are disabled by default in SQLite, so they are
enabled here for every database connection.
"""
def connect(self, dbapi_con, con_record):
dbapi_con.execute('pragma foreign_keys=ON')
def get_session(autocommit=True, expire_on_commit=False, sqlite_fk=False,
slave_session=False, mysql_traditional_mode=False):
"""Return a SQLAlchemy session."""
global _MAKER
global _SLAVE_MAKER
maker = _MAKER
if slave_session:
maker = _SLAVE_MAKER
if maker is None:
engine = get_engine(sqlite_fk=sqlite_fk, slave_engine=slave_session,
mysql_traditional_mode=mysql_traditional_mode)
maker = get_maker(engine, autocommit, expire_on_commit)
if slave_session:
_SLAVE_MAKER = maker
else:
_MAKER = maker
session = maker()
return session
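# Minimal usage sketch (an assumption, not part of the upstream module): the
# "keep all reads and writes inside one session context" pattern from the
# module docstring, expressed with the helper above.
def _example_execute_in_transaction(statement):
    session = get_session()
    with session.begin():
        return session.execute(statement)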
# note(boris-42): In current versions of DB backends unique constraint
# violation messages follow the structure:
#
# sqlite:
# 1 column - (IntegrityError) column c1 is not unique
# N columns - (IntegrityError) column c1, c2, ..., N are not unique
#
# postgres:
# 1 column - (IntegrityError) duplicate key value violates unique
# constraint "users_c1_key"
# N columns - (IntegrityError) duplicate key value violates unique
# constraint "name_of_our_constraint"
#
# mysql:
# 1 column - (IntegrityError) (1062, "Duplicate entry 'value_of_c1' for key
# 'c1'")
# N columns - (IntegrityError) (1062, "Duplicate entry 'values joined
# with -' for key 'name_of_our_constraint'")
_DUP_KEY_RE_DB = {
"sqlite": re.compile(r"^.*columns?([^)]+)(is|are)\s+not\s+unique$"),
"postgresql": re.compile(r"^.*duplicate\s+key.*\"([^\"]+)\"\s*\n.*$"),
"mysql": re.compile(r"^.*\(1062,.*'([^\']+)'\"\)$")
}
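# Illustrative sketch (an assumption, not part of the upstream module): how the
# sqlite pattern above extracts the offending column from a sample message.
def _example_parse_sqlite_duplicate(
        message="(IntegrityError) column name is not unique"):
    match = _DUP_KEY_RE_DB["sqlite"].match(message)
    return match.group(1).strip() if match else None  # -> "name"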
def _raise_if_duplicate_entry_error(integrity_error, engine_name):
"""Raise exception if two entries are duplicated.
A DBDuplicateEntry exception is raised if the integrity error wraps a
unique constraint violation.
"""
def get_columns_from_uniq_cons_or_name(columns):
# note(vsergeyev): UniqueConstraint name convention: "uniq_t0c10c2"
# where `t` is the table name and `c1`, `c2`
# are the columns in the UniqueConstraint.
uniqbase = "uniq_"
if not columns.startswith(uniqbase):
if engine_name == "postgresql":
return [columns[columns.index("_") + 1:columns.rindex("_")]]
return [columns]
return columns[len(uniqbase):].split("0")[1:]
if engine_name not in ["mysql", "sqlite", "postgresql"]:
return
# FIXME(johannes): The usage of the .message attribute has been
# deprecated since Python 2.6. However, the exceptions raised by
# SQLAlchemy can differ when using unicode() and accessing .message.
# An audit across all three supported engines will be necessary to
# ensure there are no regressions.
m = _DUP_KEY_RE_DB[engine_name].match(integrity_error.message)
if not m:
return
columns = m.group(1)
if engine_name == "sqlite":
columns = columns.strip().split(", ")
else:
columns = get_columns_from_uniq_cons_or_name(columns)
raise exception.DBDuplicateEntry(columns, integrity_error)
# NOTE(comstud): In current versions of DB backends, Deadlock violation
# messages follow the structure:
#
# mysql:
# (OperationalError) (1213, 'Deadlock found when trying to get lock; try '
# 'restarting transaction') <query_str> <query_args>
_DEADLOCK_RE_DB = {
"mysql": re.compile(r"^.*\(1213, 'Deadlock.*")
}
def _raise_if_deadlock_error(operational_error, engine_name):
"""Raise exception on deadlock condition.
Raise DBDeadlock exception if OperationalError contains a Deadlock
condition.
"""
deadlock_re = _DEADLOCK_RE_DB.get(engine_name)
if deadlock_re is None:
return
# FIXME(johannes): The usage of the .message attribute has been
# deprecated since Python 2.6. However, the exceptions raised by
# SQLAlchemy can differ when using unicode() and accessing .message.
# An audit across all three supported engines will be necessary to
# ensure there are no regressions.
m = deadlock_re.match(operational_error.message)
if not m:
return
raise exception.DBDeadlock(operational_error)
def _wrap_db_error(f):
def _wrap(*args, **kwargs):
try:
return f(*args, **kwargs)
except UnicodeEncodeError:
raise exception.DBInvalidUnicodeParameter()
# note(boris-42): We should catch unique constraint violation and
# wrap it by our own DBDuplicateEntry exception. Unique constraint
# violation is wrapped by IntegrityError.
except sqla_exc.OperationalError as e:
_raise_if_deadlock_error(e, get_engine().name)
# NOTE(comstud): A lot of code is checking for OperationalError
# so let's not wrap it for now.
raise
except sqla_exc.IntegrityError as e:
# note(boris-42): SqlAlchemy doesn't unify errors from different
# DBs so we must do this. Also in some tables (for example
# instance_types) there are more than one unique constraint. This
# means we should get names of columns, which values violate
# unique constraint, from error message.
_raise_if_duplicate_entry_error(e, get_engine().name)
raise exception.DBError(e)
except Exception as e:
LOG.exception(_('DB exception wrapped.'))
raise exception.DBError(e)
_wrap.func_name = f.func_name
return _wrap
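# Minimal usage sketch (an assumption, not part of the upstream module): wrap a
# helper that talks to the database so errors it raises are translated into the
# DBError/DBDuplicateEntry/DBDeadlock exceptions from db.exception, the same
# way the Session methods further below are wrapped.
@_wrap_db_error
def _example_scalar_query(session, statement):
    return session.execute(statement).scalar()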
def get_engine(sqlite_fk=False, slave_engine=False,
mysql_traditional_mode=False):
"""Return a SQLAlchemy engine."""
global _ENGINE
global _SLAVE_ENGINE
engine = _ENGINE
db_uri = CONF.database.connection
if slave_engine:
engine = _SLAVE_ENGINE
db_uri = CONF.database.slave_connection
if engine is None:
engine = create_engine(db_uri, sqlite_fk=sqlite_fk,
mysql_traditional_mode=mysql_traditional_mode)
if slave_engine:
_SLAVE_ENGINE = engine
else:
_ENGINE = engine
return engine
def _synchronous_switch_listener(dbapi_conn, connection_rec):
"""Switch sqlite connections to non-synchronous mode."""
dbapi_conn.execute("PRAGMA synchronous = OFF")
def _add_regexp_listener(dbapi_con, con_record):
"""Add REGEXP function to sqlite connections."""
def regexp(expr, item):
reg = re.compile(expr)
return reg.search(six.text_type(item)) is not None
dbapi_con.create_function('regexp', 2, regexp)
def _greenthread_yield(dbapi_con, con_record):
"""Ensure other greenthreads get a chance to be executed.
Force a context switch. With common database backends (e.g. MySQLdb and
sqlite), there is no implicit yield caused by network I/O since they are
implemented by C libraries that eventlet cannot monkey patch.
"""
greenthread.sleep(0)
def _ping_listener(engine, dbapi_conn, connection_rec, connection_proxy):
"""Ensures that MySQL and DB2 connections are alive.
Borrowed from:
http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f
"""
cursor = dbapi_conn.cursor()
try:
ping_sql = 'select 1'
if engine.name == 'ibm_db_sa':
# DB2 requires a table expression
ping_sql = 'select 1 from (values (1)) AS t1'
cursor.execute(ping_sql)
except Exception as ex:
if engine.dialect.is_disconnect(ex, dbapi_conn, cursor):
msg = _('Database server has gone away: %s') % ex
LOG.warning(msg)
raise sqla_exc.DisconnectionError(msg)
else:
raise
def _set_mode_traditional(dbapi_con, connection_rec, connection_proxy):
"""Set engine mode to 'traditional'.
Required to prevent silent truncates at insert or update operations
under MySQL. By default MySQL truncates an inserted string if it is longer
than the declared field, emitting only a warning. That is fraught with data
corruption.
"""
dbapi_con.cursor().execute("SET SESSION sql_mode = TRADITIONAL;")
def _is_db_connection_error(args):
"""Return True if error in connecting to db."""
# NOTE(adam_g): This is currently MySQL specific and needs to be extended
# to support Postgres and others.
# For the db2, the error code is -30081 since the db2 is still not ready
conn_err_codes = ('2002', '2003', '2006', '2013', '-30081')
for err_code in conn_err_codes:
if args.find(err_code) != -1:
return True
return False
def create_engine(sql_connection, sqlite_fk=False,
mysql_traditional_mode=False):
"""Return a new SQLAlchemy engine."""
# NOTE(geekinutah): At this point we could be connecting to the normal
# db handle or the slave db handle. Things like
# _wrap_db_error aren't going to work well if their
# backends don't match. Let's check.
_assert_matching_drivers()
connection_dict = sqlalchemy.engine.url.make_url(sql_connection)
engine_args = {
"pool_recycle": CONF.database.idle_timeout,
"echo": False,
'convert_unicode': True,
}
# Map our SQL debug level to SQLAlchemy's options
if CONF.database.connection_debug >= 100:
engine_args['echo'] = 'debug'
elif CONF.database.connection_debug >= 50:
engine_args['echo'] = True
if "sqlite" in connection_dict.drivername:
if sqlite_fk:
engine_args["listeners"] = [SqliteForeignKeysListener()]
engine_args["poolclass"] = NullPool
if CONF.database.connection == "sqlite://":
engine_args["poolclass"] = StaticPool
engine_args["connect_args"] = {'check_same_thread': False}
else:
if CONF.database.max_pool_size is not None:
engine_args['pool_size'] = CONF.database.max_pool_size
if CONF.database.max_overflow is not None:
engine_args['max_overflow'] = CONF.database.max_overflow
if CONF.database.pool_timeout is not None:
engine_args['pool_timeout'] = CONF.database.pool_timeout
engine = sqlalchemy.create_engine(sql_connection, **engine_args)
sqlalchemy.event.listen(engine, 'checkin', _greenthread_yield)
if engine.name in ['mysql', 'ibm_db_sa']:
callback = functools.partial(_ping_listener, engine)
sqlalchemy.event.listen(engine, 'checkout', callback)
if mysql_traditional_mode:
sqlalchemy.event.listen(engine, 'checkout', _set_mode_traditional)
else:
LOG.warning(_("This application has not enabled MySQL traditional"
" mode, which means silent data corruption may"
" occur. Please encourage the application"
" developers to enable this mode."))
elif 'sqlite' in connection_dict.drivername:
if not CONF.sqlite_synchronous:
sqlalchemy.event.listen(engine, 'connect',
_synchronous_switch_listener)
sqlalchemy.event.listen(engine, 'connect', _add_regexp_listener)
if (CONF.database.connection_trace and
engine.dialect.dbapi.__name__ == 'MySQLdb'):
_patch_mysqldb_with_stacktrace_comments()
try:
engine.connect()
except sqla_exc.OperationalError as e:
if not _is_db_connection_error(e.args[0]):
raise
remaining = CONF.database.max_retries
if remaining == -1:
remaining = 'infinite'
while True:
msg = _('SQL connection failed. %s attempts left.')
LOG.warn(msg % remaining)
if remaining != 'infinite':
remaining -= 1
time.sleep(CONF.database.retry_interval)
try:
engine.connect()
break
except sqla_exc.OperationalError as e:
if (remaining != 'infinite' and remaining == 0) or \
not _is_db_connection_error(e.args[0]):
raise
return engine
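# Illustrative sketch (an assumption, not part of the upstream module): build a
# standalone engine for a scratch SQLite file; because the drivername contains
# "sqlite", the NullPool branch above is taken for such URLs.
def _example_scratch_engine(path='/tmp/scratch.sqlite'):
    return create_engine('sqlite:///' + path)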
class Query(sqlalchemy.orm.query.Query):
"""Subclass of sqlalchemy.query with soft_delete() method."""
def soft_delete(self, synchronize_session='evaluate'):
return self.update({'deleted': literal_column('id'),
'updated_at': literal_column('updated_at'),
'deleted_at': timeutils.utcnow()},
synchronize_session=synchronize_session)
class Session(sqlalchemy.orm.session.Session):
"""Custom Session class to avoid SqlAlchemy Session monkey patching."""
@_wrap_db_error
def query(self, *args, **kwargs):
return super(Session, self).query(*args, **kwargs)
@_wrap_db_error
def flush(self, *args, **kwargs):
return super(Session, self).flush(*args, **kwargs)
@_wrap_db_error
def execute(self, *args, **kwargs):
return super(Session, self).execute(*args, **kwargs)
def get_maker(engine, autocommit=True, expire_on_commit=False):
"""Return a SQLAlchemy sessionmaker using the given engine."""
return sqlalchemy.orm.sessionmaker(bind=engine,
class_=Session,
autocommit=autocommit,
expire_on_commit=expire_on_commit,
query_cls=Query)
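# Illustrative sketch (an assumption, not part of the upstream module): the
# pieces above wired together by hand; in normal use get_session() does this.
# `BarModel` stands for a hypothetical declarative model that carries the
# columns soft_delete() touches (id, deleted, deleted_at, updated_at).
def _example_soft_delete_all(engine, BarModel):
    maker = get_maker(engine)
    session = maker()
    with session.begin():
        return session.query(BarModel).soft_delete(synchronize_session=False)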
def _patch_mysqldb_with_stacktrace_comments():
"""Adds current stack trace as a comment in queries.
Patches MySQLdb.cursors.BaseCursor._do_query.
"""
import MySQLdb.cursors
import traceback
old_mysql_do_query = MySQLdb.cursors.BaseCursor._do_query
def _do_query(self, q):
stack = ''
for file, line, method, function in traceback.extract_stack():
# exclude various common things from trace
if file.endswith('session.py') and method == '_do_query':
continue
if file.endswith('api.py') and method == 'wrapper':
continue
if file.endswith('utils.py') and method == '_inner':
continue
if file.endswith('exception.py') and method == '_wrap':
continue
# db/api is just a wrapper around db/sqlalchemy/api
if file.endswith('db/api.py'):
continue
# only trace inside dragon
index = file.rfind('dragon')
if index == -1:
continue
stack += "File:%s:%s Method:%s() Line:%s | " \
% (file[index:], line, method, function)
# strip trailing " | " from stack
if stack:
stack = stack[:-3]
qq = "%s /* %s */" % (q, stack)
else:
qq = q
old_mysql_do_query(self, qq)
setattr(MySQLdb.cursors.BaseCursor, '_do_query', _do_query)
def _assert_matching_drivers():
"""Make sure slave handle and normal handle have the same driver."""
# NOTE(geekinutah): There's no use case for writing to one backend and
# reading from another. Who knows what the future holds?
if CONF.database.slave_connection == '':
return
normal = sqlalchemy.engine.url.make_url(CONF.database.connection)
slave = sqlalchemy.engine.url.make_url(CONF.database.slave_connection)
assert normal.drivername == slave.drivername
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
<Program Name>
wizard.py
<Author>
Lukas Puehringer <[email protected]>
<Started>
April 06, 2017
<Copyright>
See LICENSE for licensing information.
<Purpose>
Flask web app that provides a wizard to guide through an in-toto
software supply chain layout creation.
<Overview of Functions>
Utils:
Mostly used to transform data
NoSQL Helpers:
The app uses MongoDB to persist user posted data for auditing and to
improve the app. NoSQL Helpers are a couple of custom wrappers around
common queries.
View Decorator & Hooks:
Currently there is one view decorator for session handling (sessions are
used to isolate user posted data) and an after request hook to inject
messages from Flask's message flash framework into ajax responses.
Views:
Each view is an entry point for an HTTP request (c.f. paths in @app.route
decorator). Most views correspond to a page the user surfs to and
therefore render and return the corresponding template on a GET request.
If a page has form postable content, the corresponding view also accepts
a POST request which is usually sent when the user clicks NEXT as s/he
walks through the pages. The view then handles and persists the posted
data and, in case everything went well, redirects to the next view (page).
The views in this document are ordered from top to bottom in the same
order as the user walks from page to page.
Some pages have additional views associated that accept ajax calls,
e.g. to upload or remove files.
"""
import os
import uuid
import time
import io
import tarfile
from functools import wraps
from flask import (Flask, render_template, session, redirect, url_for, request,
flash, send_file, abort, json, jsonify, get_flashed_messages)
from flask_pymongo import PyMongo
from flask_wtf.csrf import CSRFProtect
import in_toto.models.link
import in_toto.models.layout
import in_toto.models.metadata
import securesystemslib.formats
import securesystemslib.keys
import tooldb
import create_layout
app = Flask(__name__, static_url_path="", instance_relative_config=True)
csrf = CSRFProtect(app)
app.config.update(dict(
DEBUG=True,
MONGO_URI="mongodb://localhost:27017/wizard",
SECRET_KEY="do not use the development key in production!!!",
))
# Supply a config file at "instance/config.py" that carries
# e.g. your deployment secret key
app.config.from_pyfile("config.py")
mongo = PyMongo(app)
# Reload if a template has changed (only for development, i.e. in DEBUG mode)
app.jinja_env.auto_reload = app.config["DEBUG"]
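# Illustrative sketch (an assumption, not part of the app): an
# "instance/config.py" as loaded above would typically override the
# development defaults, e.g.:
#
#     DEBUG = False
#     SECRET_KEY = "<long random value>"
#     MONGO_URI = "mongodb://localhost:27017/wizard"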
# -----------------------------------------------------------------------------
# Utils
# -----------------------------------------------------------------------------
def session_to_ssc(session_data):
"""
<Purpose>
Takes data from a user session, i.e. form posted data from multiple
pages (vcs, building, qa, ...) to generate a dictionary of lists of step
data and inspection data, i.e. a software supply chain (ssc).
<Returns>
Software Supply Chain data, i.e. a dictionary of step and inspection data
Note: these are not actual in-toto Step and Inspection objects
e.g.:
{
steps: [
{
"name": <unique step name>,
"cmd": <expected command>,
"modifies": <boolean> # whether the step modifies artifacts or not
}, ...
],
inspections: [
{
"name": <unique inspection name>,
"cmd": <command to run inspection>,
"based_on": <step name whose products are used for that inspection>
}
]
}
"""
ssc_steps = []
ssc_inspections = []
for step_type in ["vcs", "building", "qa", "package"]:
for idx, step in enumerate(session_data.get(step_type, {}).get(
"items", [])):
# FIXME: Come up with better auto names
step_name = "{}-{}".format(step_type, idx + 1)
ssc_step = {
"name": step_name,
"cmd" : step["cmd"],
"modifies": True
}
# We create an inspection for each retval, stdout and stderr for
# each specified QA step
# Special treatment for QA steps:
# If the user has specified how s/he knows if a QA step went well, e.g.
# by looking at return value, stdout and/or stderr, we use that
# information to suggest in-toto inspections (one per posted retval,
# stdout or stderr of each posted QA step)
if step_type == "qa":
# Override modifies property set above
# We suppose QA steps don't modify artifacts but just look at them
ssc_step["modifies"] = False
for inspect_type in ["retval", "stdout", "stderr"]:
enabled = step.get(inspect_type)
val = step.get(inspect_type + "_value")
operator = step.get(inspect_type + "_operator")
# Construct return value or byproducts inspection command
# c.f. https://github.com/in-toto/in-toto-inspections
if enabled:
# Inspection commands require a link file (to inspect)
# We use the auto name of the corresponding QA step to
# generate the filename passed to the inspection command
# FIXME: What about the functionary keyid in the link file name?!
link = in_toto.models.link.FILENAME_FORMAT_SHORT.format(
step_name=step_name)
value = step.get(inspect_type + "_value")
if inspect_type == "retval":
run = ("inspect-return-value --link={link} --{operator} {value}"
.format(link=link, operator=operator, value=value))
elif inspect_type in ["stdout", "stderr"]:
if operator == "empty":
operator = "is"
value = ""
run = ("inspect-byproducts"
" --link={link} --{inspect_type} --{operator} \"{value}\""
.format(link=link, inspect_type=inspect_type,
operator=operator, value=value))
# FIXME: Come up with better auto names
inspect_name = "inspection-" + str(len(ssc_inspections) + 1)
ssc_inspections.append({
"name": inspect_name,
"cmd": run,
"based_on": step_name
})
ssc_steps.append(ssc_step)
ssc_data = {
"steps": ssc_steps,
"inspections": ssc_inspections
}
return ssc_data
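# Illustrative sketch (an assumption, not part of the app): feed minimal
# session data through session_to_ssc() to see the auto-named steps and the
# inspection derived from the QA step's return-value check.
def _example_session_to_ssc():
    demo_session = {
        "vcs": {"items": [{"cmd": "git commit"}]},
        "qa": {"items": [{"cmd": "make test", "retval": True,
                          "retval_operator": "eq", "retval_value": "0"}]},
    }
    # Yields steps "vcs-1" and "qa-1" plus "inspection-1" based on "qa-1"
    return session_to_ssc(demo_session)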
def form_data_to_ssc(step_names, step_commands, step_modifies,
inspection_names, inspection_commands, inspection_step_names):
"""
<Purpose>
Takes step and inspection properties (form posted lists) and aggregates
them by index to step and inspection dictionaries to construct a dictionary
of software supply chain (ssc) data.
<Returns>
Software Supply Chain data, i.e. a dictionary of step and inspection data
Note: these are not actual in-toto Step and Inspection objects
e.g.:
{
steps: [
{
"name": <unique step name>,
"cmd": <expected command>
"modifies": <boolean> # whether the step modifies artifacts or not
}, ...
],
inspections: [
{
"name": <unique inspection name>,
"cmd": <command to run inspecting>,
"based_on": <step name whose products are used for that inspection>
}
]
}
<Exceptions>
ValueError if step or inspection arguments have unequal lengths
"""
if not (len(step_names) == len(step_commands) == len(step_modifies)):
raise ValueError("The arguments 'step_names', 'step_commands' and"
" 'step_modifies' must have equal lengths.")
if not (len(inspection_names) == len(inspection_commands) ==
len(inspection_step_names)):
raise ValueError("The arguments 'inspection_names', 'inspection_commands'"
" and 'inspection_step_names' must have equal lengths.")
steps = []
for i in range(len(step_names)):
steps.append({
"name": step_names[i],
"cmd": step_commands[i],
"modifies": step_modifies[i] == "true"
})
inspections = []
for i in range(len(inspection_names)):
inspections.append({
"name": inspection_names[i],
"cmd": inspection_commands[i],
"based_on": inspection_step_names[i]
})
ssc_data = {
"steps": steps,
"inspections": inspections
}
return ssc_data
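# Illustrative sketch (an assumption, not part of the app): aggregate parallel
# form-posted lists into ssc data, mirroring what the software supply chain
# view posts back from the editable graph.
def _example_form_data_to_ssc():
    return form_data_to_ssc(
        step_names=["build", "package"],
        step_commands=["make", "tar czf dist.tar.gz dist/"],
        step_modifies=["true", "true"],
        inspection_names=["untar"],
        inspection_commands=["tar xzf dist.tar.gz"],
        inspection_step_names=["package"])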
def _auth_items_to_dict(auth_items):
"""Takes a list of auth_items and returns a dictionary mapping the items
to their respective step names, i.e.:
[{"step_name": ..., "threshold": ..., "authorized_functionaries": ...}, ...]
-->
{<step name> : {"step_name": ..., "threshold": ..., "authorized_..."}, ...}
"""
auth_dict = {}
for auth_item in auth_items:
auth_dict[auth_item["step_name"]] = auth_item
return auth_dict
# -----------------------------------------------------------------------------
# NoSQL Helpers
# -----------------------------------------------------------------------------
# NOTE: Below functions rely on the current session having an id. If there is
# no id in the session, all functions redirect to `404` (page not found).
# This should never happen because all calling views should be decorated with
# @with_session_id, which ensures that the current session does have an id.
def _persist_session_subdocument(subdocument):
"""Update a subdocument (e.g. vcs, ssc, functionaries...) in session document
identified by current session id. """
if not session.get("id"):
abort(404)
# Search session document by session ID in DB and update (replace)
# subdocument. If the entire document does not exist it is inserted
mongo.db.session_collection.update_one(
{"_id": session["id"]},
{"$set": subdocument},
upsert=True)
def _persist_session_subdocument_ts(subdocument):
"""Updates/adds last_modified to the subdocument before persisting it. """
for key in subdocument.keys():
subdocument[key]["last_modified"] = time.time()
_persist_session_subdocument(subdocument)
def _get_session_subdocument(key):
"""Returns a subdocument (e.g. vcs, ssc, functionaries...) identified by
passed key from session document identified by current session id.
Returns an empty dict if document or subdocument are not found. """
if not session.get("id"):
abort(404)
# Get session document (use short circuit for default empty dict)
session_doc = mongo.db.session_collection.find_one(
{"_id": session["id"]})
if not session_doc:
return {}
# Get vcs data from session document or empty dict
return session_doc.get(key, {})
def _get_session_document():
"""Returns the entire session document.
Returns an empty dict if document or subdocument are not found. """
if not session.get("id"):
abort(404)
session_doc = mongo.db.session_collection.find_one(
{"_id": session["id"]})
if not session_doc:
return {}
return session_doc
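# Illustrative sketch (an assumption, not part of the app): how a view could
# use the helpers above to persist and re-read one subdocument for the current
# session (requires a request context so session["id"] exists).
def _example_roundtrip_vcs_comment(text):
    _persist_session_subdocument_ts({"vcs": {"items": [], "comment": text}})
    return _get_session_subdocument("vcs").get("comment")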
# -----------------------------------------------------------------------------
# View Decorator & Hooks
# -----------------------------------------------------------------------------
def with_session_id(wrapped_func):
"""
Generate a new session id if one does not exist.
For now, a user can start a new session on any page.
TODO: Should we redirect to the start page if the session is new?
"""
@wraps(wrapped_func)
def decorated_function(*args, **kwargs):
if not session.get("id"):
# Security is not paramount, we don't store sensitive data, right?
session["id"] = uuid.uuid4()
app.logger.info("New session ID '{}'".format(session["id"]))
return wrapped_func(*args, **kwargs)
return decorated_function
@app.after_request
def ajax_flash_messages(response):
""" This function intercepts JSON responses to ajax requests and injects
a "messages" field containing flashed messages.
To display them the JS callback that receives the response can call
show_messages(response.messages).
"""
if (request.headers.get("X-Requested-With") == "XMLHttpRequest" and
response.headers.get("Content-Type") == "application/json"):
response_data = json.loads(response.get_data())
response_data["messages"] = get_flashed_messages(with_categories=True)
response.set_data(json.dumps(response_data))
return response
# -----------------------------------------------------------------------------
# Views
# -----------------------------------------------------------------------------
@app.route("/")
@with_session_id
def start():
"""Step 0.
Wizard entry point, static landing page. """
return render_template("start.html")
@app.route("/vcs", methods=["GET", "POST"])
@with_session_id
def vcs():
"""Step 1.
Enter information about version control system. """
options = tooldb.COLLECTION["vcs"]
if request.method == "POST":
# Grab the form posted vcs commands and persist
# FIXME: Needs sanitizing
vcs_data = {
"items": [{"cmd": cmd} for cmd in request.form.getlist("vcs_cmd[]")],
"comment": request.form.get("comment", "")
}
_persist_session_subdocument_ts({"vcs": vcs_data})
flash("Now let's see how you build your software...",
"alert-success")
return redirect(url_for("building"))
user_data = _get_session_subdocument("vcs")
return render_template("vcs.html", options=options,
user_data=user_data)
@app.route("/building", methods=["GET", "POST"])
@with_session_id
def building():
"""Step 2.
Enter information about building. """
options = tooldb.COLLECTION["building"]
if request.method == "POST":
# Grab the form posted building commands and persist
# FIXME: Needs sanitizing
building_data = {
"items": [{"cmd": cmd} for cmd in request.form.getlist("build_cmd[]")],
"comment": request.form.get("comment", "")
}
_persist_session_subdocument_ts({"building": building_data})
flash("Let's talk about quality management next...",
"alert-success")
return redirect(url_for("quality_management"))
user_data = _get_session_subdocument("building")
return render_template("building.html", options=options, user_data=user_data)
@app.route("/quality", methods=["GET", "POST"])
@with_session_id
def quality_management():
"""Step 3.
Enter information about quality management. """
options = tooldb.COLLECTION["qa"]
if request.method == "POST":
# Grab the form posted quality management data and persist
# FIXME: Needs sanitizing
cmd_list = request.form.getlist("cmd[]")
retval_include_list = request.form.getlist("retval_include[]")
retval_operator_list = request.form.getlist("retval_operator[]")
retval_value_list = request.form.getlist("retval_value[]")
stdout_include_list = request.form.getlist("stdout_include[]")
stdout_operator_list = request.form.getlist("stdout_operator[]")
stdout_value_list = request.form.getlist("stdout_value[]")
stderr_include_list = request.form.getlist("stderr_include[]")
stderr_operator_list = request.form.getlist("stderr_operator[]")
stderr_value_list = request.form.getlist("stderr_value[]")
# Values of a step are related by the same index
# All lists must be equally long
# FIXME: Don't assert, try!
assert(len(cmd_list) ==
len(retval_include_list) == len(retval_operator_list) ==
len(retval_value_list) == len(stdout_include_list) ==
len(stdout_operator_list) == len(stdout_value_list) ==
len(stderr_include_list) == len(stderr_operator_list) ==
len(stderr_value_list))
# Pick any one (they all must have the same length anyway)
qa_steps_cnt = len(cmd_list)
# There can only be one comment
posted_comment = request.form.get("comment", "")
posted_items = []
for i in range(qa_steps_cnt):
posted_items.append({
"cmd": cmd_list[i],
"retval": retval_include_list[i] == "true",
"retval_operator": retval_operator_list[i],
"retval_value": retval_value_list[i],
"stdout": stdout_include_list[i] == "true",
"stdout_operator": stdout_operator_list[i],
"stdout_value": stdout_value_list[i],
"stderr": stderr_include_list[i] == "true",
"stderr_operator": stderr_operator_list[i],
"stderr_value": stderr_value_list[i],
})
# Note: We store the data as posted. Only in the software supply chain
# view do we transform this data, e.g. create inspection commands, etc.
qa_data = {
"items": posted_items,
"comment": posted_coment
}
_persist_session_subdocument_ts({"qa": qa_data})
flash("Nice quality management, but how do you package your software?",
"alert-success")
return redirect(url_for("packaging"))
user_data = _get_session_subdocument("qa")
return render_template("quality.html", options=options, user_data=user_data)
@app.route("/packaging", methods=["GET", "POST"])
@with_session_id
def packaging():
"""Step 4.
Enter information about packaging. """
options = tooldb.COLLECTION["package"]
if request.method == "POST":
# Grab the form posted building commands and persist
# FIXME: Needs sanitizing
package_data = {
"items": [{"cmd": cmd} for cmd in request.form.getlist("cmd[]")],
"comment": request.form.get("comment", "")
}
_persist_session_subdocument_ts({"package": package_data})
flash("Now let's see if we got your software supply chain right...",
"alert-success")
return redirect(url_for("software_supply_chain"))
user_data = _get_session_subdocument("package")
return render_template("packaging.html", options=options, user_data=user_data)
@app.route("/software-supply-chain", methods=["GET", "POST"])
@with_session_id
def software_supply_chain():
"""Step 5.
On get, generate and serve software supply chain data (ssc graph) based
on form data posted from previous pages and stored to the session
(c.f. helper `session_to_ssc`).
On post, we override the generated software supply chain using the posted
form data (c.f. helper `form_data_to_ssc`).
The latter will be used for subsequent get queries and on subsequent pages.
"""
if request.method == "POST":
# Grab the form posted software supply chain data and persist
# FIXME: Needs sanitizing, e.g.
# restrict step and inspection names (must be unique) and inspection
# step names (must reference an existing step)
step_names = request.form.getlist("step_name[]")
step_commands = request.form.getlist("step_cmd[]")
step_modifies = request.form.getlist("step_modifies[]")
inspection_names = request.form.getlist("inspection_name[]")
inspection_commands = request.form.getlist("inspection_cmd[]")
inspection_step_names = request.form.getlist("inspection_step_name[]")
comment = request.form.get("comment", "")
# Names and Commands of a step or inspection are related by the same index
# All lists should be equally long
# FIXME: Don't assert, try!
assert(len(step_names) == len(step_commands) == len(step_modifies))
assert(len(inspection_names) == len(inspection_commands) ==
len(inspection_step_names))
# Create and persist software supply chain data from posted form
ssc_data = form_data_to_ssc(step_names, step_commands, step_modifies,
inspection_names, inspection_commands, inspection_step_names)
# Add posted comment to ssc_data
ssc_data["comment"] = comment
# Persist and redirect to next page
_persist_session_subdocument_ts({"ssc": ssc_data})
return redirect(url_for("functionaries"))
# Query all session data (posted on vcs, building, qa, ... pages)
session_data = _get_session_document()
# Query any existing software supply chain data (posted on this page)
ssc_data = session_data.get("ssc", {})
ssc_last_modified = ssc_data.get("last_modified", 0)
# Assume we don't have to show the refresh dialog (explained below)
show_refresh_dialog = False
# Only (re)generate software supply chain graph from data the user has posted
# on previous pages, if there is no ssc data from this page in the db or the
# user has sent the `refresh` parameter
if not ssc_data or request.args.get("refresh"):
ssc_data = session_to_ssc(session_data)
# Otherwise we serve existing ssc data
else:
# If existing ssc data is older than any of stored vcs/building/qa/package
# data we still serve the stored ssc data but additionally show a
# "Do you want to re-generate the software supply chain?" dialog with a
# link that includes the `refresh` get parameter
for subdocument in ["vcs", "building", "qa", "package"]:
data_last_modified = session_data.get(subdocument,
{}).get("last_modified", 0)
if ssc_last_modified < data_last_modified:
show_refresh_dialog = True
break
return render_template("software_supply_chain.html",
ssc_data=ssc_data, show_refresh=show_refresh_dialog)
@app.route("/functionaries", methods=["GET", "POST"])
@with_session_id
def functionaries():
"""Step 6.
On get serve functionary keys upload and keys dropzone.
On post store comment and redirect to next page.
The actual functionary public key upload uses a different view queried
with ajax (ajax_upload_key).
"""
functionaries = _get_session_subdocument("functionaries")
if request.method == "POST":
functionaries["comment"] = request.form.get("comment", "")
_persist_session_subdocument({"functionaries": functionaries})
flash("Now tell us who is authorized to do what...",
"alert-success")
return redirect(url_for("authorizing"))
return render_template("functionaries.html", functionaries=functionaries)
@app.route("/functionaries/upload", methods=["POST"])
@with_session_id
def ajax_upload_key():
"""Ajax upload a functionary key. Key files are stored to the db in their
dictionary representation. """
functionary_key = request.files.get("functionary_key", None)
functionary_name = request.form.get("functionary_name", None)
if not functionary_name:
flash("Something went wrong: We don't know which functionary,"
" this key belongs to", "alert-danger")
return jsonify({"error": True})
if not functionary_key:
flash("Something went wrong: No file uploaded", "alert-danger")
return jsonify({"error": True})
if functionary_key.filename == "":
flash("Something went wrong: No file selected", "alert-danger")
return jsonify({"error": True})
try:
# We try to load the public key to check the format
key = securesystemslib.keys.import_rsakey_from_public_pem(
functionary_key.read().decode("ascii"))
securesystemslib.formats.PUBLIC_KEY_SCHEMA.check_match(key)
file_name = functionary_key.filename
functionary_db_item = {
"functionary_name": functionary_name,
"file_name": file_name,
"key_dict": key
}
# Clumsy update or insert for functionary array embedded subdocument
# NOTE: Unfortunately we can't "upsert" on arrays but must first try to
# update and if that does not work insert.
# https://docs.mongodb.com/manual/reference/operator/update/positional/#upsert
# https://stackoverflow.com/questions/23470658/mongodb-upsert-sub-document
query_result = mongo.db.session_collection.update_one(
{
"_id": session["id"],
"functionaries.items.functionary_name": functionary_name
},
{
"$set": {"functionaries.items.$": functionary_db_item}
})
if not query_result.matched_count:
query_result = mongo.db.session_collection.update_one(
{
"_id": session["id"],
# This query part should deal with concurrent requests
"functionaries.items.functionary_name": {"$ne": functionary_name}
},
{
"$push": {"functionaries.items": functionary_db_item}
}, upsert=True)
flash("Added key '{fn}' for functionary '{functionary}'"
.format(fn=file_name, functionary=functionary_name),
"alert-success")
else:
flash("Updated key '{fn}' for functionary ""'{functionary}'"
.format(fn=file_name, functionary=functionary_name),
"alert-success")
# TODO: Throw more rocks at query_result
except UnicodeDecodeError:
flash("Could not decode the key. The key contains non-ascii characters.")
return jsonify({"error": True})
except Exception as e:
flash("Could not store uploaded file. Error: {}".format(e),
"alert-danger")
return jsonify({"error": True})
return jsonify({"error": False})
@app.route("/functionaries/remove", methods=["POST"])
@with_session_id
def ajax_remove_functionary():
"""Remove the posted functionary (by name) from the functionary session
subdocument, which also removes the key. """
functionary_name = request.form.get("functionary_name")
try:
# Remove the link entry with posted file name in the session
# document's functionaries.items list
query_result = mongo.db.session_collection.update_one(
{"_id": session["id"]},
{"$pull": {"functionaries.items":
{"functionary_name": functionary_name}}})
# TODO: Throw rocks at query_result
except Exception as e:
flash("Could not remove functionary '{name}': {e}".format(
name=functionary_name, e=e), "alert-danger")
return jsonify({"error": True})
else:
flash("Removed functionary '{name}'.".format(
name=functionary_name), "alert-success")
return jsonify({"error": False})
@app.route("/authorizing", methods=["GET", "POST"])
@with_session_id
def authorizing():
"""Step 7.
Authorize functionaries to carry out software supply chain steps. """
if request.method == "POST":
# Grab the form posted authorizing data and persist
# FIXME: Some sanitizing/validation already done below but might need more
step_names = request.form.getlist("step_name[]")
thresholds = request.form.getlist("threshold[]")
comment = request.form.get("comment", "")
# Steps names, commands and thresholds are related by the same index
# These lists should be equally long
# FIXME: Don't assert, try!
assert(len(step_names) == len(thresholds))
# The authorized functionaries multi select form element has the
# respective step name in its name, e.g. for building step:
# <select name="functionary_name_building[]" ...>
# (c.f authorizing_functionaries.html)
auth_items = []
for idx, step_name in enumerate(step_names):
functionaries_for_step = request.form.getlist(
"functionary_name_" + step_name + "[]")
auth_data = {
"step_name": step_name,
"threshold": int(thresholds[idx]),
"authorized_functionaries": functionaries_for_step
}
auth_items.append(auth_data)
# We validate here (after above processing) so that we can return
# consistent data in case of invalidity
valid = True
for auth_item in auth_items:
if not auth_item["authorized_functionaries"]:
valid = False
flash("Step '{name}': Authorize at least one functionary".format(
name=auth_item["step_name"]), "alert-warning")
elif auth_item["threshold"] > len(auth_item["authorized_functionaries"]):
valid = False
flash("Step '{name}': Threshold can't be higher than the "
" number of authorized functionaries".format(
name=auth_item["step_name"]), "alert-warning")
# Only persist and go to the next page if valid, else go back to this page
if valid:
flash("It's time to do a test run of your software supply chain",
"alert-success")
query_result = mongo.db.session_collection.update_one(
{ "_id": session["id"]},
{"$set": {"authorizing.items": auth_items,
"authorizing.comment": comment}})
return redirect(url_for("chaining"))
else: # request not POST
authorizing = _get_session_subdocument("authorizing")
auth_items = authorizing.get("items", [])
comment = authorizing.get("comment", "")
# We store auth data items to db as list but in the templates we need a
# mapping between auth items and steps
auth_dict = _auth_items_to_dict(auth_items)
session_functionaries = _get_session_subdocument("functionaries")
session_steps = _get_session_subdocument("ssc").get("steps", [])
return render_template("authorizing_functionaries.html",
functionaries=session_functionaries, steps=session_steps,
auth_dict=auth_dict, comment=comment)
@app.route("/chaining", methods=["GET", "POST"])
@with_session_id
def chaining():
"""Step 8.
On get serve dry run snippet and link metadata upload.
On post store comment and redirect to next page.
The link file upload uses a different view queried
with ajax (ajax_upload_link).
"""
chaining = _get_session_subdocument("chaining")
steps = _get_session_subdocument("ssc").get("steps", [])
if request.method == "POST":
chaining["comment"] = request.form.get("comment", "")
_persist_session_subdocument({"chaining": chaining})
flash("And that's basically it... :)", "alert-success")
return redirect(url_for("wrap_up"))
return render_template("chaining.html", steps=steps, chaining=chaining)
@app.route("/chaining/upload", methods=["POST"])
@with_session_id
def ajax_upload_link():
"""Ajax upload link metadata file either individually or as tar archive.
Link files are stored to the db as canonical json string dump. """
uploaded_file = request.files.get("step_link", None)
if not uploaded_file:
flash("Something went wrong: No file uploaded", "alert-danger")
return jsonify()
if uploaded_file.filename == "":
flash("Something went wrong: No file selected", "alert-danger")
return jsonify()
# The uploaded file might be a tar archive so let's try to unpack it
link_file_tuples = []
try:
link_archive = tarfile.open(fileobj=uploaded_file)
for tar_info in link_archive.getmembers():
link_file = link_archive.extractfile(tar_info)
link_file_tuples.append((tar_info.name, link_file))
except tarfile.TarError as e:
# If that does not work we assume the uploaded file was a link
link_file_tuples.append((uploaded_file.filename, uploaded_file))
added_files = []
msg_type = "alert-success"
# Now iterate over all files we have, try to load them as link and
# store them to database
for link_filename, link_file in link_file_tuples:
try:
link_metadata_dict = json.loads(link_file.read())
link_dict = link_metadata_dict.get("signed")
if not isinstance(link_dict, dict):
raise ValueError("Wrong metadata format")
# FIXME: There is a bug in in_toto_mock that causes the returned link
# be wrapped twice in a Metablock. The bug is fixed but not yet merged
# github.com/in-toto/in-toto/commit/4d34fd914d0a0dfac30eaa7af1590ff53161477e
# Let's work around this bug by unwrapping a second time. If it is not
# double wrapped we default to parsing a valid Link, as returned e.g. by
# in_toto_run
link_dict = link_dict.get("signed", link_dict)
# Instantiate a link object from the link dictionary
link = in_toto.models.link.Link.read(link_dict)
link_db_item = {
"step_name": link.name,
"file_name": link_filename,
# NOTE: We can't store the dict representation of the link, because
# MongoDB does not allow dotted keys, e.g. "materials": {"foo.py": {...
# hence we store it as canonical json string dump (c.f. Link __repr__)
# NOTE: I wonder if we are prone to exceed the max document size
# (16 MB) if we store all the session info in one document? Unlikely.
"link_str": repr(link)
}
# Push link item to the chaining.items array in the session document
query_result = mongo.db.session_collection.update_one(
{"_id": session["id"]},
{"$push": {"chaining.items": link_db_item}},
upsert=True)
# TODO: Throw more rocks at query_result
except Exception as e:
msg_type = "alert-danger"
flash("Could not store link '{}': {}".format(link_filename, e),
"alert-danger")
else:
added_files.append(link_filename)
flash("Stored link '{file_name}' for step '{name}'!"
.format(file_name=link_filename, name=link.name), "alert-success")
return jsonify({"files": added_files})
@app.route("/chaining/remove", methods=["POST"])
@with_session_id
def ajax_remove_link():
""" Remove the posted link by step name from the chaining session
subdocument.
"""
link_filename = request.form.get("link_filename")
try:
# Remove the link entry with posted file name in the session
# document's chaining.items list
res = mongo.db.session_collection.update_one(
{"_id": session["id"]},
{"$pull": {"chaining.items": {"file_name": link_filename}}})
# TODO: Throw rocks at query_result
except Exception as e:
flash("Could not remove link file '{link}': '{e}'".format(
link=link_filename, e=e), "alert-danger")
return jsonify({"error": True})
else:
flash("Removed link file '{link}'".format(
link=link_filename), "alert-success")
return jsonify({"error": False})
@app.route("/wrap-up")
@with_session_id
def wrap_up():
"""Step 9.
Serves link to download layout and further instructions
- Download layout
- Create project owner key (keygen snippet)
- Sign layout (signing snippet)
- Per functionary commands (in-toto-run snippet)
- FIXME: More release instructions
"""
functionaries = _get_session_subdocument("functionaries")
auth_items = _get_session_subdocument("authorizing").get("items", [])
auth_dict = _auth_items_to_dict(auth_items)
steps = _get_session_subdocument("ssc").get("steps", [])
return render_template("wrap_up.html", steps=steps, auth_dict=auth_dict,
functionaries=functionaries)
@app.route("/download-layout")
@with_session_id
def download_layout():
"""Creates in-toto layout based on session data and uploaded links and
serves it as file download with a timestamped name.
FIXME:
- Enhance layout creation
- Factor out layout creation functionality that's implemented here, e.g. to
create_layout.py
"""
# Iterate over items in ssc session subdocument and create an ordered list
# of related link objects retrieved from the chaining session subdocument
session_ssc = _get_session_subdocument("ssc")
session_chaining = _get_session_subdocument("chaining")
links = []
for step in session_ssc.get("steps", []):
for link_data in session_chaining.get("items", []):
if link_data["step_name"] == step["name"]:
link_str = json.loads(link_data["link_str"])
link = in_toto.models.link.Link.read(link_str)
links.append(link)
# Create basic layout with steps based on links and simple artifact rules
layout = create_layout.create_layout_from_ordered_links(links)
# Add pubkeys to layout
functionary_keyids = {}
for functionary in _get_session_subdocument("functionaries").get("items", []):
key = functionary.get("key_dict")
functionary_name = functionary.get("functionary_name")
# Check the format of the uploaded public key
# TODO: Handle invalid key
securesystemslib.formats.PUBLIC_KEY_SCHEMA.check_match(key)
# Add keys to layout's key store
layout.keys[key["keyid"]] = key
# Add keys to functionary name-keyid map needed below
functionary_keyids[functionary_name] = key["keyid"]
auth_items = _get_session_subdocument("authorizing").get("items", [])
auth_dict = _auth_items_to_dict(auth_items)
# Add authorized functionaries to steps and set functionary threshold
for idx in range(len(layout.steps)):
step_name = layout.steps[idx].name
auth_data = auth_dict.get(step_name)
for functionary_name in auth_data.get("authorized_functionaries", []):
keyid = functionary_keyids.get(functionary_name)
if keyid:
layout.steps[idx].pubkeys.append(keyid)
layout.steps[idx].threshold = auth_data.get("threshold")
# Add inspections to layout
inspections = session_ssc.get("inspections", [])
for inspection_data in inspections:
inspection = in_toto.models.layout.Inspection(
name=inspection_data["name"],
expected_materials=[
["MATCH", "*", "WITH", "PRODUCTS", "FROM", inspection_data["based_on"]]
])
inspection.set_run_from_string(inspection_data["cmd"])
layout.inspect.append(inspection)
layout.validate()
layout_name = "untitled-" + str(time.time()).replace(".", "") + ".layout"
layout_metadata = in_toto.models.metadata.Metablock(signed=layout)
# Dump layout to a memory file and serve it to the user
layout_fp = io.BytesIO()
layout_fp.write("{}".format(layout_metadata).encode("utf-8"))
layout_fp.seek(0)
return send_file(layout_fp,
mimetype="application/json", as_attachment=True,
attachment_filename=layout_name)
@app.route("/guarantees")
@with_session_id
def guarantees():
""" Show what the software supply chain protects against and give advice for
more guarantees.
FIXME: Not yet implemented
"""
return render_template("guarantees.html")
if __name__ == "__main__":
app.run()
|
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import contextlib
import hashlib
from recipe_engine import recipe_api
PATCH_STORAGE_RIETVELD = 'rietveld'
PATCH_STORAGE_GIT = 'git'
PATCH_STORAGE_SVN = 'svn'
class TryserverApi(recipe_api.RecipeApi):
def __init__(self, *args, **kwargs):
super(TryserverApi, self).__init__(*args, **kwargs)
self._failure_reasons = []
@property
def patch_url(self):
"""Reads patch_url property and corrects it if needed."""
url = self.m.properties.get('patch_url')
return url
@property
def is_tryserver(self):
"""Returns true iff we can apply_issue or patch."""
return (self.can_apply_issue or self.is_patch_in_svn or
self.is_patch_in_git or self.is_gerrit_issue)
@property
def can_apply_issue(self):
"""Returns true iff the properties exist to apply_issue from rietveld."""
return (self.m.properties.get('rietveld')
and 'issue' in self.m.properties
and 'patchset' in self.m.properties)
@property
def is_gerrit_issue(self):
"""Returns true iff the properties exist to match a Gerrit issue."""
return ('event.patchSet.ref' in self.m.properties and
'event.change.url' in self.m.properties and
'event.change.id' in self.m.properties)
@property
def is_patch_in_svn(self):
"""Returns true iff the properties exist to patch from a patch URL."""
return self.patch_url
@property
def is_patch_in_git(self):
return (self.m.properties.get('patch_storage') == PATCH_STORAGE_GIT and
self.m.properties.get('patch_repo_url') and
self.m.properties.get('patch_ref'))
def _apply_patch_step(self, patch_file=None, patch_content=None, root=None):
assert not (patch_file and patch_content), (
'Please only specify either patch_file or patch_content, not both!')
patch_cmd = [
'patch',
'--dir', root or self.m.path['checkout'],
'--force',
'--forward',
'--remove-empty-files',
'--strip', '0',
]
if patch_file:
patch_cmd.extend(['--input', patch_file])
self.m.step('apply patch', patch_cmd,
stdin=patch_content)
def apply_from_svn(self, cwd):
"""Downloads patch from patch_url using svn-export and applies it"""
# TODO(nodir): accept these properties as parameters
patch_url = self.patch_url
root = cwd
if root is None:
issue_root = self.m.rietveld.calculate_issue_root()
root = self.m.path['checkout'].join(issue_root)
patch_file = self.m.raw_io.output('.diff')
ext = '.bat' if self.m.platform.is_win else ''
svn_cmd = ['svn' + ext, 'export', '--force', patch_url, patch_file]
result = self.m.step('download patch', svn_cmd,
step_test_data=self.test_api.patch_content)
result.presentation.logs['patch.diff'] = (
result.raw_io.output.split('\n'))
patch_content = self.m.raw_io.input(result.raw_io.output)
self._apply_patch_step(patch_content=patch_content, root=root)
def apply_from_git(self, cwd):
"""Downloads patch from given git repo and ref and applies it"""
# TODO(nodir): accept these properties as parameters
patch_repo_url = self.m.properties['patch_repo_url']
patch_ref = self.m.properties['patch_ref']
patch_dir = self.m.path.mkdtemp('patch')
git_setup_py = self.m.path['build'].join('scripts', 'slave', 'git_setup.py')
git_setup_args = ['--path', patch_dir, '--url', patch_repo_url]
patch_path = patch_dir.join('patch.diff')
self.m.python('patch git setup', git_setup_py, git_setup_args)
self.m.git('fetch', 'origin', patch_ref,
name='patch fetch', cwd=patch_dir)
self.m.git('clean', '-f', '-d', '-x',
name='patch clean', cwd=patch_dir)
self.m.git('checkout', '-f', 'FETCH_HEAD',
name='patch git checkout', cwd=patch_dir)
self._apply_patch_step(patch_file=patch_path, root=cwd)
self.m.step('remove patch', ['rm', '-rf', patch_dir])
def determine_patch_storage(self):
"""Determines patch_storage automatically based on properties."""
storage = self.m.properties.get('patch_storage')
if storage:
return storage
if self.can_apply_issue:
return PATCH_STORAGE_RIETVELD
elif self.is_patch_in_svn:
return PATCH_STORAGE_SVN
def maybe_apply_issue(self, cwd=None, authentication=None):
"""If we're a trybot, apply a codereview issue.
Args:
cwd: If specified, apply the patch from the specified directory.
authentication: authentication scheme whenever apply_issue.py is called.
This is only used if the patch comes from Rietveld. Possible values:
None, 'oauth2' (see also api.rietveld.apply_issue.)
"""
storage = self.determine_patch_storage()
if storage == PATCH_STORAGE_RIETVELD:
return self.m.rietveld.apply_issue(
self.m.rietveld.calculate_issue_root(),
authentication=authentication)
elif storage == PATCH_STORAGE_SVN:
return self.apply_from_svn(cwd)
elif storage == PATCH_STORAGE_GIT:
return self.apply_from_git(cwd)
else:
# Since this method is "maybe", we don't raise an Exception.
pass
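# --- Illustrative sketch (hypothetical, not part of this module) -------------
# A recipe that uses this module typically declares it in DEPS and calls
# maybe_apply_issue() after the checkout step. The DEPS names below are
# assumptions, and the snippet only runs under the recipe engine:
#
#   DEPS = [
#     'gclient',
#     'tryserver',
#   ]
#
#   def RunSteps(api):
#     api.gclient.checkout()
#     if api.tryserver.is_tryserver:
#       api.tryserver.maybe_apply_issue()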
def get_files_affected_by_patch(self, patch_root=None, **kwargs):
"""Returns list of paths to files affected by the patch.
Argument:
patch_root: path relative to api.path['root'], usually obtained from
api.gclient.calculate_patch_root(patch_project)
Returned paths will be relative to patch_root, unless you use
patch_root=None, in which case the old behavior is used, which returns
paths relative to the checkout, aka solution[0].name.
TODO(tandrii): remove this doc.
"""
# patch_root must be set! None is for backwards compatibility and will be
# removed.
if patch_root is None:
return self._old_get_files_affected_by_patch()
if not kwargs.get('cwd'):
kwargs['cwd'] = self.m.path['slave_build'].join(patch_root)
step_result = self.m.git('diff', '--cached', '--name-only',
name='git diff to analyze patch',
stdout=self.m.raw_io.output(),
step_test_data=lambda:
self.m.raw_io.test_api.stream_output('foo.cc'),
**kwargs)
paths = [self.m.path.join(patch_root, p) for p in
step_result.stdout.split()]
if self.m.platform.is_win:
# Looks like "analyze" wants POSIX slashes even on Windows (since git
# uses that format even on Windows).
paths = [path.replace('\\', '/') for path in paths]
step_result.presentation.logs['files'] = paths
return paths
def _old_get_files_affected_by_patch(self):
git_diff_kwargs = {}
issue_root = self.m.rietveld.calculate_issue_root()
if issue_root:
git_diff_kwargs['cwd'] = self.m.path['checkout'].join(issue_root)
step_result = self.m.git('diff', '--cached', '--name-only',
name='git diff to analyze patch',
stdout=self.m.raw_io.output(),
step_test_data=lambda:
self.m.raw_io.test_api.stream_output('foo.cc'),
**git_diff_kwargs)
paths = step_result.stdout.split()
if issue_root:
paths = [self.m.path.join(issue_root, path) for path in paths]
if self.m.platform.is_win:
# Looks like "analyze" wants POSIX slashes even on Windows (since git
# uses that format even on Windows).
paths = [path.replace('\\', '/') for path in paths]
step_result.presentation.logs['files'] = paths
return paths
def set_subproject_tag(self, subproject_tag):
"""Adds a subproject tag to the build.
This can be used to distinguish between builds that execute different steps
depending on what was patched, e.g. blink vs. pure chromium patches.
"""
assert self.is_tryserver
step_result = self.m.step.active_result
step_result.presentation.properties['subproject_tag'] = subproject_tag
def _set_failure_type(self, failure_type):
if not self.is_tryserver:
return
step_result = self.m.step.active_result
step_result.presentation.properties['failure_type'] = failure_type
def set_patch_failure_tryjob_result(self):
"""Mark the tryjob result as failure to apply the patch."""
self._set_failure_type('PATCH_FAILURE')
def set_compile_failure_tryjob_result(self):
"""Mark the tryjob result as a compile failure."""
self._set_failure_type('COMPILE_FAILURE')
def set_test_failure_tryjob_result(self):
"""Mark the tryjob result as a test failure.
This means we started running actual tests (not prerequisite steps
like checkout or compile), and some of these tests have failed.
"""
self._set_failure_type('TEST_FAILURE')
def set_invalid_test_results_tryjob_result(self):
"""Mark the tryjob result as having invalid test results.
This means we run some tests, but the results were not valid
(e.g. no list of specific test cases that failed, or too many
tests failing, etc).
"""
self._set_failure_type('INVALID_TEST_RESULTS')
def add_failure_reason(self, reason):
"""
Records a more detailed reason why build is failing.
The reason can be any JSON-serializable object.
"""
assert self.m.json.is_serializable(reason)
self._failure_reasons.append(reason)
@contextlib.contextmanager
def set_failure_hash(self):
"""
Context manager that sets a failure_hash build property on StepFailure.
This can be used to easily compare whether two builds have failed
for the same reason. For example, if a patch is bad (breaks something),
we'd expect it to always break in the same way. Different failures
for the same patch are usually a sign of flakiness.
"""
try:
yield
except self.m.step.StepFailure as e:
self.add_failure_reason(e.reason)
failure_hash = hashlib.sha1()
failure_hash.update(self.m.json.dumps(self._failure_reasons))
step_result = self.m.step.active_result
step_result.presentation.properties['failure_hash'] = \
failure_hash.hexdigest()
raise
def get_footers(self, patch_text=None):
"""Retrieves footers from the patch description.
Footers are machine-readable tags embedded in commit messages. See the
git-footers documentation for more information.
"""
if patch_text is None:
codereview = None
if not self.can_apply_issue: #pragma: no cover
raise recipe_api.StepFailure("Cannot get tags from gerrit yet.")
else:
codereview = 'rietveld'
patch = (
self.m.properties['rietveld'].strip('/') + '/' +
str(self.m.properties['issue']))
patch_text = self.m.git_cl.get_description(
patch=patch, codereview=codereview).stdout
result = self.m.python(
'parse description', self.package_repo_resource('git_footers.py'),
args=['--json', self.m.json.output()],
stdin=self.m.raw_io.input(data=patch_text))
return result.json.output
def get_footer(self, tag, patch_text=None):
"""Gets a specific tag from a CL description"""
return self.get_footers(patch_text).get(tag, [])
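# --- Illustrative sketch (not part of this module) ---------------------------
# Footers are trailing "Key: value" lines at the end of a commit message. The
# parsing above shells out to git_footers.py; a rough standalone approximation
# of the idea (not the actual implementation) could look like:
def _example_parse_footers(message):
  footers = collections.defaultdict(list)
  for line in reversed(message.strip().splitlines()):
    if ':' not in line:
      break
    key, _, value = line.partition(':')
    footers[key.strip()].append(value.strip())
  return dict(footers)
#
# _example_parse_footers('Fix crash\n\nBug: 1234\nReviewed-by: someone')
# -> {'Bug': ['1234'], 'Reviewed-by': ['someone']}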
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Some common SessionRunHook classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import numpy as np
import six
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.python.client import timeline
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training_util
from tensorflow.python.training.session_run_hook import SessionRunArgs
from tensorflow.python.training.summary_io import SummaryWriterCache
from tensorflow.python.util.tf_export import tf_export
_HOOKS = "hooks"
_STEPS_PER_RUN_VAR = "steps_per_run"
class _HookTimer(object):
"""Base timer for determining when Hooks should trigger.
Should not be instantiated directly.
"""
def __init__(self):
pass
def reset(self):
"""Resets the timer."""
pass
def should_trigger_for_step(self, step):
"""Return true if the timer should trigger for the specified step."""
raise NotImplementedError
def update_last_triggered_step(self, step):
"""Update the last triggered time and step number.
Args:
step: The current step.
Returns:
A pair `(elapsed_time, elapsed_steps)`, where `elapsed_time` is the number
of seconds between the current trigger and the last one (a float), and
`elapsed_steps` is the number of steps between the current trigger and
the last one. Both values will be set to `None` on the first trigger.
"""
raise NotImplementedError
def last_triggered_step(self):
"""Returns the last triggered time step or None if never triggered."""
raise NotImplementedError
@tf_export(v1=["train.SecondOrStepTimer"])
class SecondOrStepTimer(_HookTimer):
"""Timer that triggers at most once every N seconds or once every N steps.
"""
def __init__(self, every_secs=None, every_steps=None):
self.reset()
self._every_secs = every_secs
self._every_steps = every_steps
if self._every_secs is None and self._every_steps is None:
raise ValueError("Either every_secs or every_steps should be provided.")
if (self._every_secs is not None) and (self._every_steps is not None):
raise ValueError("Can not provide both every_secs and every_steps.")
super(SecondOrStepTimer, self).__init__()
def reset(self):
self._last_triggered_step = None
self._last_triggered_time = None
def should_trigger_for_step(self, step):
"""Return true if the timer should trigger for the specified step.
Args:
step: Training step to trigger on.
Returns:
True if the difference between the current time and the time of the last
trigger exceeds `every_secs`, or if the difference between the current
step and the last triggered step exceeds `every_steps`. False otherwise.
"""
if self._last_triggered_step is None:
return True
if self._last_triggered_step == step:
return False
if self._every_secs is not None:
if time.time() >= self._last_triggered_time + self._every_secs:
return True
if self._every_steps is not None:
if step >= self._last_triggered_step + self._every_steps:
return True
return False
def update_last_triggered_step(self, step):
current_time = time.time()
if self._last_triggered_time is None:
elapsed_secs = None
elapsed_steps = None
else:
elapsed_secs = current_time - self._last_triggered_time
elapsed_steps = step - self._last_triggered_step
self._last_triggered_time = current_time
self._last_triggered_step = step
return (elapsed_secs, elapsed_steps)
def last_triggered_step(self):
return self._last_triggered_step
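# --- Illustrative sketch (not part of the original module) -------------------
# SecondOrStepTimer only fires when either the step delta or the wall-clock
# delta since the last trigger is large enough; a quick standalone check of
# that behaviour:
def _demo_second_or_step_timer():
  timer = SecondOrStepTimer(every_steps=10)
  assert timer.should_trigger_for_step(1)      # first call always triggers
  timer.update_last_triggered_step(1)
  assert not timer.should_trigger_for_step(5)  # only 4 steps elapsed
  assert timer.should_trigger_for_step(11)     # 10 steps elapsed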
class NeverTriggerTimer(_HookTimer):
"""Timer that never triggers."""
def should_trigger_for_step(self, step):
_ = step
return False
def update_last_triggered_step(self, step):
_ = step
return (None, None)
def last_triggered_step(self):
return None
@tf_export(v1=["train.LoggingTensorHook"])
class LoggingTensorHook(session_run_hook.SessionRunHook):
"""Prints the given tensors every N local steps, every N seconds, or at end.
The tensors will be printed to the log, with `INFO` severity. If you are not
seeing the logs, you might want to add the following line after your imports:
```python
tf.logging.set_verbosity(tf.logging.INFO)
```
Note that if `at_end` is True, `tensors` should not include any tensor
whose evaluation produces a side effect such as consuming additional inputs.
"""
def __init__(self, tensors, every_n_iter=None, every_n_secs=None,
at_end=False, formatter=None):
"""Initializes a `LoggingTensorHook`.
Args:
tensors: `dict` that maps string-valued tags to tensors/tensor names,
or `iterable` of tensors/tensor names.
every_n_iter: `int`, print the values of `tensors` once every N local
steps taken on the current worker.
every_n_secs: `int` or `float`, print the values of `tensors` once every N
seconds. Exactly one of `every_n_iter` and `every_n_secs` should be
provided.
at_end: `bool` specifying whether to print the values of `tensors` at the
end of the run.
formatter: function, takes dict of `tag`->`Tensor` and returns a string.
If `None` uses default printing all tensors.
Raises:
ValueError: if `every_n_iter` is non-positive.
"""
only_log_at_end = (
at_end and (every_n_iter is None) and (every_n_secs is None))
if (not only_log_at_end and
(every_n_iter is None) == (every_n_secs is None)):
raise ValueError(
"either at_end and/or exactly one of every_n_iter and every_n_secs "
"must be provided.")
if every_n_iter is not None and every_n_iter <= 0:
raise ValueError("invalid every_n_iter=%s." % every_n_iter)
if not isinstance(tensors, dict):
self._tag_order = tensors
tensors = {item: item for item in tensors}
else:
self._tag_order = sorted(tensors.keys())
self._tensors = tensors
self._formatter = formatter
self._timer = (
NeverTriggerTimer() if only_log_at_end else
SecondOrStepTimer(every_secs=every_n_secs, every_steps=every_n_iter))
self._log_at_end = at_end
def begin(self):
self._timer.reset()
self._iter_count = 0
# Convert names to tensors if given
self._current_tensors = {tag: _as_graph_element(tensor)
for (tag, tensor) in self._tensors.items()}
def before_run(self, run_context): # pylint: disable=unused-argument
self._should_trigger = self._timer.should_trigger_for_step(self._iter_count)
if self._should_trigger:
return SessionRunArgs(self._current_tensors)
else:
return None
def _log_tensors(self, tensor_values):
original = np.get_printoptions()
np.set_printoptions(suppress=True)
elapsed_secs, _ = self._timer.update_last_triggered_step(self._iter_count)
if self._formatter:
logging.info(self._formatter(tensor_values))
else:
stats = []
for tag in self._tag_order:
stats.append("%s = %s" % (tag, tensor_values[tag]))
if elapsed_secs is not None:
logging.info("%s (%.3f sec)", ", ".join(stats), elapsed_secs)
else:
logging.info("%s", ", ".join(stats))
np.set_printoptions(**original)
def after_run(self, run_context, run_values):
_ = run_context
if self._should_trigger:
self._log_tensors(run_values.results)
self._iter_count += 1
def end(self, session):
if self._log_at_end:
values = session.run(self._current_tensors)
self._log_tensors(values)
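# --- Illustrative sketch (not part of the original module) -------------------
# Typical TF 1.x-style usage of LoggingTensorHook together with StopAtStepHook
# inside a MonitoredTrainingSession; the trivial counter graph below is purely
# hypothetical.
def _demo_logging_tensor_hook():
  import tensorflow as tf  # assumes the TF 1.x graph-mode API
  with tf.Graph().as_default():
    global_step = tf.train.get_or_create_global_step()
    train_op = tf.assign_add(global_step, 1)
    hooks = [
        tf.train.LoggingTensorHook({"step": global_step}, every_n_iter=10),
        tf.train.StopAtStepHook(last_step=50),
    ]
    with tf.train.MonitoredTrainingSession(hooks=hooks) as sess:
      while not sess.should_stop():
        sess.run(train_op)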
def get_or_create_steps_per_run_variable():
"""Gets or creates the steps_per_run variable.
In Estimator, the user provided computation, the model_fn, is wrapped
inside a tf.while_loop for peak performance. The iterations of the loop are
specified by this variable, which adjusts its value on the CPU after each
device program execution and before the next execution.
The purpose of using a variable, rather than a constant, is to allow the
Estimator to adapt the device training iterations according to the final steps
specified by users. For example, if the user sets the steps_per_run as
4 and steps as 10 in Estimator.train(), the steps_per_run
variable will have the following value before each training run.
- 1-st execution: steps_per_run = 4
- 2-nd execution: steps_per_run = 4
- 3-rd execution: steps_per_run = 2
As model_fn increases the global step once per train_op invocation, the global
step is 10 after all executions, matching the steps=10 input passed in by
users.
Returns:
A TF non-trainable resource variable.
Raises:
RuntimeError: If multiple steps_per_run variables were found.
"""
graph = ops.get_default_graph()
collection_name = "{}_{}".format(_HOOKS, _STEPS_PER_RUN_VAR)
steps_per_run_vars = graph.get_collection(collection_name)
if len(steps_per_run_vars) == 1:
return steps_per_run_vars[0]
elif len(steps_per_run_vars) > 1:
raise RuntimeError("Multiple steps_per_run_var in collection.")
with variable_scope.variable_scope(_HOOKS, reuse=variable_scope.AUTO_REUSE):
return variable_scope.get_variable(
_STEPS_PER_RUN_VAR,
initializer=init_ops.ones_initializer(),
shape=[],
dtype=dtypes.int32,
trainable=False,
collections=[collection_name, ops.GraphKeys.LOCAL_VARIABLES],
use_resource=True)
class _MultiStepStopAtStepHook(session_run_hook.SessionRunHook):
"""Hook that requests stop at a specified step."""
def __init__(self, num_steps=None, last_step=None, steps_per_run=1):
"""Initializes a `MultiStepStopAtStepHook`.
This hook requests stop after either a number of steps have been
executed or a last step has been reached. Only one of the two options can be
specified.
If `num_steps` is specified, it indicates the number of steps to execute
after `begin()` is called. If instead `last_step` is specified, it
indicates the last step we want to execute, as passed to the `after_run()`
call.
In Estimator, the user provided computation, the model_fn, is wrapped
inside a tf.while_loop for peak performance. The steps_per_run variable
determines the number of iterations of the loop before returning to the CPU.
Args:
num_steps: Number of steps to execute.
last_step: Step after which to stop.
steps_per_run: Number of steps executed per run call.
Raises:
ValueError: If one of the arguments is invalid.
"""
if num_steps is None and last_step is None:
raise ValueError("One of num_steps or last_step must be specified.")
if num_steps is not None and last_step is not None:
raise ValueError("Only one of num_steps or last_step can be specified.")
if steps_per_run is None or steps_per_run < 1:
raise ValueError("steps_per_run should be greater than 0")
self._num_steps = num_steps
self._last_step = last_step
self._steps_per_run_initial_value = steps_per_run
def begin(self):
self._global_step_tensor = training_util.get_global_step()
if self._global_step_tensor is None:
raise RuntimeError("Global step should be created to use StopAtStepHook.")
self._steps_per_run_variable = get_or_create_steps_per_run_variable()
def _update_steps_per_run_variable(self, global_step, session):
steps = min(self._last_step - global_step,
self._steps_per_run_initial_value)
self._steps_per_run_variable.load(steps, session=session)
def after_create_session(self, session, coord):
global_step = session.run(self._global_step_tensor)
if self._last_step is None:
self._last_step = global_step + self._num_steps
self._update_steps_per_run_variable(global_step, session)
def after_run(self, run_context, run_values):
# Global step cannot be retrieved via SessionRunArgs and before_run due to
# race condition in hook execution.
global_step = run_context.session.run(self._global_step_tensor)
if global_step >= self._last_step:
run_context.request_stop()
else:
self._update_steps_per_run_variable(global_step, run_context.session)
@tf_export(v1=["train.StopAtStepHook"])
class StopAtStepHook(session_run_hook.SessionRunHook):
"""Hook that requests stop at a specified step."""
def __init__(self, num_steps=None, last_step=None):
"""Initializes a `StopAtStepHook`.
This hook requests stop after either a number of steps have been
executed or a last step has been reached. Only one of the two options can be
specified.
If `num_steps` is specified, it indicates the number of steps to execute
after `begin()` is called. If instead `last_step` is specified, it
indicates the last step we want to execute, as passed to the `after_run()`
call.
Args:
num_steps: Number of steps to execute.
last_step: Step after which to stop.
Raises:
ValueError: If one of the arguments is invalid.
"""
if num_steps is None and last_step is None:
raise ValueError("One of num_steps or last_step must be specified.")
if num_steps is not None and last_step is not None:
raise ValueError("Only one of num_steps or last_step can be specified.")
self._num_steps = num_steps
self._last_step = last_step
def begin(self):
self._global_step_tensor = training_util._get_or_create_global_step_read() # pylint: disable=protected-access
if self._global_step_tensor is None:
raise RuntimeError("Global step should be created to use StopAtStepHook.")
def after_create_session(self, session, coord):
if self._last_step is None:
global_step = session.run(self._global_step_tensor)
self._last_step = global_step + self._num_steps
def before_run(self, run_context): # pylint: disable=unused-argument
return SessionRunArgs(self._global_step_tensor)
def after_run(self, run_context, run_values):
global_step = run_values.results + 1
if global_step >= self._last_step:
# Check latest global step to ensure that the targeted last step is
# reached. global_step read tensor is the value of global step
# before running the operation. We're not sure whether current session.run
# incremented the global_step or not. Here we're checking it.
step = run_context.session.run(self._global_step_tensor)
if step >= self._last_step:
run_context.request_stop()
@tf_export(v1=["train.CheckpointSaverListener"])
class CheckpointSaverListener(object):
"""Interface for listeners that take action before or after checkpoint save.
`CheckpointSaverListener` triggers only in steps when `CheckpointSaverHook` is
triggered, and provides callbacks at the following points:
- before using the session
- before each call to `Saver.save()`
- after each call to `Saver.save()`
- at the end of session
To use a listener, implement a class and pass the listener to a
`CheckpointSaverHook`, as in this example:
```python
class ExampleCheckpointSaverListener(CheckpointSaverListener):
def begin(self):
# You can add ops to the graph here.
print('Starting the session.')
self.your_tensor = ...
def before_save(self, session, global_step_value):
print('About to write a checkpoint')
def after_save(self, session, global_step_value):
print('Done writing checkpoint.')
if decided_to_stop_training():
return True
def end(self, session, global_step_value):
print('Done with the session.')
...
listener = ExampleCheckpointSaverListener()
saver_hook = tf.train.CheckpointSaverHook(
checkpoint_dir, listeners=[listener])
with tf.train.MonitoredTrainingSession(chief_only_hooks=[saver_hook]):
...
```
A `CheckpointSaverListener` may simply take some action after every
checkpoint save. It is also possible for the listener to use its own schedule
to act less frequently, e.g. based on global_step_value. In this case,
implementors should implement the `end()` method to handle actions related to
the last checkpoint save. But the listener should not act twice if
`after_save()` already handled this last checkpoint save.
A `CheckpointSaverListener` can request training to be stopped, by returning
True in `after_save`. Please note that, in replicated distributed training
setting, only `chief` should use this behavior. Otherwise each worker will do
their own evaluation, which may be wasteful of resources.
"""
def begin(self):
pass
def before_save(self, session, global_step_value):
pass
def after_save(self, session, global_step_value):
pass
def end(self, session, global_step_value):
pass
@tf_export(v1=["train.CheckpointSaverHook"])
class CheckpointSaverHook(session_run_hook.SessionRunHook):
"""Saves checkpoints every N steps or seconds."""
def __init__(self,
checkpoint_dir,
save_secs=None,
save_steps=None,
saver=None,
checkpoint_basename="model.ckpt",
scaffold=None,
listeners=None):
"""Initializes a `CheckpointSaverHook`.
Args:
checkpoint_dir: `str`, base directory for the checkpoint files.
save_secs: `int`, save every N secs.
save_steps: `int`, save every N steps.
saver: `Saver` object, used for saving.
checkpoint_basename: `str`, base name for the checkpoint files.
scaffold: `Scaffold`, use to get saver object.
listeners: List of `CheckpointSaverListener` subclass instances.
Used for callbacks that run immediately before or after this hook saves
the checkpoint.
Raises:
ValueError: One of `save_steps` or `save_secs` should be set.
ValueError: At most one of `saver` or `scaffold` should be set.
"""
logging.info("Create CheckpointSaverHook.")
if saver is not None and scaffold is not None:
raise ValueError("You cannot provide both saver and scaffold.")
self._saver = saver
self._checkpoint_dir = checkpoint_dir
self._save_path = os.path.join(checkpoint_dir, checkpoint_basename)
self._scaffold = scaffold
self._timer = SecondOrStepTimer(every_secs=save_secs,
every_steps=save_steps)
self._listeners = listeners or []
self._steps_per_run = 1
def _set_steps_per_run(self, steps_per_run):
self._steps_per_run = steps_per_run
def begin(self):
self._summary_writer = SummaryWriterCache.get(self._checkpoint_dir)
self._global_step_tensor = training_util._get_or_create_global_step_read() # pylint: disable=protected-access
if self._global_step_tensor is None:
raise RuntimeError(
"Global step should be created to use CheckpointSaverHook.")
for l in self._listeners:
l.begin()
def after_create_session(self, session, coord):
global_step = session.run(self._global_step_tensor)
# We write the graph and saver_def here, in after_create_session, rather than
# in begin, since other hooks may still change the graph and add variables in
# begin. The graph is finalized after all begin calls.
training_util.write_graph(
ops.get_default_graph().as_graph_def(add_shapes=True),
self._checkpoint_dir,
"graph.pbtxt")
saver_def = self._get_saver().saver_def if self._get_saver() else None
graph = ops.get_default_graph()
meta_graph_def = meta_graph.create_meta_graph_def(
graph_def=graph.as_graph_def(add_shapes=True),
saver_def=saver_def)
self._summary_writer.add_graph(graph)
self._summary_writer.add_meta_graph(meta_graph_def)
# The checkpoint saved here is the state at step "global_step".
self._save(session, global_step)
self._timer.update_last_triggered_step(global_step)
def before_run(self, run_context): # pylint: disable=unused-argument
return SessionRunArgs(self._global_step_tensor)
def after_run(self, run_context, run_values):
stale_global_step = run_values.results
if self._timer.should_trigger_for_step(
stale_global_step + self._steps_per_run):
# get the real value after train op.
global_step = run_context.session.run(self._global_step_tensor)
if self._timer.should_trigger_for_step(global_step):
self._timer.update_last_triggered_step(global_step)
if self._save(run_context.session, global_step):
run_context.request_stop()
def end(self, session):
last_step = session.run(self._global_step_tensor)
if last_step != self._timer.last_triggered_step():
self._save(session, last_step)
for l in self._listeners:
l.end(session, last_step)
def _save(self, session, step):
"""Saves the latest checkpoint, returns should_stop."""
logging.info("Saving checkpoints for %d into %s.", step, self._save_path)
for l in self._listeners:
l.before_save(session, step)
self._get_saver().save(session, self._save_path, global_step=step)
self._summary_writer.add_session_log(
SessionLog(
status=SessionLog.CHECKPOINT, checkpoint_path=self._save_path),
step)
should_stop = False
for l in self._listeners:
if l.after_save(session, step):
logging.info(
"A CheckpointSaverListener requested that training be stopped. "
"listener: {}".format(l))
should_stop = True
return should_stop
def _get_saver(self):
if self._saver is not None:
return self._saver
elif self._scaffold is not None:
return self._scaffold.saver
# Get saver from the SAVERS collection if present.
collection_key = ops.GraphKeys.SAVERS
savers = ops.get_collection(collection_key)
if not savers:
raise RuntimeError(
"No items in collection {}. Please add a saver to the collection "
"or provide a saver or scaffold.".format(collection_key))
elif len(savers) > 1:
raise RuntimeError(
"More than one item in collection {}. "
"Please indicate which one to use by passing it to the constructor.".
format(collection_key))
self._saver = savers[0]
return savers[0]
@tf_export(v1=["train.StepCounterHook"])
class StepCounterHook(session_run_hook.SessionRunHook):
"""Hook that counts steps per second."""
def __init__(self,
every_n_steps=100,
every_n_secs=None,
output_dir=None,
summary_writer=None):
if (every_n_steps is None) == (every_n_secs is None):
raise ValueError(
"exactly one of every_n_steps and every_n_secs should be provided.")
self._timer = SecondOrStepTimer(every_steps=every_n_steps,
every_secs=every_n_secs)
self._summary_writer = summary_writer
self._output_dir = output_dir
self._last_global_step = None
self._global_step_check_count = 0
self._steps_per_run = 1
def _set_steps_per_run(self, steps_per_run):
self._steps_per_run = steps_per_run
def begin(self):
if self._summary_writer is None and self._output_dir:
self._summary_writer = SummaryWriterCache.get(self._output_dir)
self._global_step_tensor = training_util._get_or_create_global_step_read() # pylint: disable=protected-access
if self._global_step_tensor is None:
raise RuntimeError(
"Global step should be created to use StepCounterHook.")
self._summary_tag = training_util.get_global_step().op.name + "/sec"
def before_run(self, run_context): # pylint: disable=unused-argument
return SessionRunArgs(self._global_step_tensor)
def _log_and_record(self, elapsed_steps, elapsed_time, global_step):
steps_per_sec = elapsed_steps / elapsed_time
if self._summary_writer is not None:
summary = Summary(value=[Summary.Value(
tag=self._summary_tag, simple_value=steps_per_sec)])
self._summary_writer.add_summary(summary, global_step)
logging.info("%s: %g", self._summary_tag, steps_per_sec)
def after_run(self, run_context, run_values):
_ = run_context
stale_global_step = run_values.results
if self._timer.should_trigger_for_step(
stale_global_step + self._steps_per_run):
# get the real value after train op.
global_step = run_context.session.run(self._global_step_tensor)
if self._timer.should_trigger_for_step(global_step):
elapsed_time, elapsed_steps = self._timer.update_last_triggered_step(
global_step)
if elapsed_time is not None:
self._log_and_record(elapsed_steps, elapsed_time, global_step)
# Check whether the global step has been increased. Here, we do not use the
# timer.last_triggered_step as the timer might record a different global
# step value such that the comparison could be unreliable. For simplicity,
# we just compare the stale_global_step with previously recorded version.
if stale_global_step == self._last_global_step:
# Here, we use a counter to count how many times we have observed that the
# global step has not been increased. For some Optimizers, the global step
# is not increased each time by design. For example, SyncReplicaOptimizer
# doesn't increase the global step in worker's main train step.
self._global_step_check_count += 1
if self._global_step_check_count % 20 == 0:
self._global_step_check_count = 0
logging.warning(
"It seems that global step (tf.train.get_global_step) has not "
"been increased. Current value (could be stable): %s vs previous "
"value: %s. You could increase the global step by passing "
"tf.train.get_global_step() to Optimizer.apply_gradients or "
"Optimizer.minimize.", stale_global_step, self._last_global_step)
else:
# Whenever we observe the increment, reset the counter.
self._global_step_check_count = 0
self._last_global_step = stale_global_step
@tf_export(v1=["train.NanLossDuringTrainingError"])
class NanLossDuringTrainingError(RuntimeError):
def __str__(self):
return "NaN loss during training."
@tf_export(v1=["train.NanTensorHook"])
class NanTensorHook(session_run_hook.SessionRunHook):
"""Monitors the loss tensor and stops training if loss is NaN.
Can either fail with exception or just stop training.
"""
def __init__(self, loss_tensor, fail_on_nan_loss=True):
"""Initializes a `NanTensorHook`.
Args:
loss_tensor: `Tensor`, the loss tensor.
fail_on_nan_loss: `bool`, whether to raise exception when loss is NaN.
"""
self._loss_tensor = loss_tensor
self._fail_on_nan_loss = fail_on_nan_loss
def before_run(self, run_context): # pylint: disable=unused-argument
return SessionRunArgs(self._loss_tensor)
def after_run(self, run_context, run_values):
if np.isnan(run_values.results):
failure_message = "Model diverged with loss = NaN."
if self._fail_on_nan_loss:
logging.error(failure_message)
raise NanLossDuringTrainingError
else:
logging.warning(failure_message)
# We don't raise an error but we request stop without an exception.
run_context.request_stop()
@tf_export(v1=["train.SummarySaverHook"])
class SummarySaverHook(session_run_hook.SessionRunHook):
"""Saves summaries every N steps."""
def __init__(self,
save_steps=None,
save_secs=None,
output_dir=None,
summary_writer=None,
scaffold=None,
summary_op=None):
"""Initializes a `SummarySaverHook`.
Args:
save_steps: `int`, save summaries every N steps. Exactly one of
`save_secs` and `save_steps` should be set.
save_secs: `int`, save summaries every N seconds.
output_dir: `string`, the directory to save the summaries to. Only used
if no `summary_writer` is supplied.
summary_writer: `SummaryWriter`. If `None` and an `output_dir` was passed,
one will be created accordingly.
scaffold: `Scaffold` to get summary_op if it's not provided.
summary_op: `Tensor` of type `string` containing the serialized `Summary`
protocol buffer or a list of `Tensor`. They are most likely an output
by TF summary methods like `tf.summary.scalar` or
`tf.summary.merge_all`. It can be passed in as one tensor; if more
than one, they must be passed in as a list.
Raises:
ValueError: Exactly one of scaffold or summary_op should be set.
"""
if ((scaffold is None and summary_op is None) or
(scaffold is not None and summary_op is not None)):
raise ValueError(
"Exactly one of scaffold or summary_op must be provided.")
self._summary_op = summary_op
self._summary_writer = summary_writer
self._output_dir = output_dir
self._scaffold = scaffold
self._timer = SecondOrStepTimer(every_secs=save_secs,
every_steps=save_steps)
# TODO(mdan): Throw an error if output_dir and summary_writer are None.
def begin(self):
if self._summary_writer is None and self._output_dir:
self._summary_writer = SummaryWriterCache.get(self._output_dir)
self._next_step = None
self._global_step_tensor = training_util._get_or_create_global_step_read() # pylint: disable=protected-access
if self._global_step_tensor is None:
raise RuntimeError(
"Global step should be created to use SummarySaverHook.")
def before_run(self, run_context): # pylint: disable=unused-argument
self._request_summary = (
self._next_step is None or
self._timer.should_trigger_for_step(self._next_step))
requests = {"global_step": self._global_step_tensor}
if self._request_summary:
if self._get_summary_op() is not None:
requests["summary"] = self._get_summary_op()
return SessionRunArgs(requests)
def after_run(self, run_context, run_values):
_ = run_context
if not self._summary_writer:
return
stale_global_step = run_values.results["global_step"]
global_step = stale_global_step + 1
if self._next_step is None or self._request_summary:
global_step = run_context.session.run(self._global_step_tensor)
if self._next_step is None:
self._summary_writer.add_session_log(
SessionLog(status=SessionLog.START), global_step)
if self._request_summary:
self._timer.update_last_triggered_step(global_step)
if "summary" in run_values.results:
for summary in run_values.results["summary"]:
self._summary_writer.add_summary(summary, global_step)
self._next_step = global_step + 1
def end(self, session=None):
if self._summary_writer:
self._summary_writer.flush()
def _get_summary_op(self):
"""Fetches the summary op either from self._summary_op or self._scaffold.
Returns:
Returns a list of summary `Tensor`.
"""
summary_op = None
if self._summary_op is not None:
summary_op = self._summary_op
elif self._scaffold.summary_op is not None:
summary_op = self._scaffold.summary_op
if summary_op is None:
return None
if not isinstance(summary_op, list):
return [summary_op]
return summary_op
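# --- Illustrative sketch (not part of the original module) -------------------
# A SummarySaverHook is usually given the merged summary op and an output
# directory; the directory path below is hypothetical.
def _demo_summary_saver_hook():
  import tensorflow as tf  # assumes the TF 1.x graph-mode API
  with tf.Graph().as_default():
    global_step = tf.train.get_or_create_global_step()
    tf.summary.scalar("global_step", global_step)
    return tf.train.SummarySaverHook(
        save_steps=100,
        output_dir="/tmp/summaries",
        summary_op=tf.summary.merge_all())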
@tf_export(v1=["train.GlobalStepWaiterHook"])
class GlobalStepWaiterHook(session_run_hook.SessionRunHook):
"""Delays execution until global step reaches `wait_until_step`.
This hook delays execution until the global step reaches `wait_until_step`. It
is used to gradually start workers in distributed settings. One example usage
would be setting `wait_until_step=int(K*log(task_id+1))` assuming that
task_id=0 is the chief.
"""
def __init__(self, wait_until_step):
"""Initializes a `GlobalStepWaiterHook`.
Args:
wait_until_step: an `int`, the global step to wait for before proceeding.
"""
self._wait_until_step = wait_until_step
def begin(self):
self._worker_is_started = False
self._global_step_tensor = training_util._get_or_create_global_step_read() # pylint: disable=protected-access
if self._global_step_tensor is None:
raise RuntimeError(
"Global step should be created to use _GlobalStepWaiterHook.")
def before_run(self, run_context):
if self._worker_is_started:
return None
if self._wait_until_step <= 0:
self._worker_is_started = True
return None
logging.info("Waiting for global step %d before starting training.",
self._wait_until_step)
last_logged_step = 0
while True:
current_step = run_context.session.run(self._global_step_tensor)
if current_step >= self._wait_until_step:
self._worker_is_started = True
return None
if current_step - last_logged_step > 1000:
logging.info("Waiting for global step %d before starting training. "
"Current step is %d.", self._wait_until_step, current_step)
last_logged_step = current_step
time.sleep(0.5)
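# --- Illustrative sketch (not part of the original module) -------------------
# The docstring above suggests staggering workers with
# wait_until_step = int(K * log(task_id + 1)); a quick standalone look at the
# resulting schedule for a hypothetical K:
def _demo_global_step_waiter_schedule(num_workers=5, k=100):
  import math
  # task_id 0 (the chief) waits 0 steps, later workers wait progressively more.
  return [int(k * math.log(task_id + 1)) for task_id in range(num_workers)]
#
# _demo_global_step_waiter_schedule() -> [0, 69, 109, 138, 160]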
@tf_export(v1=["train.FinalOpsHook"])
class FinalOpsHook(session_run_hook.SessionRunHook):
"""A hook which evaluates `Tensors` at the end of a session."""
def __init__(self, final_ops, final_ops_feed_dict=None):
"""Initializes `FinalOpHook` with ops to run at the end of the session.
Args:
final_ops: A single `Tensor`, a list of `Tensors` or a dictionary of
names to `Tensors`.
final_ops_feed_dict: A feed dictionary to use when running
`final_ops_dict`.
"""
self._final_ops = final_ops
self._final_ops_feed_dict = final_ops_feed_dict
self._final_ops_values = None
@property
def final_ops_values(self):
return self._final_ops_values
def end(self, session):
if self._final_ops is not None:
try:
self._final_ops_values = session.run(
self._final_ops, feed_dict=self._final_ops_feed_dict)
except (errors.OutOfRangeError, StopIteration) as e:
logging.warning(
"An OutOfRangeError or StopIteration exception is raised by the "
"code in FinalOpsHook. This typically means the Ops running by the "
"FinalOpsHook have a dependency back to some input source, which "
"should not happen. For example, for metrics in "
"tf.estimator.Estimator, all metrics functions return two Ops: "
"`value_op` and `update_op`. Estimator.evaluate calls the "
"`update_op` for each batch of the data in input source and, once "
"it is exhausted, it call the `value_op` to get the metric values. "
"The `value_op` here should have dependency back to variables "
"reading only, rather than reading another batch from input. "
"Otherwise, the `value_op`, executed by `FinalOpsHook`, triggers "
"another data reading, which ends OutOfRangeError/StopIteration. "
"Please fix that.")
raise e
@tf_export(v1=["train.FeedFnHook"])
class FeedFnHook(session_run_hook.SessionRunHook):
"""Runs `feed_fn` and sets the `feed_dict` accordingly."""
def __init__(self, feed_fn):
"""Initializes a `FeedFnHook`.
Args:
feed_fn: function that takes no arguments and returns `dict` of `Tensor`
to feed.
"""
self.feed_fn = feed_fn
def before_run(self, run_context): # pylint: disable=unused-argument
return session_run_hook.SessionRunArgs(
fetches=None, feed_dict=self.feed_fn())
@tf_export(v1=["train.ProfilerHook"])
class ProfilerHook(session_run_hook.SessionRunHook):
"""Captures CPU/GPU profiling information every N steps or seconds.
This produces files called "timeline-<step>.json", which are in Chrome
Trace format.
For more information see:
https://github.com/catapult-project/catapult/blob/master/tracing/README.md
"""
def __init__(self,
save_steps=None,
save_secs=None,
output_dir="",
show_dataflow=True,
show_memory=False):
"""Initializes a hook that takes periodic profiling snapshots.
The `options.run_metadata` argument of `tf.Session.run` is used to collect
metadata about execution. This hook sets the metadata and dumps it in Chrome
Trace format.
Args:
save_steps: `int`, save profile traces every N steps. Exactly one of
`save_secs` and `save_steps` should be set.
save_secs: `int` or `float`, save profile traces every N seconds.
output_dir: `string`, the directory to save the profile traces to.
Defaults to the current directory.
show_dataflow: `bool`, if True, add flow events to the trace connecting
producers and consumers of tensors.
show_memory: `bool`, if True, add object snapshot events to the trace
showing the sizes and lifetimes of tensors.
"""
self._output_file = os.path.join(output_dir, "timeline-{}.json")
self._file_writer = SummaryWriterCache.get(output_dir)
self._show_dataflow = show_dataflow
self._show_memory = show_memory
self._timer = SecondOrStepTimer(
every_secs=save_secs, every_steps=save_steps)
def begin(self):
self._next_step = None
self._global_step_tensor = training_util._get_or_create_global_step_read() # pylint: disable=protected-access
if self._global_step_tensor is None:
raise RuntimeError("Global step should be created to use ProfilerHook.")
def before_run(self, run_context):
self._request_summary = (
self._next_step is not None and
self._timer.should_trigger_for_step(self._next_step))
requests = {"global_step": self._global_step_tensor}
opts = (config_pb2.RunOptions(trace_level=config_pb2.RunOptions.FULL_TRACE)
if self._request_summary else None)
return SessionRunArgs(requests, options=opts)
def after_run(self, run_context, run_values):
stale_global_step = run_values.results["global_step"]
if self._next_step is None:
# Update the timer so that it does not activate until N steps or seconds
# have passed.
self._timer.update_last_triggered_step(stale_global_step)
global_step = stale_global_step + 1
if self._request_summary:
global_step = run_context.session.run(self._global_step_tensor)
self._timer.update_last_triggered_step(global_step)
self._save(global_step,
self._output_file.format(global_step),
run_values.run_metadata.step_stats)
self._file_writer.add_run_metadata(run_values.run_metadata,
"step_%d" % global_step)
self._next_step = global_step + 1
def _save(self, step, save_path, step_stats):
logging.info("Saving timeline for %d into '%s'.", step, save_path)
with gfile.Open(save_path, "w") as f:
trace = timeline.Timeline(step_stats)
f.write(
trace.generate_chrome_trace_format(
show_dataflow=self._show_dataflow, show_memory=self._show_memory))
def _as_graph_element(obj):
"""Retrieves Graph element."""
graph = ops.get_default_graph()
if not isinstance(obj, six.string_types):
if not hasattr(obj, "graph") or obj.graph != graph:
raise ValueError("Passed %s should have graph attribute that is equal "
"to current graph %s." % (obj, graph))
return obj
if ":" in obj:
element = graph.as_graph_element(obj)
else:
element = graph.as_graph_element(obj + ":0")
# Check that there is no :1 (e.g. it's single output).
try:
graph.as_graph_element(obj + ":1")
except (KeyError, ValueError):
pass
else:
raise ValueError("Name %s is ambiguous, "
"as this `Operation` has multiple outputs "
"(at least 2)." % obj)
return element
|
|
from flask import abort, render_template, redirect, request, url_for, flash
from flask.ext.login import (
login_required,
login_user,
logout_user,
current_user
)
from . import account
from ..decorators import merchant_or_vendor_required
from .. import db
from ..email import send_email
from ..models import User, Listing, Vendor
from .forms import (
LoginForm,
CreateUserFromInviteForm,
ChangePasswordForm,
ChangeEmailForm,
RequestResetPasswordForm,
ResetPasswordForm,
ChangeCompanyNameForm,
ChangeNameForm,
CreateMerchantVendorFromInviteForm,
CSVColumnForm
)
@account.route('/login', methods=['GET', 'POST'])
def login():
"""Log in an existing user."""
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user is not None and user.verify_password(form.password.data):
login_user(user, form.remember_me.data)
flash('You are now logged in. Welcome back!', 'success')
return redirect(request.args.get('next') or url_for('main.index'))
else:
flash('Invalid email or password.', 'form-error')
return render_template('account/login.html', form=form)
@account.route('/logout')
@login_required
def logout():
logout_user()
flash('You have been logged out.', 'info')
return redirect(url_for('main.index'))
@account.route('/manage', methods=['GET', 'POST'])
@account.route('/manage/info', methods=['GET', 'POST'])
@login_required
def manage():
"""Display a user's account information."""
return render_template('account/manage.html', user=current_user, form=None)
@account.route('/reset-password', methods=['GET', 'POST'])
def reset_password_request():
"""Respond to existing user's request to reset their password."""
if not current_user.is_anonymous():
return redirect(url_for('main.index'))
form = RequestResetPasswordForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user:
token = user.generate_password_reset_token()
send_email(user.email,
'Reset Your Password',
'account/email/reset_password',
user=user,
token=token,
next=request.args.get('next'))
flash('A password reset link has been sent to {}.'
.format(form.email.data),
'warning')
return redirect(url_for('account.login'))
return render_template('account/reset_password.html', form=form)
@account.route('/reset-password/<token>', methods=['GET', 'POST'])
def reset_password(token):
"""Reset an existing user's password."""
if not current_user.is_anonymous():
return redirect(url_for('main.index'))
form = ResetPasswordForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user is None:
flash('Invalid email address.', 'form-error')
return redirect(url_for('main.index'))
if user.reset_password(token, form.new_password.data):
user.password = form.new_password.data
db.session.add(user)
db.session.commit()
flash('Your password has been updated.', 'form-success')
return redirect(url_for('account.login'))
else:
flash('The password reset link is invalid or has expired.',
'form-error')
return redirect(url_for('main.index'))
return render_template('account/reset_password.html', form=form)
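# --- Illustrative sketch (hypothetical, not part of this blueprint) ----------
# generate_password_reset_token()/reset_password() live on the User model,
# which is not shown here. Apps in this style commonly build such tokens with
# itsdangerous (older serializer API); the secret and expiry below are
# assumptions, not the project's actual values.
def _example_reset_token_roundtrip(user_id, secret_key='change-me'):
    from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
    s = Serializer(secret_key, expires_in=3600)
    token = s.dumps({'reset': user_id})
    data = s.loads(token)  # raises BadSignature/SignatureExpired if invalid
    return data.get('reset') == user_id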
@account.route('/manage/change-password', methods=['GET', 'POST'])
@login_required
def change_password():
"""Change an existing user's password."""
form = ChangePasswordForm()
if form.validate_on_submit():
if current_user.verify_password(form.old_password.data):
current_user.password = form.new_password.data
db.session.add(current_user)
db.session.commit()
flash('Your password has been updated.', 'form-success')
return redirect(url_for('main.index'))
else:
flash('Original password is invalid.', 'form-error')
return render_template('account/manage.html', form=form)
@account.route('/manage/change-email', methods=['GET', 'POST'])
@login_required
def change_email_request():
"""Respond to existing user's request to change their email."""
form = ChangeEmailForm()
if form.validate_on_submit():
if current_user.verify_password(form.password.data):
new_email = form.email.data
token = current_user.generate_email_change_token(new_email)
send_email(new_email,
'Confirm Your New Email',
'account/email/change_email',
user=current_user,
token=token)
flash('A confirmation link has been sent to {}.'.format(new_email),
'warning')
return redirect(url_for('main.index'))
else:
flash('Invalid email or password.', 'form-error')
return render_template('account/manage.html', form=form)
@account.route('/manage/change-email/<token>', methods=['GET', 'POST'])
@login_required
def change_email(token):
"""Change existing user's email with provided token."""
if current_user.change_email(token):
flash('Your email address has been updated.', 'success')
else:
flash('The confirmation link is invalid or has expired.', 'error')
return redirect(url_for('main.index'))
@account.route('/confirm-account')
@login_required
def confirm_request():
"""Respond to new user's request to confirm their account."""
token = current_user.generate_confirmation_token()
send_email(current_user.email, 'Confirm Your Account',
'account/email/confirm', user=current_user, token=token)
flash('A new confirmation link has been sent to {}.'.
format(current_user.email),
'warning')
return redirect(url_for('main.index'))
@account.route('/confirm-account/<token>')
@login_required
def confirm(token):
"""Confirm new user's account with provided token."""
if current_user.confirmed:
return redirect(url_for('main.index'))
if current_user.confirm_account(token):
db.session.commit()
flash('Your account has been confirmed.', 'success')
else:
flash('The confirmation link is invalid or has expired.', 'error')
return redirect(url_for('main.index'))
@account.route('/join-from-invite/<int:user_id>/<token>',
methods=['GET', 'POST'])
def join_from_invite(user_id, token):
"""
Confirm new user's account with provided token and prompt them to set
a password.
"""
if current_user is not None and current_user.is_authenticated():
flash('You are already logged in.', 'error')
return redirect(url_for('main.index'))
new_user = User.query.get(user_id)
if new_user is None:
abort(404)
if new_user.password_hash is not None:
if new_user.confirmed is False:
if new_user.confirm_account(token):
flash('You have been confirmed.', 'success')
db.session.commit()
return redirect(url_for('main.index'))
else:
flash('The confirmation link is invalid or has expired. Another '
'invite email with a new link has been sent to you.', 'error')
token = new_user.generate_confirmation_token()
send_email(new_user.email,
'You Are Invited To Join',
'account/email/invite',
user=new_user,
user_id=new_user.id,
token=token)
else:
flash('You have already confirmed your account.', 'error')
return redirect(url_for('main.index'))
if new_user.confirm_account(token):
if new_user.is_admin():
form = CreateUserFromInviteForm()
else:
form = CreateMerchantVendorFromInviteForm()
if form.validate_on_submit():
new_user.first_name = form.first_name.data
new_user.last_name = form.last_name.data
new_user.password = form.password.data
if 'company_name' in form:
new_user.company_name = form.company_name.data
db.session.add(new_user)
db.session.commit()
flash('Your password has been set. After you log in, you can '
'go to the "Your Account" page to review your account '
'information and settings.', 'success')
return redirect(url_for('account.login'))
return render_template('account/join_invite.html', form=form)
else:
flash('The confirmation link is invalid or has expired. Another '
'invite email with a new link has been sent to you.', 'error')
token = new_user.generate_confirmation_token()
send_email(new_user.email,
'You Are Invited To Join',
'account/email/invite',
user=new_user,
user_id=new_user.id,
token=token)
return redirect(url_for('main.index'))
@account.before_app_request
def before_request():
"""Force user to confirm email before accessing login-required routes."""
if current_user.is_authenticated() \
and not current_user.confirmed \
and request.endpoint[:8] != 'account.' \
and request.endpoint != 'static':
return redirect(url_for('account.unconfirmed'))
@account.route('/unconfirmed')
def unconfirmed():
"""Catch users with unconfirmed emails."""
if current_user.is_anonymous() or current_user.confirmed:
return redirect(url_for('main.index'))
return render_template('account/unconfirmed.html')
@account.route('/manage/change-company-name', methods=['GET', 'POST'])
@login_required
@merchant_or_vendor_required
def change_company_name():
"""Change an existing user's company name."""
form = ChangeCompanyNameForm()
if form.validate_on_submit():
current_user.company_name = form.company_name.data
db.session.add(current_user)
db.session.commit()
flash('Your company name has been updated.', 'form-success')
return render_template('account/manage.html', form=form)
@account.route('/manage/change-name', methods=['GET', 'POST'])
@login_required
def change_name():
"""Change an existing user's name."""
form = ChangeNameForm()
if form.validate_on_submit():
current_user.first_name = form.first_name.data
current_user.last_name = form.last_name.data
db.session.add(current_user)
db.session.commit()
flash('Your name has been updated.', 'form-success')
return render_template('account/manage.html', form=form)
@account.route('/v/<int:profile_id>', methods=['GET','POST'])
@login_required
def profile_page(profile_id):
vendor = Vendor.query.filter_by(id=profile_id).first()
return render_template('vendor/profile.html', vendor=vendor)
@account.route('/manage/csv-settings', methods=['GET', 'POST'])
@login_required
def csv_settings():
form = CSVColumnForm()
current_vendor = User.query.filter_by(id=current_user.id).first()
if form.validate_on_submit():
current_vendor.product_id_col = form.product_id_col.data
current_vendor.listing_description_col = form.listing_description_col.data
current_vendor.price_col = form.price_col.data
current_vendor.name_col = form.name_col.data
current_vendor.unit_col = form.unit_col.data
current_vendor.quantity_col = form.quantity_col.data
flash('Your CSV settings have been updated.', 'form-success')
db.session.commit()
form.product_id_col.data = current_vendor.product_id_col
form.listing_description_col.data = current_vendor.listing_description_col
form.price_col.data = current_vendor.price_col
form.name_col.data = current_vendor.name_col
form.unit_col.data = current_vendor.unit_col
form.quantity_col.data = current_vendor.quantity_col
return render_template('account/manage.html', form=form)
|
|
#!/usr/bin/env python
"""
@package mi.dataset.parser.test.test_zplsc_c
@file mi-dataset/mi/dataset/parser/test/test_zplsc_c.py
@author Rene Gelinas
@brief Test code for a zplsc_c data parser
"""
import os
from nose.plugins.attrib import attr
from mi.core.log import get_logger
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.driver.zplsc_c.resource import RESOURCE_PATH
from mi.dataset.parser.zplsc_c import ZplscCParser
from mi.dataset.test.test_parser import ParserUnitTestCase
log = get_logger()
MODULE_NAME = 'mi.dataset.parser.zplsc_c'
CLASS_NAME = 'ZplscCRecoveredDataParticle'
PARTICLE_TYPE = 'zplsc_c_recovered'
@attr('UNIT', group='mi')
class ZplscCParserUnitTestCase(ParserUnitTestCase):
"""
Zplsc_c Parser unit test suite
"""
def create_zplsc_c_parser(self, file_handle):
"""
        This function creates a ZplscC parser for recovered data.
"""
return ZplscCParser(self.config, file_handle, self.rec_exception_callback)
def file_path(self, filename):
log.debug('resource path = %s, file name = %s', RESOURCE_PATH, filename)
return os.path.join(RESOURCE_PATH, filename)
def rec_exception_callback(self, exception):
"""
        Callback method to watch what comes in via the exception callback.
"""
self.exception_callback_value.append(exception)
self.exceptions_detected += 1
def setUp(self):
ParserUnitTestCase.setUp(self)
self.config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: MODULE_NAME,
DataSetDriverConfigKeys.PARTICLE_CLASS: CLASS_NAME
}
self.exception_callback_value = []
self.exceptions_detected = 0
def test_zplsc_c_parser(self):
"""
Test Zplsc C parser
Just test that it is able to parse the file and records are generated.
"""
log.debug('===== START TEST ZPLSC_C Parser =====')
with open(self.file_path('15100520-Test.01A')) as in_file:
parser = self.create_zplsc_c_parser(in_file)
# In a single read, get all particles in this file.
result = parser.get_records(10)
self.assertEqual(len(result), 10)
self.assertListEqual(self.exception_callback_value, [])
log.debug('===== END TEST ZPLSC_C Parser =====')
def test_recovered(self):
"""
Read a file and pull out a data particle.
Verify that the results are those we expected.
"""
        log.debug('===== START TEST RECOVERED =====')
with open(self.file_path('15100520-Test.01A'), 'rb') as in_file:
parser = self.create_zplsc_c_parser(in_file)
# In a single read, get all particles for this file.
result = parser.get_records(10)
self.assertEqual(len(result), 10)
self.assert_particles(result, '15100520-Test.01A.yml', RESOURCE_PATH)
self.assertListEqual(self.exception_callback_value, [])
        log.debug('===== END TEST RECOVERED =====')
def test_large_recovered(self):
"""
Read a large file and pull out a data particle.
Verify that the results are those we expected.
"""
log.debug('===== START LARGE RECOVERED =====')
with open(self.file_path('16100100-Test.01A'), 'rb') as in_file:
parser = self.create_zplsc_c_parser(in_file)
# In a single read, get all particles for this file.
result = parser.get_records(60)
self.assertEqual(len(result), 60)
self.assert_particles(result, '16100100-Test.01A.yml', RESOURCE_PATH)
self.assertListEqual(self.exception_callback_value, [])
log.debug('===== END LARGE RECOVERED =====')
def test_variable_length_channels(self):
"""
        The raw binary data file used in the test_recovered test above was modified:
        a random number of values was removed from the four "channel values" lists
        of the first two records, and the "number of bins" parameter of those
        records was updated to match. The expected-results yml file was produced
        by applying the same changes to the yml file used for the test_recovered
        test.
"""
log.debug('===== START TEST VARIABLE NUM OF CHANNELS =====')
with open(self.file_path('15100520-Test-Var_Chans.01A')) as in_file:
parser = self.create_zplsc_c_parser(in_file)
# In a single read, get all particles for this file.
result = parser.get_records(10)
self.assertEqual(len(result), 10)
self.assert_particles(result, '15100520-Test-Var_Chans.01A.yml', RESOURCE_PATH)
self.assertListEqual(self.exception_callback_value, [])
log.debug('===== END TEST VARIABLE NUM OF CHANNELS =====')
def test_bad_timestamp(self):
"""
Ensure that bad data is skipped when it exists.
"""
log.debug('===== START TEST BAD TIMESTAMP =====')
with open(self.file_path('15100520-Test-Corrupt.01A')) as in_file:
parser = self.create_zplsc_c_parser(in_file)
# In a single read, get all particles for this file.
result = parser.get_records(10)
self.assertEqual(len(result), 8)
self.assert_particles(result, '15100520-Test-Corrupt.01A.yml', RESOURCE_PATH)
self.assertEqual(len(self.exception_callback_value), 2)
            for exception in self.exception_callback_value:
                log.error('Exception: %s', exception)
log.debug('===== END TEST BAD TIMESTAMP =====')
def test_bad_delimiter(self):
"""
Ensure that bad data is skipped when it exists.
"""
log.debug('===== START TEST BAD DELIMITER =====')
with open(self.file_path('15100520-Test-Corrupt-1.01A')) as in_file:
parser = self.create_zplsc_c_parser(in_file)
# In a single read, get all particles for this file.
result = parser.get_records(10)
self.assertEqual(len(result), 9)
self.assert_particles(result, '15100520-Test-Corrupt-1.01A.yml', RESOURCE_PATH)
self.assertEqual(len(self.exception_callback_value), 1)
            for exception in self.exception_callback_value:
                log.error('Exception: %s', exception)
log.debug('===== END TEST BAD DELIMITER =====')
def create_large_yml(self):
"""
Create a large yml file corresponding to an actual recovered dataset.
This is not an actual test - it allows us to create what we need
for integration testing, i.e. a yml file.
"""
with open(self.file_path('16100100-Test.01A')) as in_file:
parser = self.create_zplsc_c_parser(in_file)
result = parser.get_records(1000)
out_file = '.'.join([in_file.name, 'yml'])
self.particle_to_yml(result, out_file)
def particle_to_yml(self, particles, filename, mode='w'):
"""
        This is a testing helper, not part of the parser tests themselves.
        Since the same particles will be used for the driver test, it is helpful
        to write them out here as .yml in the same form required by the results
        .yml file.
"""
        # Open in write/append mode (per the `mode` argument); to start from scratch, delete the file manually first.
with open(self.file_path(filename), mode) as fid:
fid.write('header:\n')
fid.write(" particle_object: %s\n" % CLASS_NAME)
fid.write(" particle_type: %s\n" % PARTICLE_TYPE)
fid.write('data:\n')
for index in range(len(particles)):
particle_dict = particles[index].generate_dict()
fid.write(' - _index: %d\n' % (index+1))
fid.write(' internal_timestamp: %.7f\n' % particle_dict.get('internal_timestamp'))
fid.write(' port_timestamp: %.7f\n' % particle_dict.get('port_timestamp'))
values_dict = {}
for value in particle_dict.get('values'):
values_dict[value.get('value_id')] = value.get('value')
for key in sorted(values_dict.iterkeys()):
value = values_dict[key]
if value is None:
fid.write(' %s: %s\n' % (key, 'Null'))
elif isinstance(value, float):
fid.write(' %s: %15.4f\n' % (key, value))
elif isinstance(value, str):
fid.write(" %s: '%s'\n" % (key, value))
else:
fid.write(' %s: %s\n' % (key, value))
|
|
import itertools
import time
import eventlet
import pytest
from mock import Mock, call
from nameko.events import event_handler
from nameko.exceptions import ExtensionNotFound, MethodNotFound
from nameko.extensions import DependencyProvider
from nameko.rpc import RpcProxy, rpc
from nameko.standalone.events import event_dispatcher
from nameko.standalone.rpc import ServiceRpcProxy
from nameko.testing.services import (
entrypoint_hook, entrypoint_waiter, once, replace_dependencies,
restrict_entrypoints, worker_factory)
from nameko.testing.utils import get_container
from nameko.testing.waiting import wait_for_call
class LanguageReporter(DependencyProvider):
""" Return the language given in the worker context data
"""
def get_dependency(self, worker_ctx):
def get_language():
return worker_ctx.data['language']
return get_language
handle_event = Mock()
@pytest.fixture
def counter():
class Counter(object):
value = 0
def count(self):
self.value += 1
return self.value
return Counter()
@pytest.yield_fixture(autouse=True)
def reset_mock():
yield
handle_event.reset_mock()
@pytest.yield_fixture
def spawn_thread():
threads = []
def spawn(fn, *args):
""" Spawn a new thread to execute `fn(*args)`.
The thread will be killed at test teardown if it's still running.
"""
threads.append(eventlet.spawn(fn, *args))
yield spawn
for gt in threads:
try:
gt.kill()
except Exception: # pragma: no cover
pass
class Service(object):
name = "service"
a = RpcProxy("service_a")
language = LanguageReporter()
@rpc
def working(self, value):
return self.a.remote_method(value)
@rpc
def broken(self, value):
raise ExampleError('broken')
@event_handler('srcservice', 'eventtype')
def handle(self, msg):
handle_event(msg)
@rpc
def get_language(self):
return self.language()
class ServiceA(object):
name = "service_a"
b = RpcProxy("service_b")
@rpc
def remote_method(self, value):
res = "{}-a".format(value)
return self.b.remote_method(res)
class ServiceB(object):
name = "service_b"
c = RpcProxy("service_c")
@rpc
def remote_method(self, value):
res = "{}-b".format(value)
return self.c.remote_method(res)
class ServiceC(object):
name = "service_c"
@rpc
def remote_method(self, value):
return "{}-c".format(value)
class ExampleError(Exception):
pass
def test_entrypoint_hook(runner_factory, rabbit_config):
service_classes = (Service, ServiceA, ServiceB, ServiceC)
runner = runner_factory(rabbit_config, *service_classes)
runner.start()
service_container = get_container(runner, Service)
event_payload = "msg"
with entrypoint_hook(service_container, 'handle') as handle:
with entrypoint_waiter(service_container, 'handle'):
handle(event_payload)
handle_event.assert_called_once_with(event_payload)
def test_entrypoint_hook_with_return(runner_factory, rabbit_config):
service_classes = (Service, ServiceA, ServiceB, ServiceC)
runner = runner_factory(rabbit_config, *service_classes)
runner.start()
service_container = get_container(runner, Service)
with entrypoint_hook(service_container, 'working') as working:
assert working("value") == "value-a-b-c"
with entrypoint_hook(service_container, 'broken') as broken:
with pytest.raises(ExampleError):
broken("value")
@pytest.mark.parametrize("context_data",
[{'language': 'en'}, {'language': 'fr'}])
def test_entrypoint_hook_context_data(container_factory, rabbit_config,
context_data):
container = container_factory(Service, rabbit_config)
container.start()
method = 'get_language'
with entrypoint_hook(container, method, context_data) as get_language:
assert get_language() == context_data['language']
def test_entrypoint_hook_dependency_not_found(container_factory,
rabbit_config):
container = container_factory(Service, rabbit_config)
container.start()
method = 'nonexistent_method'
with pytest.raises(ExtensionNotFound):
with entrypoint_hook(container, method):
pass # pragma: no cover
def test_entrypoint_hook_container_dying(container_factory, rabbit_config):
class DependencyError(Exception):
pass
class BadDependency(DependencyProvider):
def worker_setup(self, worker_ctx):
raise DependencyError("Boom")
class BadService(Service):
bad = BadDependency()
container = container_factory(BadService, rabbit_config)
container.start()
with pytest.raises(DependencyError):
with entrypoint_hook(container, 'working') as call:
call()
def test_worker_factory():
class Service(object):
name = "service"
foo_proxy = RpcProxy("foo_service")
bar_proxy = RpcProxy("bar_service")
class OtherService(object):
pass
# simplest case, no overrides
instance = worker_factory(Service)
assert isinstance(instance, Service)
assert isinstance(instance.foo_proxy, Mock)
assert isinstance(instance.bar_proxy, Mock)
# no dependencies to replace
instance = worker_factory(OtherService)
assert isinstance(instance, OtherService)
# override specific dependency
bar_dependency = object()
instance = worker_factory(Service, bar_proxy=bar_dependency)
assert isinstance(instance, Service)
assert isinstance(instance.foo_proxy, Mock)
assert instance.bar_proxy is bar_dependency
# non-applicable dependency
with pytest.raises(ExtensionNotFound):
worker_factory(Service, nonexist=object())
def test_replace_dependencies_kwargs(container_factory, rabbit_config):
class Service(object):
name = "service"
foo_proxy = RpcProxy("foo_service")
bar_proxy = RpcProxy("bar_service")
baz_proxy = RpcProxy("baz_service")
@rpc
def method(self, arg):
self.foo_proxy.remote_method(arg)
class FakeDependency(object):
def __init__(self):
self.processed = []
def remote_method(self, arg):
self.processed.append(arg)
container = container_factory(Service, rabbit_config)
# customise a single dependency
fake_foo_proxy = FakeDependency()
replace_dependencies(container, foo_proxy=fake_foo_proxy)
assert 2 == len([dependency for dependency in container.extensions
if isinstance(dependency, RpcProxy)])
# customise multiple dependencies
res = replace_dependencies(container, bar_proxy=Mock(), baz_proxy=Mock())
assert list(res) == []
# verify that container.extensions doesn't include an RpcProxy anymore
assert all([not isinstance(dependency, RpcProxy)
for dependency in container.extensions])
container.start()
# verify that the fake dependency collected calls
msg = "msg"
with ServiceRpcProxy("service", rabbit_config) as service_proxy:
service_proxy.method(msg)
assert fake_foo_proxy.processed == [msg]
def test_replace_dependencies_args(container_factory, rabbit_config):
class Service(object):
name = "service"
foo_proxy = RpcProxy("foo_service")
bar_proxy = RpcProxy("bar_service")
baz_proxy = RpcProxy("baz_service")
@rpc
def method(self, arg):
self.foo_proxy.remote_method(arg)
container = container_factory(Service, rabbit_config)
# replace a single dependency
foo_proxy = replace_dependencies(container, "foo_proxy")
# replace multiple dependencies
replacements = replace_dependencies(container, "bar_proxy", "baz_proxy")
assert len([x for x in replacements]) == 2
# verify that container.extensions doesn't include an RpcProxy anymore
assert all([not isinstance(dependency, RpcProxy)
for dependency in container.extensions])
container.start()
# verify that the mock dependency collects calls
msg = "msg"
with ServiceRpcProxy("service", rabbit_config) as service_proxy:
service_proxy.method(msg)
foo_proxy.remote_method.assert_called_once_with(msg)
def test_replace_dependencies_args_and_kwargs(container_factory,
rabbit_config):
class Service(object):
name = "service"
foo_proxy = RpcProxy("foo_service")
bar_proxy = RpcProxy("bar_service")
baz_proxy = RpcProxy("baz_service")
@rpc
def method(self, arg):
self.foo_proxy.remote_method(arg)
self.bar_proxy.bar()
self.baz_proxy.baz()
class FakeDependency(object):
def __init__(self):
self.processed = []
def remote_method(self, arg):
self.processed.append(arg)
container = container_factory(Service, rabbit_config)
fake_foo_proxy = FakeDependency()
mock_bar_proxy, mock_baz_proxy = replace_dependencies(
container, 'bar_proxy', 'baz_proxy', foo_proxy=fake_foo_proxy
)
# verify that container.extensions doesn't include an RpcProxy anymore
assert all([not isinstance(dependency, RpcProxy)
for dependency in container.extensions])
container.start()
# verify that the fake dependency collected calls
msg = "msg"
with ServiceRpcProxy("service", rabbit_config) as service_proxy:
service_proxy.method(msg)
assert fake_foo_proxy.processed == [msg]
assert mock_bar_proxy.bar.call_count == 1
assert mock_baz_proxy.baz.call_count == 1
def test_replace_dependencies_in_both_args_and_kwargs_error(container_factory,
rabbit_config):
class Service(object):
name = "service"
foo_proxy = RpcProxy("foo_service")
bar_proxy = RpcProxy("bar_service")
baz_proxy = RpcProxy("baz_service")
container = container_factory(Service, rabbit_config)
with pytest.raises(RuntimeError) as exc:
replace_dependencies(
container, 'bar_proxy', 'foo_proxy', foo_proxy='foo'
)
assert "Cannot replace the same dependency" in str(exc)
def test_replace_non_dependency(container_factory, rabbit_config):
class Service(object):
name = "service"
proxy = RpcProxy("foo_service")
@rpc
def method(self):
pass # pragma: no cover
container = container_factory(Service, rabbit_config)
    # error if the dependency doesn't exist
with pytest.raises(ExtensionNotFound):
replace_dependencies(container, "nonexist")
    # error if the attribute is not a dependency
with pytest.raises(ExtensionNotFound):
replace_dependencies(container, "method")
def test_replace_dependencies_container_already_started(container_factory,
rabbit_config):
class Service(object):
name = "service"
proxy = RpcProxy("foo_service")
container = container_factory(Service, rabbit_config)
container.start()
with pytest.raises(RuntimeError):
replace_dependencies(container, "proxy")
def test_restrict_entrypoints(container_factory, rabbit_config):
method_called = Mock()
class Service(object):
name = "service"
@rpc
@once("assert not seen")
def handler_one(self, arg):
method_called(arg) # pragma: no cover
@event_handler('srcservice', 'eventtype')
def handler_two(self, msg):
method_called(msg)
container = container_factory(Service, rabbit_config)
# disable the entrypoints on handler_one
restrict_entrypoints(container, "handler_two")
container.start()
# verify the rpc entrypoint on handler_one is disabled
with ServiceRpcProxy("service", rabbit_config) as service_proxy:
with pytest.raises(MethodNotFound) as exc_info:
service_proxy.handler_one("msg")
assert str(exc_info.value) == "handler_one"
# dispatch an event to handler_two
msg = "msg"
dispatch = event_dispatcher(rabbit_config)
with entrypoint_waiter(container, 'handler_two'):
dispatch('srcservice', 'eventtype', msg)
# method_called should have exactly one call, derived from the event
# handler and not from the disabled @once entrypoint
method_called.assert_called_once_with(msg)
def test_restrict_nonexistent_entrypoint(container_factory, rabbit_config):
class Service(object):
name = "service"
@rpc
def method(self, arg):
pass # pragma: no cover
container = container_factory(Service, rabbit_config)
with pytest.raises(ExtensionNotFound):
restrict_entrypoints(container, "nonexist")
def test_restrict_entrypoint_container_already_started(container_factory,
rabbit_config):
class Service(object):
name = "service"
@rpc
def method(self, arg):
pass # pragma: no cover
container = container_factory(Service, rabbit_config)
container.start()
with pytest.raises(RuntimeError):
restrict_entrypoints(container, "method")
def test_entrypoint_waiter(container_factory, rabbit_config):
container = container_factory(Service, rabbit_config)
container.start()
dispatch = event_dispatcher(rabbit_config)
with entrypoint_waiter(container, 'handle'):
dispatch('srcservice', 'eventtype', "")
def test_entrypoint_waiter_result(container_factory, rabbit_config):
class Service(object):
name = "service"
@event_handler('srcservice', 'eventtype')
def handle_event(self, msg):
return msg.upper()
container = container_factory(Service, rabbit_config)
container.start()
dispatch = event_dispatcher(rabbit_config)
with entrypoint_waiter(container, 'handle_event') as result:
dispatch('srcservice', 'eventtype', "msg")
res = result.get()
assert res == "MSG"
def test_entrypoint_waiter_with_callback(container_factory, rabbit_config):
class Service(object):
name = "service"
@event_handler('srcservice', 'eventtype')
def handle_event(self, msg):
return msg
container = container_factory(Service, rabbit_config)
container.start()
results = []
def cb(worker_ctx, res, exc_info):
results.append((res, exc_info))
return len(results) == 2
dispatch = event_dispatcher(rabbit_config)
with entrypoint_waiter(container, 'handle_event', callback=cb):
dispatch('srcservice', 'eventtype', "msg1")
dispatch('srcservice', 'eventtype', "msg2")
assert results == [("msg1", None), ("msg2", None)]
def test_entrypoint_waiter_wait_for_specific_result(
container_factory, rabbit_config, spawn_thread
):
class Service(object):
name = "service"
@event_handler('srcservice', 'eventtype')
def handle_event(self, msg):
return msg
container = container_factory(Service, rabbit_config)
container.start()
target = 5
def cb(worker_ctx, res, exc_info):
return res == target
def increment_forever():
dispatch = event_dispatcher(rabbit_config)
for count in itertools.count():
dispatch('srcservice', 'eventtype', count)
time.sleep() # force yield
with entrypoint_waiter(container, 'handle_event', callback=cb) as result:
spawn_thread(increment_forever)
assert result.get() == target
def test_entrypoint_waiter_wait_until_called_with_argument(
container_factory, rabbit_config, spawn_thread
):
class Service(object):
name = "service"
@event_handler('srcservice', 'eventtype')
def handle_event(self, msg):
return msg
container = container_factory(Service, rabbit_config)
container.start()
target = 5
def cb(worker_ctx, res, exc_info):
return worker_ctx.args == (target,)
def increment_forever():
dispatch = event_dispatcher(rabbit_config)
for count in itertools.count():
dispatch('srcservice', 'eventtype', count)
time.sleep() # force yield
with entrypoint_waiter(container, 'handle_event', callback=cb) as result:
spawn_thread(increment_forever)
assert result.get() == target
def test_entrypoint_waiter_wait_until_raises(
container_factory, rabbit_config, spawn_thread
):
threshold = 5
class TooMuch(Exception):
pass
class Service(object):
name = "service"
@event_handler('srcservice', 'eventtype')
def handle_event(self, msg):
if msg > threshold:
raise TooMuch(msg)
return msg
container = container_factory(Service, rabbit_config)
container.start()
def cb(worker_ctx, res, exc_info):
return exc_info is not None
def increment_forever():
dispatch = event_dispatcher(rabbit_config)
for count in itertools.count():
dispatch('srcservice', 'eventtype', count)
time.sleep() # force yield
with entrypoint_waiter(container, 'handle_event', callback=cb) as result:
spawn_thread(increment_forever)
with pytest.raises(TooMuch):
result.get()
def test_entrypoint_waiter_wait_until_stops_raising(
container_factory, rabbit_config, spawn_thread
):
threshold = 5
class NotEnough(Exception):
pass
class Service(object):
name = "service"
@event_handler('srcservice', 'eventtype')
def handle_event(self, msg):
if msg < threshold:
raise NotEnough(msg)
return msg
container = container_factory(Service, rabbit_config)
container.start()
def cb(worker_ctx, res, exc_info):
return exc_info is None
def increment_forever():
dispatch = event_dispatcher(rabbit_config)
for count in itertools.count():
dispatch('srcservice', 'eventtype', count)
time.sleep() # force yield
with entrypoint_waiter(container, 'handle_event', callback=cb) as result:
spawn_thread(increment_forever)
assert result.get() == threshold
def test_entrypoint_waiter_timeout(container_factory, rabbit_config):
container = container_factory(Service, rabbit_config)
container.start()
with pytest.raises(entrypoint_waiter.Timeout) as exc_info:
with entrypoint_waiter(container, 'handle', timeout=0.01):
pass
assert str(exc_info.value) == (
"Timeout on service.handle after 0.01 seconds")
def test_entrypoint_waiter_bad_entrypoint(container_factory, rabbit_config):
container = container_factory(Service, rabbit_config)
with pytest.raises(RuntimeError) as exc:
with entrypoint_waiter(container, "unknown"):
pass # pragma: no cover
assert 'has no entrypoint' in str(exc)
def test_entrypoint_waiter_nested(container_factory, rabbit_config):
class Service(object):
name = "service"
@event_handler('srcservice', 'eventtype1')
def handle_event1(self, msg):
handle_event(1)
@event_handler('srcservice', 'eventtype2')
def handle_event2(self, msg):
handle_event(2)
container = container_factory(Service, rabbit_config)
container.start()
dispatch = event_dispatcher(rabbit_config)
with entrypoint_waiter(container, 'handle_event1'):
with entrypoint_waiter(container, 'handle_event2'):
dispatch('srcservice', 'eventtype1', "")
dispatch('srcservice', 'eventtype2', "")
assert call(1) in handle_event.call_args_list
assert call(2) in handle_event.call_args_list
def test_entrypoint_waiter_duplicate(container_factory, rabbit_config):
class Service(object):
name = "service"
@event_handler('srcservice', 'eventtype')
def handle_event(self, msg):
handle_event(msg)
container = container_factory(Service, rabbit_config)
container.start()
dispatch = event_dispatcher(rabbit_config)
with entrypoint_waiter(container, 'handle_event'):
with entrypoint_waiter(container, 'handle_event'):
dispatch('srcservice', 'eventtype', "msg")
assert handle_event.call_args_list == [call("msg")]
def test_entrypoint_waiter_result_teardown_race(
container_factory, rabbit_config, counter
):
tracker = Mock()
class TrackingDependency(DependencyProvider):
def worker_result(self, worker_ctx, res, exc_info):
tracker.worker_result()
def worker_teardown(self, worker_ctx):
tracker.worker_teardown()
class Service(object):
name = "service"
tracker = TrackingDependency()
@event_handler('srcservice', 'eventtype')
def handle(self, msg):
tracker.handle(msg)
container = container_factory(Service, rabbit_config)
container.start()
def wait_for_two_calls(worker_ctx, res, exc_info):
return counter.count() > 1
dispatch = event_dispatcher(rabbit_config)
with entrypoint_waiter(container, 'handle', callback=wait_for_two_calls):
# dispatch the first message
# wait until teardown has happened
with wait_for_call(TrackingDependency, 'worker_teardown'):
dispatch('srcservice', 'eventtype', "msg")
assert tracker.worker_teardown.call_count == 1
assert tracker.worker_result.call_count == 1
assert tracker.handle.call_count == 1
# dispatch the second event
dispatch('srcservice', 'eventtype', "msg")
# we should wait for the second teardown to complete before exiting
# the entrypoint waiter
assert tracker.worker_teardown.call_count == 2
assert tracker.worker_result.call_count == 2
assert tracker.handle.call_count == 2
|
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cloud implementation for preprocessing, training and prediction for inception model.
"""
import base64
import datetime
import logging
import os
import shutil
import tempfile
from tensorflow.python.lib.io import file_io
import urllib
from . import _util
_TF_GS_URL = 'gs://cloud-datalab/deploy/tf/tensorflow-1.0.0-cp27-cp27mu-manylinux1_x86_64.whl'
_PROTOBUF_GS_URL = 'gs://cloud-datalab/deploy/tf/protobuf-3.1.0-py2.py3-none-any.whl'
class Cloud(object):
"""Class for cloud training, preprocessing and prediction."""
@staticmethod
def preprocess(train_dataset, output_dir, eval_dataset, checkpoint, pipeline_option):
"""Preprocess data in Cloud with DataFlow."""
import apache_beam as beam
import google.datalab.utils
from . import _preprocess
if checkpoint is None:
checkpoint = _util._DEFAULT_CHECKPOINT_GSURL
job_name = ('preprocess-image-classification-' +
datetime.datetime.now().strftime('%y%m%d-%H%M%S'))
staging_package_url = _util.repackage_to_staging(output_dir)
tmpdir = tempfile.mkdtemp()
# suppress DataFlow warnings about wheel package as extra package.
original_level = logging.getLogger().getEffectiveLevel()
logging.getLogger().setLevel(logging.ERROR)
try:
# Workaround for DataFlow 2.0, which doesn't work well with extra packages in GCS.
# Remove when the issue is fixed and new version of DataFlow is included in Datalab.
extra_packages = [staging_package_url, _TF_GS_URL, _PROTOBUF_GS_URL]
local_packages = [os.path.join(tmpdir, os.path.basename(p))
for p in extra_packages]
for source, dest in zip(extra_packages, local_packages):
file_io.copy(source, dest, overwrite=True)
options = {
'staging_location': os.path.join(output_dir, 'tmp', 'staging'),
'temp_location': os.path.join(output_dir, 'tmp'),
'job_name': job_name,
'project': _util.default_project(),
'extra_packages': local_packages,
'teardown_policy': 'TEARDOWN_ALWAYS',
'no_save_main_session': True
}
if pipeline_option is not None:
options.update(pipeline_option)
opts = beam.pipeline.PipelineOptions(flags=[], **options)
p = beam.Pipeline('DataflowRunner', options=opts)
_preprocess.configure_pipeline(p, train_dataset, eval_dataset,
checkpoint, output_dir, job_name)
job_results = p.run()
finally:
shutil.rmtree(tmpdir)
logging.getLogger().setLevel(original_level)
if (_util.is_in_IPython()):
import IPython
dataflow_url = 'https://console.developers.google.com/dataflow?project=%s' % \
_util.default_project()
html = 'Job "%s" submitted.' % job_name
html += '<p>Click <a href="%s" target="_blank">here</a> to track preprocessing job. <br/>' \
% dataflow_url
IPython.display.display_html(html, raw=True)
return google.datalab.utils.DataflowJob(job_results)
@staticmethod
def train(input_dir, batch_size, max_steps, output_dir, checkpoint, cloud_train_config):
"""Train model in the cloud with CloudML trainer service."""
import google.datalab.ml as ml
if checkpoint is None:
checkpoint = _util._DEFAULT_CHECKPOINT_GSURL
staging_package_url = _util.repackage_to_staging(output_dir)
job_args = {
'input_dir': input_dir,
'max_steps': max_steps,
'batch_size': batch_size,
'checkpoint': checkpoint
}
job_request = {
'package_uris': [staging_package_url, _TF_GS_URL, _PROTOBUF_GS_URL],
'python_module': 'mltoolbox.image.classification.task',
'job_dir': output_dir,
'args': job_args
}
job_request.update(dict(cloud_train_config._asdict()))
job_id = 'image_classification_train_' + datetime.datetime.now().strftime('%y%m%d_%H%M%S')
job = ml.Job.submit_training(job_request, job_id)
if (_util.is_in_IPython()):
import IPython
log_url_query_strings = {
'project': _util.default_project(),
'resource': 'ml.googleapis.com/job_id/' + job.info['jobId']
}
log_url = 'https://console.developers.google.com/logs/viewer?' + \
urllib.urlencode(log_url_query_strings)
html = 'Job "%s" submitted.' % job.info['jobId']
html += '<p>Click <a href="%s" target="_blank">here</a> to view cloud log. <br/>' % log_url
IPython.display.display_html(html, raw=True)
return job
@staticmethod
def predict(model_id, image_files, resize, show_image):
"""Predict using a deployed (online) model."""
import google.datalab.ml as ml
images = _util.load_images(image_files, resize=resize)
parts = model_id.split('.')
if len(parts) != 2:
raise ValueError('Invalid model name for cloud prediction. Use "model.version".')
if len(images) == 0:
raise ValueError('images is empty.')
data = []
for ii, image in enumerate(images):
image_encoded = base64.b64encode(image)
data.append({
'key': str(ii),
'image_bytes': {'b64': image_encoded}
})
predictions = ml.ModelVersions(parts[0]).predict(parts[1], data)
if len(predictions) == 0:
raise Exception('Prediction results are empty.')
    # Although the prediction results contain a labels list in each instance,
    # they are all the same, so we take the first one.
labels = predictions[0]['labels']
labels_and_scores = [(x['prediction'], x['scores'][labels.index(x['prediction'])])
for x in predictions]
results = zip(image_files, images, labels_and_scores)
ret = _util.process_prediction_results(results, show_image)
return ret
@staticmethod
def batch_predict(dataset, model_dir, output_csv, output_bq_table, pipeline_option):
"""Batch predict running in cloud."""
import apache_beam as beam
import google.datalab.utils
from . import _predictor
if output_csv is None and output_bq_table is None:
raise ValueError('output_csv and output_bq_table cannot both be None.')
if 'temp_location' not in pipeline_option:
raise ValueError('"temp_location" is not set in cloud.')
job_name = ('batch-predict-image-classification-' +
datetime.datetime.now().strftime('%y%m%d-%H%M%S'))
staging_package_url = _util.repackage_to_staging(pipeline_option['temp_location'])
tmpdir = tempfile.mkdtemp()
# suppress DataFlow warnings about wheel package as extra package.
original_level = logging.getLogger().getEffectiveLevel()
logging.getLogger().setLevel(logging.ERROR)
try:
# Workaround for DataFlow 2.0, which doesn't work well with extra packages in GCS.
# Remove when the issue is fixed and new version of DataFlow is included in Datalab.
extra_packages = [staging_package_url, _TF_GS_URL, _PROTOBUF_GS_URL]
local_packages = [os.path.join(tmpdir, os.path.basename(p))
for p in extra_packages]
for source, dest in zip(extra_packages, local_packages):
file_io.copy(source, dest, overwrite=True)
options = {
'staging_location': os.path.join(pipeline_option['temp_location'], 'staging'),
'job_name': job_name,
'project': _util.default_project(),
'extra_packages': local_packages,
'teardown_policy': 'TEARDOWN_ALWAYS',
'no_save_main_session': True
}
options.update(pipeline_option)
opts = beam.pipeline.PipelineOptions(flags=[], **options)
p = beam.Pipeline('DataflowRunner', options=opts)
_predictor.configure_pipeline(p, dataset, model_dir, output_csv, output_bq_table)
job_results = p.run()
finally:
shutil.rmtree(tmpdir)
logging.getLogger().setLevel(original_level)
if (_util.is_in_IPython()):
import IPython
dataflow_url = ('https://console.developers.google.com/dataflow?project=%s' %
_util.default_project())
html = 'Job "%s" submitted.' % job_name
html += ('<p>Click <a href="%s" target="_blank">here</a> to track batch prediction job. <br/>'
% dataflow_url)
IPython.display.display_html(html, raw=True)
return google.datalab.utils.DataflowJob(job_results)
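# Hedged usage sketch (not part of the original module): Cloud.predict expects a
# deployed model id of the form "model.version"; the model name and GCS image
# paths below are assumptions for illustration only.
#
# image_files = ['gs://my-bucket/images/img1.jpg', 'gs://my-bucket/images/img2.jpg']
# results = Cloud.predict('flowers.v1', image_files, resize=True, show_image=False)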
|
|
import functools
import hashlib
from flask import jsonify, request, url_for, current_app, make_response, g
from .rate_limit import RateLimit
from .errors import too_many_requests, precondition_failed, not_modified, ValidationError, admin_right, hub_not_active
from .models import User, Hub, Properties
from .common import is_admin
def json(f):
"""This decorator generates a JSON response from a Python dictionary or
a SQLAlchemy model."""
@functools.wraps(f)
def wrapped(*args, **kwargs):
rv = f(*args, **kwargs)
status_or_headers = None
headers = None
if isinstance(rv, tuple):
rv, status_or_headers, headers = rv + (None,) * (3 - len(rv))
if isinstance(status_or_headers, (dict, list)):
headers, status_or_headers = status_or_headers, None
if not isinstance(rv, dict):
# assume it is a model, call its export_data() method
rv = rv.export_data()
rv = jsonify(rv)
if status_or_headers is not None:
rv.status_code = status_or_headers
if headers is not None:
rv.headers.extend(headers)
return rv
return wrapped
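# Hedged usage sketch (illustrative only): a view decorated with @json may
# return a dict, a model exposing export_data(), or a (body, status, headers)
# tuple. The blueprint name `api` is an assumption; `Hub` is imported above.
#
# @api.route('/hub', methods=['GET'])
# @json
# def get_hub():
#     return Hub.query.first_or_404()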
def rate_limit(limit, period):
"""This decorator implements rate limiting."""
def decorator(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
if current_app.config['USE_RATE_LIMITS']:
# generate a unique key to represent the decorated function and
                # the ID of the authenticated user. Rate limiting counters are
# maintained on each unique key.
key = '{0}/{1}'.format(f.__name__, str(g.user.id))
limiter = RateLimit(key, limit, period)
# set the rate limit headers in g, so that they are picked up
# by the after_request handler and attached to the response
g.headers = {
'X-RateLimit-Remaining': str(limiter.remaining
if limiter.remaining >= 0 else 0),
'X-RateLimit-Limit': str(limit),
'X-RateLimit-Reset': str(limiter.reset)
}
# if the client went over the limit respond with a 429 status
# code, else invoke the wrapped function
if not limiter.allowed:
return too_many_requests()
# let the request through
return f(*args, **kwargs)
return wrapped
return decorator
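# Hedged usage sketch (illustrative only): rate_limit takes a request budget and
# a window in seconds, keyed on the view name plus g.user.id, so limits are
# tracked per authenticated user. The route and blueprint name are assumptions.
#
# @api.route('/hubs', methods=['GET'])
# @json
# @rate_limit(limit=30, period=60)   # at most 30 requests per user per minute
# def get_hubs():
#     return {'hubs': [h.export_data() for h in Hub.query.all()]}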
def _filter_query(model, query, filter_spec):
filters = [f.split(',') for f in filter_spec.split(';')]
for f in filters:
if len(f) < 3 or (len(f) > 3 and f[1] != 'in'):
continue
if f[1] == 'in':
f = [f[0], f[1], f[2:]]
ops = {'eq': '__eq__', 'ne': '__ne__', 'lt': '__lt__', 'le': '__le__',
'gt': '__gt__', 'ge': '__ge__', 'in': 'in_', 'like': 'like'}
if hasattr(model, f[0]) and f[1] in ops.keys():
column = getattr(model, f[0])
op = ops[f[1]]
query = query.filter(getattr(column, op)(f[2]))
return query
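# Illustrative filter strings accepted by _filter_query above (assumption: they
# arrive via the ?filter= query argument handled by the collection decorator
# below). Triples are 'column,op,value' joined by ';', and values stay strings
# unless the column coerces them:
#   filter=status,eq,1            -> Model.status == '1'
#   filter=name,like,%pump%       -> Model.name.like('%pump%')
#   filter=id,in,1,2,3            -> Model.id.in_(['1', '2', '3'])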
def _sort_query(model, query, sort_spec):
sort = [s.split(',') for s in sort_spec.split(';')]
for s in sort:
if hasattr(model, s[0]):
column = getattr(model, s[0])
if len(s) == 2 and s[1] in ['asc', 'desc']:
query = query.order_by(getattr(column, s[1])())
else:
query = query.order_by(column.asc())
return query
def collection(model, name=None, max_per_page=10):
"""This decorator implements pagination, filtering, sorting and expanding
for collections. The expected response from the decorated route is a
SQLAlchemy query."""
if name is None:
name = model.__tablename__
def decorator(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
query = f(*args, **kwargs)
# filtering and sorting
filter = request.args.get('filter')
if filter:
query = _filter_query(model, query, filter)
sort = request.args.get('sort')
if sort:
query = _sort_query(model, query, sort)
# pagination
page = request.args.get('page', 1, type=int)
per_page = min(request.args.get('per_page', max_per_page,
type=int), max_per_page)
expand = request.args.get('expand')
p = query.paginate(page, per_page)
pages = {'page': page, 'per_page': per_page,
'total': p.total, 'pages': p.pages}
if p.has_prev:
pages['prev_url'] = url_for(request.endpoint, page=p.prev_num,
per_page=per_page,
expand=expand, _external=True,
**kwargs)
else:
pages['prev_url'] = None
if p.has_next:
pages['next_url'] = url_for(request.endpoint, filter=filter,
sort=sort, page=p.next_num,
per_page=per_page,
expand=expand, _external=True,
**kwargs)
else:
pages['next_url'] = None
pages['first_url'] = url_for(request.endpoint, filter=filter,
sort=sort, page=1, per_page=per_page,
expand=expand, _external=True,
**kwargs)
pages['last_url'] = url_for(request.endpoint, filter=filter,
sort=sort, page=p.pages,
per_page=per_page, expand=expand,
_external=True, **kwargs)
if expand:
items = [item.export_data() for item in p.items]
else:
items = [item.get_url() for item in p.items]
return {name: items, 'meta': pages}
return wrapped
return decorator
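# Hedged usage sketch (illustrative only): a collection route returns a bare
# SQLAlchemy query; the decorator then applies ?filter=, ?sort=, ?page=,
# ?per_page= and ?expand= and wraps the items with pagination metadata.
# The blueprint name and route are assumptions; `User` is imported above.
#
# @api.route('/users', methods=['GET'])
# @json
# @collection(User, name='users', max_per_page=25)
# def get_users():
#     return User.query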
def etag(f):
"""This decorator adds an ETag header to the response."""
@functools.wraps(f)
def wrapped(*args, **kwargs):
# only for HEAD and GET requests
assert request.method in ['HEAD', 'GET'],\
            '@etag is only supported for HEAD and GET requests'
rv = f(*args, **kwargs)
rv = make_response(rv)
etag = '"' + hashlib.md5(rv.get_data()).hexdigest() + '"'
rv.headers['Cache-Control'] = 'max-age=86400'
rv.headers['ETag'] = etag
if_match = request.headers.get('If-Match')
if_none_match = request.headers.get('If-None-Match')
if if_match:
etag_list = [tag.strip() for tag in if_match.split(',')]
if etag not in etag_list and '*' not in etag_list:
rv = precondition_failed()
elif if_none_match:
etag_list = [tag.strip() for tag in if_none_match.split(',')]
if etag in etag_list or '*' in etag_list:
rv = not_modified()
return rv
return wrapped
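# Hedged usage sketch (illustrative only): @etag must wrap the final response,
# so it is listed above @json; it hashes the body, sets Cache-Control and ETag,
# and answers If-Match / If-None-Match via the imported precondition_failed and
# not_modified helpers. The route and blueprint name are assumptions.
#
# @api.route('/properties', methods=['GET'])
# @etag
# @json
# def get_properties():
#     return {'properties': [p.export_data() for p in Properties.query.all()]}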
def admin_role_required(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
if not is_admin(g.user.username):
return admin_right()
# user = User.query.filter_by(username=g.user.username).first()
# if user != None:
# if user.group != 'ADMIN':
# return admin_right()
return f(*args, **kwargs)
return wrapped
def hub_active(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
        # Skip the hub status check when the request comes in via the 'edit_hub'
        # or 'edit_hubstatus' endpoints; otherwise an admin could never re-enable
        # a deactivated hub.
        if request.method == 'PUT' and request.endpoint in ('api.edit_hubstatus', 'api.edit_hub'):
pass
else:
hub = Hub.query.first()
cph = Properties.query.filter_by(key='ContactPh').first()
cad = Properties.query.filter_by(key='ContactAd').first()
cws = Properties.query.filter_by(key='ContactWs').first()
            if hub is None or hub.status is False:
return hub_not_active('Details:'+' Phone:'+cph.value+' Address:'+cad.value+' Website:'+cws.value)
return f(*args, **kwargs)
return wrapped
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Nicira Networks, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Somik Behera, Nicira Networks, Inc.
from oslo.config import cfg
from neutron.common import utils
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import periodic_task
from neutron.plugins.common import constants
LOG = logging.getLogger(__name__)
class Manager(periodic_task.PeriodicTasks):
# Set RPC API version to 1.0 by default.
RPC_API_VERSION = '1.0'
def __init__(self, host=None):
if not host:
host = cfg.CONF.host
self.host = host
super(Manager, self).__init__()
def periodic_tasks(self, context, raise_on_error=False):
self.run_periodic_tasks(context, raise_on_error=raise_on_error)
def init_host(self):
"""Handle initialization if this is a standalone service.
Child classes should override this method.
"""
pass
def after_start(self):
"""Handler post initialization stuff.
Child classes can override this method.
"""
pass
def validate_post_plugin_load():
"""Checks if the configuration variables are valid.
If the configuration is invalid then the method will return an error
message. If all is OK then it will return None.
"""
if ('dhcp_agents_per_network' in cfg.CONF and
cfg.CONF.dhcp_agents_per_network <= 0):
msg = _("dhcp_agents_per_network must be >= 1. '%s' "
"is invalid.") % cfg.CONF.dhcp_agents_per_network
return msg
def validate_pre_plugin_load():
"""Checks if the configuration variables are valid.
If the configuration is invalid then the method will return an error
message. If all is OK then it will return None.
"""
if cfg.CONF.core_plugin is None:
msg = _('Neutron core_plugin not configured!')
return msg
class NeutronManager(object):
"""Neutron's Manager class.
Neutron's Manager class is responsible for parsing a config file and
    instantiating the correct plugin that concretely implements the
    neutron_plugin_base class.
The caller should make sure that NeutronManager is a singleton.
"""
_instance = None
def __init__(self, options=None, config_file=None):
# If no options have been provided, create an empty dict
if not options:
options = {}
msg = validate_pre_plugin_load()
if msg:
LOG.critical(msg)
raise Exception(msg)
# NOTE(jkoelker) Testing for the subclass with the __subclasshook__
# breaks tach monitoring. It has been removed
        #                intentionally to allow v2 plugins to be monitored
# for performance metrics.
plugin_provider = cfg.CONF.core_plugin
LOG.debug(_("Plugin location: %s"), plugin_provider)
# If the plugin can't be found let them know gracefully
try:
LOG.info(_("Loading Plugin: %s"), plugin_provider)
plugin_klass = importutils.import_class(plugin_provider)
except ImportError:
LOG.exception(_("Error loading plugin"))
raise Exception(_("Plugin not found. "))
self.plugin = plugin_klass()
msg = validate_post_plugin_load()
if msg:
LOG.critical(msg)
raise Exception(msg)
# core plugin as a part of plugin collection simplifies
# checking extensions
# TODO(enikanorov): make core plugin the same as
# the rest of service plugins
self.service_plugins = {constants.CORE: self.plugin}
self._load_service_plugins()
def _load_services_from_core_plugin(self):
"""Puts core plugin in service_plugins for supported services."""
LOG.debug(_("Loading services supported by the core plugin"))
# supported service types are derived from supported extensions
if not hasattr(self.plugin, "supported_extension_aliases"):
return
for ext_alias in self.plugin.supported_extension_aliases:
if ext_alias in constants.EXT_TO_SERVICE_MAPPING:
service_type = constants.EXT_TO_SERVICE_MAPPING[ext_alias]
self.service_plugins[service_type] = self.plugin
LOG.info(_("Service %s is supported by the core plugin"),
service_type)
def _load_service_plugins(self):
"""Loads service plugins.
Starts from the core plugin and checks if it supports
advanced services then loads classes provided in configuration.
"""
# load services from the core plugin first
self._load_services_from_core_plugin()
plugin_providers = cfg.CONF.service_plugins
LOG.debug(_("Loading service plugins: %s"), plugin_providers)
for provider in plugin_providers:
if provider == '':
continue
try:
LOG.info(_("Loading Plugin: %s"), provider)
plugin_class = importutils.import_class(provider)
except ImportError:
LOG.exception(_("Error loading plugin"))
raise ImportError(_("Plugin not found."))
plugin_inst = plugin_class()
            # Only one implementation per service type is allowed;
            # specifying more than one plugin for the same type
            # is a fatal exception.
if plugin_inst.get_plugin_type() in self.service_plugins:
raise ValueError(_("Multiple plugins for service "
"%s were configured"),
plugin_inst.get_plugin_type())
self.service_plugins[plugin_inst.get_plugin_type()] = plugin_inst
LOG.debug(_("Successfully loaded %(type)s plugin. "
"Description: %(desc)s"),
{"type": plugin_inst.get_plugin_type(),
"desc": plugin_inst.get_plugin_description()})
@classmethod
@utils.synchronized("manager")
def _create_instance(cls):
if cls._instance is None:
cls._instance = cls()
@classmethod
def get_instance(cls):
# double checked locking
if cls._instance is None:
cls._create_instance()
return cls._instance
@classmethod
def get_plugin(cls):
return cls.get_instance().plugin
@classmethod
def get_service_plugins(cls):
return cls.get_instance().service_plugins
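# Hedged usage sketch (illustrative only): NeutronManager is used as a lazily
# created singleton; callers go through the classmethods rather than
# instantiating it directly.
#
# core_plugin = NeutronManager.get_plugin()
# service_plugins = NeutronManager.get_service_plugins()
# assert service_plugins[constants.CORE] is core_plugin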
|
|
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
from collections import OrderedDict
import logbook
import pandas as pd
from pandas.io.data import DataReader
import pytz
from six import iteritems
from . benchmarks import get_benchmark_returns
from . import treasuries, treasuries_can
from .paths import (
cache_root,
data_root,
)
from zipline.utils.tradingcalendar import (
trading_day as trading_day_nyse,
trading_days as trading_days_nyse,
)
logger = logbook.Logger('Loader')
# Mapping from index symbol to appropriate bond data
INDEX_MAPPING = {
'^GSPC':
(treasuries, 'treasury_curves.csv', 'www.federalreserve.gov'),
'^GSPTSE':
(treasuries_can, 'treasury_curves_can.csv', 'bankofcanada.ca'),
'^FTSE': # use US treasuries until UK bonds implemented
(treasuries, 'treasury_curves.csv', 'www.federalreserve.gov'),
}
ONE_HOUR = pd.Timedelta(hours=1)
def last_modified_time(path):
"""
Get the last modified time of path as a Timestamp.
"""
return pd.Timestamp(os.path.getmtime(path), unit='s', tz='UTC')
def get_data_filepath(name):
"""
    Returns the path to the named data file.
Creates containing directory, if needed.
"""
dr = data_root()
if not os.path.exists(dr):
os.makedirs(dr)
return os.path.join(dr, name)
def get_cache_filepath(name):
cr = cache_root()
if not os.path.exists(cr):
os.makedirs(cr)
return os.path.join(cr, name)
def get_benchmark_filename(symbol):
return "%s_benchmark.csv" % symbol
def has_data_for_dates(series_or_df, first_date, last_date):
"""
Does `series_or_df` have data on or before first_date and on or after
last_date?
"""
dts = series_or_df.index
if not isinstance(dts, pd.DatetimeIndex):
raise TypeError("Expected a DatetimeIndex, but got %s." % type(dts))
first, last = dts[[0, -1]]
return (first <= first_date) and (last >= last_date)
def load_market_data(trading_day=trading_day_nyse,
trading_days=trading_days_nyse,
bm_symbol='^GSPC'):
"""
Load benchmark returns and treasury yield curves for the given calendar and
benchmark symbol.
Benchmarks are downloaded as a Series from Yahoo Finance. Treasury curves
are US Treasury Bond rates and are downloaded from 'www.federalreserve.gov'
by default. For Canadian exchanges, a loader for Canadian bonds from the
Bank of Canada is also available.
Results downloaded from the internet are cached in
~/.zipline/data. Subsequent loads will attempt to read from the cached
files before falling back to redownload.
Parameters
----------
trading_day : pandas.CustomBusinessDay, optional
A trading_day used to determine the latest day for which we
expect to have data. Defaults to an NYSE trading day.
trading_days : pd.DatetimeIndex, optional
A calendar of trading days. Also used for determining what cached
dates we should expect to have cached. Defaults to the NYSE calendar.
bm_symbol : str, optional
Symbol for the benchmark index to load. Defaults to '^GSPC', the Yahoo
ticker for the S&P 500.
Returns
-------
(benchmark_returns, treasury_curves) : (pd.Series, pd.DataFrame)
Notes
-----
Both return values are DatetimeIndexed with values dated to midnight in UTC
of each stored date. The columns of `treasury_curves` are:
'1month', '3month', '6month',
'1year','2year','3year','5year','7year','10year','20year','30year'
"""
first_date = trading_days[0]
now = pd.Timestamp.utcnow()
# We expect to have benchmark and treasury data that's current up until
# **two** full trading days prior to the most recently completed trading
# day.
# Example:
# On Thu Oct 22 2015, the previous completed trading day is Wed Oct 21.
# However, data for Oct 21 doesn't become available until the early morning
# hours of Oct 22. This means that there are times on the 22nd at which we
# cannot reasonably expect to have data for the 21st available. To be
# conservative, we instead expect that at any time on the 22nd, we can
# download data for Tuesday the 20th, which is two full trading days prior
# to the date on which we're running a test.
# We'll attempt to download new data if the latest entry in our cache is
# before this date.
last_date = trading_days[trading_days.get_loc(now, method='ffill') - 2]
benchmark_returns = ensure_benchmark_data(
bm_symbol,
first_date,
last_date,
now,
# We need the trading_day to figure out the close prior to the first
# date so that we can compute returns for the first date.
trading_day,
)
treasury_curves = ensure_treasury_data(
bm_symbol,
first_date,
last_date,
now,
)
return benchmark_returns, treasury_curves
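# Hedged usage sketch (illustrative only): with the defaults this loads S&P 500
# benchmark returns and US treasury curves on the NYSE calendar; passing
# '^GSPTSE' switches to the Bank of Canada treasury loader per INDEX_MAPPING.
#
# benchmark_returns, treasury_curves = load_market_data()
# tsx_returns, can_curves = load_market_data(bm_symbol='^GSPTSE')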
def ensure_benchmark_data(symbol, first_date, last_date, now, trading_day):
"""
Ensure we have benchmark data for `symbol` from `first_date` to `last_date`
Parameters
----------
symbol : str
The symbol for the benchmark to load.
first_date : pd.Timestamp
First required date for the cache.
last_date : pd.Timestamp
Last required date for the cache.
now : pd.Timestamp
The current time. This is used to prevent repeated attempts to
re-download data that isn't available due to scheduling quirks or other
failures.
trading_day : pd.CustomBusinessDay
A trading day delta. Used to find the day before first_date so we can
get the close of the day prior to first_date.
We attempt to download data unless we already have data stored at the data
cache for `symbol` whose first entry is before or on `first_date` and whose
last entry is on or after `last_date`.
If we perform a download and the cache criteria are not satisfied, we wait
at least one hour before attempting a redownload. This is determined by
comparing the current time to the result of os.path.getmtime on the cache
path.
"""
path = get_data_filepath(get_benchmark_filename(symbol))
try:
data = pd.Series.from_csv(path).tz_localize('UTC')
if has_data_for_dates(data, first_date, last_date):
return data
# Don't re-download if we've successfully downloaded and written a file
# in the last hour.
last_download_time = last_modified_time(path)
if (now - last_download_time) <= ONE_HOUR:
logger.warn(
"Refusing to download new benchmark "
"data because a download succeeded at %s." % last_download_time
)
return data
except (OSError, IOError, ValueError) as e:
# These can all be raised by various versions of pandas on various
# classes of malformed input. Treat them all as cache misses.
logger.info(
"Loading data for {path} failed with error [{error}].".format(
path=path, error=e,
)
)
logger.info(
"Cache at {path} does not have data from {start} to {end}.\n"
"Downloading benchmark data for '{symbol}'.",
start=first_date,
end=last_date,
symbol=symbol,
path=path,
)
data = get_benchmark_returns(symbol, first_date - trading_day, last_date)
data.to_csv(path)
if not has_data_for_dates(data, first_date, last_date):
logger.warn("Still don't have expected data after redownload!")
return data
def ensure_treasury_data(bm_symbol, first_date, last_date, now):
"""
Ensure we have treasury data from treasury module associated with
`bm_symbol`.
Parameters
----------
bm_symbol : str
Benchmark symbol for which we're loading associated treasury curves.
first_date : pd.Timestamp
First date required to be in the cache.
last_date : pd.Timestamp
Last date required to be in the cache.
now : pd.Timestamp
The current time. This is used to prevent repeated attempts to
re-download data that isn't available due to scheduling quirks or other
failures.
We attempt to download data unless we already have data stored in the cache
for `module_name` whose first entry is before or on `first_date` and whose
last entry is on or after `last_date`.
If we perform a download and the cache criteria are not satisfied, we wait
at least one hour before attempting a redownload. This is determined by
comparing the current time to the result of os.path.getmtime on the cache
path.
"""
loader_module, filename, source = INDEX_MAPPING.get(
bm_symbol, INDEX_MAPPING['^GSPC']
)
first_date = max(first_date, loader_module.earliest_possible_date())
path = get_data_filepath(filename)
try:
data = pd.DataFrame.from_csv(path).tz_localize('UTC')
if has_data_for_dates(data, first_date, last_date):
return data
# Don't re-download if we've successfully downloaded and written a file
# in the last hour.
last_download_time = last_modified_time(path)
if (now - last_download_time) <= ONE_HOUR:
logger.warn(
"Refusing to download new treasury "
"data because a download succeeded at %s." % last_download_time
)
return data
except (OSError, IOError, ValueError) as e:
# These can all be raised by various versions of pandas on various
# classes of malformed input. Treat them all as cache misses.
logger.info(
"Loading data for {path} failed with error [{error}].".format(
path=path, error=e,
)
)
data = loader_module.get_treasury_data(first_date, last_date)
data.to_csv(path)
if not has_data_for_dates(data, first_date, last_date):
logger.warn("Still don't have expected data after redownload!")
return data
def _load_raw_yahoo_data(indexes=None, stocks=None, start=None, end=None):
"""Load closing prices from yahoo finance.
:Optional:
indexes : dict (Default: {'SPX': '^GSPC'})
Financial indexes to load.
stocks : list (Default: ['AAPL', 'GE', 'IBM', 'MSFT',
'XOM', 'AA', 'JNJ', 'PEP', 'KO'])
Stock closing prices to load.
        start : datetime (Default: datetime(1990, 1, 1, 0, 0, 0, 0, pytz.utc))
Retrieve prices from start date on.
end : datetime (Default: datetime(2002, 1, 1, 0, 0, 0, 0, pytz.utc))
Retrieve prices until end date.
:Note:
This is based on code presented in a talk by Wes McKinney:
http://wesmckinney.com/files/20111017/notebook_output.pdf
"""
assert indexes is not None or stocks is not None, """
must specify stocks or indexes"""
if start is None:
start = pd.datetime(1990, 1, 1, 0, 0, 0, 0, pytz.utc)
if start is not None and end is not None:
assert start < end, "start date is later than end date."
data = OrderedDict()
if stocks is not None:
for stock in stocks:
print(stock)
stock_pathsafe = stock.replace(os.path.sep, '--')
cache_filename = "{stock}-{start}-{end}.csv".format(
stock=stock_pathsafe,
start=start,
end=end).replace(':', '-')
cache_filepath = get_cache_filepath(cache_filename)
if os.path.exists(cache_filepath):
stkd = pd.DataFrame.from_csv(cache_filepath)
else:
stkd = DataReader(stock, 'yahoo', start, end).sort_index()
stkd.to_csv(cache_filepath)
data[stock] = stkd
if indexes is not None:
for name, ticker in iteritems(indexes):
print(name)
stkd = DataReader(ticker, 'yahoo', start, end).sort_index()
data[name] = stkd
return data
def load_from_yahoo(indexes=None,
stocks=None,
start=None,
end=None,
adjusted=True):
"""
Loads price data from Yahoo into a dataframe for each of the indicated
assets. By default, 'price' is taken from Yahoo's 'Adjusted Close',
which removes the impact of splits and dividends. If the argument
'adjusted' is False, then the non-adjusted 'close' field is used instead.
:param indexes: Financial indexes to load.
:type indexes: dict
:param stocks: Stock closing prices to load.
:type stocks: list
:param start: Retrieve prices from start date on.
:type start: datetime
:param end: Retrieve prices until end date.
:type end: datetime
:param adjusted: Adjust the price for splits and dividends.
:type adjusted: bool
"""
data = _load_raw_yahoo_data(indexes, stocks, start, end)
if adjusted:
close_key = 'Adj Close'
else:
close_key = 'Close'
df = pd.DataFrame({key: d[close_key] for key, d in iteritems(data)})
df.index = df.index.tz_localize(pytz.utc)
return df
def load_bars_from_yahoo(indexes=None,
stocks=None,
start=None,
end=None,
adjusted=True):
"""
Loads data from Yahoo into a panel with the following
column names for each indicated security:
- open
- high
- low
- close
- volume
- price
Note that 'price' is Yahoo's 'Adjusted Close', which removes the
impact of splits and dividends. If the argument 'adjusted' is True, then
the open, high, low, and close values are adjusted as well.
:param indexes: Financial indexes to load.
:type indexes: dict
:param stocks: Stock closing prices to load.
:type stocks: list
:param start: Retrieve prices from start date on.
:type start: datetime
:param end: Retrieve prices until end date.
:type end: datetime
:param adjusted: Adjust open/high/low/close for splits and dividends.
The 'price' field is always adjusted.
:type adjusted: bool
"""
data = _load_raw_yahoo_data(indexes, stocks, start, end)
panel = pd.Panel(data)
# Rename columns
panel.minor_axis = ['open', 'high', 'low', 'close', 'volume', 'price']
panel.major_axis = panel.major_axis.tz_localize(pytz.utc)
# Adjust data
if adjusted:
adj_cols = ['open', 'high', 'low', 'close']
for ticker in panel.items:
ratio = (panel[ticker]['price'] / panel[ticker]['close'])
ratio_filtered = ratio.fillna(0).values
for col in adj_cols:
panel[ticker][col] *= ratio_filtered
return panel
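# Hedged usage sketch (not part of the original module): shows how the two
# Yahoo loaders above are typically called together. It assumes an environment
# where pandas-datareader can still reach the Yahoo endpoint, so treat it as
# illustrative only; the symbols and dates are arbitrary examples.
def _example_load_yahoo_prices():
    """Illustrative helper demonstrating load_from_yahoo / load_bars_from_yahoo."""
    import pytz
    from datetime import datetime
    start = datetime(2012, 1, 1, tzinfo=pytz.utc)
    end = datetime(2012, 12, 31, tzinfo=pytz.utc)
    # Adjusted close prices as a single DataFrame, one column per symbol.
    prices = load_from_yahoo(stocks=['AAPL', 'MSFT'], start=start, end=end)
    # OHLCV panel with an extra dividend/split-adjusted 'price' item per symbol.
    bars = load_bars_from_yahoo(stocks=['AAPL', 'MSFT'], start=start, end=end)
    return prices, bars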
def load_prices_from_csv(filepath, identifier_col, tz='UTC'):
data = pd.read_csv(filepath, index_col=identifier_col)
data.index = pd.DatetimeIndex(data.index, tz=tz)
data.sort_index(inplace=True)
return data
def load_prices_from_csv_folder(folderpath, identifier_col, tz='UTC'):
data = None
for file in os.listdir(folderpath):
if '.csv' not in file:
continue
raw = load_prices_from_csv(os.path.join(folderpath, file),
identifier_col, tz)
if data is None:
data = raw
else:
data = pd.concat([data, raw], axis=1)
return data
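# Hedged usage sketch (not part of the original module): loads per-asset CSVs
# from a hypothetical ./prices directory whose files are indexed by a 'date'
# column. The folder and column names are assumptions for illustration only.
def _example_load_csv_prices():
    """Illustrative helper demonstrating the CSV-based price loaders."""
    single = load_prices_from_csv('prices/AAPL.csv', identifier_col='date')
    combined = load_prices_from_csv_folder('prices', identifier_col='date')
    return single, combined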
|
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for configuring TensorFlow execution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import _pywrap_tensor_float_32_execution
from tensorflow.python.eager import context
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
@tf_export('config.experimental.tensor_float_32_execution_enabled')
def tensor_float_32_execution_enabled():
"""Returns whether TensorFloat-32 is enabled.
By default, TensorFloat-32 is enabled, but this can be changed with
`tf.config.experimental.enable_tensor_float_32_execution`.
Returns:
True if TensorFloat-32 is enabled (the default) and False otherwise
"""
return _pywrap_tensor_float_32_execution.is_enabled()
@tf_export('config.experimental.enable_tensor_float_32_execution')
def enable_tensor_float_32_execution(enabled):
"""Enable or disable the use of TensorFloat-32 on supported hardware.
[TensorFloat-32](https://blogs.nvidia.com/blog/2020/05/14/tensorfloat-32-precision-format),
or TF32 for short, is a math mode for NVIDIA Ampere GPUs. TensorFloat-32
execution causes certain float32 ops, such as matrix multiplications and
convolutions, to run much faster on Ampere GPUs but with reduced precision.
This reduced precision should not impact convergence of deep learning models
in practice.
TensorFloat-32 is enabled by default in the nightly versions of TensorFlow. We
  expect it will remain enabled by default in the first stable version in which
  TensorFloat-32 is available, TensorFlow 2.4, as it increases
performance and does not reduce model quality in practice. If you want to use
the full float32 precision, you can disable TensorFloat-32 execution with this
function. For example:
```python
x = tf.fill((2, 2), 1.0001)
y = tf.fill((2, 2), 1.)
# TensorFloat-32 is enabled, so matmul is run with reduced precision
print(tf.linalg.matmul(x, y)) # [[2., 2.], [2., 2.]]
tf.config.experimental.enable_tensor_float_32_execution(False)
# Matmul is run with full precision
print(tf.linalg.matmul(x, y)) # [[2.0002, 2.0002], [2.0002, 2.0002]]
```
There is [an RFC](https://github.com/tensorflow/community/pull/287) proposing
that TensorFloat-32 remain enabled by default in stable versions of
TensorFlow. We expect the RFC to be accepted, but if it isn't, TensorFloat-32
will be disabled by default in TensorFlow 2.4.
To check whether TensorFloat-32 execution is currently enabled, use
`tf.config.experimental.tensor_float_32_execution_enabled`.
Enabling TensorFloat-32 causes float32 inputs of supported ops, such as
`tf.linalg.matmul`, to be rounded from 23 bits of precision to 10 bits of
precision in most cases. This allows the ops to execute much faster by
utilizing the GPU's tensor cores. TensorFloat-32 has the same dynamic range as
float32, meaning it is no more likely to underflow or overflow than float32.
Ops still use float32 accumulation when TensorFloat-32 is enabled. Enabling
TensorFloat-32 only affects Ampere GPUs and subsequent GPUs that support
TensorFloat-32.
Note TensorFloat-32 is not always used in supported ops, as only inputs of
certain shapes are supported. Support for more input shapes and more ops may
be added in the future. As a result, precision of float32 ops may decrease in
minor versions of TensorFlow.
Args:
enabled: Bool indicating whether to enable TensorFloat-32 execution.
"""
_pywrap_tensor_float_32_execution.enable(enabled)
@tf_export('config.threading.get_intra_op_parallelism_threads')
def get_intra_op_parallelism_threads():
"""Get number of threads used within an individual op for parallelism.
Certain operations like matrix multiplication and reductions can utilize
parallel threads for speed ups. A value of 0 means the system picks an
appropriate number.
Returns:
Number of parallel threads
"""
return context.context().intra_op_parallelism_threads
@tf_export('config.threading.set_intra_op_parallelism_threads')
def set_intra_op_parallelism_threads(num_threads):
"""Set number of threads used within an individual op for parallelism.
Certain operations like matrix multiplication and reductions can utilize
parallel threads for speed ups. A value of 0 means the system picks an
appropriate number.
Args:
num_threads: Number of parallel threads
"""
context.context().intra_op_parallelism_threads = num_threads
@tf_export('config.threading.get_inter_op_parallelism_threads')
def get_inter_op_parallelism_threads():
"""Get number of threads used for parallelism between independent operations.
Determines the number of threads used by independent non-blocking operations.
0 means the system picks an appropriate number.
Returns:
Number of parallel threads
"""
return context.context().inter_op_parallelism_threads
@tf_export('config.threading.set_inter_op_parallelism_threads')
def set_inter_op_parallelism_threads(num_threads):
"""Set number of threads used for parallelism between independent operations.
Determines the number of threads used by independent non-blocking operations.
0 means the system picks an appropriate number.
Args:
num_threads: Number of parallel threads
"""
context.context().inter_op_parallelism_threads = num_threads
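# Hedged usage sketch (not part of the TensorFlow sources): thread-pool sizes
# must be configured before TensorFlow runs any ops, otherwise the setters
# raise RuntimeError. The values below are arbitrary examples.
def _example_configure_threading():
  """Illustrative sketch of configuring op-level thread pools at startup."""
  set_intra_op_parallelism_threads(4)   # threads used inside a single op
  set_inter_op_parallelism_threads(2)   # threads used across independent ops
  assert get_intra_op_parallelism_threads() == 4
  assert get_inter_op_parallelism_threads() == 2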
@tf_export('config.optimizer.get_jit')
def get_optimizer_jit():
"""Get if JIT compilation is enabled.
Note that optimizations are only applied to code that is compiled into a
graph. In eager mode, which is the TF2 API default, that means only code that
is defined under a tf.function decorator.
Returns:
If JIT compilation is enabled.
"""
return context.context().optimizer_jit
@tf_export('config.optimizer.set_jit')
def set_optimizer_jit(enabled):
"""Set if JIT compilation is enabled.
Note that optimizations are only applied to code that is compiled into a
graph. In eager mode, which is the TF2 API default, that means only code that
is defined under a tf.function decorator.
Args:
enabled: Whether to enable JIT compilation.
"""
context.context().optimizer_jit = enabled
@tf_export('config.optimizer.get_experimental_options')
def get_optimizer_experimental_options():
"""Get experimental optimizer options.
Refer to tf.config.optimizer.set_experimental_options for a list of current
options.
  Note that optimizations are only applied in graph mode (within tf.function).
In addition, as these are experimental options, the list is subject to change.
Returns:
Dictionary of configured experimental optimizer options
"""
return context.context().get_optimizer_experimental_options()
@tf_export('config.optimizer.set_experimental_options')
def set_optimizer_experimental_options(options):
"""Set experimental optimizer options.
  Note that optimizations are only applied in graph mode (within tf.function).
In addition, as these are experimental options, the list is subject to change.
Args:
options: Dictionary of experimental optimizer options to configure.
Valid keys:
- layout_optimizer: Optimize tensor layouts
e.g. This will try to use NCHW layout on GPU which is faster.
- constant_folding: Fold constants
Statically infer the value of tensors when possible, and materialize the
result using constants.
- shape_optimization: Simplify computations made on shapes.
- remapping: Remap subgraphs onto more efficient implementations.
- arithmetic_optimization: Simplify arithmetic ops with common
sub-expression elimination and arithmetic simplification.
- dependency_optimization: Control dependency optimizations. Remove
redundant control dependencies, which may enable other optimization.
This optimizer is also essential for pruning Identity and NoOp nodes.
- loop_optimization: Loop optimizations.
- function_optimization: Function optimizations and inlining.
- debug_stripper: Strips debug-related nodes from the graph.
- disable_model_pruning: Disable removal of unnecessary ops from the graph
- scoped_allocator_optimization: Try to allocate some independent Op
outputs contiguously in order to merge or eliminate downstream Ops.
- pin_to_host_optimization: Force small ops onto the CPU.
- implementation_selector: Enable the swap of kernel implementations based
on the device placement.
- auto_mixed_precision: Change certain float32 ops to float16 on Volta
GPUs and above. Without the use of loss scaling, this can cause
numerical underflow (see
`keras.mixed_precision.experimental.LossScaleOptimizer`).
- disable_meta_optimizer: Disable the entire meta optimizer.
      - min_graph_nodes: The minimum number of nodes in a graph for the
        optimizer to run.
For smaller graphs, optimization is skipped.
"""
context.context().set_optimizer_experimental_options(options)
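# Hedged usage sketch (not part of the TensorFlow sources): the options
# argument is a plain dict keyed by the names documented above. The specific
# values chosen here are arbitrary examples, not recommended settings.
def _example_toggle_grappler_options():
  """Illustrative sketch of setting and reading back experimental options."""
  set_optimizer_experimental_options({
      'layout_optimizer': True,   # prefer faster tensor layouts on GPU
      'constant_folding': True,   # precompute statically-known values
      'debug_stripper': False,    # keep debug nodes in the graph
  })
  return get_optimizer_experimental_options()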
@tf_export('config.get_soft_device_placement')
def get_soft_device_placement():
"""Get if soft device placement is enabled.
If enabled, an op will be placed on CPU if any of the following are true
1. there's no GPU implementation for the OP
2. no GPU devices are known or registered
3. need to co-locate with reftype input(s) which are from CPU
Returns:
If soft placement is enabled.
"""
return context.context().soft_device_placement
@tf_export('config.set_soft_device_placement')
def set_soft_device_placement(enabled):
"""Set if soft device placement is enabled.
If enabled, an op will be placed on CPU if any of the following are true
1. there's no GPU implementation for the OP
2. no GPU devices are known or registered
3. need to co-locate with reftype input(s) which are from CPU
Args:
enabled: Whether to enable soft placement.
"""
context.context().soft_device_placement = enabled
@tf_export('config.experimental.get_device_policy')
def get_device_policy():
"""Gets the current device policy.
The device policy controls how operations requiring inputs on a specific
device (e.g., on GPU:0) handle inputs on a different device (e.g. GPU:1).
This function only gets the device policy for the current thread. Any
subsequently started thread will again use the default policy.
Returns:
Current thread device policy
"""
device_policy = context.context().device_policy
if device_policy == context.DEVICE_PLACEMENT_SILENT:
return 'silent'
elif device_policy == context.DEVICE_PLACEMENT_SILENT_FOR_INT32:
return 'silent_for_int32'
elif device_policy == context.DEVICE_PLACEMENT_WARN:
return 'warn'
elif device_policy == context.DEVICE_PLACEMENT_EXPLICIT:
return 'explicit'
else:
raise ValueError('Not a valid device policy: %r' % device_policy)
@tf_export('config.experimental.set_device_policy')
def set_device_policy(device_policy):
"""Sets the current thread device policy.
The device policy controls how operations requiring inputs on a specific
device (e.g., on GPU:0) handle inputs on a different device (e.g. GPU:1).
When using the default, an appropriate policy will be picked automatically.
The default policy may change over time.
This function only sets the device policy for the current thread. Any
subsequently started thread will again use the default policy.
Args:
device_policy: A device policy.
Valid values:
- None: Switch to a system default.
- 'warn': Copies the tensors which are not on the right device and logs
a warning.
- 'explicit': Raises an error if the placement is not as required.
- 'silent': Silently copies the tensors. Note that this may hide
performance problems as there is no notification provided when
operations are blocked on the tensor being copied between devices.
- 'silent_for_int32': silently copies `int32` tensors, raising errors on
the other ones.
Raises:
ValueError: If an invalid `device_policy` is passed.
"""
if device_policy == 'silent':
context.context().device_policy = context.DEVICE_PLACEMENT_SILENT
elif device_policy == 'silent_for_int32':
context.context().device_policy = context.DEVICE_PLACEMENT_SILENT_FOR_INT32
elif device_policy == 'warn':
context.context().device_policy = context.DEVICE_PLACEMENT_WARN
elif device_policy == 'explicit':
context.context().device_policy = context.DEVICE_PLACEMENT_EXPLICIT
elif device_policy is None:
context.context().device_policy = None
else:
raise ValueError('Not a valid device policy: %r' % device_policy)
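# Hedged usage sketch (not part of the TensorFlow sources): the device policy
# is per-thread, so a common pattern is to tighten it temporarily and restore
# the previous value afterwards. This helper is illustrative only.
def _example_strict_device_placement():
  """Illustrative sketch: require explicit placement, then restore the policy."""
  previous = get_device_policy()
  set_device_policy('explicit')
  try:
    pass  # run code here that must not silently copy tensors between devices
  finally:
    set_device_policy(previous)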
@tf_export('config.experimental.get_synchronous_execution')
def get_synchronous_execution():
"""Gets whether operations are executed synchronously or asynchronously.
TensorFlow can execute operations synchronously or asynchronously. If
asynchronous execution is enabled, operations may return "non-ready" handles.
Returns:
Current thread execution mode
"""
return context.context().execution_mode == context.SYNC
@tf_export('config.experimental.set_synchronous_execution')
def set_synchronous_execution(enable):
"""Specifies whether operations are executed synchronously or asynchronously.
TensorFlow can execute operations synchronously or asynchronously. If
asynchronous execution is enabled, operations may return "non-ready" handles.
When `enable` is set to None, an appropriate value will be picked
automatically. The value picked may change between TensorFlow releases.
Args:
enable: Whether operations should be dispatched synchronously.
Valid values:
- None: sets the system default.
- True: executes each operation synchronously.
- False: executes each operation asynchronously.
"""
if enable is None:
context.context().execution_mode = None
elif enable:
context.context().execution_mode = context.SYNC
else:
context.context().execution_mode = context.ASYNC
@tf_export('config.list_physical_devices',
'config.experimental.list_physical_devices')
@deprecation.deprecated_endpoints(
'config.experimental.list_physical_devices')
def list_physical_devices(device_type=None):
"""Return a list of physical devices visible to the host runtime.
Physical devices are hardware devices present on the host machine. By default
all discovered CPU and GPU devices are considered visible.
This API allows querying the physical hardware resources prior to runtime
  initialization, thus giving an opportunity to call any additional
configuration APIs. This is in contrast to `tf.config.list_logical_devices`,
which triggers runtime initialization in order to list the configured devices.
The following example lists the number of visible GPUs on the host.
>>> physical_devices = tf.config.list_physical_devices('GPU')
>>> print("Num GPUs:", len(physical_devices))
Num GPUs: ...
However, the number of GPUs available to the runtime may change during runtime
initialization due to marking certain devices as not visible or configuring
multiple logical devices.
Args:
device_type: (optional string) Only include devices matching this device
type. For example "CPU" or "GPU".
Returns:
List of discovered `tf.config.PhysicalDevice` objects
"""
return context.context().list_physical_devices(device_type)
@tf_export('config.list_logical_devices',
'config.experimental.list_logical_devices')
@deprecation.deprecated_endpoints(
'config.experimental.list_logical_devices')
def list_logical_devices(device_type=None):
"""Return a list of logical devices created by runtime.
Logical devices may correspond to physical devices or remote devices in the
cluster. Operations and tensors may be placed on these devices by using the
`name` of the `tf.config.LogicalDevice`.
Calling `tf.config.list_logical_devices` triggers the runtime to configure any
`tf.config.PhysicalDevice` visible to the runtime, thereby preventing
further configuration. To avoid runtime initialization, call
`tf.config.list_physical_devices` instead.
For example:
>>> logical_devices = tf.config.list_logical_devices('GPU')
>>> if len(logical_devices) > 0:
... # Allocate on GPU:0
... with tf.device(logical_devices[0].name):
... one = tf.constant(1)
... # Allocate on GPU:1
... with tf.device(logical_devices[1].name):
... two = tf.constant(2)
Args:
device_type: (optional string) Only include devices matching this device
type. For example "CPU" or "GPU".
Returns:
List of initialized `LogicalDevice`s
"""
return context.context().list_logical_devices(device_type=device_type)
@tf_export('config.get_visible_devices',
'config.experimental.get_visible_devices')
@deprecation.deprecated_endpoints(
'config.experimental.get_visible_devices')
def get_visible_devices(device_type=None):
"""Get the list of visible physical devices.
Returns the list of `PhysicalDevice`s currently marked as visible to the
runtime. A visible device will have at least one `LogicalDevice` associated
with it once the runtime is initialized.
The following example verifies all visible GPUs have been disabled:
>>> physical_devices = tf.config.list_physical_devices('GPU')
>>> try:
... # Disable all GPUS
... tf.config.set_visible_devices([], 'GPU')
... visible_devices = tf.config.get_visible_devices()
... for device in visible_devices:
... assert device.device_type != 'GPU'
... except:
... # Invalid device or cannot modify virtual devices once initialized.
... pass
Args:
device_type: (optional string) Only include devices matching this device
type. For example "CPU" or "GPU".
Returns:
List of visible `PhysicalDevice`s
"""
return context.context().get_visible_devices(device_type)
@tf_export('config.set_visible_devices',
'config.experimental.set_visible_devices')
@deprecation.deprecated_endpoints(
'config.experimental.set_visible_devices')
def set_visible_devices(devices, device_type=None):
"""Set the list of visible devices.
Specifies which `PhysicalDevice` objects are visible to the runtime.
TensorFlow will only allocate memory and place operations on visible
physical devices, as otherwise no `LogicalDevice` will be created on them.
By default all discovered devices are marked as visible.
The following example demonstrates disabling the first GPU on the machine.
>>> physical_devices = tf.config.list_physical_devices('GPU')
>>> try:
... # Disable first GPU
... tf.config.set_visible_devices(physical_devices[1:], 'GPU')
... logical_devices = tf.config.list_logical_devices('GPU')
... # Logical device was not created for first GPU
... assert len(logical_devices) == len(physical_devices) - 1
... except:
... # Invalid device or cannot modify virtual devices once initialized.
... pass
Args:
devices: List of `PhysicalDevice`s to make visible
device_type: (optional) Only configure devices matching this device type.
For example "CPU" or "GPU". Other devices will be left unaltered.
Raises:
ValueError: If argument validation fails.
RuntimeError: Runtime is already initialized.
"""
context.context().set_visible_devices(devices, device_type)
@tf_export('config.experimental.get_memory_usage')
def get_memory_usage(device):
"""Get the memory usage, in bytes, for the chosen device.
See https://www.tensorflow.org/api_docs/python/tf/device for specifying device
strings.
For example:
>>> gpu_devices = tf.config.list_physical_devices('GPU')
>>> if gpu_devices:
... tf.config.experimental.get_memory_usage('GPU:0')
Does not work for CPU.
Args:
device: Device string to get the bytes in use for.
Returns:
Total memory usage in bytes.
Raises:
ValueError: Non-existent or CPU device specified.
"""
return context.context().get_total_memory_usage(device)
@tf_export('config.experimental.get_memory_growth')
def get_memory_growth(device):
"""Get if memory growth is enabled for a `PhysicalDevice`.
If memory growth is enabled for a `PhysicalDevice`, the runtime initialization
will not allocate all memory on the device.
For example:
>>> physical_devices = tf.config.list_physical_devices('GPU')
>>> try:
... tf.config.experimental.set_memory_growth(physical_devices[0], True)
... assert tf.config.experimental.get_memory_growth(physical_devices[0])
... except:
... # Invalid device or cannot modify virtual devices once initialized.
... pass
Args:
device: `PhysicalDevice` to query
Returns:
A boolean indicating the memory growth setting for the `PhysicalDevice`.
Raises:
ValueError: Invalid `PhysicalDevice` specified.
"""
return context.context().get_memory_growth(device)
@tf_export('config.experimental.set_memory_growth')
def set_memory_growth(device, enable):
"""Set if memory growth should be enabled for a `PhysicalDevice`.
If memory growth is enabled for a `PhysicalDevice`, the runtime initialization
will not allocate all memory on the device. Memory growth cannot be configured
on a `PhysicalDevice` with virtual devices configured.
For example:
>>> physical_devices = tf.config.list_physical_devices('GPU')
>>> try:
... tf.config.experimental.set_memory_growth(physical_devices[0], True)
... except:
... # Invalid device or cannot modify virtual devices once initialized.
... pass
Args:
device: `PhysicalDevice` to configure
enable: (Boolean) Whether to enable or disable memory growth
Raises:
ValueError: Invalid `PhysicalDevice` specified.
RuntimeError: Runtime is already initialized.
"""
context.context().set_memory_growth(device, enable)
@tf_export('config.experimental.get_device_details')
def get_device_details(device):
"""Returns details about a physical devices.
This API takes in a `tf.config.PhysicalDevice` returned by
`tf.config.list_physical_devices`. It returns a dict with string keys
containing various details about the device. Each key is only supported by a
subset of devices, so you should not assume the returned dict will have any
particular key.
>>> gpu_devices = tf.config.list_physical_devices('GPU')
>>> if gpu_devices:
... details = tf.config.experimental.get_device_details(gpu_devices[0])
... details.get('device_name', 'Unknown GPU')
Currently, details are only returned for GPUs. This function returns an
empty dict if passed a non-GPU device.
The returned dict may have the following keys:
* `'device_name'`: A human-readable name of the device as a string, e.g.
"Titan V". Unlike `tf.config.PhysicalDevice.name`, this will be the same for
multiple devices if each device is the same model. Currently only available
for GPUs.
* `'compute_capability'`: The
[compute capability](https://developer.nvidia.com/cuda-gpus) of the device
as a tuple of two ints, in the form `(major_version, minor_version)`. Only
available for NVIDIA GPUs
Note: This is similar to `tf.sysconfig.get_build_info` in that both functions
can return information relating to GPUs. However, this function returns
run-time information about a specific device (such as a GPU's compute
capability), while `tf.sysconfig.get_build_info` returns compile-time
information about how TensorFlow was built (such as what version of CUDA
TensorFlow was built for).
Args:
device: A `tf.config.PhysicalDevice` returned by
`tf.config.list_physical_devices` or `tf.config.get_visible_devices`.
Returns:
A dict with string keys.
"""
return context.context().get_device_details(device)
@tf_export('config.get_logical_device_configuration',
'config.experimental.get_virtual_device_configuration')
@deprecation.deprecated_endpoints(
'config.experimental.get_virtual_device_configuration')
def get_logical_device_configuration(device):
"""Get the virtual device configuration for a `tf.config.PhysicalDevice`.
Returns the list of `tf.config.LogicalDeviceConfiguration`
objects previously configured by a call to
`tf.config.set_logical_device_configuration`.
For example:
>>> physical_devices = tf.config.list_physical_devices('CPU')
>>> assert len(physical_devices) == 1, "No CPUs found"
>>> configs = tf.config.get_logical_device_configuration(
... physical_devices[0])
>>> try:
... assert configs is None
... tf.config.set_logical_device_configuration(
... physical_devices[0],
... [tf.config.LogicalDeviceConfiguration(),
... tf.config.LogicalDeviceConfiguration()])
... configs = tf.config.get_logical_device_configuration(
... physical_devices[0])
... assert len(configs) == 2
... except:
... # Cannot modify virtual devices once initialized.
... pass
Args:
device: `PhysicalDevice` to query
Returns:
List of `tf.config.LogicalDeviceConfiguration` objects or
`None` if no virtual device configuration has been set for this physical
device.
"""
return context.context().get_logical_device_configuration(device)
@tf_export('config.set_logical_device_configuration',
'config.experimental.set_virtual_device_configuration')
@deprecation.deprecated_endpoints(
'config.experimental.set_virtual_device_configuration')
def set_logical_device_configuration(device, logical_devices):
"""Set the logical device configuration for a `tf.config.PhysicalDevice`.
A visible `tf.config.PhysicalDevice` will by default have a single
`tf.config.LogicalDevice` associated with it once the runtime is initialized.
Specifying a list of `tf.config.LogicalDeviceConfiguration` objects allows
multiple devices to be created on the same `tf.config.PhysicalDevice`.
The following example splits the CPU into 2 logical devices:
>>> physical_devices = tf.config.list_physical_devices('CPU')
>>> assert len(physical_devices) == 1, "No CPUs found"
>>> # Specify 2 virtual CPUs. Note currently memory limit is not supported.
>>> try:
... tf.config.set_logical_device_configuration(
... physical_devices[0],
... [tf.config.LogicalDeviceConfiguration(),
... tf.config.LogicalDeviceConfiguration()])
... logical_devices = tf.config.list_logical_devices('CPU')
... assert len(logical_devices) == 2
...
... tf.config.set_logical_device_configuration(
... physical_devices[0],
... [tf.config.LogicalDeviceConfiguration(),
... tf.config.LogicalDeviceConfiguration(),
... tf.config.LogicalDeviceConfiguration(),
... tf.config.LogicalDeviceConfiguration()])
... except:
... # Cannot modify logical devices once initialized.
... pass
The following example splits the GPU into 2 logical devices with 100 MB each:
>>> physical_devices = tf.config.list_physical_devices('GPU')
>>> try:
... tf.config.set_logical_device_configuration(
... physical_devices[0],
... [tf.config.LogicalDeviceConfiguration(memory_limit=100),
... tf.config.LogicalDeviceConfiguration(memory_limit=100)])
...
... logical_devices = tf.config.list_logical_devices('GPU')
... assert len(logical_devices) == len(physical_devices) + 1
...
... tf.config.set_logical_device_configuration(
... physical_devices[0],
... [tf.config.LogicalDeviceConfiguration(memory_limit=10),
... tf.config.LogicalDeviceConfiguration(memory_limit=10)])
... except:
... # Invalid device or cannot modify logical devices once initialized.
... pass
Args:
device: The `PhysicalDevice` to configure.
logical_devices: (optional) List of `tf.config.LogicalDeviceConfiguration`
objects to allocate for the specified `PhysicalDevice`. If None, the
default configuration will be used.
Raises:
ValueError: If argument validation fails.
RuntimeError: Runtime is already initialized.
"""
context.context().set_logical_device_configuration(device, logical_devices)
@tf_export('config.experimental.enable_mlir_bridge')
def enable_mlir_bridge():
"""Enables experimental MLIR-Based TensorFlow Compiler Bridge.
DO NOT USE, DEV AND TESTING ONLY AT THE MOMENT.
NOTE: MLIR-Based TensorFlow Compiler is under active development and has
missing features, please refrain from using. This API exists for development
and testing only.
TensorFlow Compiler Bridge (TF Bridge) is responsible for translating parts
of TensorFlow graph into a form that can be accepted as an input by a backend
compiler such as XLA.
"""
context.context().enable_mlir_bridge = True
@tf_export('config.experimental.enable_mlir_graph_optimization')
def enable_mlir_graph_optimization():
"""Enables experimental MLIR-Based TensorFlow Compiler Optimizations.
DO NOT USE, DEV AND TESTING ONLY AT THE MOMENT.
NOTE: MLIR-Based TensorFlow Compiler is under active development and has
missing features, please refrain from using. This API exists for development
and testing only.
  TensorFlow Compiler Optimizations are responsible for general graph-level
  optimizations that, in the current stack, are mostly done by Grappler graph
  optimizers.
"""
context.context().enable_mlir_graph_optimization = True
@tf_export('config.experimental.disable_mlir_bridge')
def disable_mlir_bridge():
"""Disables experimental MLIR-Based TensorFlow Compiler Bridge."""
context.context().enable_mlir_bridge = False
@tf_export('config.experimental.disable_mlir_graph_optimization')
def disable_mlir_graph_optimization():
"""Disables experimental MLIR-Based TensorFlow Compiler Optimizations."""
context.context().enable_mlir_graph_optimization = False
|
|
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
import re
import sys
ExternalEncoding = sys.getdefaultencoding()
Tag_pattern_ = re.compile(r'({.*})?(.*)')
def showIndent(outfile, level, pretty_print=False):
for i in range(level - 1):
outfile.write(" ")
def quote_xml(inStr):
if not inStr:
return ''
s1 = (isinstance(inStr, basestring) and inStr or
'%s' % inStr)
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
return s1
def quote_attrib(inStr):
s1 = (isinstance(inStr, basestring) and inStr or
'%s' % inStr)
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
    if '"' in s1:
        if "'" in s1:
            s1 = '"%s"' % s1.replace('"', "&quot;")
else:
s1 = "'%s'" % s1
else:
s1 = '"%s"' % s1
return s1
def quote_python(inStr):
s1 = inStr
if s1.find("'") == -1:
        if s1.find('\n') == -1:
            return "'%s'" % s1
        else:
            return "'''%s'''" % s1
    else:
        if s1.find('"') != -1:
            s1 = s1.replace('"', '\\"')
        if s1.find('\n') == -1:
            return '"%s"' % s1
        else:
            return '"""%s"""' % s1
class GeneratedsSuper(object):
def gds_format_string(self, input_data, input_name=''):
return input_data
def gds_validate_string(self, input_data, node, input_name=''):
if input_data is None:
return ""
return input_data
def gds_format_integer(self, input_data, input_name=''):
return '%d' % input_data
def gds_validate_integer(self, input_data, node, input_name=''):
return input_data
def gds_format_integer_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_integer_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
fvalue = float(value)
except (TypeError, ValueError), exp:
raise_parse_error(node, 'Requires sequence of integers')
return input_data
def gds_format_float(self, input_data, input_name=''):
return '%f' % input_data
def gds_validate_float(self, input_data, node, input_name=''):
return input_data
def gds_format_float_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_float_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
fvalue = float(value)
except (TypeError, ValueError), exp:
raise_parse_error(node, 'Requires sequence of floats')
return input_data
def gds_format_double(self, input_data, input_name=''):
return '%e' % input_data
def gds_validate_double(self, input_data, node, input_name=''):
return input_data
def gds_format_double_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_double_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
fvalue = float(value)
except (TypeError, ValueError), exp:
raise_parse_error(node, 'Requires sequence of doubles')
return input_data
def gds_format_boolean(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_boolean(self, input_data, node, input_name=''):
return input_data
def gds_format_boolean_list(self, input_data, input_name=''):
return '%s' % input_data
def gds_validate_boolean_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
if value not in ('true', '1', 'false', '0', ):
raise_parse_error(
node,
'Requires sequence of booleans'
' ("true", "1", "false", "0")')
return input_data
def gds_str_lower(self, instring):
return instring.lower()
def get_path_(self, node):
path_list = []
self.get_path_list_(node, path_list)
path_list.reverse()
path = '/'.join(path_list)
return path
Tag_strip_pattern_ = re.compile(r'\{.*\}')
def get_path_list_(self, node, path_list):
if node is None:
return
tag = GeneratedsSuper.Tag_strip_pattern_.sub('', node.tag)
if tag:
path_list.append(tag)
self.get_path_list_(node.getparent(), path_list)
def get_class_obj_(self, node, default_class=None):
class_obj1 = default_class
if 'xsi' in node.nsmap:
classname = node.get('{%s}type' % node.nsmap['xsi'])
if classname is not None:
names = classname.split(':')
if len(names) == 2:
classname = names[1]
class_obj2 = globals().get(classname)
if class_obj2 is not None:
class_obj1 = class_obj2
return class_obj1
def gds_build_any(self, node, type_name=None):
return None
@staticmethod
def populate_string(name):
if "mac_address" in name:
return '00:ca:fe:00:ba:be'
elif "prefix" in name:
return '10.5.6.0'
elif "_network" in name or 'subnet' in name:
return '10.5.6.0/24'
elif ("address" in name or 'gateway' in name or
"router" in name):
return '10.5.6.253'
elif "uuid" in name:
return '0797d558-1d98-479e-a941-a05ae88dc159'
elif "protocol" in name:
return 'udp'
elif "route_target" in name:
            return '192.168.1.42/32'
elif "creator" in name:
return 'test'
else:
return 'test-' + name
@staticmethod
def populate_unsignedLong(name):
return 42
@staticmethod
def populate_unsignedInt(name):
return 42
@staticmethod
def populate_integer(name):
if "prefix" in name:
return 24
if name.endswith('_access'):
return 7
else:
return 42
@staticmethod
def populate_dateTime(name):
return "2002-05-30T09:30:10.5"
@staticmethod
def populate_time(name):
return "09:30:10Z"
@staticmethod
def populate_boolean(name):
return False
|
|
import unittest
import pexif
import StringIO
import difflib
test_data = [
("test/data/rose.jpg", "test/data/rose.txt"),
("test/data/conker.jpg", "test/data/conker.txt"),
("test/data/noexif.jpg", "test/data/noexif.txt"),
]
DEFAULT_TESTFILE = test_data[0][0]
NONEXIST_TESTFILE = "test/data/noexif.jpg"
class TestLoadFunctions(unittest.TestCase):
def test_fromFile(self):
# Simple test ensures we can load and parse a file from filename
for test_file, _ in test_data:
pexif.JpegFile.fromFile(test_file)
def test_fromString(self):
# Simple test ensures we can load and parse a file passed as a string
for test_file, _ in test_data:
fd = open(test_file, "rb")
data = fd.read()
fd.close()
pexif.JpegFile.fromString(data)
def test_fromFd(self):
# Simple test ensure we can load and parse a file passed as a fd
for test_file, _ in test_data:
fd = open(test_file, "rb")
pexif.JpegFile.fromFd(fd)
def test_emptyData(self):
# Simple test ensures that empty string fails
self.assertRaises(pexif.JpegFile.InvalidFile, pexif.JpegFile.fromString, "")
def test_badData(self):
# Simple test ensures that random crap doesn't get parsed
self.assertRaises(pexif.JpegFile.InvalidFile, pexif.JpegFile.fromString,
"asl;dkfjasl;kdjfsld")
def test_regen(self):
# Test to ensure the new file matches the existing file
for test_file, _ in test_data:
data = open(test_file, "rb").read()
jpeg = pexif.JpegFile.fromString(data)
new_data = jpeg.writeString()
self.assertEqual(data, new_data, "Binary differs for <%s>" % test_file)
def test_dump(self):
# Test that the dumped data is as expected.
for test_file, expected_file in test_data:
expected = open(expected_file, 'rb').read()
jpeg = pexif.JpegFile.fromFile(test_file)
out = StringIO.StringIO()
jpeg.dump(out)
res = "Error in file <%s>\n" % test_file
x = difflib.unified_diff(expected.split('\n'), out.getvalue().split('\n'))
for each in x:
res += each
res += '\n'
self.assertEqual(expected, out.getvalue(), res)
class TestExifFunctions(unittest.TestCase):
def test_badendian(self):
data = list(open(DEFAULT_TESTFILE, "rb").read())
# Now trash the exif signature
assert(data[0x1E] == 'I')
data[0x1E] = '0'
self.assertRaises(pexif.JpegFile.InvalidFile, pexif.JpegFile.fromString, "".join(data))
def test_badtifftag(self):
data = list(open(DEFAULT_TESTFILE, "rb").read())
        # Now trash the TIFF magic number (0x2a)
assert(data[0x20] == '\x2a')
data[0x20] = '0'
self.assertRaises(pexif.JpegFile.InvalidFile, pexif.JpegFile.fromString, "".join(data))
def test_goodexif(self):
for test_file, _ in test_data:
jp = pexif.JpegFile.fromFile(test_file)
jp.get_exif()
def test_noexif(self):
jp = pexif.JpegFile.fromFile(NONEXIST_TESTFILE)
self.assertEqual(jp.get_exif(), None)
def test_noexif_create(self):
jp = pexif.JpegFile.fromFile(NONEXIST_TESTFILE)
self.assertNotEqual(jp.get_exif(create=True), None)
def test_getattr_nonexist(self):
for test_file, _ in test_data:
attr = pexif.JpegFile.fromFile(test_file). \
get_exif(create=True). \
get_primary(create=True)
self.assertEqual(attr["ImageWidth"], None)
def foo():
attr.ImageWidth
self.assertRaises(AttributeError, foo)
def test_getattr_exist(self):
attr = pexif.JpegFile.fromFile(DEFAULT_TESTFILE).get_exif().get_primary()
self.assertEqual(attr["Make"], "Canon")
self.assertEqual(attr.Make, "Canon")
def test_setattr_nonexist(self):
for test_file, _ in test_data:
attr = pexif.JpegFile.fromFile(test_file). \
get_exif(create=True).get_primary(create=True)
attr["ImageWidth"] = 3
self.assertEqual(attr["ImageWidth"], 3)
def test_setattr_exist(self):
for test_file, _ in test_data:
attr = pexif.JpegFile.fromFile(test_file). \
get_exif(create=True). \
get_primary(create=True)
attr.Make = "CanonFoo"
self.assertEqual(attr.Make, "CanonFoo")
attr["Make"] = "CanonFoo"
self.assertEqual(attr["Make"], "CanonFoo")
def test_setattr_exist_none(self):
for test_file, _ in test_data:
attr = pexif.JpegFile.fromFile(test_file). \
get_exif(create=True). \
get_primary(create=True)
attr["Make"] = None
self.assertEqual(attr["Make"], None)
attr.Make = "Foo"
self.assertEqual(attr["Make"], "Foo")
del attr.Make
self.assertEqual(attr["Make"], None)
def test_add_geo(self):
for test_file, _ in test_data:
jf = pexif.JpegFile.fromFile(test_file)
try:
jf.get_geo()
return
except jf.NoSection:
pass
attr = jf.get_exif(create=True).get_primary(create=True)
gps = attr.new_gps()
gps["GPSLatitudeRef"] = "S"
gps["GPSLongitudeRef"] = "E"
data = jf.writeString()
jf2 = pexif.JpegFile.fromString(data)
self.assertEqual(jf2.get_exif().get_primary().GPS \
["GPSLatitudeRef"], "S")
def test_simple_add_geo(self):
for test_file, _ in test_data:
jf = pexif.JpegFile.fromFile(test_file)
(lat, lng) = (-37.312312, 45.412321)
jf.set_geo(lat, lng)
new_file = jf.writeString()
new = pexif.JpegFile.fromString(new_file)
new_lat, new_lng = new.get_geo()
self.assertAlmostEqual(lat, new_lat, 6)
self.assertAlmostEqual(lng, new_lng, 6)
def test_simple_add_geo2(self):
for test_file, _ in test_data:
jf = pexif.JpegFile.fromFile(test_file)
(lat, lng) = (51.522, -1.455)
jf.set_geo(lat, lng)
new_file = jf.writeString()
new = pexif.JpegFile.fromString(new_file)
new_lat, new_lng = new.get_geo()
self.assertAlmostEqual(lat, new_lat, 6)
self.assertAlmostEqual(lng, new_lng, 6)
def test_simple_add_geo3(self):
for test_file, _ in test_data:
jf = pexif.JpegFile.fromFile(test_file)
(lat, lng) = (51.522, -1.2711)
jf.set_geo(lat, lng)
new_file = jf.writeString()
new = pexif.JpegFile.fromString(new_file)
new_lat, new_lng = new.get_geo()
self.assertAlmostEqual(lat, new_lat, 6)
self.assertAlmostEqual(lng, new_lng, 6)
def test_get_geo(self):
jf = pexif.JpegFile.fromFile(DEFAULT_TESTFILE)
self.assertRaises(pexif.JpegFile.NoSection, jf.get_geo)
def test_exif_property(self):
def test_get():
foo = jf.exif
jf = pexif.JpegFile.fromFile(DEFAULT_TESTFILE, mode="ro")
self.assertEqual(jf.exif.__class__, pexif.ExifSegment)
# exif doesn't exist
jf = pexif.JpegFile.fromFile(NONEXIST_TESTFILE, mode="ro")
self.assertRaises(AttributeError, test_get)
def test_invalid_set(self):
"""Test that setting an invalid tag raise an attribute error"""
jf = pexif.JpegFile.fromFile(DEFAULT_TESTFILE)
def test_set():
jf.exif.primary.UserComment = "foobar"
self.assertRaises(AttributeError, test_set)
def test_invalid_set_embedded(self):
"""Test that setting an embedded tag raises a type error"""
jf = pexif.JpegFile.fromFile(DEFAULT_TESTFILE)
def test_set():
jf.exif.primary.ExtendedEXIF = 5
self.assertRaises(TypeError, test_set)
def test_set_embedded(self):
"""Test that setting an embedded tag raises a type error"""
jf = pexif.JpegFile.fromFile(DEFAULT_TESTFILE)
ext_exif = pexif.IfdExtendedEXIF(jf.exif.primary.e, 0, "rw", jf)
jf.exif.primary.ExtendedEXIF = ext_exif
def test_set_xy_dimensions(self):
"""Test setting PixelXDimension and PixelYDimension."""
jf = pexif.JpegFile.fromFile(DEFAULT_TESTFILE)
jf.exif.primary.ExtendedEXIF.PixelXDimension = [1600]
jf.exif.primary.ExtendedEXIF.PixelYDimension = [1200]
new = jf.writeString()
nf = pexif.JpegFile.fromString(new)
self.assertEqual(nf.exif.primary.ExtendedEXIF.PixelXDimension, [1600])
self.assertEqual(nf.exif.primary.ExtendedEXIF.PixelYDimension, [1200])
if __name__ == "__main__":
unittest.main()
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import logging
import operator
import os
import subprocess
import tempfile
from luigi import six
import luigi
import luigi.contrib.hadoop
from luigi.target import FileAlreadyExists, FileSystemTarget
from luigi.task import flatten
if six.PY3:
unicode = str
logger = logging.getLogger('luigi-interface')
class HiveCommandError(RuntimeError):
def __init__(self, message, out=None, err=None):
super(HiveCommandError, self).__init__(message, out, err)
self.message = message
self.out = out
self.err = err
def load_hive_cmd():
return luigi.configuration.get_config().get('hive', 'command', 'hive')
def get_hive_syntax():
return luigi.configuration.get_config().get('hive', 'release', 'cdh4')
def run_hive(args, check_return_code=True):
"""
    Runs the `hive` command line tool, passing in the given args, and
    returning stdout.
    With the Apache release of Hive, some of the table existence checks
    (which are done using DESCRIBE) do not exit with a return code of 0,
    so we need an option to ignore the return code and just return stdout
    for parsing.
"""
cmd = [load_hive_cmd()] + args
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if check_return_code and p.returncode != 0:
raise HiveCommandError("Hive command: {0} failed with error code: {1}".format(" ".join(cmd), p.returncode),
stdout, stderr)
return stdout
def run_hive_cmd(hivecmd, check_return_code=True):
"""
Runs the given hive query and returns stdout.
"""
return run_hive(['-e', hivecmd], check_return_code)
def run_hive_script(script):
"""
Runs the contents of the given script in hive and returns stdout.
"""
if not os.path.isfile(script):
raise RuntimeError("Hive script: {0} does not exist.".format(script))
return run_hive(['-f', script])
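# Hedged usage sketch (not part of the original module): run_hive_cmd simply
# shells out to the configured hive binary, so this only works where that
# binary is available. The database and table names are illustrative
# assumptions, not real defaults.
def _example_list_partitions():
    """Illustrative sketch: list partitions of a hypothetical table."""
    stdout = run_hive_cmd("use default; show partitions web_logs;")
    return [line for line in stdout.split("\n") if line.strip()]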
@six.add_metaclass(abc.ABCMeta)
class HiveClient(object): # interface
@abc.abstractmethod
def table_location(self, table, database='default', partition=None):
"""
Returns location of db.table (or db.table.partition). partition is a dict of partition key to
value.
"""
pass
@abc.abstractmethod
def table_schema(self, table, database='default'):
"""
Returns list of [(name, type)] for each column in database.table.
"""
pass
@abc.abstractmethod
def table_exists(self, table, database='default', partition=None):
"""
Returns true if db.table (or db.table.partition) exists. partition is a dict of partition key to
value.
"""
pass
@abc.abstractmethod
def partition_spec(self, partition):
""" Turn a dict into a string partition specification """
pass
class HiveCommandClient(HiveClient):
"""
Uses `hive` invocations to find information.
"""
def table_location(self, table, database='default', partition=None):
cmd = "use {0}; describe formatted {1}".format(database, table)
if partition is not None:
cmd += " PARTITION ({0})".format(self.partition_spec(partition))
stdout = run_hive_cmd(cmd)
for line in stdout.split("\n"):
if "Location:" in line:
return line.split("\t")[1]
def table_exists(self, table, database='default', partition=None):
if partition is None:
stdout = run_hive_cmd('use {0}; show tables like "{1}";'.format(database, table))
return stdout and table.lower() in stdout
else:
stdout = run_hive_cmd("""use %s; show partitions %s partition
(%s)""" % (database, table, self.partition_spec(partition)))
if stdout:
return True
else:
return False
def table_schema(self, table, database='default'):
describe = run_hive_cmd("use {0}; describe {1}".format(database, table))
if not describe or "does not exist" in describe:
return None
return [tuple([x.strip() for x in line.strip().split("\t")]) for line in describe.strip().split("\n")]
def partition_spec(self, partition):
"""
        Turns a dict into a Hive partition specification string.
"""
return ','.join(["{0}='{1}'".format(k, v) for (k, v) in
sorted(six.iteritems(partition), key=operator.itemgetter(0))])
class ApacheHiveCommandClient(HiveCommandClient):
"""
A subclass for the HiveCommandClient to (in some cases) ignore the return code from
the hive command so that we can just parse the output.
"""
def table_schema(self, table, database='default'):
describe = run_hive_cmd("use {0}; describe {1}".format(database, table), False)
if not describe or "Table not found" in describe:
return None
return [tuple([x.strip() for x in line.strip().split("\t")]) for line in describe.strip().split("\n")]
class MetastoreClient(HiveClient):
def table_location(self, table, database='default', partition=None):
with HiveThriftContext() as client:
if partition is not None:
partition_str = self.partition_spec(partition)
thrift_table = client.get_partition_by_name(database, table, partition_str)
else:
thrift_table = client.get_table(database, table)
return thrift_table.sd.location
def table_exists(self, table, database='default', partition=None):
with HiveThriftContext() as client:
if partition is None:
return table in client.get_all_tables(database)
else:
return partition in self._existing_partitions(table, database, client)
def _existing_partitions(self, table, database, client):
def _parse_partition_string(partition_string):
partition_def = {}
for part in partition_string.split("/"):
name, value = part.split("=")
partition_def[name] = value
return partition_def
# -1 is max_parts, the # of partition names to return (-1 = unlimited)
partition_strings = client.get_partition_names(database, table, -1)
return [_parse_partition_string(existing_partition) for existing_partition in partition_strings]
def table_schema(self, table, database='default'):
with HiveThriftContext() as client:
return [(field_schema.name, field_schema.type) for field_schema in client.get_schema(database, table)]
def partition_spec(self, partition):
return "/".join("%s=%s" % (k, v) for (k, v) in sorted(six.iteritems(partition), key=operator.itemgetter(0)))
class HiveThriftContext(object):
"""
Context manager for hive metastore client.
"""
def __enter__(self):
try:
from thrift import Thrift
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
# Note that this will only work with a CDH release.
# This uses the thrift bindings generated by the ThriftHiveMetastore service in Beeswax.
# If using the Apache release of Hive this import will fail.
from hive_metastore import ThriftHiveMetastore
config = luigi.configuration.get_config()
host = config.get('hive', 'metastore_host')
port = config.getint('hive', 'metastore_port')
transport = TSocket.TSocket(host, port)
transport = TTransport.TBufferedTransport(transport)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
transport.open()
self.transport = transport
return ThriftHiveMetastore.Client(protocol)
except ImportError as e:
raise Exception('Could not import Hive thrift library:' + str(e))
def __exit__(self, exc_type, exc_val, exc_tb):
self.transport.close()
def get_default_client():
if get_hive_syntax() == "apache":
return ApacheHiveCommandClient()
else:
return HiveCommandClient()
client = get_default_client()
class HiveQueryTask(luigi.contrib.hadoop.BaseHadoopJobTask):
"""
Task to run a hive query.
"""
# by default, we let hive figure these out.
n_reduce_tasks = None
bytes_per_reducer = None
reducers_max = None
@abc.abstractmethod
def query(self):
""" Text of query to run in hive """
raise RuntimeError("Must implement query!")
def hiverc(self):
"""
        Location of an rc file to run before the query.
        If the hiverc-location key is specified in client.cfg, this defaults to
        the value configured there; otherwise it returns None.
Returning a list of rc files will load all of them in order.
"""
return luigi.configuration.get_config().get('hive', 'hiverc-location', default=None)
def hiveconfs(self):
"""
        Returns a dict of key=value settings to be passed along
to the hive command line via --hiveconf. By default, sets
mapred.job.name to task_id and if not None, sets:
* mapred.reduce.tasks (n_reduce_tasks)
* mapred.fairscheduler.pool (pool) or mapred.job.queue.name (pool)
* hive.exec.reducers.bytes.per.reducer (bytes_per_reducer)
* hive.exec.reducers.max (reducers_max)
"""
jcs = {}
jcs['mapred.job.name'] = self.task_id
if self.n_reduce_tasks is not None:
jcs['mapred.reduce.tasks'] = self.n_reduce_tasks
if self.pool is not None:
# Supporting two schedulers: fair (default) and capacity using the same option
scheduler_type = luigi.configuration.get_config().get('hadoop', 'scheduler', 'fair')
if scheduler_type == 'fair':
jcs['mapred.fairscheduler.pool'] = self.pool
elif scheduler_type == 'capacity':
jcs['mapred.job.queue.name'] = self.pool
if self.bytes_per_reducer is not None:
jcs['hive.exec.reducers.bytes.per.reducer'] = self.bytes_per_reducer
if self.reducers_max is not None:
jcs['hive.exec.reducers.max'] = self.reducers_max
return jcs
def job_runner(self):
return HiveQueryRunner()
class HiveQueryRunner(luigi.contrib.hadoop.JobRunner):
"""
Runs a HiveQueryTask by shelling out to hive.
"""
def prepare_outputs(self, job):
"""
Called before job is started.
If output is a `FileSystemTarget`, create parent directories so the hive command won't fail
"""
outputs = flatten(job.output())
for o in outputs:
if isinstance(o, FileSystemTarget):
parent_dir = os.path.dirname(o.path)
if parent_dir and not o.fs.exists(parent_dir):
logger.info("Creating parent directory %r", parent_dir)
try:
# there is a possible race condition
# which needs to be handled here
o.fs.mkdir(parent_dir)
except FileAlreadyExists:
pass
def run_job(self, job):
self.prepare_outputs(job)
with tempfile.NamedTemporaryFile() as f:
query = job.query()
if isinstance(query, unicode):
query = query.encode('utf8')
f.write(query)
f.flush()
arglist = [load_hive_cmd(), '-f', f.name]
hiverc = job.hiverc()
if hiverc:
if isinstance(hiverc, str):
hiverc = [hiverc]
for rcfile in hiverc:
arglist += ['-i', rcfile]
if job.hiveconfs():
for k, v in six.iteritems(job.hiveconfs()):
arglist += ['--hiveconf', '{0}={1}'.format(k, v)]
logger.info(arglist)
return luigi.contrib.hadoop.run_and_track_hadoop_job(arglist)
class HiveTableTarget(luigi.Target):
"""
exists returns true if the table exists.
"""
def __init__(self, table, database='default', client=None):
self.database = database
self.table = table
self.hive_cmd = load_hive_cmd()
if client is None:
client = get_default_client()
self.client = client
def exists(self):
logger.debug("Checking Hive table '%s.%s' exists", self.database, self.table)
return self.client.table_exists(self.table, self.database)
@property
def path(self):
"""
Returns the path to this table in HDFS.
"""
location = self.client.table_location(self.table, self.database)
if not location:
raise Exception("Couldn't find location for table: {0}".format(str(self)))
return location
def open(self, mode):
return NotImplementedError("open() is not supported for HiveTableTarget")
class HivePartitionTarget(luigi.Target):
"""
exists returns true if the table's partition exists.
"""
def __init__(self, table, partition, database='default', fail_missing_table=True, client=None):
self.database = database
self.table = table
self.partition = partition
if client is None:
client = get_default_client()
self.client = client
self.fail_missing_table = fail_missing_table
def exists(self):
try:
logger.debug("Checking Hive table '{d}.{t}' for partition {p}".format(d=self.database, t=self.table, p=str(self.partition)))
return self.client.table_exists(self.table, self.database, self.partition)
except HiveCommandError:
if self.fail_missing_table:
raise
else:
if self.client.table_exists(self.table, self.database):
# a real error occurred
raise
else:
# oh the table just doesn't exist
return False
@property
def path(self):
"""
Returns the path for this HiveTablePartitionTarget's data.
"""
location = self.client.table_location(self.table, self.database, self.partition)
if not location:
raise Exception("Couldn't find location for table: {0}".format(str(self)))
return location
def open(self, mode):
return NotImplementedError("open() is not supported for HivePartitionTarget")
class ExternalHiveTask(luigi.ExternalTask):
"""
External task that depends on a Hive table/partition.
"""
database = luigi.Parameter(default='default')
table = luigi.Parameter()
# since this is an external task and will never be initialized from the CLI, partition can be any python object, in this case a dictionary
partition = luigi.Parameter(default=None, description='Python dictionary specifying the target partition e.g. {"date": "2013-01-25"}')
def output(self):
if self.partition is not None:
assert self.partition, "partition required"
return HivePartitionTarget(table=self.table,
partition=self.partition,
database=self.database)
else:
return HiveTableTarget(self.table, self.database)
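# Hedged usage sketch (not part of the original module): a minimal task wiring
# the pieces above together. The table, partition and query used here are
# invented for illustration and are not part of luigi.
class ExampleDailyAggregation(HiveQueryTask):
    """Illustrative HiveQueryTask that fills one partition per day."""
    day = luigi.Parameter(default="2013-01-25")
    def requires(self):
        # Wait for the upstream partition to exist before running the query.
        return ExternalHiveTask(table="web_logs",
                                partition={"date": self.day})
    def query(self):
        return ("INSERT OVERWRITE TABLE daily_counts "
                "PARTITION (date='{day}') "
                "SELECT user_id, COUNT(*) AS n FROM web_logs "
                "WHERE date='{day}' GROUP BY user_id".format(day=self.day))
    def output(self):
        return HivePartitionTarget(table="daily_counts",
                                   partition={"date": self.day})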
|
|
# Copyright 2017-2021 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
import click
from guild import click_util
def _ac_python(**_kw):
return click_util.completion_command("python*[^-config]")
def _ac_guild_version_or_path(incomplete, ctx, **_kw):
versions = [ver for ver in _guild_versions(ctx) if ver.startswith(incomplete)]
return versions + click_util.completion_filename(ext=["whl"])
def _guild_versions(ctx):
import json
# We want to import pip._vendor.requests but pip has an import
# cycle so we get to it via pip._internal.index.
from pip._internal.index import requests
def f():
resp = requests.get("https://pypi.org/pypi/guildai/json")
data = json.loads(resp.text)
return sorted(data.get("releases") or {})
return click_util.completion_safe_apply(ctx, f, []) or []
def _ac_guild_home(**_kw):
return click_util.completion_dir()
def _ac_requirement(**_kw):
return click_util.completion_filename(ext=["txt"])
def _ac_path(**_kw):
return click_util.completion_dir()
@click.command()
@click.argument("dir", default="venv", autocompletion=click_util.completion_dir)
@click.option(
"-n",
"--name",
metavar="NAME",
help="Environment name (default is env parent directory name).",
)
@click.option(
"-p",
"--python",
metavar="VERSION",
help="Version of Python to use for the environment.",
autocompletion=_ac_python,
)
@click.option(
"-g",
"--guild",
metavar="VERSION_OR_PATH",
help=(
"Version of Guild AI to use for the environment. "
"By default, the active version of Guild is installed. This "
"value may alternatively be a path to a Guild wheel distribution."
),
autocompletion=_ac_guild_version_or_path,
)
@click.option(
"-s",
"--system-site-packages",
is_flag=True,
help="Give environment access to system site packages.",
)
@click.option(
"-H",
"--no-isolate",
is_flag=True,
help=(
"Use current Guild home for the environment. Ignored if `--guild-home` "
"is also specified, this option is ignored."
),
)
@click.option(
"-h",
"--guild-home",
metavar="PATH",
help=(
"Alternative Guild home location associated with the environment. "
"By default, Guild home is '.guild' in the environment directory."
),
autocompletion=_ac_guild_home,
)
@click.option(
"-r",
"--requirement",
metavar="REQ",
multiple=True,
help=(
"Install required package or packages defined in a file. May be "
"used multiple times."
),
autocompletion=_ac_requirement,
)
@click.option(
"-P",
"--path",
metavar="DIR",
multiple=True,
help="Include DIR as a Python path in the environment.",
autocompletion=_ac_path,
)
@click.option(
"--no-reqs",
is_flag=True,
help=(
"Don't install from requirements.txt or guild.yml in environment "
"parent directory."
),
)
@click.option(
"-l",
"--isolate-resources",
is_flag=True,
help="Use a local cache when initializing an environment.",
)
@click.option(
"-y",
"--yes",
is_flag=True,
help="Initialize a Guild environment without prompting.",
)
@click.option(
"--no-progress",
is_flag=True,
help="Don't show progress when installing environment packages.",
)
@click.option(
"--pre",
"pre_release",
is_flag=True,
help="Install pre-release versions of applicable packages.",
)
@click_util.use_args
def init(args):
"""Initialize a Guild environment.
`init` initializes a Guild environment in `DIR`, which is the
`venv` subdirectory of the current directory by default.
`init` creates a virtual environment in `DIR` using the `venv`
module if available or the `virtualenv` program if `venv` is not
available.
Use `--python` to specify the Python interpreter to use within the
generated virtual environment. By default, the default Python
interpreter for `virtualenv` is used unless `python` is explicitly
listed as a requirement. If `no-venv` is specified, `--python` is
ignored.
### Requirements
By default, any required packages listed under packages.requires
in `guild.yml` in the environment parent directory are installed
into the environment. Use `--no-reqs` to suppress this behavior.
Additionally, packages defined in `requirements.txt` in the
environment parent directory will be installed. Use `--no-reqs` to
suppress this behavior.
Note that packages defined in `guild.yml` use Guild package names
while packages defined in `requirements.txt` use PyPI package
names.
For information on requirements files, see:
https://bit.ly/guild-help-req-files
You may explicitly specify requirements file using `-r` or
`--requirement`. If `-r, --requirement` is specified, Guild will
not automatically install packages in `requirements.txt` -- that
file must be specified explicitly in the command.
### Guild AI Version
By default `init` installs the active version of Guild AI in the
initialized environment. To install a different version, or to
install a Guild wheel distribution file, use the `--guild` option.
### Resource Cache
By default resources are cached and shared at the user level in
`~/.guild/cache/resources` so that resources downloaded from one
environment are available to other environments. You can modify
this behavior to have all resources downloaded locally to the
environment by specifying `--isolate-resources`.
"""
from . import init_impl
init_impl.main(args)
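# Illustrative sketch (not part of the original module): the command can be
# exercised with click's test runner, e.g. to render its help text without
# creating an environment.
def _example_show_init_help():
    from click.testing import CliRunner
    result = CliRunner().invoke(init, ["--help"])
    print(result.output)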
|
|
#!/usr/bin/env python
""" loggerator.py contains the loggerator utility to be used for logging debug,
warning, errors and other information from applications.
:author: Jose Carlos Recuero
:version: 0.1
:since: 08/13/2014
"""
__docformat__ = 'restructuredtext en'
# -----------------------------------------------------------------------------
# _ _
# (_)_ __ ___ _ __ ___ _ __| |_ ___
# | | '_ ` _ \| '_ \ / _ \| '__| __/ __|
# | | | | | | | |_) | (_) | | | |_\__ \
# |_|_| |_| |_| .__/ \___/|_| \__|___/
# |_|
# -----------------------------------------------------------------------------
#
# import std python modules
#
import os
import sys
import logging
import logging.handlers
import logging.config
import io
# from contextlib import redirect_stdout
#
# import dbase python modules
#
# -----------------------------------------------------------------------------
#
# ___ ___ _ __ ___| |_ __ _ _ __ | |_ ___
# / __/ _ \| '_ \/ __| __/ _` | '_ \| __/ __|
# | (_| (_) | | | \__ \ || (_| | | | | |_\__ \
# \___\___/|_| |_|___/\__\__,_|_| |_|\__|___/
#
# -----------------------------------------------------------------------------
#
COL_RESET = "\x1b[0m"
"""
:type: string
Clears all colors and styles (to white on black).
"""
BOLD_ON = "\x1b[1m"
"""
:type: string
Bold on.
"""
ITALICS_ON = "\x1b[3m"
"""
:type: string
Italics on.
"""
UNDERLINE_ON = "\x1b[4m"
"""
:type: string
Underline on.
"""
INVERSE_ON = "\x1b[7m"
"""
:type: string
Inverse on, reverses foreground & background colors.
"""
STRIKETHRGH_ON = "\x1b[9m"
"""
:type: string
Strikethrough on.
"""
BOLD_OFF = "\x1b[22m"
"""
:type: string
Bold off.
"""
ITALICS_OFF = "\x1b[23m"
"""
:type: string
Italics off.
"""
UNDERLINE_OFF = "\x1b[24m"
"""
:type: string
Underline off.
"""
INVERSE_OFF = "\x1b[27m"
"""
:type: string
Inverse off.
"""
STRIKETHRGH_OFF = "\x1b[29m"
"""
:type: string
Strikethrough off.
"""
# Foreground colors are in form of 3x, background are 4x
FG_BLACK = "\x1b[30m"
"""
:type: string
Set foreground color to black.
"""
FG_RED = "\x1b[31m"
"""
:type: string
Set foreground color to red.
"""
FG_GREEN = "\x1b[32m"
"""
:type: string
Set foreground color to green.
"""
FG_YELLOW = "\x1b[33m"
"""
:type: string
Set foreground color to yellow.
"""
FG_BLUE = "\x1b[34m"
"""
:type: string
Set foreground color to blue.
"""
FG_MAGENTA = "\x1b[35m"
"""
:type: string
Set foreground color to magenta (purple).
"""
FG_CYAN = "\x1b[36m"
"""
:type: string
Set foreground color to cyan.
"""
FG_WHITE = "\x1b[37m"
"""
:type: string
Set foreground color to white.
"""
FG_DEFAULT = "\x1b[39m"
"""
:type: string
Set foreground color to default (white).
"""
BG_BLACK = "\x1b[40m"
"""
:type: string
Set background color to black.
"""
BG_RED = "\x1b[41m"
"""
:type: string
Set background color to red.
"""
BG_GREEN = "\x1b[42m"
"""
:type: string
Set background color to green.
"""
BG_YELLOW = "\x1b[43m"
"""
:type: string
Set background color to yellow.
"""
BG_BLUE = "\x1b[44m"
"""
:type: string
Set background color to blue.
"""
BG_MAGENTA = "\x1b[45m"
"""
:type: string
Set background color to magenta (purple).
"""
BG_CYAN = "\x1b[46m"
"""
:type: string
Set background color to cyan.
"""
BG_WHITE = "\x1b[47m"
"""
:type: string
Set background color to white.
"""
BG_DEFAULT = "\x1b[49m"
"""
:type: string
Set background color to default (black).
"""
_loggerDB = {}
"""
:type: dict
This module variable dictionary stores all Logger instance created, where
the key for every instance is the component name. When the same component
request a logger, it returns the already created instance.
"""
TRACE_LEVEL = 25
DISPLAY_LEVEL = 24
# -----------------------------------------------------------------------------
# _ _ _
# ___ _ _| |__ _ __ ___ _ _| |_(_)_ __ ___ ___
# / __| | | | '_ \| '__/ _ \| | | | __| | '_ \ / _ \/ __|
# \__ \ |_| | |_) | | | (_) | |_| | |_| | | | | __/\__ \
# |___/\__,_|_.__/|_| \___/ \__,_|\__|_|_| |_|\___||___/
#
# -----------------------------------------------------------------------------
#
# ===========================================================================
def getLoggerator(name, color=(BOLD_ON + FG_BLACK)):
"""Returns the loggerator for a given component.
It creates a new loggerator for a component if there is no instance yet for
that component. If there is an instance, it is returned.
Args:
name (str) : Component name. It is used to assign a loggerator instance.
color (str) : Color to display the component name.
Returns:
Loggerator : A new loggerator instance if there is none yet\
for the given component, or the one previously created.
"""
if name not in _loggerDB:
_loggerDB[name] = Loggerator(name, color)
return _loggerDB[name]
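# Illustrative sketch (not part of the original module): typical use of the
# factory above. The component name and messages are hypothetical.
def _example_usage():
    log = getLoggerator('mycomponent', color=(BOLD_ON + FG_CYAN))
    log.info('component started')            # FG_BLUE by default
    log.warning('low disk space', out=True)  # logged and echoed to stdout
    log.error('unrecoverable failure')       # white on red by default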
# -----------------------------------------------------------------------------
# _ _ __ _ _ _ _
# ___| | __ _ ___ ___ __| | ___ / _(_)_ __ (_) |_(_) ___ _ __ ___
# / __| |/ _` / __/ __| / _` |/ _ \ |_| | '_ \| | __| |/ _ \| '_ \/ __|
# | (__| | (_| \__ \__ \ | (_| | __/ _| | | | | | |_| | (_) | | | \__ \
# \___|_|\__,_|___/___/ \__,_|\___|_| |_|_| |_|_|\__|_|\___/|_| |_|___/
#
# -----------------------------------------------------------------------------
#
#
# =============================================================================
#
class ContextFilter(logging.Filter):
"""ContextFilter class allows to create two new log operations: TRACE and
DISPLAY to be used.
"""
def filter(self, record):
fr = sys._getframe(8)
msg = '{0}::{1}::{2}'.format(os.path.basename(fr.f_code.co_filename),
fr.f_code.co_name,
fr.f_lineno)
record.titular = msg
if record.levelno == TRACE_LEVEL:
record.levelname = 'TRACE'
elif record.levelno == DISPLAY_LEVEL:
record.levelname = 'DISPLAY'
return True
#
# =============================================================================
#
class Loggerator(object):
"""Loggerator class is used to log information for a given component.
Component name is given when Loggerator instance is created, and it will
be reused.
"""
# =========================================================================
def __init__(self, name, color, out=sys.stdout, fname='cmd.log'):
"""Loggerator class constructor.
Create a Loggerator instance for the component with the given name and
using given color.
:todo:
New parameter with the log filename should be added. If the
parameter is present, then log information will be sent to
the logfile instead of to the display.
Args:
name (str) : Name of the component for logging information.
color (str) : String containing the color to display the component\
name in all logs.
out (sys.stdout) : standard output
fname (str) : filename for the log file
"""
self.loggerator = logging.getLogger(name[0:15].center(16, '*'))
self.loggerator.setLevel(logging.DEBUG)
formatString = '%(asctime)s ' + color + '%(name)-16s ' +\
COL_RESET + '[%(levelname)-8s] [%(titular)-32s] %(message)s'
formatter = logging.Formatter(formatString)
self._maxSize = 1024 * 1024 * 1024
self._maxCount = 9
fileHandler = logging.handlers.RotatingFileHandler(fname, maxBytes=self._maxSize, backupCount=self._maxCount)
fileHandler.setFormatter(formatter)
self.loggerator.addHandler(fileHandler)
self.loggerator.addFilter(ContextFilter())
# consoleHandler = logging.StreamHandler()
# consoleHandler.setFormatter(formatter)
# self.loggerator.addHandler(consoleHandler)
self.defaultColor = {}
self.defaultColor['debug'] = (('FG', 'GREEN'), )
self.defaultColor['info'] = (('FG', 'BLUE'), )
self.defaultColor['trace'] = (('FG', 'MAGENTA'), )
self.defaultColor['display'] = None
self.defaultColor['warning'] = (('FG', 'RED'), )
self.defaultColor['error'] = (('FG', 'WHITE'), ('BG', 'RED'))
self.__out = out
self.__redirect = False
self.__buffer = None
self.__save_out = None
# =========================================================================
def _out(self, message):
"""Sends a message in the default standard output provided.
Args:
message (str) : string with the message to be displayed.
Returns:
None
"""
self.__out.write(str(message))
self.__out.write('\n')
if self.__redirect:
# self.__buffer.append(self.__out.getvalue())
self.__buffer.append("{}\n".format(message))
# =========================================================================
def redirect_out_to(self, out_buff=None):
"""Redirects loggerator output to a temporal buffer.
Args:
out_buff (list) : Standard output will be copied to this buffer.
Returns:
bool : True if redirection was created, False, else
"""
if not self.__redirect:
self.__redirect = True
self.__buffer = out_buff if out_buff is not None else []
self.__save_out = self.__out
self.__out = io.StringIO()
return True
return False
# =========================================================================
def stop_redirect_out(self):
"""Stops loggerator output redirection.
Returns:
bool : True if redirect could be stopped, False else.
"""
if self.__redirect:
self.__redirect = False
self.__out = self.__save_out
self.__save_out = None
return True
return False
# =========================================================================
def get_redirect_buffer(self, all_buff=False):
"""Retrieves the content that has been redirected.
Args:
all_buff (bool) : True if all buffer content has to be retrieved,\
False if only the last entry.
Returns:
:any:`list` or str : List (when all_buff is True) or String (when\
all_buff is False) with the output being redirected.
"""
if self.__buffer:
if all_buff:
return self.__buffer
else:
return self.__buffer[-1]
return []
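# Illustrative note (not part of the original class): a typical redirect cycle,
# assuming "log" is a Loggerator instance:
#     log.redirect_out_to()             # _out() output now goes to a buffer
#     log.display('hello')              # captured instead of printed
#     log.stop_redirect_out()           # restore the original stream
#     log.get_redirect_buffer()         # -> 'hello\n' (last entry only)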
# =========================================================================
def display(self, message, **kwargs):
"""Display a message in the default standard output provided.
Args:
message (str) : string with the message to be displayed.
Returns:
None
"""
msg = self._extended_log(message, 'display', **kwargs)
self._out(msg)
# =========================================================================
def _filterLevel(self, level):
"""Translate new logging operation TRACE and DISPLAY.
Args:
level (str) : Logging level to be used.
Returns:
int : Logging level number to be used by the module.
"""
if level in ['debug', 'info', 'warning', 'error']:
return level
elif level == 'trace':
return TRACE_LEVEL
elif level == 'display':
return DISPLAY_LEVEL
else:
return logging.NOTSET
# =========================================================================
def _setColor(self, color):
""" Set the color based on input list.
It takes an input parameter, which could be a list or a string.
If the parameter is a string, it is assumed to be a foreground
color.
If the parameter is a list, it is assumed to set the foreground and/or
background color.
Args:
color (:any:`list` or :any:`str`) : foreground color, or list\
with fore/background color.
Returns:
str : string to be used as color for log message.
"""
if isinstance(color, str):
color = (('FG', color), )
return eval('+'.join(map(lambda x: '%s_%s' % (x[0], x[1]), color)))
# =========================================================================
def _log(self, message, level, color=None, *args, **kwargs):
""" Log a message with the given color.
It logs the given message with the given color.
Args:
message (str) : Debug message to be logged.
color (:any:`list` or :any:`str`) : foreground color, or list\
with fore/background color.
level (str) : Logging level.
"""
if color:
color = self._setColor(color)
formatted_message = '%s%s%s' % (color, message, COL_RESET)
else:
formatted_message = message
function = getattr(self.loggerator, level, None)
if kwargs.get('log', True):
# Remove any kwargs that is not handled by the standard logging
# library.
if kwargs.get('log', None) is not None:
del kwargs['log']
if kwargs.get('out', None) is not None:
del kwargs['out']
if function:
function(formatted_message, *args, **kwargs)
else:
level = self._filterLevel(level)
self.loggerator.log(level, formatted_message, *args, **kwargs)
return formatted_message
# =========================================================================
def getLoggerator(self):
"""Return the loggerator.
Returns:
Logging : Return the logging instance used for the current loggerator.
"""
return self.loggerator
# =========================================================================
def _extended_log(self, message, level, **kwargs):
"""Debug log.
It logs a debug message.
Args:
message (str) : Debug message to be logged.
level (str) : Logging level.
"""
color = kwargs.get('color', None)
mode = kwargs.pop('mode', 'FG')
extended = kwargs.pop('extended', None)
if extended:
useColor = extended
elif color:
useColor = ((mode, color), )
else:
useColor = self.defaultColor[level]
kwargs['color'] = useColor
return self._log(message, level, **kwargs)
# =========================================================================
def debug(self, message, color=None, mode='FG', *args, **kwargs):
"""Debug log.
It logs a debug message.
Args:
message (str) : Debug message to be logged.
color (:any:`list` or :any:`str`) : foreground color, or list\
with fore/background color.
mode (str) : Display mode. It could be 'FG' for foreground or\
'BG' for background.
Returns:
None
"""
self._extended_log(message, 'debug', color=color, mode=mode, *args, **kwargs)
# =========================================================================
def info(self, message, color=None, mode='FG', *args, **kwargs):
"""Information log.
It logs an information message.
Args:
message (str) : Debug message to be logged.
color (:any:`list` or :any:`str`) : foreground color, or list\
with fore/background color.
mode (str) : Display mode. It could be 'FG' for foreground or\
'BG' for background.
Returns:
None
"""
self._extended_log(message, 'info', color=color, mode=mode, *args, **kwargs)
# =========================================================================
def trace(self, message, color=None, mode='FG', *args, **kwargs):
"""Trace log.
It logs a trace message.
Args:
message (str) : Debug message to be logged.
color (:any:`list` or :any:`str`) : foreground color, or list\
with fore/background color.
mode (str) : Display mode. It could be 'FG' for foreground or\
'BG' for background.
Returns:
None
"""
self._extended_log(message, 'trace', color=color, mode=mode, *args, **kwargs)
# =========================================================================
def warning(self, message, color=None, mode='FG', *args, **kwargs):
"""Warning log.
It logs a warning message.
Args:
message (str) : Debug message to be logged.
color (:any:`list` or :any:`str`) : foreground color, or list\
with fore/background color.
mode (str) : Display mode. It could be 'FG' for foreground or\
'BG' for background.
Returns:
None
"""
self._extended_log(message, 'warning', color=color, mode=mode, *args, **kwargs)
if kwargs.get('out', False):
self._out(message)
# =========================================================================
def error(self, message, color=None, mode='FG', *args, **kwargs):
"""Error log.
It logs an error message.
Args:
message (str) : Debug message to be logged.
color (:any:`list` or :any:`str`) : foreground color, or list\
with fore/background color.
mode (str) : Display mode. It could be 'FG' for foreground or\
'BG' for background.
Returns:
None
"""
msg = self._extended_log(message, 'error', color=color, mode=mode, *args, **kwargs)
if kwargs.get('out', False):
self._out(msg)
|
|
# ==================================================================================================
# Copyright 2014 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
from collections import defaultdict
import logging
import os
import hexdump
import signal
import socket
import struct
import sys
from threading import Thread
from .client_message import ClientMessage, Request
from .network import BadPacket, get_ip, get_ip_packet
from .server_message import Reply, ServerMessage, WatchEvent
from .zookeeper import DeserializationError, OpCodes
from scapy.sendrecv import sniff
from scapy.config import conf as scapy_conf
from twitter.common import log
scapy_conf.logLevel = logging.ERROR # shush scapy
DEFAULT_PORT = 2181
class SnifferConfig(object):
def __init__(self,
iface="eth0",
writes_only=False,
debug=False):
"""
if client_port is 0 we sniff all clients
if zookeeper_port is changed later on you must call update_filter()
"""
self.iface = iface
self.writes_only = writes_only
self.debug = debug
self.client_port = 0
self.track_replies = False
self.max_queued_requests = 10000
self.zookeeper_port = DEFAULT_PORT
self.excluded_opcodes = set()
self.is_loopback = False
self.read_timeout_ms = 0
self.dump_bad_packet = False
# These are set after initialization, and require `update_filter` to be called
self.included_ips = []
self.excluded_ips = []
self.update_filter()
self.exclude_pings()
def update_filter(self):
self.filter = "port %d" % (self.zookeeper_port)
assert not (self.included_ips and self.excluded_ips)
if self.excluded_ips:
self.filter += " and host not " + " and host not ".join(self.excluded_ips)
elif self.included_ips:
self.filter += " and (host " + " or host ".join(self.included_ips) + ")"
def include_pings(self):
self.update_exclusion_list(OpCodes.PING, False)
def exclude_pings(self):
self.update_exclusion_list(OpCodes.PING, True)
def excluded(self, opcode):
return opcode in self.excluded_opcodes
def update_exclusion_list(self, opcode, exclude):
if exclude:
self.excluded_opcodes.add(opcode)
else:
try:
self.excluded_opcodes.remove(opcode)
except KeyError:
pass
def __str__(self):
return """
***sniffer config ***
iface = %s
writes_only = %s
filter = %s
port = %d
is_loopback = %s
read_timeout_ms = %d
debug = %s
""" % (self.iface,
str(self.writes_only).lower(),
self.filter,
self.zookeeper_port,
str(self.is_loopback),
self.read_timeout_ms,
str(self.debug).lower())
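# Illustrative sketch (not part of the original module): building a capture
# filter restricted to two client IPs. The addresses are hypothetical.
def _example_filter():
    config = SnifferConfig(iface="eth0")
    config.included_ips = ["10.0.0.1", "10.0.0.2"]
    config.update_filter()
    # config.filter is now "port 2181 and (host 10.0.0.1 or host 10.0.0.2)"
    return config.filter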
class Sniffer(Thread):
class RegistrationError(Exception): pass
def __init__(self,
config,
request_handler=None,
reply_handler=None,
event_handler=None):
"""
This sniffer will intercept:
- client requests
- server replies
- server events (i.e.: connection state change or, most of the times, watches)
Hence handlers for each.
"""
super(Sniffer, self).__init__()
self._packet_size = 65535
self._request_handlers = []
self._reply_handlers = []
self._event_handlers = []
self._requests_xids = defaultdict(dict) # if tracking replies, keep a tab for seen reqs
self.config = config
self.add_request_handler(request_handler)
self.add_reply_handler(reply_handler)
self.add_event_handler(event_handler)
self.setDaemon(True)
def add_request_handler(self, handler):
self._add_handler(self._request_handlers, handler)
def add_reply_handler(self, handler):
self._add_handler(self._reply_handlers, handler)
def add_event_handler(self, handler):
self._add_handler(self._event_handlers, handler)
def _add_handler(self, handlers, handler):
if handler is None:
return
if handler in handlers:
raise self.RegistrationError("handler %s has already been added" % (handler))
handlers.append(handler)
def pause(self):
""" TODO(rgs): scapy doesn't expose a way to call breakloop() """
pass
def unpause(self):
""" TODO(rgs): scapy doesn't expose a way to call unpause the main loop() """
pass
def run(self):
try:
log.info("Setting filter: %s", self.config.filter)
if self.config.iface == "any":
sniff(filter=self.config.filter, store=0, prn=self.handle_packet)
else:
sniff(filter=self.config.filter, store=0, prn=self.handle_packet, iface=self.config.iface)
except socket.error as ex:
log.error("Error: %s, device: %s", ex, self.config.iface)
finally:
log.info("The sniff loop exited")
os.kill(os.getpid(), signal.SIGINT)
def handle_packet(self, packet):
try:
message = self._message_from_packet(packet)
if not self.config.excluded(message.opcode):
for h in self._handlers_for(message):
h(message)
except (BadPacket, DeserializationError, struct.error) as ex:
if self.config.dump_bad_packet:
print("got: %s" % str(ex))
hexdump.hexdump(packet.load)
sys.stdout.flush()
def _handlers_for(self, message):
if isinstance(message, Request):
if self.config.writes_only and not message.is_write:
raise BadPacket("Not a write packet")
return self._request_handlers
elif isinstance(message, Reply):
return self._reply_handlers
elif isinstance(message, WatchEvent):
return self._event_handlers
raise BadPacket("No handlers for: %s" % (message))
def _message_from_packet(self, packet):
"""
:returns: Returns an instance of ClientMessage or ServerMessage (or a subclass)
:raises:
:exc:`BadPacket` if the packet is for a client we are not tracking
:exc:`DeserializationError` if deserialization failed
:exc:`struct.error` if deserialization failed
"""
client_port = self.config.client_port
zk_port = self.config.zookeeper_port
ip_p = get_ip_packet(packet.load, client_port, zk_port, self.config.is_loopback)
timestamp = packet.time
if ip_p.data.dport == zk_port:
client = intern("%s:%s" % (get_ip(ip_p, ip_p.src), ip_p.data.sport))
server = intern("%s:%s" % (get_ip(ip_p, ip_p.dst), zk_port))
client_message = ClientMessage.from_payload(ip_p.data.data, client, server)
client_message.timestamp = timestamp
self._track_client_message(client_message)
return client_message
if ip_p.data.sport == zk_port:
client = intern("%s:%s" % (get_ip(ip_p, ip_p.dst), ip_p.data.dport))
server = intern("%s:%s" % (get_ip(ip_p, ip_p.src), zk_port))
requests_xids = self._requests_xids.get(client, {})
server_message = ServerMessage.from_payload(ip_p.data.data, client, server, requests_xids)
server_message.timestamp = timestamp
return server_message
raise BadPacket("Packet to the wrong port?")
def _track_client_message(self, request):
"""
Any request that is not a ping or a close should be tracked
"""
if self.config.track_replies and not request.is_ping and not request.is_close:
requests_xids = self._requests_xids[request.client]
if len(requests_xids) > self.config.max_queued_requests:
log.error("Too many queued requests, replies for %s will be lost", request.client)
return
requests_xids[request.xid] = request.opcode
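# Illustrative sketch (not part of the original module): wiring a Sniffer with a
# simple request handler. The interface name is hypothetical and sniffing
# requires the appropriate privileges.
def _example_start_sniffer():
    def print_request(message):
        print(message)
    config = SnifferConfig(iface="eth0", writes_only=True)
    sniffer = Sniffer(config, request_handler=print_request)
    sniffer.start()  # daemon thread running scapy's sniff() with config.filter
    return sniffer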
|
|
#!/usr/bin/env python
import atexit
import glob
import os
import re
import shutil
import sys
import tempfile
import textwrap
from fabric.api import env, local, hide
from fabric.context_managers import lcd, settings, shell_env
from fabric.contrib.console import confirm
from fabric.contrib.files import exists
from fabric.decorators import runs_once
from fabric.utils import abort
from fabric import colors
sys.path.append('scripts')
from string_scripts.confirm_ready_for_release import confirm_ready_for_release as _confirm_ready_for_release
# --- Configuration ---------------------------------------------------------
env.verbose = False
env.libname = "libCardIO.a"
env.developer_dir = local("xcode-select -p", capture=True)
# --- Tasks -----------------------------------------------------------------
def verbose(be_verbose=True):
"""
Makes all following tasks more verbose.
"""
env.verbose = be_verbose
def developer_dir(dir):
"""
Sets DEVELOPER_DIR environment variable to correct Xcode
For example, `fab developer_dir:"/Applications/Xcode6.2.app"`
"""
if os.path.exists(dir):
env.developer_dir = dir
else:
print(colors.red("{dir} is not a valid path".format(dir=dir), bold=True))
sys.exit(1)
def _locate(fileset, root=os.curdir):
# based on http://code.activestate.com/recipes/499305-locating-files-throughout-a-directory-tree/
"""
Locate supplied files in supplied root directory.
"""
for path, dirs, files in os.walk(os.path.abspath(root)):
for filename in files:
if filename in fileset:
yield os.path.join(path, filename)
def _add_version_to_header_file(version_str, file):
lines = []
for line in file.readlines():
lines.append(line)
m = re.match("^(//\s+)CardIO.*\.h$", line)
if m:
lines.append("{0}Version {1}\n".format(m.groups()[0], version_str))
lines.append("//\n")
file.seek(0)
file.truncate()
for line in lines:
file.write(line)
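# Illustrative note (not part of the original fabfile): given a public header whose
# comment block contains a line such as
#     //  CardIOPaymentViewController.h
# _add_version_to_header_file("5.4.1", header_file) rewrites the block in place as
#     //  CardIOPaymentViewController.h
#     //  Version 5.4.1
#     //
# The version string "5.4.1" is a hypothetical example.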
def _version_str(show_dirty=False):
git_describe_cmd = "git describe --match='iOS_[0-9]*.[0-9]*' --tags --always --dirty"
version_str = local(git_describe_cmd, capture=True).strip()[4:]
if not show_dirty:
version_str = version_str.replace('-dirty', '')
return version_str
def _copy(source_files, dest_dir):
for public_header_file in source_files:
with open(public_header_file, "rb") as in_file:
contents = in_file.read()
unused, header_filename = os.path.split(public_header_file)
header_filename = os.path.join(dest_dir, header_filename)
with open(header_filename, "wb") as out_file:
out_file.write(contents)
with open(header_filename, "r+") as out_file:
_add_version_to_header_file(_version_str(), out_file)
def build(outdir=None, device_sdk=None, simulator_sdk=None, **kwargs):
"""
Build card.io SDK.
"""
print(colors.white("Setup", bold=True))
to_hide = [] if env.verbose else ["stdout", "stderr", "running"]
xcode_preprocessor_flags = {}
if not outdir:
message = """
You must provide outdir=<sdk output parent dir>
Example usage:
`fab build:outdir=~` - normal build
`fab build:outdir=~,SCAN_EXPIRY=0` - to disable the experimental expiry-scan feature
"""
abort(textwrap.dedent(message).format(**locals()))
if _confirm_ready_for_release("assets/strings"):
sys.exit(1)
outdir = os.path.abspath(os.path.expanduser(outdir))
print(colors.yellow("Will save release sdk to {outdir}".format(outdir=outdir)))
out_subdir = "card.io_ios_sdk_{0}".format(_version_str(show_dirty=True))
xcode_preprocessor_flags.update(kwargs)
formatted_xcode_preprocessor_flags = " ".join("{k}={v}".format(k=k, v=v) for k, v in xcode_preprocessor_flags.iteritems())
extra_xcodebuild_settings = "GCC_PREPROCESSOR_DEFINITIONS='$(value) {formatted_xcode_preprocessor_flags}'".format(**locals())
device_sdk = device_sdk or "iphoneos"
simulator_sdk = simulator_sdk or "iphonesimulator"
arch_to_sdk = (
("i386", simulator_sdk),
("x86_64", simulator_sdk)
)
with settings(hide(*to_hide)):
icc_root = local("git rev-parse --show-toplevel", capture=True)
temp_dir = tempfile.mkdtemp() + os.sep
atexit.register(shutil.rmtree, temp_dir, True)
print(colors.white("Preparing dmz", bold=True))
with settings(hide(*to_hide)):
with lcd(os.path.join(icc_root, "dmz")):
dmz_all_filename = os.path.join("dmz", "dmz_all.cpp")
with open(dmz_all_filename) as f:
old_dmz_all = f.read()
local("fab concat")
with open(dmz_all_filename) as f:
new_dmz_all = f.read()
if old_dmz_all != new_dmz_all:
print(colors.red("WARNING: dmz_all.h was not up to date!", bold=True))
print(colors.white("Building", bold=True))
print(colors.white("Using temp dir {temp_dir}".format(**locals())))
print(colors.white("Using extra Xcode flags: {formatted_xcode_preprocessor_flags}".format(**locals())))
print(colors.white("Using developer directory: {}".format(env.developer_dir)))
with lcd(icc_root):
with shell_env(DEVELOPER_DIR=env.developer_dir):
with settings(hide(*to_hide)):
lipo_build_dirs = {}
build_config = "Release"
arch_build_dirs = {}
# Build the Archive release
print(colors.blue("({build_config}) Building Archive (arm* architectures specified in build config)".format(**locals())))
base_xcodebuild_command = "xcrun xcodebuild -scheme \"CardIO Static Library\" -target CardIO-static -configuration {build_config} archive".format(**locals())
build_dir = os.path.join(temp_dir, build_config, "Archive")
arch_build_dirs["archive"] = build_dir
os.makedirs(build_dir)
parallelize = "" if env.verbose else "-parallelizeTargets" # don't parallelize verbose builds, it's hard to read the output
build_cmd = "{base_xcodebuild_command} {parallelize} CONFIGURATION_BUILD_DIR={build_dir} {extra_xcodebuild_settings}".format(**locals())
local(build_cmd)
for arch, sdk in arch_to_sdk:
print(colors.blue("({build_config}) Building {arch}".format(**locals())))
base_xcodebuild_command = "xcrun xcodebuild OTHER_CFLAGS='-fembed-bitcode' -target CardIO-static -arch {arch} -sdk {sdk} -configuration {build_config}".format(**locals())
clean_cmd = "{base_xcodebuild_command} clean".format(**locals())
local(clean_cmd)
build_dir = os.path.join(temp_dir, build_config, arch)
arch_build_dirs[arch] = build_dir
os.makedirs(build_dir)
parallelize = "" if env.verbose else "-parallelizeTargets" # don't parallelize verbose builds, it's hard to read the output
build_cmd = "{base_xcodebuild_command} {parallelize} CONFIGURATION_BUILD_DIR={build_dir} {extra_xcodebuild_settings}".format(**locals())
local(build_cmd)
print(colors.blue("({build_config}) Lipoing".format(**locals())))
lipo_dir = os.path.join(temp_dir, build_config, "universal")
lipo_build_dirs[build_config] = lipo_dir
os.makedirs(lipo_dir)
arch_build_dirs["universal"] = lipo_dir
# in Xcode 4.5 GM, xcrun selects the wrong lipo to use, so circumventing xcrun for now :(
lipo_cmd = "`xcode-select -print-path`/Toolchains/XcodeDefault.xctoolchain/usr/bin/lipo " \
" {archive}/{libname}" \
" -arch i386 {i386}/{libname}" \
" -arch x86_64 {x86_64}/{libname}" \
" -create" \
" -output {universal}/{libname}".format(libname=env.libname, **arch_build_dirs)
local(lipo_cmd)
print(colors.blue("({build_config}) Stripping debug symbols".format(**locals())))
strip_cmd = "xcrun strip -S {universal}/{libname}".format(libname=env.libname, **arch_build_dirs)
local(strip_cmd)
out_subdir_suffix = "_".join("{k}-{v}".format(k=k, v=v) for k, v in kwargs.iteritems())
if out_subdir_suffix:
out_subdir_suffix = "_" + out_subdir_suffix
out_subdir += out_subdir_suffix
sdk_dir = os.path.join(outdir, out_subdir)
print(colors.white("Assembling release SDK in {sdk_dir}".format(sdk_dir=sdk_dir), bold=True))
if os.path.isdir(sdk_dir):
shutil.rmtree(sdk_dir)
cardio_dir = os.path.join(sdk_dir, "CardIO")
os.makedirs(cardio_dir)
header_files = glob.glob(os.path.join("CardIO_Public_API", "*.h"))
_copy(header_files, cardio_dir)
opencv_libraries = glob.glob(os.path.join("opencv_device/lib/", "*.a"))
_copy(opencv_libraries, cardio_dir)
libfile = os.path.join(lipo_build_dirs["Release"], env.libname)
shutil.copy2(libfile, cardio_dir)
release_dir = os.path.join(icc_root, "Release")
shutil.copy2(os.path.join(release_dir, "release_notes.txt"), sdk_dir)
shutil.copy2(os.path.join(release_dir, "CardIO.podspec"), sdk_dir)
shutil.copy2(os.path.join(release_dir, "acknowledgments.md"), sdk_dir)
shutil.copy2(os.path.join(release_dir, "LICENSE.md"), sdk_dir)
shutil.copy2(os.path.join(release_dir, "README.md"), sdk_dir)
shutil.copy2(os.path.join(release_dir, "CardIO/CardIO.m"), os.path.join(sdk_dir, "CardIO"))
shutil.copytree(os.path.join(release_dir, "SampleApp"), os.path.join(sdk_dir, "SampleApp"), ignore=shutil.ignore_patterns(".DS_Store"))
shutil.copytree(os.path.join(release_dir, "SampleApp-Swift"), os.path.join(sdk_dir, "SampleApp-Swift"), ignore=shutil.ignore_patterns(".DS_Store"))
|
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from mpi4py import MPI
from copy import copy
import random
import sys
import os
import os.path
import shutil
import time
from collections import deque
from pprint import pprint
import itertools
"""
Reduce Note:
The reduce logic is located in reduce_check().
There are three related functions expected from a task, be it FCP or FWALK:
(1) reduce_init(buf): this is invoked at the starting point of a reduce operation.
The "buf" is self.reduce_buf.
(2) reduce(buf1, buf2): buf1 is self.reduce_buf; buf2 is the input from one of the children.
Usually, each child reports a number representing the amount of work it has performed
during this period. self.reduce_buf simply accumulates it - non-decreasing.
In that sense, the buffer (a free-form dictionary) is not really needed. A simple return
of an integer might make more sense: each child would simply return a number each time reduce()
is called, and that number would represent the amount of work it has done over this period. Once reduce()
is called, the number is reset to zero.
Inside the circle, self.reduce_buf - call it self.reduce_work - would then be a number:
for each child:
self.reduce_work += self.task.reduce()
if I have parent:
send self.reduce_work upward
self.reduce_work = 0
if I am root:
self.reduce_report()
(3) reduce_finish(): I don't see this in use today.
"""
from pcircle import utils
from pcircle.globals import T, G
from pcircle.dbstore import DbStore
from pcircle.utils import getLogger
from pcircle.token import Token
from builtins import range
DB_BUFSIZE = 10000
class Circle:
def __init__(self, name="Circle", split="equal", k=2, dbname=None, resume=False):
random.seed() # use system time to seed
self.comm = MPI.COMM_WORLD
self.comm.Set_name(name)
self.size = self.comm.Get_size()
self.rank = self.comm.Get_rank()
self.host = MPI.Get_processor_name()
self.pid = os.getpid()
self.d = {"rank": "rank %s" % self.rank}
self.logger = getLogger(__name__)
self.split = split
self.dbname = dbname
self.resume = resume
self.reduce_time_interval = G.reduce_interval
self.task = None
self.abort = False
self.requestors = []
# counters
self.work_requested = 0
self.work_processed = 0
self.work_request_received = 0
self.workreq_outstanding = False
self.work_requested_rank = None
# reduction
self.reduce_enabled = False
self.reduce_time_last = MPI.Wtime()
self.reduce_outstanding = False
self.reduce_replies = 0
self.reduce_buf = {}
self.reduce_status = None
# periodic report
self.report_enabled = False
self.report_interval = 60
self.report_last = MPI.Wtime()
self.report_processed = 0
# barriers
self.barrier_started = False
self.barrier_up = False # flag to indicate barrier sent to parent
self.barrier_replies = 0
self.workdir = os.getcwd()
if not G.tempdir:
G.tempdir = os.path.join(os.getcwd(), (".pcircle" + utils.timestamp()))
G.tempdir = self.comm.bcast(G.tempdir)
if not os.path.exists(G.tempdir):
try:
os.mkdir(G.tempdir)
except OSError:
pass
# token
self.token = Token(self)
# tree init
self.k = k
self.parent_rank = MPI.PROC_NULL
self.child_ranks = [] # [MPI.PROC_NULL] * k is too much C
self.children = 0
# compute rank of parent if we have one
if self.rank > 0:
self.parent_rank = (self.rank - 1) // k
# identify ranks of what would be leftmost and rightmost children
left = self.rank * k + 1
right = self.rank * k + k
# if we have at least one child
# compute number of children and list of child ranks
if left < self.size:
# adjust right child in case we don't have a full set of k
if right >= self.size:
right = self.size - 1
# compute number of children and the list
self.children = right - left + 1
for i in range(self.children):
self.child_ranks.append(left + i)
self.logger.debug("parent: %s, children: %s" % (self.parent_rank, self.child_ranks),
extra=self.d)
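# worked example (illustrative): with k=2 and rank=3, parent_rank = (3 - 1) // 2 = 1,
# left = 3 * 2 + 1 = 7 and right = 3 * 2 + 2 = 8, so child_ranks becomes [7, 8]
# when the communicator has at least 9 ranks (and is clipped otherwise).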
# workq init
# TODO: compare list vs. deque
# 3 possible workq: workq and workq_buf (both in memory, used when pushing to or retrieving from the database), plus the database-backed workq_db
self.workq = deque()
# workq buffer
self.workq_buf = deque()
# flag that indicates database is used for workq
self.use_store = False
if G.resume:
self.workq_init(self.dbname, G.resume)
self.logger.debug("Circle initialized", extra=self.d)
def finalize(self, cleanup=True):
if cleanup and hasattr(self, "workq_db"):
self.workq_db.cleanup()
if os.path.exists(G.tempdir) and cleanup:
try:
shutil.rmtree(G.tempdir)
except:
pass
def workq_init(self, dbname=None, resume=False):
# NOTE: the db filename and its rank are separated with "-"
# we rely on this to separate, so the filename itself (within our control)
# should not use dash ... the default is to use "." for separation
# Yes, this is very fragile, hopefully we will fix this later
if G.resume == True:
self.dbname = os.path.join(self.workdir, ".pcp_workq.%s.%s.db" % (G.rid, self.rank))
if os.path.exists(self.dbname):
self.workq_db = DbStore(self.dbname, G.resume)
else:
if dbname is None:
self.dbname = os.path.join(G.tempdir, "workq-%s" % self.rank)
else:
self.dbname = os.path.join(G.tempdir, "%s.workq-%s" % (dbname, self.rank))
self.workq_db = DbStore(self.dbname, resume=G.resume)
# after task(fcp) creation, push works in workq_buf into workq_db
def push_remaining_buf(self):
if len(self.workq_buf) > 0:
self.workq_db.mput(self.workq_buf)
self.workq_buf.clear()
def next_proc(self):
""" Note next proc could return rank of itself """
if self.size == 1:
return MPI.PROC_NULL
else:
return random.randint(0, self.size - 1)
def workq_info(self):
s = "has %s items in work queue\n" % self.qsize
return s
def qsize(self):
qsize = len(self.workq) + len(self.workq_buf)
if hasattr(self, "workq_db"):
qsize += len(self.workq_db)
return qsize
def begin(self, task):
""" entry point to work """
self.task = task
self.task.create()
self.comm.barrier()
self.loop()
self.cleanup()
if self.report_enabled:
self.do_periodic_report(prefix="Circle final report")
self.comm.barrier()
if self.qsize() != 0:
pprint("Rank %s workq.len = %s" % (self.rank, self.qsize()))
pprint(self.__dict__)
sys.stdout.flush()
self.comm.Abort(1)
def loop(self):
""" central loop to finish the work """
while True:
# check if we shall do report
cur_time = MPI.Wtime()
if self.report_enabled and (cur_time - self.report_last > self.report_interval):
self.report_last = cur_time
self.do_periodic_report()
# check for and service requests
self.workreq_check()
if self.reduce_enabled:
self.reduce_check()
if self.qsize() == 0:
self.request_work()
# if I have work, and no abort signal, process one
if self.qsize() > 0 and not self.abort:
self.task.process()
self.work_processed += 1
else:
status = self.token.check_for_term()
if status == G.TERMINATE:
break
def enq(self, work):
if work is None:
self.logger.warn("enq work item is None", extra=self.d)
return
if len(self.workq) < G.memitem_threshold:
self.workq.append(work)
return
else:
self.workq_buf.append(work)
if len(self.workq_buf) == G.DB_BUFSIZE:
if self.use_store == False:
self.workq_init(self.dbname, G.resume)
self.use_store = True
self.workq_db.mput(self.workq_buf)
self.workq_buf.clear()
def preq(self, work):
self.workq.appendleft(work)
def setq(self, q):
self.workq = q
def deq(self):
# deque a work starting from workq, then from workq_buf, then from workq_db
if len(self.workq) > 0:
return self.workq.pop()
elif len(self.workq_buf) > 0:
return self.workq_buf.pop()
elif hasattr(self, "workq_db") and len(self.workq_db) > 0:
#read a batch of works into memory
workq, objs_size = self.workq_db.mget(G.memitem_threshold)
self.workq = deque(workq)
self.workq_db.mdel(G.memitem_threshold, objs_size)
if len(self.workq) > 0:
return self.workq.pop()
else:
return None
def barrier_start(self):
self.barrier_started = True
def barrier_test(self):
if not self.barrier_started:
return False
# check if we have received message from all children
if self.barrier_replies < self.children:
# still waiting for barriers from children
st = MPI.Status()
flag = self.comm.Iprobe(MPI.ANY_SOURCE, T.BARRIER, st)
if flag:
child = st.Get_source()
self.comm.recv(source=child, tag=T.BARRIER)
self.barrier_replies += 1
# if we have not sent a message to our parent, and we
# have received a message from all of our children (or we have no children)
# send a message to our parent
if not self.barrier_up and self.barrier_replies == self.children:
if self.parent_rank != MPI.PROC_NULL:
self.comm.send(None, self.parent_rank, T.BARRIER)
# transition to state where we're waiting for parent
# to notify us that the barrier is complete
self.barrier_up = True
# wait for message to come back down from parent to mark end of barrier
complete = False
if self.barrier_up:
if self.parent_rank != MPI.PROC_NULL:
# check for message from parent
flag = self.comm.Iprobe(self.parent_rank, T.BARRIER)
if flag:
self.comm.recv(source=self.parent_rank, tag=T.BARRIER)
# mark barrier as complete
complete = True
else:
# we have no parent, we must be root
# so mark the barrier complete
complete = True
# barrier is complete, send messages to children if any and return true
if complete:
for child in self.child_ranks:
self.comm.send(None, dest=child, tag=T.BARRIER)
# reset state for another barrier
self.barrier_started = False
self.barrier_up = False
self.barrier_replies = 0
return True
# barrier still not complete
return False
def bcast_abort(self):
self.abort = True
buf = G.ABORT
for i in range(self.size):
if (i != self.rank):
self.comm.send(buf, dest=i, tag=T.WORK_REQUEST)
self.logger.warn("abort message sent to %s" % i, extra=self.d)
def cleanup(self):
while True:
# start non-blocking barrier if we have no outstanding items
if not self.reduce_outstanding and \
not self.workreq_outstanding and \
self.token.send_req == MPI.REQUEST_NULL:
self.barrier_start()
# break the loop when non-blocking barrier completes
if self.barrier_test():
break
# send no work message for any work request that comes in
self.workreq_check(cleanup=True)
# clean up any outstanding reduction
if self.reduce_enabled:
self.reduce_check(cleanup=True)
# recv any incoming work reply messages
self.request_work(cleanup=True)
# check and recv any incoming token
self.token.check_and_recv()
# if we have an outstanding token, check if it has been recv'ed
# I don't think this is needed as there seems to be no side effect
if self.token.send_req != MPI.REQUEST_NULL:
if self.token.send_req.Test():
self.token.send_req = MPI.REQUEST_NULL
def workreq_check(self, cleanup=False):
""" for any process that sends work request message:
add the process to the requester list
if my work queue is not empty:
distribute the work evenly
else:
send "no work" message to each requester
reset the requester list to empty
"""
while True:
st = MPI.Status()
ret = self.comm.Iprobe(source=MPI.ANY_SOURCE, tag=T.WORK_REQUEST, status=st)
if not ret: # no work request, break out the loop
break
# we have work request message
rank = st.Get_source()
buf = self.comm.recv(source=rank, tag=T.WORK_REQUEST, status=st)
if buf == G.ABORT:
self.logger.warn("Abort request from rank %s" % rank, extra=self.d)
self.abort = True
self.send_no_work(rank)
return
else:
self.logger.debug("receive work request from requestor [%s]" % rank, extra=self.d)
# add rank to requesters
self.requestors.append(rank)
# out of while loop
if not self.requestors:
return
else:
# first combine workq and work_buf, both of them are in memory
if len(self.workq_buf) > 0:
self.workq.extend(self.workq_buf)
self.workq_buf.clear()
# if in-memory workq is empty, get a batch of works from database
if len(self.workq) == 0 and hasattr(self, "workq_db"):
if len(self.workq_db) > 0:
workq, objs_size = self.workq_db.mget(G.memitem_threshold)
self.workq = deque(workq)
self.workq_db.mdel(G.memitem_threshold, objs_size)
self.logger.debug("have %s requesters, with %s work items in queue" %
(len(self.requestors), len(self.workq)), extra=self.d)
# have work requesters
if self.qsize() == 0 or cleanup:
for rank in self.requestors:
self.send_no_work(rank)
else:
# we do have work
self.send_work_to_many()
self.requestors = []
def spread_counts(self, rcount, wcount):
"""
@rcount: # of requestors
@wcount: # of work items
@return: spread it evenly among all requesters
case 1: wcount == rcount:
base = 0
extra = wcount
each requestor get 1
case 2: wcount < rcount:
base = 0
extra = wcount
first "wcount" requester get 1
case 3: wcount > rcount:
is it possible?
"""
if self.split != "equal":
raise NotImplementedError
base = wcount // (rcount + 1) # leave self a base number of works
extra = wcount - base * (rcount + 1)
assert extra <= rcount
sizes = [base] * rcount
for i in range(extra):
sizes[i] += 1
return sizes
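# worked example (illustrative): with rcount=3 requestors and wcount=10 work items,
# base = 10 // 4 = 2 and extra = 2, so sizes = [3, 3, 2]; 8 items are handed out
# and this rank keeps the remaining 2.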
def send_no_work(self, rank):
""" send no work reply to someone requesting work"""
buf = {G.KEY: G.ABORT} if self.abort else {G.KEY: G.ZERO}
r = self.comm.isend(buf, dest=rank, tag=T.WORK_REPLY)
r.wait()
self.logger.debug("Send no work reply to %s" % rank, extra=self.d)
def send_work_to_many(self):
rcount = len(self.requestors)
wcount = len(self.workq)
sizes = self.spread_counts(rcount, wcount)
self.logger.debug("requester count: %s, work count: %s, spread: %s" %
(rcount, wcount, sizes), extra=self.d)
for idx, dest in enumerate(self.requestors):
self.send_work(dest, sizes[idx])
def send_work(self, rank, witems):
"""
@dest - the rank of requester
@count - the number of work to send
"""
if witems <= 0:
self.send_no_work(rank)
return
# for termination detection
if (rank < self.rank) or (rank == self.token.src):
self.token.proc = G.BLACK
buf = None
# based on if it is memory or store-based
# we have different ways of constructing buf
sliced = list(itertools.islice(self.workq, 0, witems))
buf = {G.KEY: witems, G.VAL: sliced}
self.comm.send(buf, dest=rank, tag=T.WORK_REPLY)
self.logger.debug("%s work items sent to rank %s" % (witems, rank), extra=self.d)
# remove (witems) of work items
# for DbStore, all we need is a number, not the actual objects
# for KVStore, we do need the object list for its key value
# the "size" is a bit awkward use - we know the size after we do mget()
# however, it is not readily available when we do mdel(), so we keep
# previous data and pass it back in to save us some time.
#
for i in range(witems):
self.workq.popleft()
def request_work(self, cleanup=False):
if self.workreq_outstanding:
st = MPI.Status()
reply = self.comm.Iprobe(source=self.work_requested_rank,
tag=T.WORK_REPLY, status=st)
if reply:
self.work_receive(self.work_requested_rank)
# flip flag to indicate we no longer waiting for reply
self.workreq_outstanding = False
# else:
# self.logger.debug("has req outstanding, dest = %s, no reply" %
# self.work_requested_rank, extra = self.d)
elif not cleanup:
# send request
dest = self.next_proc()
if dest == self.rank or dest == MPI.PROC_NULL:
# have no one to ask, we are done
return
buf = G.ABORT if self.abort else G.MSG
# blocking send
self.logger.debug("send work request to rank %s : %s" % (dest, G.str[buf]),
extra=self.d)
self.comm.send(buf, dest, T.WORK_REQUEST)
self.workreq_outstanding = True
self.work_requested_rank = dest
def work_receive(self, rank):
""" when incoming work reply detected """
buf = self.comm.recv(source=rank, tag=T.WORK_REPLY)
if buf[G.KEY] == G.ABORT:
self.logger.debug("receive abort signal", extra=self.d)
self.abort = True
return
elif buf[G.KEY] == G.ZERO:
self.logger.debug("receive no work signal", extra=self.d)
return
else:
assert type(buf[G.VAL]) == list
self.workq.extend(buf[G.VAL])
def reduce(self, buf):
# copy data from user buffer
self.reduce_buf = copy(buf)
def reduce_check(self, cleanup=False):
"""
initiate and progress a reduce operation at specified interval,
ensure progress of reduction in background, stop reduction if cleanup flag is True
"""
if self.reduce_outstanding:
# if we have outstanding reduce, check message from children
# otherwise, check whether we should start new reduce
for child in self.child_ranks:
if self.comm.Iprobe(source=child, tag=T.REDUCE):
# receive message from child
# 'status' element is G.MSG_VALID or not
# the rest is opaque
inbuf = self.comm.recv(source=child, tag=T.REDUCE)
self.reduce_replies += 1
self.logger.debug("client data from %s: %s" %
(child, inbuf), extra=self.d)
if inbuf['status'] == G.MSG_INVALID:
self.reduce_status = False
else:
self.reduce_status = True
# invoke user's callback to reduce user data
if hasattr(self.task, "reduce"):
self.task.reduce(self.reduce_buf, inbuf)
# check if we have gotten replies from all children
if self.reduce_replies == self.children:
# all children replied
# add our own contents to reduce buffer
# send message to parent if we have one
if self.parent_rank != MPI.PROC_NULL:
self.comm.send(self.reduce_buf, self.parent_rank, T.REDUCE)
else:
# we are the root, print results if we have valid data
if self.reduce_status and hasattr(self.task, "reduce_report"):
self.task.reduce_report(self.reduce_buf)
# invoke callback on root to deliver final results
if hasattr(self.task, "reduce_finish"):
self.task.reduce_finish(self.reduce_buf)
# disable flag to indicate we got what we want
self.reduce_outstanding = False
else:
# we don't have an outstanding reduction
# determine if a new reduce should be started
# only bother checking if we think it is about time or
# we are in cleanup mode
start_reduce = False
time_now = MPI.Wtime()
time_next = self.reduce_time_last + self.reduce_time_interval
if time_now >= time_next or cleanup:
if self.parent_rank == MPI.PROC_NULL:
# we are root, kick it off
start_reduce = True
elif self.comm.Iprobe(source=self.parent_rank, tag=T.REDUCE):
# we are not root, check if parent sent us a message
# receive message from parent and set flag to start reduce
self.comm.recv(source=self.parent_rank, tag=T.REDUCE)
start_reduce = True
# it is critical that we don't start a reduce if we are in cleanup
# phase because we may have already started the non-blocking barrier
# just send an invalid message back to parent
if start_reduce and cleanup:
# avoid starting a reduce
start_reduce = False
# if we have parent, send invalid msg
if self.parent_rank != MPI.PROC_NULL:
self.reduce_status = G.MSG_INVALID
self.comm.send(self.reduce_buf, self.parent_rank, T.REDUCE)
if start_reduce:
# set flag to indicate we have a reduce outstanding
# and initiate state for a fresh reduction
self.reduce_time_last = time_now
self.reduce_outstanding = True
self.reduce_replies = 0
self.reduce_status = G.MSG_VALID
self.reduce_buf['status'] = G.MSG_VALID
# invoke callback to get input data
if hasattr(self.task, "reduce_init"):
self.task.reduce_init(self.reduce_buf)
# sent message to each child
for child in self.child_ranks:
self.comm.send(None, child, T.REDUCE)
def do_periodic_report(self, prefix="Circle report"):
delta = self.work_processed - self.report_processed
rate = int(delta/self.report_interval)
self.report_processed = self.work_processed
s = "\n%s on [rank: %s %s/%s] at %s\n" % \
(prefix, self.rank, self.host, self.pid, time.strftime("%Y-%m-%d %H:%M:%S"))
s += "\t{:<20}{:<10,}{:5}{:<20}{:<12,}\n".format("work queue size:",
len(self.workq), "|", "work processed:", self.work_processed)
s += "\t{:<20}{:<10,}{:5}{:<20}{:<10}\n".format("work delta:", delta,
"|", "rate:", "%s /s" % rate)
print(s)
@staticmethod
def exit(code):
MPI.Finalize()
sys.exit(code)
|
|
""" Utilities for interacting with a DC/OS instance via REST API
Most DC/OS deployments will have auth enabled, so this module includes
DcosUser and DcosAuth to be attached to a DcosApiSession. Additionally,
it is sometimes necessary to query specific nodes within a DC/OS cluster,
so there is ARNodeApiClientMixin to allow querying nodes without boilerplate
to set the correct port and scheme.
"""
import copy
import logging
import os
from typing import List, Optional
import requests
import retrying
import test_util.marathon
from test_util.helpers import ApiClientSession, RetryCommonHttpErrorsMixin, Url
class DcosUser:
"""A lightweight user representation for grabbing the auth info and stashing it"""
def __init__(self, credentials: dict):
self.credentials = credentials
self.auth_token = None
self.auth_cookie = None
@property
def auth_header(self):
return {'Authorization': 'token={}'.format(self.auth_token)}
class DcosAuth(requests.auth.AuthBase):
def __init__(self, auth_token: str):
self.auth_token = auth_token
def __call__(self, request):
request.headers['Authorization'] = 'token={}'.format(self.auth_token)
return request
class Exhibitor(RetryCommonHttpErrorsMixin, ApiClientSession):
def __init__(self, default_url: Url, session: Optional[requests.Session]=None,
exhibitor_admin_password: Optional[str]=None):
super().__init__(default_url)
if session is not None:
self.session = session
if exhibitor_admin_password is not None:
# Override auth to use HTTP basic auth with the provided admin password.
self.session.auth = requests.auth.HTTPBasicAuth('admin', exhibitor_admin_password)
class ARNodeApiClientMixin:
def api_request(self, method, path_extension, *, scheme=None, host=None, query=None,
fragment=None, port=None, node=None, **kwargs):
""" Communicating with a DC/OS cluster is done by default through Admin Router.
Use this Mixin with an ApiClientSession that requires distinguishing between nodes.
Admin Router has both a master and agent process and so this wrapper accepts a
        node argument. node must be a host in self.masters or self.all_slaves. If given,
the request will be made to the Admin Router endpoint for that node type
"""
if node is not None:
assert port is None, 'node is intended to retrieve port; cannot set both simultaneously'
assert host is None, 'node is intended to retrieve host; cannot set both simultaneously'
if node in self.masters:
# Nothing else to do, master Admin Router uses default HTTP (80) and HTTPS (443) ports
pass
elif node in self.all_slaves:
scheme = scheme if scheme is not None else self.default_url.scheme
if scheme == 'http':
port = 61001
if scheme == 'https':
port = 61002
else:
raise Exception('Node {} is not recognized within the DC/OS cluster'.format(node))
host = node
return super().api_request(method, path_extension, scheme=scheme, host=host,
query=query, fragment=fragment, port=port, **kwargs)
class DcosApiSession(ARNodeApiClientMixin, RetryCommonHttpErrorsMixin, ApiClientSession):
def __init__(
self,
dcos_url: str,
masters: Optional[List[str]],
slaves: Optional[List[str]],
public_slaves: Optional[List[str]],
default_os_user: str,
auth_user: Optional[DcosUser],
exhibitor_admin_password: Optional[str]=None):
"""Proxy class for DC/OS clusters. If any of the host lists (masters,
slaves, public_slaves) are provided, the wait_for_dcos function of this
class will wait until provisioning is complete. If these lists are not
provided, then there is no ground truth and the cluster will be assumed
        to be in a completed state.
Args:
dcos_url: address for the DC/OS web UI.
masters: list of Mesos master advertised IP addresses.
slaves: list of Mesos slave/agent advertised IP addresses.
public_slaves: list of public Mesos slave/agent advertised IP addresses.
default_os_user: default user that marathon/metronome will launch tasks under
auth_user: use this user's auth for all requests
Note: user must be authenticated explicitly or call self.wait_for_dcos()
"""
super().__init__(Url.from_string(dcos_url))
self.master_list = masters
self.slave_list = slaves
self.public_slave_list = public_slaves
self.default_os_user = default_os_user
self.auth_user = auth_user
self.exhibitor_admin_password = exhibitor_admin_password
@staticmethod
def get_args_from_env():
""" Provides the required arguments for a unauthenticated cluster
"""
masters = os.getenv('MASTER_HOSTS')
slaves = os.getenv('SLAVE_HOSTS')
public_slaves = os.getenv('PUBLIC_SLAVE_HOSTS')
return {
'dcos_url': os.getenv('DCOS_DNS_ADDRESS', 'http://leader.mesos'),
'masters': masters.split(',') if masters else None,
'slaves': slaves.split(',') if slaves else None,
'public_slaves': public_slaves.split(',') if public_slaves else None,
'default_os_user': os.getenv('DCOS_DEFAULT_OS_USER', 'root')}
@property
def masters(self):
return sorted(self.master_list)
@property
def slaves(self):
return sorted(self.slave_list)
@property
def public_slaves(self):
return sorted(self.public_slave_list)
@property
def all_slaves(self):
return sorted(self.slaves + self.public_slaves)
def set_node_lists_if_unset(self):
""" Sets the expected cluster topology to be the observed cluster
topology from exhibitor and mesos. I.E. if masters, slave, or
public_slaves were not provided, accept whatever is currently available
"""
if self.master_list is None:
logging.debug('Master list not provided, setting from exhibitor...')
r = self.get('/exhibitor/exhibitor/v1/cluster/list')
r.raise_for_status()
self.master_list = sorted(r.json()['servers'])
logging.info('Master list set as: {}'.format(self.masters))
if self.slave_list is not None and self.public_slave_list is not None:
return
r = self.get('/mesos/slaves')
r.raise_for_status()
slaves_json = r.json()['slaves']
if self.slave_list is None:
logging.debug('Private slave list not provided; fetching from mesos...')
self.slave_list = sorted(
[s['hostname'] for s in slaves_json if s['attributes'].get('public_ip') != 'true'])
logging.info('Private slave list set as: {}'.format(self.slaves))
if self.public_slave_list is None:
logging.debug('Public slave list not provided; fetching from mesos...')
self.public_slave_list = sorted(
[s['hostname'] for s in slaves_json if s['attributes'].get('public_ip') == 'true'])
logging.info('Public slave list set as: {}'.format(self.public_slaves))
@retrying.retry(wait_fixed=2000, stop_max_delay=120 * 1000)
def _authenticate_default_user(self):
"""retry default auth user because in some deployments,
the auth endpoint might not be routable immediately
after Admin Router is up. DcosUser.authenticate()
will raise exception if authorization fails
"""
if self.auth_user is None:
return
logging.info('Attempting authentication')
# explicitly use a session with no user authentication for requesting auth headers
r = self.post('/acs/api/v1/auth/login', json=self.auth_user.credentials, auth=None)
r.raise_for_status()
logging.info('Received authentication blob: {}'.format(r.json()))
self.auth_user.auth_token = r.json()['token']
self.auth_user.auth_cookie = r.cookies['dcos-acs-auth-cookie']
logging.info('Authentication successful')
# Set requests auth
self.session.auth = DcosAuth(self.auth_user.auth_token)
@retrying.retry(wait_fixed=1000,
retry_on_result=lambda ret: ret is False,
retry_on_exception=lambda x: False)
def _wait_for_marathon_up(self):
r = self.get('/marathon/ui/')
# resp_code >= 500 -> backend is still down probably
if r.status_code < 500:
logging.info("Marathon is probably up")
return True
else:
msg = "Waiting for Marathon, resp code is: {}"
logging.info(msg.format(r.status_code))
return False
@retrying.retry(wait_fixed=1000)
def _wait_for_zk_quorum(self):
"""Queries exhibitor to ensure all master ZKs have joined
"""
r = self.get('/exhibitor/exhibitor/v1/cluster/status')
if not r.ok:
logging.warning('Exhibitor status not available')
r.raise_for_status()
status = r.json()
logging.info('Exhibitor cluster status: {}'.format(status))
zk_nodes = sorted([n['hostname'] for n in status])
assert zk_nodes == self.masters, 'ZooKeeper has not formed the expected quorum'
@retrying.retry(wait_fixed=1000,
retry_on_result=lambda ret: ret is False,
retry_on_exception=lambda x: False)
def _wait_for_slaves_to_join(self):
r = self.get('/mesos/master/slaves')
if r.status_code != 200:
msg = "Mesos master returned status code {} != 200 "
msg += "continuing to wait..."
logging.info(msg.format(r.status_code))
return False
data = r.json()
        # Check that all the slaves the test knows about have joined; they are
        # all needed for the test to pass.
num_slaves = len(data['slaves'])
if num_slaves >= len(self.all_slaves):
msg = "Sufficient ({} >= {}) number of slaves have joined the cluster"
            logging.info(msg.format(num_slaves, len(self.all_slaves)))
return True
else:
msg = "Current number of slaves: {} < {}, continuing to wait..."
            logging.info(msg.format(num_slaves, len(self.all_slaves)))
return False
@retrying.retry(wait_fixed=1000,
retry_on_result=lambda ret: ret is False,
retry_on_exception=lambda x: False)
def _wait_for_dcos_history_up(self):
r = self.get('/dcos-history-service/ping')
# resp_code >= 500 -> backend is still down probably
        if r.status_code < 500:
logging.info("DC/OS History is probably up")
return True
else:
msg = "Waiting for DC/OS History, resp code is: {}"
logging.info(msg.format(r.status_code))
return False
@retrying.retry(wait_fixed=1000,
retry_on_result=lambda ret: ret is False,
retry_on_exception=lambda x: False)
def _wait_for_dcos_history_data(self):
ro = self.get('/dcos-history-service/history/last')
# resp_code >= 500 -> backend is still down probably
        if ro.status_code < 500:
logging.info("DC/OS History is probably getting data")
json = ro.json()
# if an agent was removed, it may linger in the history data
assert len(json["slaves"]) >= len(self.all_slaves)
return True
else:
msg = "Waiting for DC/OS History, resp code is: {}"
logging.info(msg.format(ro.status_code))
return False
@retrying.retry(wait_fixed=1000,
retry_on_result=lambda ret: ret is False,
retry_on_exception=lambda x: False)
def _wait_for_adminrouter_up(self):
try:
# Yeah, we can also put it in retry_on_exception, but
            # this way we will lose debug messages
self.get('/')
except requests.ConnectionError as e:
msg = "Cannot connect to nginx, error string: '{}', continuing to wait"
logging.info(msg.format(e))
return False
else:
logging.info("Nginx is UP!")
return True
# Retry if returncode is False, do not retry on exceptions.
@retrying.retry(wait_fixed=2000,
retry_on_result=lambda r: r is False,
retry_on_exception=lambda _: False)
def _wait_for_srouter_slaves_endpoints(self):
# Get currently known agents. This request is served straight from
# Mesos (no AdminRouter-based caching is involved).
r = self.get('/mesos/master/slaves')
assert r.status_code == 200
data = r.json()
# only check against the slaves we expect to be in the cluster
# so we can check that cluster has returned after a failure
        # in which case we will have new slaves and dead slaves
slaves_ids = sorted(x['id'] for x in data['slaves'] if x['hostname'] in self.all_slaves)
for slave_id in slaves_ids:
# AdminRouter's slave endpoint internally uses cached Mesos
# state data. That is, slave IDs of just recently joined
# slaves can be unknown here. For those, this endpoint
# returns a 404. Retry in this case, until this endpoint
# is confirmed to work for all known agents.
uri = '/slave/{}/slave%281%29/state.json'.format(slave_id)
r = self.get(uri)
if r.status_code == 404:
return False
assert r.status_code == 200
data = r.json()
assert "id" in data
assert data["id"] == slave_id
@retrying.retry(wait_fixed=2000,
retry_on_result=lambda r: r is False,
retry_on_exception=lambda _: False)
def _wait_for_metronome(self):
r = self.get('/service/metronome/v1/jobs')
# 500 and 504 are the expected behavior of a service
# backend that is not up and running.
if r.status_code == 500 or r.status_code == 504:
logging.info("Metronome gateway timeout, continue waiting for backend...")
return False
assert r.status_code == 200
def wait_for_dcos(self):
self._wait_for_adminrouter_up()
self._authenticate_default_user()
self.set_node_lists_if_unset()
self._wait_for_marathon_up()
self._wait_for_zk_quorum()
self._wait_for_slaves_to_join()
self._wait_for_dcos_history_up()
self._wait_for_srouter_slaves_endpoints()
self._wait_for_dcos_history_data()
self._wait_for_metronome()
def copy(self):
""" Create a new client session without cookies, with the authentication intact.
"""
new = copy.deepcopy(self)
new.session.cookies.clear()
return new
def get_user_session(self, user):
"""Returns a copy of this client but with auth for user (can be None)
"""
new = self.copy()
new.session.auth = None
new.auth_user = None
if user is not None:
new.auth_user = user
new._authenticate_default_user()
return new
@property
def exhibitor(self):
if self.exhibitor_admin_password is None:
# No basic HTTP auth. Access Exhibitor via the adminrouter.
default_url = self.default_url.copy(path='exhibitor')
else:
# Exhibitor is protected with HTTP basic auth, which conflicts with adminrouter's auth. We must bypass
# the adminrouter and access Exhibitor directly.
default_url = Url.from_string('http://{}:8181'.format(self.masters[0]))
return Exhibitor(
default_url=default_url,
session=self.copy().session,
exhibitor_admin_password=self.exhibitor_admin_password)
@property
def marathon(self):
return test_util.marathon.Marathon(
default_url=self.default_url.copy(path='marathon'),
default_os_user=self.default_os_user,
session=self.copy().session)
@property
def metronome(self):
new = self.copy()
new.default_url = self.default_url.copy(path='service/metronome/v1')
return new
@property
def health(self):
new = self.copy()
new.default_url = self.default_url.copy(query='cache=0', path='system/health/v1')
return new
@property
def logs(self):
new = self.copy()
new.default_url = self.default_url.copy(path='system/v1/logs')
return new
@property
def metrics(self):
new = self.copy()
new.default_url = self.default_url.copy(path='/system/v1/metrics/v0')
return new
def metronome_one_off(self, job_definition, timeout=300, ignore_failures=False):
"""Run a job on metronome and block until it returns success
"""
job_id = job_definition['id']
@retrying.retry(wait_fixed=2000, stop_max_delay=timeout * 1000,
retry_on_result=lambda ret: not ret,
retry_on_exception=lambda x: False)
def wait_for_completion():
r = self.metronome.get('jobs/' + job_id, params={'embed': 'history'})
r.raise_for_status()
out = r.json()
if not ignore_failures and (out['history']['failureCount'] != 0):
raise Exception('Metronome job failed!: ' + repr(out))
if out['history']['successCount'] != 1:
logging.info('Waiting for one-off to finish. Status: ' + repr(out))
return False
logging.info('Metronome one-off successful')
return True
logging.info('Creating metronome job: ' + repr(job_definition))
r = self.metronome.post('jobs', json=job_definition)
r.raise_for_status()
logging.info('Starting metronome job')
r = self.metronome.post('jobs/{}/runs'.format(job_id))
r.raise_for_status()
wait_for_completion()
logging.info('Deleting metronome one-off')
r = self.metronome.delete('jobs/' + job_id)
r.raise_for_status()
def mesos_sandbox_directory(self, slave_id, framework_id, task_id):
r = self.get('/agent/{}/state'.format(slave_id))
r.raise_for_status()
agent_state = r.json()
try:
framework = next(f for f in agent_state['frameworks'] if f['id'] == framework_id)
except StopIteration:
raise Exception('Framework {} not found on agent {}'.format(framework_id, slave_id))
try:
executor = next(e for e in framework['executors'] if e['id'] == task_id)
except StopIteration:
raise Exception('Executor {} not found on framework {} on agent {}'.format(task_id, framework_id, slave_id))
return executor['directory']
def mesos_sandbox_file(self, slave_id, framework_id, task_id, filename):
r = self.get(
'/agent/{}/files/download'.format(slave_id),
params={'path': self.mesos_sandbox_directory(slave_id, framework_id, task_id) + '/' + filename}
)
r.raise_for_status()
return r.text
def get_version(self):
version_metadata = self.get('/dcos-metadata/dcos-version.json')
version_metadata.raise_for_status()
data = version_metadata.json()
return data["version"]
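# A minimal usage sketch relying only on names defined above (DcosUser,
# DcosApiSession.get_args_from_env, wait_for_dcos, get_version). The
# credential payload is a placeholder; real deployments pass whatever their
# auth provider expects, or auth_user=None for clusters without auth.
def example_dcos_session():
    credentials = {'uid': 'bootstrapuser', 'password': 'changeme'}  # placeholder
    cluster_args = DcosApiSession.get_args_from_env()
    dcos = DcosApiSession(auth_user=DcosUser(credentials), **cluster_args)
    # blocks until Admin Router, auth, Marathon, the ZK quorum, agents,
    # the history service and Metronome all report healthy
    dcos.wait_for_dcos()
    return dcos.get_version()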
|
|
# -*- coding: utf-8 -*-
'''
SECCorrespondenceLoader is a plug-in to both GUI menu and command line/web service
that loads a Correspondence tar.gz file.
(c) Copyright 2014 Mark V Systems Limited, All rights reserved.
'''
import datetime, re, os, time
from arelle import FileSource, ModelDocument
from arelle.ModelRssObject import ModelRssObject
from arelle.XmlValidate import UNVALIDATED, VALID
class SECCorrespondenceItem:
def __init__(self, modelXbrl, fileName, entryUrl):
self.cikNumber = None
self.accessionNumber = None
self.fileNumber = None
self.companyName = None
self.formType = None
pubDate = os.path.basename(modelXbrl.uri).partition(".")[0]
try:
self.pubDate = datetime.datetime(int(pubDate[0:4]), int(pubDate[4:6]), int(pubDate[6:8]))
self.acceptanceDatetime = self.pubDate
self.filingDate = self.pubDate.date()
except ValueError:
self.pubDate = self.acceptanceDatetime = self.filingDate = None
self.filingDate = None
self.period = None
self.assignedSic = None
self.fiscalYearEnd = None
self.htmlUrl = None
self.url = entryUrl
self.zippedUrl = entryUrl
self.htmURLs = ((fileName, entryUrl),)
self.status = "not tested"
self.results = None
self.assertions = None
self.objectIndex = len(modelXbrl.modelObjects)
modelXbrl.modelObjects.append(self)
def setResults(self, modelXbrl):
self.results = []
self.assertionUnsuccessful = False
# put error codes first, sorted, then assertion result (dict's)
self.status = "pass"
for error in modelXbrl.errors:
if isinstance(error,dict): # assertion results
self.assertions = error
for countSuccessful, countNotsuccessful in error.items():
if countNotsuccessful > 0:
self.assertionUnsuccessful = True
self.status = "unsuccessful"
else: # error code results
self.results.append(error)
self.status = "fail" # error code
self.results.sort()
def objectId(self,refId=""):
"""Returns a string surrogate representing the object index of the model document,
prepended by the refId string.
        :param refId: A string to prefix the refId for uniqueness (such as to use in tags for tkinter)
:type refId: str
"""
return "_{0}_{1}".format(refId, self.objectIndex)
def secCorrespondenceLoader(modelXbrl, mappedUri, filepath, *args, **kwargs):
if (mappedUri.startswith("http://www.sec.gov/Archives/edgar/Feed/") and
mappedUri.endswith(".nc.tar.gz")):
# daily feed loader (the rss object)
rssObject = ModelRssObject(modelXbrl, uri=mappedUri, filepath=filepath)
# location for expanded feed files
tempdir = os.path.join(modelXbrl.modelManager.cntlr.userAppDir, "tmp", "edgarFeed")
# remove prior files
if os.path.exists(tempdir):
os.system("rm -fr {}".format(tempdir)) # rmtree does not work with this many files!
os.makedirs(tempdir, exist_ok=True)
# untar to /temp/arelle/edgarFeed for faster operation
startedAt = time.time()
modelXbrl.fileSource.open()
modelXbrl.fileSource.fs.extractall(tempdir)
modelXbrl.info("info", "untar edgarFeed temp files in %.2f sec" % (time.time() - startedAt),
modelObject=modelXbrl)
# find <table> with <a>Download in it
for instanceFile in sorted(os.listdir(tempdir)): # modelXbrl.fileSource.dir:
if instanceFile != ".":
rssObject.rssItems.append(
SECCorrespondenceItem(modelXbrl, instanceFile, mappedUri + '/' + instanceFile))
return rssObject
elif "rssItem" in kwargs and ".nc.tar.gz/" in mappedUri:
rssItem = kwargs["rssItem"]
text = None # no instance information
# parse document
try:
startedAt = time.time()
file, encoding = modelXbrl.fileSource.file(
os.path.join(modelXbrl.modelManager.cntlr.userAppDir, "tmp", "edgarFeed",
os.path.basename(rssItem.url)))
s = file.read()
file.close()
for match in re.finditer(r"[<]([^>]+)[>]([^<\n\r]*)", s, re.MULTILINE):
tag = match.group(1).lower()
v = match.group(2)
if tag == "accession-number":
rssItem.accessionNumber = v
elif tag == "form-type":
rssItem.formType = v
if v != "UPLOAD":
rssItem.doNotProcessRSSitem = True # skip this RSS item in validate loop, don't load DB
elif tag == "filing-date":
try:
rssItem.filingDate = datetime.date(int(v[0:4]), int(v[4:6]), int(v[6:8]))
except (ValueError, IndexError):
pass
elif tag == "conformed-name":
rssItem.companyName = v
elif tag == "cik":
rssItem.cikNumber = v
elif tag == "assigned-sic":
rssItem.assignedSic = v
elif tag == "fiscal-year-end":
try:
rssItem.fiscalYearEnd = v[0:2] + '-' + v[2:4]
except (IndexError, TypeError):
pass
match = re.search("<PDF>(.*)</PDF>", s, re.DOTALL)
if match:
import uu, io
pageText = []
uuIn = io.BytesIO(match.group(1).encode(encoding))
uuOut = io.BytesIO()
uu.decode(uuIn, uuOut)
from pyPdf import PdfFileReader
uuOut.seek(0,0)
try:
pdfIn = PdfFileReader(uuOut)
for pageNum in range(pdfIn.getNumPages()):
pageText.append(pdfIn.getPage(pageNum).extractText())
except:
# do we want a warning here that the PDF can't be read with this library?
pass
uuIn.close()
uuOut.close()
text = ''.join(pageText)
else:
match = re.search("<TEXT>(.*)</TEXT>", s, re.DOTALL)
if match:
text = match.group(1)
except (IOError, EnvironmentError):
pass # give up, no instance
# daily rss item loader, provide unpopulated instance document to be filled in by RssItem.Xbrl.Loaded
if not text:
rssItem.doNotProcessRSSitem = True # skip this RSS item in validate loop, don't load DB
instDoc = ModelDocument.create(modelXbrl,
ModelDocument.Type.UnknownXML,
rssItem.url,
isEntry=True,
                                           base='', # block pathname from becoming absolute
initialXml='<DummyXml/>')
else:
instDoc = ModelDocument.create(modelXbrl,
ModelDocument.Type.INSTANCE,
rssItem.url,
isEntry=True,
                                           base='', # block pathname from becoming absolute
initialXml='''
<xbrli:xbrl xmlns:doc="http://arelle.org/doc/2014-01-31"
xmlns:link="http://www.xbrl.org/2003/linkbase"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xbrli="http://www.xbrl.org/2003/instance">
<link:schemaRef xlink:type="simple" xlink:href="http://arelle.org/2014/doc-2014-01-31.xsd"/>
<xbrli:context id="pubDate">
<xbrli:entity>
<xbrli:identifier scheme="http://www.sec.gov/CIK">{cik}</xbrli:identifier>
</xbrli:entity>
<xbrli:period>
<xbrli:instant>{pubDate}</xbrli:instant>
</xbrli:period>
</xbrli:context>
<doc:Correspondence contextRef="pubDate">{text}</doc:Correspondence>
</xbrli:xbrl>
'''.format(cik=rssItem.cikNumber,
pubDate=rssItem.pubDate.date(),
text=text.strip().replace("&","&").replace("<","<")))
#modelXbrl.info("info", "loaded in %.2f sec" % (time.time() - startedAt),
# modelDocument=instDoc)
return instDoc
return None
def secCorrespondenceCloser(modelDocument, *args, **kwargs):
if (modelDocument.uri.startswith("http://www.sec.gov/Archives/edgar/Feed/") and
modelDocument.uri.endswith(".nc.tar.gz")):
# remove prior files
if os.path.exists("/tmp/arelle/edgarFeed"):
os.system("rm -fr /tmp/arelle/edgarFeed")
__pluginInfo__ = {
'name': 'SEC Correspondence Loader',
'version': '0.9',
'description': "This plug-in loads SEC Correspondence. ",
'license': 'Apache-2',
'author': 'Mark V Systems Limited',
'copyright': '(c) Copyright 2014 Mark V Systems Limited, All rights reserved. \n'
'PyPDF (c) Copyright 2012 Jeet Sukumaran',
# classes of mount points (required)
'ModelDocument.PullLoader': secCorrespondenceLoader,
'ModelDocument.CustomCloser': secCorrespondenceCloser,
}
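# Illustrative helper (not used by the plug-in itself): secCorrespondenceLoader
# above extracts metadata from the EDGAR ".nc" header with a simple tag/value
# regex. This shows the same pattern in isolation; the tag names mirror the
# ones handled in the loader (accession-number, form-type, cik, ...).
def exampleParseEdgarHeader(s):
    fields = {}
    for match in re.finditer(r"[<]([^>]+)[>]([^<\n\r]*)", s, re.MULTILINE):
        fields[match.group(1).lower()] = match.group(2)
    return fields
# exampleParseEdgarHeader("<CIK>0000123456\n<FORM-TYPE>UPLOAD")
#   -> {'cik': '0000123456', 'form-type': 'UPLOAD'}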
|
|
"""Test the Network Configuration."""
from unittest.mock import MagicMock, Mock, patch
import ifaddr
from homeassistant.components import network
from homeassistant.components.network.const import (
ATTR_ADAPTERS,
ATTR_CONFIGURED_ADAPTERS,
MDNS_TARGET_IP,
STORAGE_KEY,
STORAGE_VERSION,
)
from homeassistant.setup import async_setup_component
_NO_LOOPBACK_IPADDR = "192.168.1.5"
_LOOPBACK_IPADDR = "127.0.0.1"
def _mock_socket(sockname):
mock_socket = MagicMock()
mock_socket.getsockname = Mock(return_value=sockname)
return mock_socket
def _mock_socket_exception(exc):
mock_socket = MagicMock()
mock_socket.getsockname = Mock(side_effect=exc)
return mock_socket
def _generate_mock_adapters():
mock_lo0 = Mock(spec=ifaddr.Adapter)
mock_lo0.nice_name = "lo0"
mock_lo0.ips = [ifaddr.IP("127.0.0.1", 8, "lo0")]
mock_lo0.index = 0
mock_eth0 = Mock(spec=ifaddr.Adapter)
mock_eth0.nice_name = "eth0"
mock_eth0.ips = [ifaddr.IP(("2001:db8::", 1, 1), 8, "eth0")]
mock_eth0.index = 1
mock_eth1 = Mock(spec=ifaddr.Adapter)
mock_eth1.nice_name = "eth1"
mock_eth1.ips = [ifaddr.IP("192.168.1.5", 23, "eth1")]
mock_eth1.index = 2
mock_vtun0 = Mock(spec=ifaddr.Adapter)
mock_vtun0.nice_name = "vtun0"
mock_vtun0.ips = [ifaddr.IP("169.254.3.2", 16, "vtun0")]
mock_vtun0.index = 3
return [mock_eth0, mock_lo0, mock_eth1, mock_vtun0]
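# The fixture above covers the cases the component has to classify: eth0
# exposes only an IPv6 address, lo0 is the loopback, eth1 carries the routable
# private IPv4 (192.168.1.5) that the mocked default route resolves to, and
# vtun0 only has a link-local 169.254.x.x address.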
async def test_async_detect_interfaces_setting_non_loopback_route(hass, hass_storage):
"""Test without default interface config and the route returns a non-loopback address."""
with patch(
"homeassistant.components.network.util.socket.socket",
return_value=_mock_socket([_NO_LOOPBACK_IPADDR]),
), patch(
"homeassistant.components.network.util.ifaddr.get_adapters",
return_value=_generate_mock_adapters(),
):
assert await async_setup_component(hass, network.DOMAIN, {network.DOMAIN: {}})
await hass.async_block_till_done()
network_obj = hass.data[network.DOMAIN]
assert network_obj.configured_adapters == []
assert network_obj.adapters == [
{
"auto": False,
"index": 1,
"default": False,
"enabled": False,
"ipv4": [],
"ipv6": [
{
"address": "2001:db8::",
"network_prefix": 8,
"flowinfo": 1,
"scope_id": 1,
}
],
"name": "eth0",
},
{
"index": 0,
"auto": False,
"default": False,
"enabled": False,
"ipv4": [{"address": "127.0.0.1", "network_prefix": 8}],
"ipv6": [],
"name": "lo0",
},
{
"index": 2,
"auto": True,
"default": True,
"enabled": True,
"ipv4": [{"address": "192.168.1.5", "network_prefix": 23}],
"ipv6": [],
"name": "eth1",
},
{
"index": 3,
"auto": False,
"default": False,
"enabled": False,
"ipv4": [{"address": "169.254.3.2", "network_prefix": 16}],
"ipv6": [],
"name": "vtun0",
},
]
async def test_async_detect_interfaces_setting_loopback_route(hass, hass_storage):
"""Test without default interface config and the route returns a loopback address."""
with patch(
"homeassistant.components.network.util.socket.socket",
return_value=_mock_socket([_LOOPBACK_IPADDR]),
), patch(
"homeassistant.components.network.util.ifaddr.get_adapters",
return_value=_generate_mock_adapters(),
):
assert await async_setup_component(hass, network.DOMAIN, {network.DOMAIN: {}})
await hass.async_block_till_done()
network_obj = hass.data[network.DOMAIN]
assert network_obj.configured_adapters == []
assert network_obj.adapters == [
{
"index": 1,
"auto": True,
"default": False,
"enabled": True,
"ipv4": [],
"ipv6": [
{
"address": "2001:db8::",
"network_prefix": 8,
"flowinfo": 1,
"scope_id": 1,
}
],
"name": "eth0",
},
{
"index": 0,
"auto": False,
"default": True,
"enabled": False,
"ipv4": [{"address": "127.0.0.1", "network_prefix": 8}],
"ipv6": [],
"name": "lo0",
},
{
"index": 2,
"auto": True,
"default": False,
"enabled": True,
"ipv4": [{"address": "192.168.1.5", "network_prefix": 23}],
"ipv6": [],
"name": "eth1",
},
{
"index": 3,
"auto": False,
"default": False,
"enabled": False,
"ipv4": [{"address": "169.254.3.2", "network_prefix": 16}],
"ipv6": [],
"name": "vtun0",
},
]
async def test_async_detect_interfaces_setting_empty_route(hass, hass_storage):
"""Test without default interface config and the route returns nothing."""
with patch(
"homeassistant.components.network.util.socket.socket",
return_value=_mock_socket([]),
), patch(
"homeassistant.components.network.util.ifaddr.get_adapters",
return_value=_generate_mock_adapters(),
):
assert await async_setup_component(hass, network.DOMAIN, {network.DOMAIN: {}})
await hass.async_block_till_done()
network_obj = hass.data[network.DOMAIN]
assert network_obj.configured_adapters == []
assert network_obj.adapters == [
{
"auto": True,
"index": 1,
"default": False,
"enabled": True,
"ipv4": [],
"ipv6": [
{
"address": "2001:db8::",
"network_prefix": 8,
"flowinfo": 1,
"scope_id": 1,
}
],
"name": "eth0",
},
{
"auto": False,
"index": 0,
"default": False,
"enabled": False,
"ipv4": [{"address": "127.0.0.1", "network_prefix": 8}],
"ipv6": [],
"name": "lo0",
},
{
"auto": True,
"index": 2,
"default": False,
"enabled": True,
"ipv4": [{"address": "192.168.1.5", "network_prefix": 23}],
"ipv6": [],
"name": "eth1",
},
{
"auto": False,
"index": 3,
"default": False,
"enabled": False,
"ipv4": [{"address": "169.254.3.2", "network_prefix": 16}],
"ipv6": [],
"name": "vtun0",
},
]
async def test_async_detect_interfaces_setting_exception(hass, hass_storage):
"""Test without default interface config and the route throws an exception."""
with patch(
"homeassistant.components.network.util.socket.socket",
return_value=_mock_socket_exception(AttributeError),
), patch(
"homeassistant.components.network.util.ifaddr.get_adapters",
return_value=_generate_mock_adapters(),
):
assert await async_setup_component(hass, network.DOMAIN, {network.DOMAIN: {}})
await hass.async_block_till_done()
network_obj = hass.data[network.DOMAIN]
assert network_obj.configured_adapters == []
assert network_obj.adapters == [
{
"auto": True,
"index": 1,
"default": False,
"enabled": True,
"ipv4": [],
"ipv6": [
{
"address": "2001:db8::",
"network_prefix": 8,
"flowinfo": 1,
"scope_id": 1,
}
],
"name": "eth0",
},
{
"auto": False,
"index": 0,
"default": False,
"enabled": False,
"ipv4": [{"address": "127.0.0.1", "network_prefix": 8}],
"ipv6": [],
"name": "lo0",
},
{
"auto": True,
"index": 2,
"default": False,
"enabled": True,
"ipv4": [{"address": "192.168.1.5", "network_prefix": 23}],
"ipv6": [],
"name": "eth1",
},
{
"auto": False,
"index": 3,
"default": False,
"enabled": False,
"ipv4": [{"address": "169.254.3.2", "network_prefix": 16}],
"ipv6": [],
"name": "vtun0",
},
]
async def test_interfaces_configured_from_storage(hass, hass_storage):
"""Test settings from storage are preferred over auto configure."""
hass_storage[STORAGE_KEY] = {
"version": STORAGE_VERSION,
"key": STORAGE_KEY,
"data": {ATTR_CONFIGURED_ADAPTERS: ["eth0", "eth1", "vtun0"]},
}
with patch(
"homeassistant.components.network.util.socket.socket",
return_value=_mock_socket([_NO_LOOPBACK_IPADDR]),
), patch(
"homeassistant.components.network.util.ifaddr.get_adapters",
return_value=_generate_mock_adapters(),
):
assert await async_setup_component(hass, network.DOMAIN, {network.DOMAIN: {}})
await hass.async_block_till_done()
network_obj = hass.data[network.DOMAIN]
assert network_obj.configured_adapters == ["eth0", "eth1", "vtun0"]
assert network_obj.adapters == [
{
"auto": False,
"index": 1,
"default": False,
"enabled": True,
"ipv4": [],
"ipv6": [
{
"address": "2001:db8::",
"network_prefix": 8,
"flowinfo": 1,
"scope_id": 1,
}
],
"name": "eth0",
},
{
"auto": False,
"index": 0,
"default": False,
"enabled": False,
"ipv4": [{"address": "127.0.0.1", "network_prefix": 8}],
"ipv6": [],
"name": "lo0",
},
{
"auto": True,
"index": 2,
"default": True,
"enabled": True,
"ipv4": [{"address": "192.168.1.5", "network_prefix": 23}],
"ipv6": [],
"name": "eth1",
},
{
"auto": False,
"index": 3,
"default": False,
"enabled": True,
"ipv4": [{"address": "169.254.3.2", "network_prefix": 16}],
"ipv6": [],
"name": "vtun0",
},
]
async def test_interfaces_configured_from_storage_websocket_update(
hass, hass_ws_client, hass_storage
):
"""Test settings from storage can be updated via websocket api."""
hass_storage[STORAGE_KEY] = {
"version": STORAGE_VERSION,
"key": STORAGE_KEY,
"data": {ATTR_CONFIGURED_ADAPTERS: ["eth0", "eth1", "vtun0"]},
}
with patch(
"homeassistant.components.network.util.socket.socket",
return_value=_mock_socket([_NO_LOOPBACK_IPADDR]),
), patch(
"homeassistant.components.network.util.ifaddr.get_adapters",
return_value=_generate_mock_adapters(),
):
assert await async_setup_component(hass, network.DOMAIN, {network.DOMAIN: {}})
await hass.async_block_till_done()
network_obj = hass.data[network.DOMAIN]
assert network_obj.configured_adapters == ["eth0", "eth1", "vtun0"]
ws_client = await hass_ws_client(hass)
await ws_client.send_json({"id": 1, "type": "network"})
response = await ws_client.receive_json()
assert response["success"]
assert response["result"][ATTR_CONFIGURED_ADAPTERS] == ["eth0", "eth1", "vtun0"]
assert response["result"][ATTR_ADAPTERS] == [
{
"auto": False,
"index": 1,
"default": False,
"enabled": True,
"ipv4": [],
"ipv6": [
{
"address": "2001:db8::",
"network_prefix": 8,
"flowinfo": 1,
"scope_id": 1,
}
],
"name": "eth0",
},
{
"auto": False,
"index": 0,
"default": False,
"enabled": False,
"ipv4": [{"address": "127.0.0.1", "network_prefix": 8}],
"ipv6": [],
"name": "lo0",
},
{
"auto": True,
"index": 2,
"default": True,
"enabled": True,
"ipv4": [{"address": "192.168.1.5", "network_prefix": 23}],
"ipv6": [],
"name": "eth1",
},
{
"auto": False,
"index": 3,
"default": False,
"enabled": True,
"ipv4": [{"address": "169.254.3.2", "network_prefix": 16}],
"ipv6": [],
"name": "vtun0",
},
]
await ws_client.send_json(
{"id": 2, "type": "network/configure", "config": {ATTR_CONFIGURED_ADAPTERS: []}}
)
response = await ws_client.receive_json()
assert response["result"][ATTR_CONFIGURED_ADAPTERS] == []
await ws_client.send_json({"id": 3, "type": "network"})
response = await ws_client.receive_json()
assert response["result"][ATTR_CONFIGURED_ADAPTERS] == []
assert response["result"][ATTR_ADAPTERS] == [
{
"auto": False,
"index": 1,
"default": False,
"enabled": False,
"ipv4": [],
"ipv6": [
{
"address": "2001:db8::",
"network_prefix": 8,
"flowinfo": 1,
"scope_id": 1,
}
],
"name": "eth0",
},
{
"auto": False,
"index": 0,
"default": False,
"enabled": False,
"ipv4": [{"address": "127.0.0.1", "network_prefix": 8}],
"ipv6": [],
"name": "lo0",
},
{
"auto": True,
"index": 2,
"default": True,
"enabled": True,
"ipv4": [{"address": "192.168.1.5", "network_prefix": 23}],
"ipv6": [],
"name": "eth1",
},
{
"auto": False,
"index": 3,
"default": False,
"enabled": False,
"ipv4": [{"address": "169.254.3.2", "network_prefix": 16}],
"ipv6": [],
"name": "vtun0",
},
]
async def test_async_get_source_ip_matching_interface(hass, hass_storage):
"""Test getting the source ip address with interface matching."""
hass_storage[STORAGE_KEY] = {
"version": STORAGE_VERSION,
"key": STORAGE_KEY,
"data": {ATTR_CONFIGURED_ADAPTERS: ["eth1"]},
}
with patch(
"homeassistant.components.network.util.ifaddr.get_adapters",
return_value=_generate_mock_adapters(),
), patch(
"homeassistant.components.network.util.socket.socket",
return_value=_mock_socket(["192.168.1.5"]),
):
assert await async_setup_component(hass, network.DOMAIN, {network.DOMAIN: {}})
await hass.async_block_till_done()
assert await network.async_get_source_ip(hass, MDNS_TARGET_IP) == "192.168.1.5"
async def test_async_get_source_ip_interface_not_match(hass, hass_storage):
"""Test getting the source ip address with interface does not match."""
hass_storage[STORAGE_KEY] = {
"version": STORAGE_VERSION,
"key": STORAGE_KEY,
"data": {ATTR_CONFIGURED_ADAPTERS: ["vtun0"]},
}
with patch(
"homeassistant.components.network.util.ifaddr.get_adapters",
return_value=_generate_mock_adapters(),
), patch(
"homeassistant.components.network.util.socket.socket",
return_value=_mock_socket(["192.168.1.5"]),
):
assert await async_setup_component(hass, network.DOMAIN, {network.DOMAIN: {}})
await hass.async_block_till_done()
assert await network.async_get_source_ip(hass, MDNS_TARGET_IP) == "169.254.3.2"
async def test_async_get_source_ip_cannot_determine_target(hass, hass_storage):
"""Test getting the source ip address when getsockname fails."""
hass_storage[STORAGE_KEY] = {
"version": STORAGE_VERSION,
"key": STORAGE_KEY,
"data": {ATTR_CONFIGURED_ADAPTERS: ["eth1"]},
}
with patch(
"homeassistant.components.network.util.ifaddr.get_adapters",
return_value=_generate_mock_adapters(),
), patch(
"homeassistant.components.network.util.socket.socket",
return_value=_mock_socket([None]),
):
assert await async_setup_component(hass, network.DOMAIN, {network.DOMAIN: {}})
await hass.async_block_till_done()
assert await network.async_get_source_ip(hass, MDNS_TARGET_IP) == "192.168.1.5"
|
|
from __future__ import absolute_import, print_function, division
import os.path
import urwid
import netlib.http.url
from mitmproxy.console import common
from mitmproxy.console import pathedit
from mitmproxy.console import signals
from netlib import human
class ActionBar(urwid.WidgetWrap):
def __init__(self):
urwid.WidgetWrap.__init__(self, None)
self.clear()
signals.status_message.connect(self.sig_message)
signals.status_prompt.connect(self.sig_prompt)
signals.status_prompt_path.connect(self.sig_path_prompt)
signals.status_prompt_onekey.connect(self.sig_prompt_onekey)
self.last_path = ""
self.prompting = False
self.onekey = False
self.pathprompt = False
def sig_message(self, sender, message, expire=None):
if self.prompting:
return
w = urwid.Text(message)
self._w = w
if expire:
def cb(*args):
if w == self._w:
self.clear()
signals.call_in.send(seconds=expire, callback=cb)
def prep_prompt(self, p):
return p.strip() + ": "
def sig_prompt(self, sender, prompt, text, callback, args=()):
signals.focus.send(self, section="footer")
self._w = urwid.Edit(self.prep_prompt(prompt), text or "")
self.prompting = (callback, args)
def sig_path_prompt(self, sender, prompt, callback, args=()):
signals.focus.send(self, section="footer")
self._w = pathedit.PathEdit(
self.prep_prompt(prompt),
os.path.dirname(self.last_path)
)
self.pathprompt = True
self.prompting = (callback, args)
def sig_prompt_onekey(self, sender, prompt, keys, callback, args=()):
"""
Keys are a set of (word, key) tuples. The appropriate key in the
word is highlighted.
"""
signals.focus.send(self, section="footer")
prompt = [prompt, " ("]
mkup = []
for i, e in enumerate(keys):
mkup.extend(common.highlight_key(e[0], e[1]))
if i < len(keys) - 1:
mkup.append(",")
prompt.extend(mkup)
prompt.append(")? ")
self.onekey = set(i[1] for i in keys)
self._w = urwid.Edit(prompt, "")
self.prompting = (callback, args)
def selectable(self):
return True
def keypress(self, size, k):
if self.prompting:
if k == "esc":
self.prompt_done()
elif self.onekey:
if k == "enter":
self.prompt_done()
elif k in self.onekey:
self.prompt_execute(k)
elif k == "enter":
self.prompt_execute(self._w.get_edit_text())
else:
if common.is_keypress(k):
self._w.keypress(size, k)
else:
return k
def clear(self):
self._w = urwid.Text("")
self.prompting = False
def prompt_done(self):
self.prompting = False
self.onekey = False
self.pathprompt = False
signals.status_message.send(message="")
signals.focus.send(self, section="body")
def prompt_execute(self, txt):
if self.pathprompt:
self.last_path = txt
p, args = self.prompting
self.prompt_done()
msg = p(txt, *args)
if msg:
signals.status_message.send(message=msg, expire=1)
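# A minimal sketch of driving the one-key prompt handled above; it assumes the
# blinker-style send() used with these signals elsewhere in the console
# package. The prompt text, keys and callback are purely illustrative.
def example_quit_prompt(sender):
    def _on_key(key):
        if key == "y":
            return "quitting"
    signals.status_prompt_onekey.send(
        sender,
        prompt="Quit",
        keys=(("yes", "y"), ("no", "n")),
        callback=_on_key,
    )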
class StatusBar(urwid.WidgetWrap):
def __init__(self, master, helptext):
# type: (mitmproxy.console.master.ConsoleMaster, object) -> None
self.master = master
self.helptext = helptext
self.ab = ActionBar()
self.ib = urwid.WidgetWrap(urwid.Text(""))
super(StatusBar, self).__init__(urwid.Pile([self.ib, self.ab]))
signals.update_settings.connect(self.sig_update_settings)
signals.flowlist_change.connect(self.sig_update_settings)
master.options.changed.connect(self.sig_update_settings)
self.redraw()
def sig_update_settings(self, sender):
self.redraw()
def keypress(self, *args, **kwargs):
return self.ab.keypress(*args, **kwargs)
def get_status(self):
r = []
if len(self.master.options.setheaders):
r.append("[")
r.append(("heading_key", "H"))
r.append("eaders]")
if len(self.master.options.replacements):
r.append("[")
r.append(("heading_key", "R"))
r.append("eplacing]")
if self.master.client_playback:
r.append("[")
r.append(("heading_key", "cplayback"))
r.append(":%s to go]" % self.master.client_playback.count())
if self.master.server_playback:
r.append("[")
r.append(("heading_key", "splayback"))
if self.master.nopop:
r.append(":%s in file]" % self.master.server_playback.count())
else:
r.append(":%s to go]" % self.master.server_playback.count())
if self.master.options.ignore_hosts:
r.append("[")
r.append(("heading_key", "I"))
r.append("gnore:%d]" % len(self.master.options.ignore_hosts))
if self.master.options.tcp_hosts:
r.append("[")
r.append(("heading_key", "T"))
r.append("CP:%d]" % len(self.master.options.tcp_hosts))
if self.master.state.intercept_txt:
r.append("[")
r.append(("heading_key", "i"))
r.append(":%s]" % self.master.state.intercept_txt)
if self.master.state.limit_txt:
r.append("[")
r.append(("heading_key", "l"))
r.append(":%s]" % self.master.state.limit_txt)
if self.master.state.mark_filter:
r.append("[")
r.append(("heading_key", "Marked Flows"))
r.append("]")
if self.master.options.stickycookie:
r.append("[")
r.append(("heading_key", "t"))
r.append(":%s]" % self.master.options.stickycookie)
if self.master.options.stickyauth:
r.append("[")
r.append(("heading_key", "u"))
r.append(":%s]" % self.master.options.stickyauth)
if self.master.state.default_body_view.name != "Auto":
r.append("[")
r.append(("heading_key", "M"))
r.append(":%s]" % self.master.state.default_body_view.name)
opts = []
if self.master.options.anticache:
opts.append("anticache")
if self.master.options.anticomp:
opts.append("anticomp")
if self.master.options.showhost:
opts.append("showhost")
if not self.master.options.refresh_server_playback:
opts.append("norefresh")
if self.master.options.kill:
opts.append("killextra")
if self.master.options.no_upstream_cert:
opts.append("no-upstream-cert")
if self.master.state.follow_focus:
opts.append("following")
if self.master.stream_large_bodies:
opts.append(
"stream:%s" % human.pretty_size(
self.master.stream_large_bodies.max_size
)
)
if opts:
r.append("[%s]" % (":".join(opts)))
if self.master.options.mode in ["reverse", "upstream"]:
dst = self.master.server.config.upstream_server
r.append("[dest:%s]" % netlib.http.url.unparse(
dst.scheme,
dst.address.host,
dst.address.port
))
if self.master.options.scripts:
r.append("[")
r.append(("heading_key", "s"))
r.append("cripts:%s]" % len(self.master.options.scripts))
if self.master.options.outfile:
r.append("[W:%s]" % self.master.options.outfile[0])
return r
def redraw(self):
fc = self.master.state.flow_count()
if self.master.state.focus is None:
offset = 0
else:
offset = min(self.master.state.focus + 1, fc)
t = [
('heading', ("[%s/%s]" % (offset, fc)).ljust(9))
]
if self.master.server.bound:
host = self.master.server.address.host
if host == "0.0.0.0":
host = "*"
boundaddr = "[%s:%s]" % (host, self.master.server.address.port)
else:
boundaddr = ""
t.extend(self.get_status())
status = urwid.AttrWrap(urwid.Columns([
urwid.Text(t),
urwid.Text(
[
self.helptext,
boundaddr
],
align="right"
),
]), "heading")
self.ib._w = status
def update(self, text):
self.helptext = text
self.redraw()
self.master.loop.draw_screen()
def selectable(self):
return True
|
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for common methods in strategy classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from tensorflow.python.distribute import multi_worker_test_base
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.distribute import strategy_test_lib
from tensorflow.python.distribute import test_util
from tensorflow.python.distribute.collective_all_reduce_strategy import CollectiveAllReduceStrategy
from tensorflow.python.distribute.tpu_strategy import TPUStrategy
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.util import nest
@combinations.generate(
combinations.combine(
strategy=[
strategy_combinations.multi_worker_mirrored_2x1_cpu,
strategy_combinations.multi_worker_mirrored_2x1_gpu,
] + strategy_combinations.all_strategies,
mode=['eager']))
class StrategyTest(test.TestCase, parameterized.TestCase):
def testCaptureReplicaId(self, strategy):
m = {}
@def_function.function
def f():
return ds_context.get_replica_context().replica_id_in_sync_group
@def_function.function
def g():
# Make g() a stateful function so it's traced twice.
if m.get('v', None) is None:
m['v'] = variables.Variable(0.)
return strategy.run(f)
g()
@combinations.generate(
combinations.combine(
strategy=[
strategy_combinations.multi_worker_mirrored_2x1_cpu,
strategy_combinations.multi_worker_mirrored_2x1_gpu,
] + strategy_combinations.all_strategies,
mode=['eager']))
class ReduceTest(test.TestCase, parameterized.TestCase):
def testBasic(self, strategy):
per_replica_value = strategy.experimental_distribute_values_from_function(
lambda _: array_ops.ones((), dtypes.float32))
def fn_eager():
return strategy.reduce(
reduce_util.ReduceOp.SUM, value=per_replica_value, axis=None)
fn_graph = def_function.function(fn_eager)
# Run reduce under the strategy scope to explicitly enter
# strategy default_device scope.
with strategy.scope():
self.assertEqual(fn_eager().numpy(), 1.0 * strategy.num_replicas_in_sync)
self.assertEqual(fn_graph().numpy(), 1.0 * strategy.num_replicas_in_sync)
# Run reduce without a strategy scope to implicitly enter
# strategy default_device scope.
self.assertEqual(fn_eager().numpy(), 1.0 * strategy.num_replicas_in_sync)
self.assertEqual(fn_graph().numpy(), 1.0 * strategy.num_replicas_in_sync)
def testAxis(self, strategy):
@def_function.function
def fn():
return constant_op.constant([1., 2.])
x = strategy.run(fn)
x_m = strategy.reduce(reduce_util.ReduceOp.MEAN, x, axis=0)
self.assertEqual(1.5, x_m)
x_s = strategy.reduce(reduce_util.ReduceOp.SUM, x, axis=0)
self.assertEqual(3 * strategy.num_replicas_in_sync, x_s)
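# Note on the arithmetic asserted in testAxis above: every replica returns the
# tensor [1., 2.], so reducing with MEAN over axis=0 averages both across
# replicas and along the axis, giving 1.5, while SUM adds within the tensor
# (1 + 2 = 3) and then across replicas, giving 3 * num_replicas_in_sync.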
@combinations.generate(
combinations.combine(
strategy=[
strategy_combinations.default_strategy,
strategy_combinations.one_device_strategy,
strategy_combinations.one_device_strategy_gpu,
strategy_combinations.multi_worker_mirrored_2x2_gpu,
strategy_combinations.multi_worker_mirrored_2x1_cpu,
strategy_combinations.multi_worker_mirrored_2x1_gpu,
],
mode=['eager'],
pure_eager=[True, False]))
class GatherTest(test.TestCase, parameterized.TestCase):
def _gather_same_shape_and_verify(self, value_on_replica, axis, pure_eager,
strategy):
distributed_values = strategy.experimental_distribute_values_from_function(
lambda _: array_ops.identity(value_on_replica))
def run():
return strategy._gather(distributed_values, axis=axis)
if not pure_eager:
run = def_function.function(run)
all_results = [
value_on_replica for _ in range(strategy.num_replicas_in_sync)
]
expected_result = array_ops.concat(all_results, axis=axis)
self.assertAllEqual(run().numpy(), expected_result)
def testGatherPerReplicaDense1D0Axis(self, strategy, pure_eager):
"""A DistributedValues object with two tensors of shape [3] on each replica gathers to a tensor of [6]."""
single_value = constant_op.constant([1, 2, 3])
axis = 0
self._gather_same_shape_and_verify(single_value, axis, pure_eager, strategy)
def testGatherPerReplicaDense2D0Axis(self, strategy, pure_eager):
"""A DistributedValues object with two tensors of [1, 3] on each replica gathers along 0th dim to a tensor of [2, 3]."""
single_value = constant_op.constant([[1, 2, 3]])
axis = 0
self._gather_same_shape_and_verify(single_value, axis, pure_eager, strategy)
def testGatherPerReplicaDense2D1Axis(self, strategy, pure_eager):
"""A DistributedValues object with two tensors of [1, 3] on each replica gathers along 1st dim to a tensor of [1, 6]."""
single_value = constant_op.constant([[1, 2, 3]])
axis = 1
self._gather_same_shape_and_verify(single_value, axis, pure_eager, strategy)
def testGatherPerReplicaDense3D0Axis(self, strategy, pure_eager):
"""A DistributedValues object with two tensors of [1, 2, 2] on each replica gathers along 0th dim to a tensor of [2, 2, 2]."""
single_value = constant_op.constant([[[1, 2], [1, 2]]])
axis = 0
self._gather_same_shape_and_verify(single_value, axis, pure_eager, strategy)
def testGatherPerReplicaDense3D1Axis(self, strategy, pure_eager):
"""A DistributedValues object with two tensors of [1, 2, 2] on each replica gathers along 1nd dimension to a tensor of [1, 4, 2]."""
single_value = constant_op.constant([[[1, 2], [1, 2]]])
axis = 1
self._gather_same_shape_and_verify(single_value, axis, pure_eager, strategy)
def testGatherPerReplicaDense3D2Axis(self, strategy, pure_eager):
"""A DistributedValues object with two tensors of [1, 2, 2] on each replica gathers along 2nd dimension to a tensor of [1, 2, 4]."""
single_value = constant_op.constant([[[1, 2], [1, 2]]])
axis = 2
self._gather_same_shape_and_verify(single_value, axis, pure_eager, strategy)
def testGatherDiffShapeAtAxis0(self, strategy, pure_eager):
"""Different `Axis`-th (0) dimension: shape [1, 1], [2, 1] -> [3, 1]."""
def value_fn(ctx):
return constant_op.constant(
1, shape=(ctx.replica_id_in_sync_group + 1, 1))
distributed_values = strategy.experimental_distribute_values_from_function(
value_fn)
axis = 0
def run():
return strategy._gather(distributed_values, axis=axis)
if not pure_eager:
run = def_function.function(run)
if strategy.num_replicas_in_sync == 1:
expected_result = constant_op.constant(1, shape=(1, 1))
elif strategy.num_replicas_in_sync == 2:
expected_result = constant_op.constant(1, shape=(3, 1))
elif strategy.num_replicas_in_sync == 4:
expected_result = constant_op.constant(1, shape=(10, 1))
else:
# should follow expected_result = constant_op.constant(
# 1, shape=(sum(range(strategy.num_replicas_in_sync + 1)), 1))
      raise ValueError('Add your own expected result according to num_replicas_in_sync')
self.assertAllEqual(run().numpy(), expected_result)
def testGatherDiffShapeAtAxis1(self, strategy, pure_eager):
"""Different `Axis`-th (non-0) dimension: shape [1, 1], [1, 2] -> [1, 3]."""
def value_fn(ctx):
return constant_op.constant(
1, shape=(1, ctx.replica_id_in_sync_group + 1))
distributed_values = strategy.experimental_distribute_values_from_function(
value_fn)
axis = 1
def run():
return strategy._gather(distributed_values, axis=axis)
if not pure_eager:
run = def_function.function(run)
if strategy.num_replicas_in_sync == 1:
expected_result = constant_op.constant(1, shape=(1, 1))
elif strategy.num_replicas_in_sync == 2:
expected_result = constant_op.constant(1, shape=(1, 3))
elif strategy.num_replicas_in_sync == 4:
expected_result = constant_op.constant(1, shape=(1, 10))
else:
# should follow expected_result = constant_op.constant(
# 1, shape=(1, sum(range(strategy.num_replicas_in_sync + 1))))
      raise ValueError('Add your own expected result according to num_replicas_in_sync')
self.assertAllEqual(run().numpy(), expected_result)
def testGatherRaiseDiffShapeAtNonAxis(self, strategy, pure_eager):
"""Different at non-`axis`-th dimension : [1, 1], [1, 2], 0th -> raise error."""
if isinstance(strategy, CollectiveAllReduceStrategy
) and _get_num_replicas_per_client(strategy) > 1:
self.skipTest('b/167331966')
if strategy.num_replicas_in_sync <= 1:
self.skipTest('Test for more than 1 replica only.')
def value_fn(ctx):
return constant_op.constant(
1, shape=(1, ctx.replica_id_in_sync_group + 1))
distributed_values = strategy.experimental_distribute_values_from_function(
value_fn)
axis = 0
def run():
return strategy._gather(distributed_values, axis=axis)
error_message = 'Shape mismatch'
if not pure_eager:
run = def_function.function(run)
with self.assertRaisesRegex(errors.InvalidArgumentError, error_message):
run()
def testGatherRaiseSparsePerReplicaMultiWorker(self, strategy, pure_eager):
if strategy.num_replicas_in_sync != 2:
self.skipTest('Test for two replicas.')
dense_shape = [5, 2]
if multi_worker_test_base.get_task_type() == 'chief':
t0 = _make_indexed_slices(
values=[[1., 2.]], indices=[2], dense_shape=dense_shape)
if multi_worker_test_base.get_task_type() == 'worker':
t0 = _make_indexed_slices(
values=[[3., 4.], [5., 6.]], indices=[1, 3], dense_shape=dense_shape)
def run(value):
return strategy._gather(value, axis=0)
with self.assertRaisesRegex(
NotImplementedError,
r'gather/all_gather does not support IndexedSlices'):
if pure_eager:
run(t0)
else:
def_function.function(run)(t0)
def testGatherRaiseDifferentRank(self, strategy, pure_eager):
"""Different rank: [1,], [1, 2] -> raise error."""
if strategy.num_replicas_in_sync <= 1:
      self.skipTest('Test for more than 1 replica.')
if isinstance(strategy, CollectiveAllReduceStrategy
) and _get_num_replicas_per_client(strategy) > 1:
self.skipTest('b/167331966')
def value_fn(ctx):
return array_ops.ones(shape=(range(1, ctx.replica_id_in_sync_group + 2)))
distributed_values = strategy.experimental_distribute_values_from_function(
value_fn)
axis = 0
def run():
return strategy._gather(distributed_values, axis=axis)
error_message = 'Shape mismatch'
if not pure_eager:
run = def_function.function(run)
with self.assertRaisesRegex(errors.InvalidArgumentError, error_message):
run()
@combinations.generate(
combinations.combine(
strategy=[
strategy_combinations.default_strategy,
strategy_combinations.one_device_strategy,
strategy_combinations.one_device_strategy_gpu,
strategy_combinations.multi_worker_mirrored_2x2_gpu,
strategy_combinations.multi_worker_mirrored_2x1_cpu,
strategy_combinations.multi_worker_mirrored_2x1_gpu,
],
mode=['eager'],
pure_eager=[True, False]))
class AllGatherTest(test.TestCase, parameterized.TestCase):
def _all_gather_same_shape_and_verify(self, value_on_replica, axis,
pure_eager, strategy):
per_replica_value = strategy.experimental_distribute_values_from_function(
lambda _: array_ops.identity(value_on_replica))
def replica_fn(per_replica_value):
ctx = ds_context.get_replica_context()
local_value = array_ops.identity(per_replica_value)
return ctx._all_gather(local_value, axis=axis)
if not pure_eager:
replica_fn = def_function.function(replica_fn)
result = strategy.experimental_local_results(
strategy.run(replica_fn, args=(per_replica_value,)))
all_value = [value_on_replica for _ in range(strategy.num_replicas_in_sync)]
expect = array_ops.concat(all_value, axis=axis)
expected_result = [expect] * _get_num_replicas_per_client(strategy)
self.assertAllClose(result, expected_result)
def testAllGatherPerReplicaDense1D0Axis(self, strategy, pure_eager):
"""all_gather(..., axis=0,...) a DistributedValues with a Tensor of shape (3,) on two replica returns a PerReplica of tensor(s) with shape (6,)."""
single_value = constant_op.constant([1, 2, 3], dtype=dtypes.float32)
axis = 0
self._all_gather_same_shape_and_verify(single_value, axis, pure_eager,
strategy)
def testAllGatherPerReplicaDense2D0Axis(self, strategy, pure_eager):
"""all_gather(..., axis=0,...) a DistributedValues with a Tensor of shape (1,3) on two replica returns PerReplica of tensor(s) with shape (2,3)."""
single_value = constant_op.constant([[1, 2, 3]])
axis = 0
self._all_gather_same_shape_and_verify(single_value, axis, pure_eager,
strategy)
def testAllGatherPerReplicaDense2D1Axis(self, strategy, pure_eager):
"""all_gather(..., axis=1,...) a DistributedValues with a Tensor of shape (1,3) on two replica returns PerReplica of tensor(s) with shape (1,6)."""
single_value = constant_op.constant([[1, 2, 3]])
axis = 1
self._all_gather_same_shape_and_verify(single_value, axis, pure_eager,
strategy)
def testAllGatherPerReplicaDense3D0Axis(self, strategy, pure_eager):
"""all_gather(..., axis=0,...) a DistributedValues with a Tensor of shape (1,2,2) on two replica returns PerReplica of tensor(s) with shape (2,2,2)."""
single_value = constant_op.constant([[[1, 2], [1, 2]]])
axis = 0
self._all_gather_same_shape_and_verify(single_value, axis, pure_eager,
strategy)
def testAllGatherPerReplicaDense3D1Axis(self, strategy, pure_eager):
"""all_gather(..., axis=1,...) a DistributedValues with a Tensor of shape (1,2,2) on two replica returns PerReplica of tensor(s) with shape (1,4,2)."""
single_value = constant_op.constant([[[1, 2], [1, 2]]])
axis = 1
self._all_gather_same_shape_and_verify(single_value, axis, pure_eager,
strategy)
def testAllGatherPerReplicaDense3D2Axis(self, strategy, pure_eager):
"""all_gather(..., axis=2,...) a DistributedValues with a Tensor of shape (1,2,2) on two replica returns PerReplica of tensor(s) with shape (1,2,4)."""
single_value = constant_op.constant([[[1, 2], [1, 2]]])
axis = 2
self._all_gather_same_shape_and_verify(single_value, axis, pure_eager,
strategy)
def testAllGatherDiffShapeAtAxis0(self, strategy, pure_eager):
"""Different `Axis==0`-th dimension: shape [1, 1], [2, 1] -> [3, 1]."""
def value_fn(ctx):
return constant_op.constant(
1, shape=(ctx.replica_id_in_sync_group + 1, 1))
per_replica_value = strategy.experimental_distribute_values_from_function(
value_fn)
if strategy.num_replicas_in_sync == 1:
expect = constant_op.constant(1, shape=(1, 1))
elif strategy.num_replicas_in_sync == 2:
expect = constant_op.constant(1, shape=(3, 1))
elif strategy.num_replicas_in_sync == 4:
expect = constant_op.constant(1, shape=(10, 1))
else:
# should follow expect = constant_op.constant(
# 1, shape=(sum(range(strategy.num_replicas_in_sync + 1)), 1))
      raise ValueError('Add your own expected result according to num_replicas_in_sync')
def run(value):
value_identity = array_ops.identity(value)
ctx = ds_context.get_replica_context()
return ctx._all_gather(value_identity, axis=0)
if not pure_eager:
run = def_function.function(run)
expected_result = [expect] * _get_num_replicas_per_client(strategy)
result = strategy.experimental_local_results(
strategy.run(run, args=(per_replica_value,)))
self.assertAllEqual(result, expected_result)
def testAllGatherDiffShapeAtAxis1(self, strategy, pure_eager):
"""Different `Axis`-th (not 0th) dimension: shape [1, 1], [1, 2] -> [1, 3]."""
def value_fn(ctx):
return constant_op.constant(
1, shape=(1, ctx.replica_id_in_sync_group + 1))
per_replica_value = strategy.experimental_distribute_values_from_function(
value_fn)
if strategy.num_replicas_in_sync == 1:
expect = constant_op.constant(1, shape=(1, 1))
elif strategy.num_replicas_in_sync == 2:
expect = constant_op.constant(1, shape=(1, 3))
elif strategy.num_replicas_in_sync == 4:
expect = constant_op.constant(1, shape=(1, 10))
else:
# should follow expect = constant_op.constant(
# 1, shape=(1, sum(range(strategy.num_replicas_in_sync + 1))))
      raise ValueError('Add your own expect according to num_replicas_in_sync')
def run(value):
value_identity = array_ops.identity(value)
ctx = ds_context.get_replica_context()
return ctx._all_gather(value_identity, axis=1)
if not pure_eager:
run = def_function.function(run)
expected_result = [expect] * _get_num_replicas_per_client(strategy)
result = strategy.experimental_local_results(
strategy.run(run, args=(per_replica_value,)))
self.assertAllEqual(result, expected_result)
def testAllGatherNest(self, strategy, pure_eager):
axis = 1
def value_fn(ctx):
value = constant_op.constant(
1, shape=(1, ctx.replica_id_in_sync_group + 1))
return value
per_replica_value = strategy.experimental_distribute_values_from_function(
value_fn)
if strategy.num_replicas_in_sync == 1:
expect_1 = constant_op.constant(1, shape=(1, 1))
elif strategy.num_replicas_in_sync == 2:
expect_1 = constant_op.constant(1, shape=(1, 3))
elif strategy.num_replicas_in_sync == 4:
expect_1 = constant_op.constant(1, shape=(1, 10))
else:
# should follow expect_1 = constant_op.constant(
# 1, shape=(1, sum(range(strategy.num_replicas_in_sync + 1))))
      raise ValueError('Add your own expect according to num_replicas_in_sync')
expected_per_replica_1 = [expect_1] * _get_num_replicas_per_client(strategy)
value_2 = constant_op.constant([[[1, 2], [1, 2]]])
if strategy.num_replicas_in_sync == 1:
expect_2 = constant_op.constant([[[1, 2], [1, 2]]])
elif strategy.num_replicas_in_sync == 2:
expect_2 = constant_op.constant([[[1, 2], [1, 2], [1, 2], [1, 2]]])
elif strategy.num_replicas_in_sync == 4:
expect_2 = constant_op.constant([[[1, 2], [1, 2], [1, 2], [1, 2], [1, 2],
[1, 2], [1, 2], [1, 2]]])
else:
# should follow expect_2 = array_ops.concat(
# [value_2 for _ in range(strategy.num_replicas_in_sync)], axis=axis)
      raise ValueError('Add your own expect according to num_replicas_in_sync')
expected_per_replica_2 = [expect_2] * _get_num_replicas_per_client(strategy)
def run(value):
value_1 = array_ops.identity(value)
value_3 = array_ops.identity(value_2)
ctx = ds_context.get_replica_context()
return ctx._all_gather([value_1, value_3], axis=axis)
if not pure_eager:
run = def_function.function(run)
result = strategy.run(run, args=(per_replica_value,))
self.assertAllEqual(
strategy.experimental_local_results(result[0]), expected_per_replica_1)
self.assertAllEqual(
strategy.experimental_local_results(result[1]), expected_per_replica_2)
def testAllGatherNest1D0Axis(self, strategy, pure_eager):
"""all_gather(..., axis=0,...) a nest of DistributedValues."""
single_value = constant_op.constant([1, 2, 3])
axis = 0
def run():
value_identity = array_ops.identity(single_value)
ctx = ds_context.get_replica_context()
return ctx._all_gather([value_identity, value_identity], axis=axis)
if not pure_eager:
run = def_function.function(run)
all_value = [single_value for _ in range(strategy.num_replicas_in_sync)]
expect = array_ops.concat(all_value, axis=axis)
expected_per_replica = [expect] * _get_num_replicas_per_client(strategy)
result = strategy.run(run)
for gathered_result in result:
self.assertAllEqual(
strategy.experimental_local_results(gathered_result),
expected_per_replica)
def testAllGatherRaiseDiffShapeAtNonAxis(self, strategy, pure_eager):
"""Different at non-`axis`-th dimension : [2, 1], [1, 1], all_gather(...axis=1...) -> raise error."""
if isinstance(strategy, CollectiveAllReduceStrategy
) and _get_num_replicas_per_client(strategy) > 1:
self.skipTest('b/167331966')
if strategy.num_replicas_in_sync <= 1:
self.skipTest('Test for more than 1 replica only.')
def value_fn(ctx):
return constant_op.constant(
1, shape=(1, ctx.replica_id_in_sync_group + 1))
per_replica_value = strategy.experimental_distribute_values_from_function(
value_fn)
def run(value):
value_identity = array_ops.identity(value)
ctx = ds_context.get_replica_context()
return ctx._all_gather(value_identity, axis=0)
if not pure_eager:
run = def_function.function(run)
with self.assertRaisesRegex(errors.InvalidArgumentError, r'Shape mismatch'):
strategy.run(run, args=(per_replica_value,))
def testAllGatherRaiseSparsePerReplica(self, strategy, pure_eager):
# all_gather supports sparse when using tf.function, because sparse tensors
# are converted to dense in
# third_party/tensorflow/python/ops/custom_gradient.py _graph_mode_decorator
if strategy.num_replicas_in_sync != 2:
self.skipTest('Test for two replicas.')
dense_shape = [5, 2]
t0 = _make_indexed_slices(
values=[[1., 2.]], indices=[2], dense_shape=dense_shape)
def replica_fn(value):
ctx = ds_context.get_replica_context()
return ctx._all_gather(value, axis=0)
with self.assertRaisesRegex(
NotImplementedError,
r'gather/all_gather does not support IndexedSlices'):
strategy.run(replica_fn, args=(t0,))
def testAllGatherRaiseDifferentRank(self, strategy, pure_eager):
"""Different rank: [1,], [1, 2] -> raise error."""
if strategy.num_replicas_in_sync <= 1:
      self.skipTest('Test for more than 1 replica.')
if isinstance(strategy, CollectiveAllReduceStrategy
) and _get_num_replicas_per_client(strategy) > 1:
self.skipTest('b/167331966')
def value_fn(ctx):
return array_ops.ones(shape=(range(1, ctx.replica_id_in_sync_group + 2)))
per_replica_value = strategy.experimental_distribute_values_from_function(
value_fn)
def run(value):
value_identity = array_ops.identity(value)
ctx = ds_context.get_replica_context()
return ctx._all_gather(value_identity, axis=0)
error_message = 'Shape mismatch'
if not pure_eager:
run = def_function.function(run)
with self.assertRaisesRegex(errors.InvalidArgumentError, error_message):
strategy.run(run, args=(per_replica_value,))
def _make_indexed_slices(values, indices, dense_shape):
tensor = ops.IndexedSlices(
values=constant_op.constant(values),
indices=constant_op.constant(indices),
dense_shape=constant_op.constant(dense_shape))
return tensor
def _get_num_replicas_per_client(strategy):
if isinstance(strategy, CollectiveAllReduceStrategy):
resolver = strategy.cluster_resolver
return max(nest.flatten(resolver.num_accelerators())[0], 1)
else:
return strategy.num_replicas_in_sync
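# Editor's note (descriptive, based on the helper above): for
# CollectiveAllReduceStrategy the per-client replica count is read from the
# cluster resolver's accelerator count, clamped to at least 1 so that CPU-only
# workers still count as one replica; single-client strategies simply report
# `num_replicas_in_sync`.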
@combinations.generate(
combinations.combine(
strategy=[
strategy_combinations.multi_worker_mirrored_2x1_cpu,
strategy_combinations.multi_worker_mirrored_2x1_gpu,
],
mode=['eager']))
class DistributedCollectiveAllReduceStrategyTest(
strategy_test_lib.DistributionTestBase,
parameterized.TestCase):
def testDatasetFromFunction(self, strategy):
def dataset_fn(input_context):
global_batch_size = 10
batch_size = input_context.get_per_replica_batch_size(global_batch_size)
d = dataset_ops.DatasetV2.range(100).repeat().batch(batch_size)
return d.shard(input_context.num_input_pipelines,
input_context.input_pipeline_id)
expected_sum_on_workers = {'chief': 10, 'worker': 35}
input_iterator = iter(
strategy.distribute_datasets_from_function(dataset_fn))
@def_function.function
def run(iterator):
return strategy.experimental_local_results(iterator.get_next())
result = run(input_iterator)
sum_value = math_ops.reduce_sum(result)
self.assertEqual(
sum_value.numpy(),
expected_sum_on_workers[multi_worker_test_base.get_task_type()])
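  # Worked example (editor's note) for the expected sums above: with two
  # workers, `get_per_replica_batch_size(10)` returns 5, so the dataset is
  # batched into [0..4], [5..9], [10..14], ... before being sharded.  Sharding
  # the *batched* dataset by input pipeline id gives the chief batches 0, 2,
  # ... (first batch [0, 1, 2, 3, 4], sum 10) and the worker batches 1, 3, ...
  # (first batch [5, 6, 7, 8, 9], sum 35).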
def testSimpleInputFromDatasetLastPartialBatch(self, strategy):
global_batch_size = 8
dataset = dataset_ops.DatasetV2.range(14).batch(
global_batch_size, drop_remainder=False)
input_iterator = iter(strategy.experimental_distribute_dataset(dataset))
@def_function.function
def run(input_iterator):
return strategy.run(lambda x: x, args=(next(input_iterator),))
# Let the complete batch go.
run(input_iterator)
# `result` is an incomplete batch
result = run(input_iterator)
expected_data_on_workers = {'chief': [8, 9, 10], 'worker': [11, 12, 13]}
self.assertAllEqual(
result.numpy(),
expected_data_on_workers[multi_worker_test_base.get_task_type()])
def testSimpleInputFromFnLastPartialBatch(self, strategy):
def dataset_fn(input_context):
global_batch_size = 8
batch_size = input_context.get_per_replica_batch_size(global_batch_size)
dataset = dataset_ops.DatasetV2.range(14).batch(
batch_size, drop_remainder=False)
return dataset.shard(input_context.num_input_pipelines,
input_context.input_pipeline_id)
input_iterator = iter(
strategy.distribute_datasets_from_function(dataset_fn))
@def_function.function
def run(input_iterator):
return strategy.run(lambda x: x, args=(next(input_iterator),))
# Let the complete batch go.
run(input_iterator)
# `result` is an incomplete batch
result = run(input_iterator)
expected_data_on_worker = {'chief': [8, 9, 10, 11], 'worker': [12, 13]}
self.assertAllEqual(
result.numpy(),
expected_data_on_worker[multi_worker_test_base.get_task_type()])
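  # Worked example (editor's note) for the expected data above: each worker
  # builds range(14).batch(4) -> [0..3], [4..7], [8..11], [12, 13] and keeps
  # every other *batch* via shard(2, pipeline_id).  The chief therefore sees
  # [0..3] then [8..11], while the worker sees [4..7] then the partial batch
  # [12, 13]; the second `run` call returns the latter two.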
def testReduceHostTensor(self, strategy):
reduced = strategy.reduce(
reduce_util.ReduceOp.SUM, array_ops.identity(1.), axis=None)
self.assertEqual(reduced.numpy(), 2.)
def testReduceToHostTensor(self, strategy):
value = array_ops.identity(1.)
reduced = strategy.extended.reduce_to(reduce_util.ReduceOp.SUM, value,
value)
self.assertEqual(reduced.numpy(), 2.)
def testBatchReduceToHostTensor(self, strategy):
value = array_ops.identity(1.)
reduced = strategy.extended.batch_reduce_to(reduce_util.ReduceOp.SUM,
[(value, value),
(value, value)])
self.assertAllEqual(reduced, [2., 2.])
def testReduceDeviceTensors(self, strategy):
value = strategy.run(lambda: array_ops.identity(1.))
reduced = strategy.reduce(reduce_util.ReduceOp.SUM, value, axis=None)
self.assertEqual(reduced.numpy(), 2.)
def testReduceToDeviceTensors(self, strategy):
value = strategy.run(lambda: array_ops.identity(1.))
reduced = strategy.extended.reduce_to(reduce_util.ReduceOp.SUM, value,
value)
self.assertEqual(reduced.numpy(), 2.)
def testBatchReduceToDeviceTensors(self, strategy):
value = strategy.run(lambda: array_ops.identity(1.))
reduced = strategy.extended.batch_reduce_to(reduce_util.ReduceOp.SUM,
[(value, value),
(value, value)])
self.assertAllEqual(reduced, [2., 2.])
# TODO(crccw): add a test that mixes device and host tensors after multi
# worker strategy combinations can run on a fixed number of GPUs.
class StrategyClusterResolverTest(test.TestCase, parameterized.TestCase):
@combinations.generate(
combinations.combine(
strategy=[strategy_combinations.multi_worker_mirrored_2x1_cpu] +
strategy_combinations.all_strategies,
mode=['eager']))
def testClusterResolverProperty(self, strategy):
# CollectiveAllReduceStrategy and TPUStrategy must have a cluster resolver.
# `None` otherwise.
resolver = strategy.cluster_resolver
if not isinstance(strategy, CollectiveAllReduceStrategy) and not isinstance(
strategy, TPUStrategy):
self.assertIsNone(resolver)
return
with strategy.scope():
self.assertIs(strategy.cluster_resolver, resolver)
self.assertTrue(hasattr(resolver, 'cluster_spec'))
self.assertTrue(hasattr(resolver, 'master'))
self.assertTrue(hasattr(resolver, 'num_accelerators'))
self.assertTrue(hasattr(resolver, 'task_id'))
self.assertTrue(hasattr(resolver, 'task_type'))
if isinstance(strategy, CollectiveAllReduceStrategy):
self.assertEqual(resolver.task_id, 0)
self.assertAllInSet(resolver.task_type, ['chief', 'worker'])
if __name__ == '__main__':
test_util.main()
|
|
"""
General Description
-------------------
These filters compute the local histogram at each pixel, using a sliding window
similar to the method described in [1]_. A histogram is built using a moving
window in order to limit redundant computation. The moving window follows a
snake-like path:
...------------------------\
/--------------------------/
\--------------------------...
The local histogram is updated at each pixel as the structuring element window
moves by, i.e. only those pixels entering and leaving the structuring element
update the local histogram. The histogram size is 8-bit (256 bins) for 8-bit
images and 2- to 16-bit for 16-bit images depending on the maximum value of the
image.
The filter is applied up to the image border; the neighborhood used is
adjusted accordingly. The user may provide a mask image (same size as the
input image) where non-zero values mark the part of the image participating
in the histogram computation. By default the entire image is filtered.
This implementation outperforms grey.dilation for large structuring elements.
The input image can be 8-bit or 16-bit; for 16-bit input images, the number
of histogram bins is determined from the maximum value present in the image.
The result image is 8-/16-bit or double with respect to the input image and
the rank filter operation.
To do
-----
* add simple examples, adapt documentation on existing examples
* add/check existing doc
* adapt tests for each type of filter
References
----------
.. [1] Huang, T., Yang, G., Tang, G., "A fast two-dimensional median
       filtering algorithm", IEEE Transactions on Acoustics, Speech and
       Signal Processing, Feb. 1979, Volume 27, Issue 1, pp. 13-18.
"""
import functools
import numpy as np
from scipy import ndimage as ndi
from ... import img_as_ubyte
from ..._shared.utils import assert_nD, warn
from . import generic_cy
__all__ = ['autolevel', 'bottomhat', 'equalize', 'gradient', 'maximum', 'mean',
           'geometric_mean', 'subtract_mean', 'median', 'minimum', 'modal',
           'enhance_contrast', 'pop', 'sum', 'threshold', 'tophat',
           'noise_filter', 'entropy', 'otsu', 'windowed_histogram']
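# -----------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the public API): a minimal,
# pure-Python 1-D version of the sliding-window histogram idea described in the
# module docstring above.  When the window moves by one pixel, only the pixel
# leaving the window and the pixel entering it update the histogram, instead of
# rebuilding the histogram from scratch.  The helper name
# ``_sliding_histogram_row`` is hypothetical and exists only for illustration.
def _sliding_histogram_row(row, width, n_bins=256):
    """Yield the histogram of every length-``width`` window along ``row``."""
    hist = [0] * n_bins
    for value in row[:width]:          # build the first window completely
        hist[value] += 1
    yield list(hist)
    for leaving, entering in zip(row, row[width:]):
        hist[leaving] -= 1             # pixel leaving the window
        hist[entering] += 1            # pixel entering the window
        yield list(hist)
# Example: ``list(_sliding_histogram_row([0, 1, 1, 2], width=2, n_bins=3))``
# returns ``[[1, 1, 0], [0, 2, 0], [0, 1, 1]]``.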
def _handle_input(image, selem, out, mask, out_dtype=None, pixel_size=1):
assert_nD(image, 2)
if image.dtype not in (np.uint8, np.uint16):
image = img_as_ubyte(image)
selem = np.ascontiguousarray(img_as_ubyte(selem > 0))
image = np.ascontiguousarray(image)
if mask is None:
mask = np.ones(image.shape, dtype=np.uint8)
else:
mask = img_as_ubyte(mask)
mask = np.ascontiguousarray(mask)
if image is out:
raise NotImplementedError("Cannot perform rank operation in place.")
if out is None:
if out_dtype is None:
out_dtype = image.dtype
out = np.empty(image.shape+(pixel_size,), dtype=out_dtype)
else:
if len(out.shape) == 2:
out = out.reshape(out.shape+(pixel_size,))
is_8bit = image.dtype in (np.uint8, np.int8)
if is_8bit:
max_bin = 255
else:
max_bin = max(4, image.max())
bitdepth = int(np.log2(max_bin))
if bitdepth > 10:
warn("Bitdepth of %d may result in bad rank filter "
"performance due to large number of bins." % bitdepth)
return image, selem, out, mask, max_bin
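# Worked example (editor's note): for a uint16 image whose maximum value is
# 4000, ``max_bin`` is 4000 and ``bitdepth == int(np.log2(4000)) == 11``, so
# the performance warning above is emitted; 8-bit images always use
# ``max_bin = 255``.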
def _apply_scalar_per_pixel(func, image, selem, out, mask, shift_x, shift_y,
out_dtype=None):
image, selem, out, mask, max_bin = _handle_input(image, selem, out, mask,
out_dtype)
func(image, selem, shift_x=shift_x, shift_y=shift_y, mask=mask,
out=out, max_bin=max_bin)
return out.reshape(out.shape[:2])
def _apply_vector_per_pixel(func, image, selem, out, mask, shift_x, shift_y,
out_dtype=None, pixel_size=1):
image, selem, out, mask, max_bin = _handle_input(image, selem, out, mask,
out_dtype,
pixel_size=pixel_size)
func(image, selem, shift_x=shift_x, shift_y=shift_y, mask=mask,
out=out, max_bin=max_bin)
return out
def _default_selem(func):
"""Decorator to add a default structuring element to morphology functions.
Parameters
----------
func : function
A morphology function such as erosion, dilation, opening, closing,
white_tophat, or black_tophat.
Returns
-------
func_out : function
        The function, using a default structuring element of same dimension
        as the input image, with full connectivity (a 3x3 square for a 2-D
        image).
"""
@functools.wraps(func)
def func_out(image, selem=None, *args, **kwargs):
if selem is None:
selem = ndi.generate_binary_structure(image.ndim, image.ndim)
return func(image, selem=selem, *args, **kwargs)
return func_out
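# Editor's illustrative usage note: the decorator above lets callers omit
# ``selem``; for a 2-D image the default becomes
# ``ndi.generate_binary_structure(2, 2)``, i.e. a 3x3 square of ones.  A
# hypothetical decorated function would look like::
#
#     @_default_selem
#     def identity_filter(image, selem=None):
#         return image   # ``selem`` is a 3x3 square here when not supplied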
def autolevel(image, selem, out=None, mask=None, shift_x=False, shift_y=False):
"""Auto-level image using local histogram.
This filter locally stretches the histogram of greyvalues to cover the
entire range of values from "white" to "black".
Parameters
----------
image : 2-D array (uint8, uint16)
Input image.
selem : 2-D array
The neighborhood expressed as a 2-D array of 1's and 0's.
out : 2-D array (same dtype as input)
If None, a new array is allocated.
mask : ndarray
Mask array that defines (>0) area of the image included in the local
neighborhood. If None, the complete image is used (default).
shift_x, shift_y : int
Offset added to the structuring element center point. Shift is bounded
to the structuring element sizes (center must be inside the given
structuring element).
Returns
-------
out : 2-D array (same dtype as input image)
Output image.
Examples
--------
>>> from skimage import data
>>> from skimage.morphology import disk
>>> from skimage.filters.rank import autolevel
>>> img = data.camera()
>>> auto = autolevel(img, disk(5))
"""
return _apply_scalar_per_pixel(generic_cy._autolevel, image, selem,
out=out, mask=mask,
shift_x=shift_x, shift_y=shift_y)
def bottomhat(image, selem, out=None, mask=None, shift_x=False, shift_y=False):
"""Local bottom-hat of an image.
This filter computes the morphological closing of the image and then
subtracts the result from the original image.
Parameters
----------
image : 2-D array (uint8, uint16)
Input image.
selem : 2-D array
The neighborhood expressed as a 2-D array of 1's and 0's.
out : 2-D array (same dtype as input)
If None, a new array is allocated.
mask : 2-D array
Mask array that defines (>0) area of the image included in the local
neighborhood. If None, the complete image is used (default).
shift_x, shift_y : int
Offset added to the structuring element center point. Shift is bounded
to the structuring element sizes (center must be inside the given
structuring element).
Returns
-------
out : 2-D array (same dtype as input image)
Output image.
Examples
--------
>>> from skimage import data
>>> from skimage.morphology import disk
>>> from skimage.filters.rank import bottomhat
>>> img = data.camera()
>>> out = bottomhat(img, disk(5))
"""
return _apply_scalar_per_pixel(generic_cy._bottomhat, image, selem,
out=out, mask=mask,
shift_x=shift_x, shift_y=shift_y)
def equalize(image, selem, out=None, mask=None, shift_x=False, shift_y=False):
"""Equalize image using local histogram.
Parameters
----------
image : 2-D array (uint8, uint16)
Input image.
selem : 2-D array
The neighborhood expressed as a 2-D array of 1's and 0's.
out : 2-D array (same dtype as input)
If None, a new array is allocated.
mask : ndarray
Mask array that defines (>0) area of the image included in the local
neighborhood. If None, the complete image is used (default).
shift_x, shift_y : int
Offset added to the structuring element center point. Shift is bounded
to the structuring element sizes (center must be inside the given
structuring element).
Returns
-------
out : 2-D array (same dtype as input image)
Output image.
Examples
--------
>>> from skimage import data
>>> from skimage.morphology import disk
>>> from skimage.filters.rank import equalize
>>> img = data.camera()
>>> equ = equalize(img, disk(5))
"""
return _apply_scalar_per_pixel(generic_cy._equalize, image, selem,
out=out, mask=mask,
shift_x=shift_x, shift_y=shift_y)
def gradient(image, selem, out=None, mask=None, shift_x=False, shift_y=False):
"""Return local gradient of an image (i.e. local maximum - local minimum).
Parameters
----------
image : 2-D array (uint8, uint16)
Input image.
selem : 2-D array
The neighborhood expressed as a 2-D array of 1's and 0's.
out : 2-D array (same dtype as input)
If None, a new array is allocated.
mask : ndarray
Mask array that defines (>0) area of the image included in the local
neighborhood. If None, the complete image is used (default).
shift_x, shift_y : int
Offset added to the structuring element center point. Shift is bounded
to the structuring element sizes (center must be inside the given
structuring element).
Returns
-------
out : 2-D array (same dtype as input image)
Output image.
Examples
--------
>>> from skimage import data
>>> from skimage.morphology import disk
>>> from skimage.filters.rank import gradient
>>> img = data.camera()
>>> out = gradient(img, disk(5))
"""
return _apply_scalar_per_pixel(generic_cy._gradient, image, selem,
out=out, mask=mask,
shift_x=shift_x, shift_y=shift_y)
def maximum(image, selem, out=None, mask=None, shift_x=False, shift_y=False):
"""Return local maximum of an image.
Parameters
----------
image : 2-D array (uint8, uint16)
Input image.
selem : 2-D array
The neighborhood expressed as a 2-D array of 1's and 0's.
out : 2-D array (same dtype as input)
If None, a new array is allocated.
mask : ndarray
Mask array that defines (>0) area of the image included in the local
neighborhood. If None, the complete image is used (default).
shift_x, shift_y : int
Offset added to the structuring element center point. Shift is bounded
to the structuring element sizes (center must be inside the given
structuring element).
Returns
-------
out : 2-D array (same dtype as input image)
Output image.
See also
--------
skimage.morphology.dilation
Notes
-----
The lower algorithm complexity makes `skimage.filters.rank.maximum`
more efficient for larger images and structuring elements.
Examples
--------
>>> from skimage import data
>>> from skimage.morphology import disk
>>> from skimage.filters.rank import maximum
>>> img = data.camera()
>>> out = maximum(img, disk(5))
"""
return _apply_scalar_per_pixel(generic_cy._maximum, image, selem,
out=out, mask=mask,
shift_x=shift_x, shift_y=shift_y)
def mean(image, selem, out=None, mask=None, shift_x=False, shift_y=False):
"""Return local mean of an image.
Parameters
----------
image : 2-D array (uint8, uint16)
Input image.
selem : 2-D array
The neighborhood expressed as a 2-D array of 1's and 0's.
out : 2-D array (same dtype as input)
If None, a new array is allocated.
mask : ndarray
Mask array that defines (>0) area of the image included in the local
neighborhood. If None, the complete image is used (default).
shift_x, shift_y : int
Offset added to the structuring element center point. Shift is bounded
to the structuring element sizes (center must be inside the given
structuring element).
Returns
-------
out : 2-D array (same dtype as input image)
Output image.
Examples
--------
>>> from skimage import data
>>> from skimage.morphology import disk
>>> from skimage.filters.rank import mean
>>> img = data.camera()
>>> avg = mean(img, disk(5))
"""
return _apply_scalar_per_pixel(generic_cy._mean, image, selem, out=out,
mask=mask, shift_x=shift_x, shift_y=shift_y)
def geometric_mean(image, selem, out=None, mask=None,
shift_x=False, shift_y=False):
"""Return local geometric mean of an image.
Parameters
----------
image : 2-D array (uint8, uint16)
Input image.
selem : 2-D array
The neighborhood expressed as a 2-D array of 1's and 0's.
out : 2-D array (same dtype as input)
If None, a new array is allocated.
mask : ndarray
Mask array that defines (>0) area of the image included in the local
neighborhood. If None, the complete image is used (default).
shift_x, shift_y : int
Offset added to the structuring element center point. Shift is bounded
to the structuring element sizes (center must be inside the given
structuring element).
Returns
-------
out : 2-D array (same dtype as input image)
Output image.
Examples
--------
>>> from skimage import data
>>> from skimage.morphology import disk
    >>> from skimage.filters.rank import geometric_mean
>>> img = data.camera()
>>> avg = geometric_mean(img, disk(5))
References
----------
.. [1] Gonzalez, R. C. and Wood, R. E. "Digital Image Processing (3rd Edition)."
Prentice-Hall Inc, 2006.
"""
return _apply_scalar_per_pixel(generic_cy._geometric_mean, image, selem, out=out,
mask=mask, shift_x=shift_x, shift_y=shift_y)
def subtract_mean(image, selem, out=None, mask=None, shift_x=False,
shift_y=False):
"""Return image subtracted from its local mean.
Parameters
----------
image : 2-D array (uint8, uint16)
Input image.
selem : 2-D array
The neighborhood expressed as a 2-D array of 1's and 0's.
out : 2-D array (same dtype as input)
If None, a new array is allocated.
mask : ndarray
Mask array that defines (>0) area of the image included in the local
neighborhood. If None, the complete image is used (default).
shift_x, shift_y : int
Offset added to the structuring element center point. Shift is bounded
to the structuring element sizes (center must be inside the given
structuring element).
Returns
-------
out : 2-D array (same dtype as input image)
Output image.
Examples
--------
>>> from skimage import data
>>> from skimage.morphology import disk
>>> from skimage.filters.rank import subtract_mean
>>> img = data.camera()
>>> out = subtract_mean(img, disk(5))
"""
return _apply_scalar_per_pixel(generic_cy._subtract_mean, image, selem,
out=out, mask=mask,
shift_x=shift_x, shift_y=shift_y)
@_default_selem
def median(image, selem=None, out=None, mask=None,
shift_x=False, shift_y=False):
"""Return local median of an image.
Parameters
----------
image : 2-D array (uint8, uint16)
Input image.
selem : 2-D array, optional
The neighborhood expressed as a 2-D array of 1's and 0's. If None, a
full square of size 3 is used.
out : 2-D array (same dtype as input)
If None, a new array is allocated.
mask : ndarray
Mask array that defines (>0) area of the image included in the local
neighborhood. If None, the complete image is used (default).
shift_x, shift_y : int
Offset added to the structuring element center point. Shift is bounded
to the structuring element sizes (center must be inside the given
structuring element).
Returns
-------
out : 2-D array (same dtype as input image)
Output image.
Examples
--------
>>> from skimage import data
>>> from skimage.morphology import disk
>>> from skimage.filters.rank import median
>>> img = data.camera()
>>> med = median(img, disk(5))
"""
return _apply_scalar_per_pixel(generic_cy._median, image, selem,
out=out, mask=mask,
shift_x=shift_x, shift_y=shift_y)
def minimum(image, selem, out=None, mask=None, shift_x=False, shift_y=False):
"""Return local minimum of an image.
Parameters
----------
image : 2-D array (uint8, uint16)
Input image.
selem : 2-D array
The neighborhood expressed as a 2-D array of 1's and 0's.
out : 2-D array (same dtype as input)
If None, a new array is allocated.
mask : ndarray
Mask array that defines (>0) area of the image included in the local
neighborhood. If None, the complete image is used (default).
shift_x, shift_y : int
Offset added to the structuring element center point. Shift is bounded
to the structuring element sizes (center must be inside the given
structuring element).
Returns
-------
out : 2-D array (same dtype as input image)
Output image.
See also
--------
skimage.morphology.erosion
Notes
-----
The lower algorithm complexity makes `skimage.filters.rank.minimum` more
efficient for larger images and structuring elements.
Examples
--------
>>> from skimage import data
>>> from skimage.morphology import disk
>>> from skimage.filters.rank import minimum
>>> img = data.camera()
>>> out = minimum(img, disk(5))
"""
return _apply_scalar_per_pixel(generic_cy._minimum, image, selem,
out=out, mask=mask,
shift_x=shift_x, shift_y=shift_y)
def modal(image, selem, out=None, mask=None, shift_x=False, shift_y=False):
"""Return local mode of an image.
The mode is the value that appears most often in the local histogram.
Parameters
----------
image : 2-D array (uint8, uint16)
Input image.
selem : 2-D array
The neighborhood expressed as a 2-D array of 1's and 0's.
out : 2-D array (same dtype as input)
If None, a new array is allocated.
mask : ndarray
Mask array that defines (>0) area of the image included in the local
neighborhood. If None, the complete image is used (default).
shift_x, shift_y : int
Offset added to the structuring element center point. Shift is bounded
to the structuring element sizes (center must be inside the given
structuring element).
Returns
-------
out : 2-D array (same dtype as input image)
Output image.
Examples
--------
>>> from skimage import data
>>> from skimage.morphology import disk
>>> from skimage.filters.rank import modal
>>> img = data.camera()
>>> out = modal(img, disk(5))
"""
return _apply_scalar_per_pixel(generic_cy._modal, image, selem,
out=out, mask=mask,
shift_x=shift_x, shift_y=shift_y)
def enhance_contrast(image, selem, out=None, mask=None, shift_x=False,
shift_y=False):
"""Enhance contrast of an image.
This replaces each pixel by the local maximum if the pixel greyvalue is
closer to the local maximum than the local minimum. Otherwise it is
replaced by the local minimum.
Parameters
----------
image : 2-D array (uint8, uint16)
Input image.
selem : 2-D array
The neighborhood expressed as a 2-D array of 1's and 0's.
out : 2-D array (same dtype as input)
If None, a new array is allocated.
mask : ndarray
Mask array that defines (>0) area of the image included in the local
neighborhood. If None, the complete image is used (default).
shift_x, shift_y : int
Offset added to the structuring element center point. Shift is bounded
to the structuring element sizes (center must be inside the given
structuring element).
    Returns
    -------
    out : 2-D array (same dtype as input image)
        Output image (the result of the local enhance_contrast).
Examples
--------
>>> from skimage import data
>>> from skimage.morphology import disk
>>> from skimage.filters.rank import enhance_contrast
>>> img = data.camera()
>>> out = enhance_contrast(img, disk(5))
"""
return _apply_scalar_per_pixel(generic_cy._enhance_contrast, image, selem,
out=out, mask=mask,
shift_x=shift_x, shift_y=shift_y)
def pop(image, selem, out=None, mask=None, shift_x=False, shift_y=False):
"""Return the local number (population) of pixels.
The number of pixels is defined as the number of pixels which are included
in the structuring element and the mask.
Parameters
----------
image : 2-D array (uint8, uint16)
Input image.
selem : 2-D array
The neighborhood expressed as a 2-D array of 1's and 0's.
out : 2-D array (same dtype as input)
If None, a new array is allocated.
mask : ndarray
Mask array that defines (>0) area of the image included in the local
neighborhood. If None, the complete image is used (default).
shift_x, shift_y : int
Offset added to the structuring element center point. Shift is bounded
to the structuring element sizes (center must be inside the given
structuring element).
Returns
-------
out : 2-D array (same dtype as input image)
Output image.
Examples
--------
>>> from skimage.morphology import square
    >>> import skimage.filters.rank as rank
    >>> import numpy as np
>>> img = 255 * np.array([[0, 0, 0, 0, 0],
... [0, 1, 1, 1, 0],
... [0, 1, 1, 1, 0],
... [0, 1, 1, 1, 0],
... [0, 0, 0, 0, 0]], dtype=np.uint8)
>>> rank.pop(img, square(3))
array([[4, 6, 6, 6, 4],
[6, 9, 9, 9, 6],
[6, 9, 9, 9, 6],
[6, 9, 9, 9, 6],
[4, 6, 6, 6, 4]], dtype=uint8)
"""
return _apply_scalar_per_pixel(generic_cy._pop, image, selem, out=out,
mask=mask, shift_x=shift_x,
shift_y=shift_y)
def sum(image, selem, out=None, mask=None, shift_x=False, shift_y=False):
"""Return the local sum of pixels.
Note that the sum may overflow depending on the data type of the input
array.
Parameters
----------
image : 2-D array (uint8, uint16)
Input image.
selem : 2-D array
The neighborhood expressed as a 2-D array of 1's and 0's.
out : 2-D array (same dtype as input)
If None, a new array is allocated.
mask : ndarray
Mask array that defines (>0) area of the image included in the local
neighborhood. If None, the complete image is used (default).
shift_x, shift_y : int
Offset added to the structuring element center point. Shift is bounded
to the structuring element sizes (center must be inside the given
structuring element).
Returns
-------
out : 2-D array (same dtype as input image)
Output image.
Examples
--------
>>> from skimage.morphology import square
    >>> import skimage.filters.rank as rank
    >>> import numpy as np
>>> img = np.array([[0, 0, 0, 0, 0],
... [0, 1, 1, 1, 0],
... [0, 1, 1, 1, 0],
... [0, 1, 1, 1, 0],
... [0, 0, 0, 0, 0]], dtype=np.uint8)
>>> rank.sum(img, square(3))
array([[1, 2, 3, 2, 1],
[2, 4, 6, 4, 2],
[3, 6, 9, 6, 3],
[2, 4, 6, 4, 2],
[1, 2, 3, 2, 1]], dtype=uint8)
"""
return _apply_scalar_per_pixel(generic_cy._sum, image, selem, out=out,
mask=mask, shift_x=shift_x,
shift_y=shift_y)
def threshold(image, selem, out=None, mask=None, shift_x=False, shift_y=False):
"""Local threshold of an image.
The resulting binary mask is True if the greyvalue of the center pixel is
greater than the local mean.
Parameters
----------
image : 2-D array (uint8, uint16)
Input image.
selem : 2-D array
The neighborhood expressed as a 2-D array of 1's and 0's.
out : 2-D array (same dtype as input)
If None, a new array is allocated.
mask : ndarray
Mask array that defines (>0) area of the image included in the local
neighborhood. If None, the complete image is used (default).
shift_x, shift_y : int
Offset added to the structuring element center point. Shift is bounded
to the structuring element sizes (center must be inside the given
structuring element).
Returns
-------
out : 2-D array (same dtype as input image)
Output image.
Examples
--------
>>> from skimage.morphology import square
    >>> from skimage.filters.rank import threshold
    >>> import numpy as np
>>> img = 255 * np.array([[0, 0, 0, 0, 0],
... [0, 1, 1, 1, 0],
... [0, 1, 1, 1, 0],
... [0, 1, 1, 1, 0],
... [0, 0, 0, 0, 0]], dtype=np.uint8)
>>> threshold(img, square(3))
array([[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 0, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]], dtype=uint8)
"""
return _apply_scalar_per_pixel(generic_cy._threshold, image, selem,
out=out, mask=mask,
shift_x=shift_x, shift_y=shift_y)
def tophat(image, selem, out=None, mask=None, shift_x=False, shift_y=False):
"""Local top-hat of an image.
This filter computes the morphological opening of the image and then
subtracts the result from the original image.
Parameters
----------
image : 2-D array (uint8, uint16)
Input image.
selem : 2-D array
The neighborhood expressed as a 2-D array of 1's and 0's.
out : 2-D array (same dtype as input)
If None, a new array is allocated.
mask : ndarray
Mask array that defines (>0) area of the image included in the local
neighborhood. If None, the complete image is used (default).
shift_x, shift_y : int
Offset added to the structuring element center point. Shift is bounded
to the structuring element sizes (center must be inside the given
structuring element).
Returns
-------
out : 2-D array (same dtype as input image)
Output image.
Examples
--------
>>> from skimage import data
>>> from skimage.morphology import disk
>>> from skimage.filters.rank import tophat
>>> img = data.camera()
>>> out = tophat(img, disk(5))
"""
return _apply_scalar_per_pixel(generic_cy._tophat, image, selem,
out=out, mask=mask,
shift_x=shift_x, shift_y=shift_y)
def noise_filter(image, selem, out=None, mask=None, shift_x=False,
shift_y=False):
"""Noise feature.
Parameters
----------
image : 2-D array (uint8, uint16)
Input image.
selem : 2-D array
The neighborhood expressed as a 2-D array of 1's and 0's.
out : 2-D array (same dtype as input)
If None, a new array is allocated.
mask : ndarray
Mask array that defines (>0) area of the image included in the local
neighborhood. If None, the complete image is used (default).
shift_x, shift_y : int
Offset added to the structuring element center point. Shift is bounded
to the structuring element sizes (center must be inside the given
structuring element).
References
----------
.. [1] N. Hashimoto et al. Referenceless image quality evaluation
for whole slide imaging. J Pathol Inform 2012;3:9.
Returns
-------
out : 2-D array (same dtype as input image)
Output image.
Examples
--------
>>> from skimage import data
>>> from skimage.morphology import disk
>>> from skimage.filters.rank import noise_filter
>>> img = data.camera()
>>> out = noise_filter(img, disk(5))
"""
# ensure that the central pixel in the structuring element is empty
centre_r = int(selem.shape[0] / 2) + shift_y
centre_c = int(selem.shape[1] / 2) + shift_x
# make a local copy
selem_cpy = selem.copy()
selem_cpy[centre_r, centre_c] = 0
return _apply_scalar_per_pixel(generic_cy._noise_filter, image, selem_cpy,
out=out, mask=mask,
shift_x=shift_x, shift_y=shift_y)
def entropy(image, selem, out=None, mask=None, shift_x=False, shift_y=False):
"""Local entropy.
The entropy is computed using base 2 logarithm i.e. the filter returns the
minimum number of bits needed to encode the local greylevel
distribution.
Parameters
----------
image : 2-D array (uint8, uint16)
Input image.
selem : 2-D array
The neighborhood expressed as a 2-D array of 1's and 0's.
out : 2-D array (same dtype as input)
If None, a new array is allocated.
mask : ndarray
Mask array that defines (>0) area of the image included in the local
neighborhood. If None, the complete image is used (default).
shift_x, shift_y : int
Offset added to the structuring element center point. Shift is bounded
to the structuring element sizes (center must be inside the given
structuring element).
Returns
-------
out : ndarray (double)
Output image.
References
----------
.. [1] http://en.wikipedia.org/wiki/Entropy_(information_theory)
Examples
--------
>>> from skimage import data
>>> from skimage.filters.rank import entropy
>>> from skimage.morphology import disk
>>> img = data.camera()
>>> ent = entropy(img, disk(5))
"""
return _apply_scalar_per_pixel(generic_cy._entropy, image, selem,
out=out, mask=mask,
shift_x=shift_x, shift_y=shift_y,
out_dtype=np.double)
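# Editor's illustrative sketch (hypothetical helper, not the Cython
# implementation used above): for a single window, the value reported by
# ``entropy`` corresponds to the Shannon entropy of the local histogram,
# expressed in bits.
def _local_entropy_sketch(window, n_bins=256):
    """Return the Shannon entropy (in bits) of one uint8/uint16 window."""
    counts = np.bincount(window.ravel(), minlength=n_bins)
    p = counts[counts > 0] / counts.sum()
    return float(-(p * np.log2(p)).sum())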
def otsu(image, selem, out=None, mask=None, shift_x=False, shift_y=False):
"""Local Otsu's threshold value for each pixel.
Parameters
----------
image : ndarray
Image array (uint8 array).
selem : 2-D array
The neighborhood expressed as a 2-D array of 1's and 0's.
out : ndarray
If None, a new array will be allocated.
mask : ndarray
Mask array that defines (>0) area of the image included in the local
neighborhood. If None, the complete image is used (default).
shift_x, shift_y : int
Offset added to the structuring element center point. Shift is bounded
to the structuring element sizes (center must be inside the given
structuring element).
Returns
-------
out : 2-D array (same dtype as input image)
Output image.
References
----------
.. [1] http://en.wikipedia.org/wiki/Otsu's_method
Examples
--------
>>> from skimage import data
>>> from skimage.filters.rank import otsu
>>> from skimage.morphology import disk
>>> img = data.camera()
>>> local_otsu = otsu(img, disk(5))
>>> thresh_image = img >= local_otsu
"""
return _apply_scalar_per_pixel(generic_cy._otsu, image, selem, out=out,
mask=mask, shift_x=shift_x,
shift_y=shift_y)
def windowed_histogram(image, selem, out=None, mask=None,
shift_x=False, shift_y=False, n_bins=None):
"""Normalized sliding window histogram
Parameters
----------
image : ndarray
Image array (uint8 array).
selem : 2-D array
The neighborhood expressed as a 2-D array of 1's and 0's.
out : ndarray
If None, a new array will be allocated.
mask : ndarray
Mask array that defines (>0) area of the image included in the local
neighborhood. If None, the complete image is used (default).
shift_x, shift_y : int
Offset added to the structuring element center point. Shift is bounded
to the structuring element sizes (center must be inside the given
structuring element).
n_bins : int or None
The number of histogram bins. Will default to ``image.max() + 1``
if None is passed.
Returns
-------
out : 3-D array with float dtype of dimensions (H,W,N), where (H,W) are
the dimensions of the input image and N is n_bins or
``image.max() + 1`` if no value is provided as a parameter.
Effectively, each pixel is a N-D feature vector that is the histogram.
The sum of the elements in the feature vector will be 1, unless no
pixels in the window were covered by both selem and mask, in which
case all elements will be 0.
Examples
--------
>>> from skimage import data
>>> from skimage.filters.rank import windowed_histogram
>>> from skimage.morphology import disk
>>> img = data.camera()
>>> hist_img = windowed_histogram(img, disk(5))
"""
if n_bins is None:
n_bins = int(image.max()) + 1
return _apply_vector_per_pixel(generic_cy._windowed_hist, image, selem,
out=out, mask=mask,
shift_x=shift_x, shift_y=shift_y,
out_dtype=np.double,
pixel_size=n_bins)
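# Editor's note: unlike the scalar filters above, ``windowed_histogram``
# returns one length-``n_bins`` feature vector per pixel (a 3-D float array);
# each vector sums to 1 wherever at least one pixel of the window is covered
# by both ``selem`` and ``mask``, and is all zeros otherwise.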
|
|
from flask import render_template, flash, redirect, session, url_for, request, g
from flask_login import login_user, logout_user, current_user, login_required
from app import app, db, lm, oid
from .forms import LoginForm, EditForm, PostForm
from .models import User, Post
from datetime import datetime
from config import POSTS_PER_PAGE
import sys, os
sys.path.append('/Users/davidng109/Projects/microblogv2/app/')
from oauth import OAuthSignIn
@lm.user_loader
def load_user(id):
return User.query.get(int(id))
@app.before_request
def before_request():
g.user = current_user
if g.user.is_authenticated:
g.user.last_seen = datetime.utcnow()
db.session.add(g.user)
db.session.commit()
@app.route('/', methods = ['GET','POST'])
@app.route('/index', methods = ['GET','POST'])
@app.route('/index/<int:page>', methods = ['GET','POST'])
@login_required
def index(page = 1):
form = PostForm()
if form.validate_on_submit():
post = Post(body = form.post.data, timestamp = datetime.utcnow(), author=g.user)
db.session.add(post)
db.session.commit()
flash('Your post is now live!')
return redirect(url_for('index'))
posts = g.user.followed_posts().paginate(page,POSTS_PER_PAGE,False)
return render_template('index.html',
title='Home',
form = form,
posts=posts)
@app.route('/login', methods=['GET', 'POST'])
@oid.loginhandler
def login():
if g.user is not None and g.user.is_authenticated:
return redirect(url_for('index'))
form = LoginForm()
if form.validate_on_submit():
session['remember_me'] = form.remember_me.data
return oid.try_login(form.openid.data, ask_for=['nickname', 'email'])
return render_template('login.html',
title='Sign In',
form=form,
providers=app.config['OPENID_PROVIDERS'])
@oid.after_login
def after_login(resp):
if resp.email is None or resp.email == "":
flash('Invalid login. Please try again.')
return redirect(url_for('login'))
user = User.query.filter_by(email=resp.email).first()
if user is None:
nickname = resp.nickname
if nickname is None or nickname == "":
nickname = resp.email.split('@')[0]
nickname = User.make_unique_nickname(nickname)
user = User(nickname=nickname, email=resp.email)
db.session.add(user)
db.session.commit()
        db.session.add(user.follow(user))
        db.session.commit()
remember_me = False
if 'remember_me' in session:
remember_me = session['remember_me']
session.pop('remember_me', None)
login_user(user, remember=remember_me)
return redirect(request.args.get('next') or url_for('index'))
@app.route('/logout')
def logout():
logout_user()
return redirect(url_for('login'))
###### New from app.py
@app.route('/authorize/<provider>')
def oauth_authorize(provider):
if not current_user.is_anonymous:
return redirect(url_for('index'))
oauth = OAuthSignIn.get_provider(provider)
return oauth.authorize()
@app.route('/callback/<provider>')
def oauth_callback(provider):
if not current_user.is_anonymous:
return redirect(url_for('index'))
oauth = OAuthSignIn.get_provider(provider)
social_id, username, email = oauth.callback()
if social_id is None:
flash('Authentication failed.')
return redirect(url_for('login'))
user = User.query.filter_by(social_id=social_id).first()
if not user:
user = User(social_id=social_id, nickname=username, email=email)
db.session.add(user)
db.session.commit()
        db.session.add(user.follow(user))
        db.session.commit()
login_user(user, True)
return redirect(url_for('index'))
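# Editor's note (assumption inferred from the calls above, not from the
# ``oauth`` module itself): ``OAuthSignIn.get_provider(name)`` is expected to
# return a provider object whose ``authorize()`` redirects the browser to the
# external provider and whose ``callback()`` returns a
# ``(social_id, username, email)`` tuple, with ``social_id`` set to ``None``
# when authentication fails.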
### This is for user profiles
@app.route('/user/<nickname>')
@app.route('/user/<nickname>/<int:page>')
@login_required
def user(nickname, page=1):
user = User.query.filter_by(nickname = nickname).first()
    if user is None:
flash('User %s not found.' % nickname)
return redirect(url_for('index'))
posts = user.posts.paginate(page, POSTS_PER_PAGE, False)
return render_template('user.html', user=user, posts=posts)
### This is for the editing form
@app.route('/edit', methods = ['GET', 'POST'])
@login_required
def edit():
form = EditForm(g.user.nickname)
if form.validate_on_submit():
g.user.nickname = form.nickname.data
g.user.about_me = form.about_me.data
db.session.add(g.user)
db.session.commit()
flash('Your changes have been saved.')
return redirect(url_for('edit'))
else:
form.nickname.data = g.user.nickname
form.about_me.data = g.user.about_me
return render_template('edit.html', form=form)
### 404 / 500 Error Messages
@app.errorhandler(404)
def not_found_error(error):
return render_template('404.html'), 404
@app.errorhandler(500)
def internal_error(error):
db.session.rollback()
return render_template('500.html'), 500
##### Follow and Unfollow Links
@app.route('/follow/<nickname>')
@login_required
def follow(nickname):
user = User.query.filter_by(nickname=nickname).first()
if user is None:
flash('User %s not found.' % nickname)
        return redirect(url_for('index'))
if user == g.user:
flash('You can\'t follow yourself!')
return redirect(url_for('user',nickname=nickname))
u = g.user.follow(user)
if u is None:
flash('Cannot follow ' + nickname + '.')
return redirect(url_for('user',nickname = nickname))
db.session.add(u)
db.session.commit()
flash('You are now following ' + nickname + '!')
return redirect(url_for('user',nickname=nickname))
@app.route('/unfollow/<nickname>')
@login_required
def unfollow(nickname):
user = User.query.filter_by(nickname=nickname).first()
if user is None:
flash('User %s not found.' % nickname)
return redirect(url_for('index'))
if user == g.user:
flash('You can\'t unfollow yourself!')
return redirect(url_for('user', nickname=nickname))
u = g.user.unfollow(user)
if u is None:
flash('Cannot unfollow ' + nickname + '.')
return redirect(url_for('user', nickname=nickname))
db.session.add(u)
db.session.commit()
flash('You have stopped following ' + nickname + '.')
return redirect(url_for('user', nickname=nickname))
|
|
"""Testing TcEx Input module field types."""
# standard library
from typing import TYPE_CHECKING, List, Optional, Union
# third-party
import pytest
from pydantic import BaseModel, validator
# first-party
from tcex.input.field_types import (
GroupEntity,
IndicatorEntity,
TCEntity,
always_array,
indicator_entity,
)
from tcex.pleb.scoped_property import scoped_property
from tests.input.field_types.utils import InputTest
if TYPE_CHECKING:
# first-party
from tests.mock_app import MockApp
# pylint: disable=no-self-argument, no-self-use
class TestInputsFieldTypes(InputTest):
"""Test TcEx String Field Model Tests."""
def setup_method(self):
"""Configure setup before all tests."""
# print('\n') # print blank line for readability
scoped_property._reset()
@pytest.mark.parametrize(
'input_value,expected,optional,fail_test',
[
#
# Pass Testing
#
# required, normal input
(
{'id': 123, 'type': 'Address', 'value': '1.1.1.1', 'rating': '5'},
{'id': 123, 'type': 'Address', 'value': '1.1.1.1', 'rating': 5},
False,
False,
),
# required, string in -> int out
(
{'id': '123', 'type': 'Address', 'value': '1.1.1.1', 'rating': '5'},
{'id': 123, 'type': 'Address', 'value': '1.1.1.1', 'rating': 5},
False,
False,
),
# optional, None
(
None,
None,
True,
False,
),
#
# Fail Testing
#
# required, null input
(
{'id': 123, 'type': 'Address', 'value': None, 'rating': '5'},
None,
False,
True,
),
# required, empty input
(
{'id': 123, 'type': 'Address', 'value': '', 'rating': '5'},
None,
False,
True,
),
# required, missing field
# (
# {'id': 123, 'value': '1.1.1.1', 'rating': '5'},
# None,
# False,
# True,
# ),
# optional, Invalid data
(
{'id': '123', 'type': 'Address', 'value': '', 'rating': '5'},
None,
True,
True,
),
],
)
def test_field_model_tc_entity_input(
self,
input_value: str,
expected: str,
optional: bool,
fail_test: bool,
playbook_app: 'MockApp',
):
"""Test Binary field type.
Playbook Data Type: String
Validation: Not null
"""
if optional is False:
class PytestModel(BaseModel):
"""Test Model for Inputs"""
my_data: TCEntity
else:
class PytestModel(BaseModel):
"""Test Model for Inputs"""
my_data: Optional[TCEntity]
self._type_validation(
PytestModel,
input_name='my_data',
input_value=input_value,
input_type='TCEntity',
expected=expected,
fail_test=fail_test,
playbook_app=playbook_app,
)
@pytest.mark.parametrize(
'input_value,expected,optional,fail_test',
[
#
# Pass Testing
#
# required, list input
(
[{'id': 123, 'type': 'Address', 'value': '1.1.1.1', 'rating': '5'}],
[{'id': 123, 'type': 'Address', 'value': '1.1.1.1', 'rating': 5}],
False,
False,
),
# required, string in -> int out
(
[{'id': '123', 'type': 'Address', 'value': '1.1.1.1', 'rating': '5'}],
[{'id': 123, 'type': 'Address', 'value': '1.1.1.1', 'rating': 5}],
False,
False,
),
# optional, None
(
None,
None,
True,
False,
),
#
# Fail Testing
#
# required, null input
(
[{'id': 123, 'type': 'Address', 'value': None, 'rating': '5'}],
None,
False,
True,
),
# required, empty input
(
[{'id': 123, 'type': 'Address', 'value': '', 'rating': '5'}],
None,
False,
True,
),
# required, missing field
# (
# {'id': 123, 'value': '1.1.1.1', 'rating': '5'},
# None,
# False,
# True,
# ),
# optional, Invalid data
(
[{'id': '123', 'type': 'Address', 'value': '', 'rating': '5'}],
None,
True,
True,
),
],
)
def test_field_model_tc_entity_array_input(
self,
input_value: str,
expected: str,
optional: bool,
fail_test: bool,
playbook_app: 'MockApp',
):
"""Test Binary field type.
Playbook Data Type: String
Validation: Not null
"""
if optional is False:
class PytestModel(BaseModel):
"""Test Model for Inputs"""
my_data: List[TCEntity]
else:
class PytestModel(BaseModel):
"""Test Model for Inputs"""
my_data: Optional[List[TCEntity]]
self._type_validation(
PytestModel,
input_name='my_data',
input_value=input_value,
input_type='TCEntityArray',
expected=expected,
fail_test=fail_test,
playbook_app=playbook_app,
)
@pytest.mark.parametrize(
'input_value,expected,input_type,optional,fail_test',
[
#
# Pass Testing
#
# required, dict input
(
{'id': 123, 'type': 'Address', 'value': '1.1.1.1', 'rating': '5'},
[{'id': 123, 'type': 'Address', 'value': '1.1.1.1', 'rating': 5}],
'TCEntity',
False,
False,
),
# required, list input
(
[{'id': 123, 'type': 'Address', 'value': '1.1.1.1', 'rating': '5'}],
[{'id': 123, 'type': 'Address', 'value': '1.1.1.1', 'rating': 5}],
'TCEntityArray',
False,
False,
),
# required, dict input, string value in -> int value out
(
{'id': '123', 'type': 'Address', 'value': '1.1.1.1', 'rating': '5'},
[{'id': 123, 'type': 'Address', 'value': '1.1.1.1', 'rating': 5}],
'TCEntity',
False,
False,
),
# required, list input, string value in -> int value out
(
[{'id': '123', 'type': 'Address', 'value': '1.1.1.1', 'rating': '5'}],
[{'id': 123, 'type': 'Address', 'value': '1.1.1.1', 'rating': 5}],
'TCEntityArray',
False,
False,
),
# optional, None TCEntity
(
None,
[],
'TCEntity',
True,
False,
),
# optional, None TCEntityArray
(
None,
[],
'TCEntityArray',
True,
False,
),
#
# Fail Testing
#
# required, tcentity, null input
(
{'id': 123, 'type': 'Address', 'value': None, 'rating': '5'},
None,
'TCEntity',
False,
True,
),
# optional tcentity, null input
(
{'id': 123, 'type': 'Address', 'value': None, 'rating': '5'},
None,
'TCEntity',
True,
True,
),
# required, tcentityarray, null input
(
[{'id': 123, 'type': 'Address', 'value': None, 'rating': '5'}],
None,
'TCEntityArray',
False,
True,
),
# optional, tcentityarray, null input
(
[{'id': 123, 'type': 'Address', 'value': None, 'rating': '5'}],
None,
'TCEntityArray',
True,
True,
),
# required, tcentity, empty input
(
{'id': 123, 'type': 'Address', 'value': '', 'rating': '5'},
None,
'TCEntity',
False,
True,
),
# optional, tcentity, empty input
(
{'id': 123, 'type': 'Address', 'value': '', 'rating': '5'},
None,
'TCEntity',
True,
True,
),
# required, tcentityarray, empty input
(
[{'id': 123, 'type': 'Address', 'value': '', 'rating': '5'}],
None,
'TCEntityArray',
False,
True,
),
# optional, tcentityarray, empty input
(
[{'id': 123, 'type': 'Address', 'value': '', 'rating': '5'}],
None,
'TCEntityArray',
True,
True,
),
# required, missing field
# (
# {'id': 123, 'value': '1.1.1.1', 'rating': '5'},
# None,
# 'TCEntityArray',
# False,
# True,
# ),
# required, None TCEntity
(
None,
[],
'TCEntity',
False,
True,
),
# required, None TCEntityArray
(
None,
[],
'TCEntityArray',
False,
True,
),
],
)
def test_field_model_tc_entity_union_input(
self,
input_value: str,
expected: str,
input_type: str,
optional: bool,
fail_test: bool,
playbook_app: 'MockApp',
):
"""Test Binary field type.
Playbook Data Type: String
Validation: Not null
"""
if optional is False:
class PytestModel(BaseModel):
"""Test Model for Inputs"""
my_data: Union[TCEntity, List[TCEntity]]
_always_array = validator('my_data', allow_reuse=True)(always_array())
else:
class PytestModel(BaseModel):
"""Test Model for Inputs"""
my_data: Optional[Union[TCEntity, List[TCEntity]]]
_always_array = validator('my_data', allow_reuse=True)(always_array())
self._type_validation(
PytestModel,
input_name='my_data',
input_value=input_value,
input_type=input_type,
expected=expected,
fail_test=fail_test,
playbook_app=playbook_app,
)
@pytest.mark.parametrize(
'input_value,expected,optional,fail_test',
[
#
# Pass Testing
#
# required, normal input
(
{'id': 123, 'type': 'Address', 'value': '1.1.1.1', 'rating': '5'},
{'id': 123, 'type': 'Address', 'value': '1.1.1.1', 'rating': 5},
False,
False,
),
# required, string in -> int out
(
{'id': '123', 'type': 'Address', 'value': '1.1.1.1', 'rating': '5'},
{'id': 123, 'type': 'Address', 'value': '1.1.1.1', 'rating': 5},
False,
False,
),
# optional, None
(
None,
None,
True,
False,
),
#
# Fail Testing
#
# required, null input
(
{'id': 123, 'type': 'Address', 'value': None, 'rating': '5'},
None,
False,
True,
),
# required, empty input
(
{'id': 123, 'type': 'Address', 'value': '', 'rating': '5'},
None,
False,
True,
),
# required, wrong type
(
{'id': 123, 'type': 'Adversary', 'value': 'adversary-001', 'rating': '5'},
None,
False,
True,
),
# optional, wrong type
(
{'id': 123, 'type': 'Adversary', 'value': 'adversary-001', 'rating': '5'},
None,
True,
True,
),
# required, missing field
# (
# {'id': 123, 'value': '1.1.1.1', 'rating': '5'},
# None,
# False,
# True,
# ),
# optional, Invalid data
(
{'id': '123', 'type': 'Address', 'value': '', 'rating': '5'},
None,
True,
True,
),
],
)
def test_field_model_indicator_entity_input(
self,
input_value: str,
expected: str,
optional: bool,
fail_test: bool,
playbook_app: 'MockApp',
):
"""Test Binary field type.
Playbook Data Type: String
Validation: Not null
"""
if optional is False:
class PytestModel(BaseModel):
"""Test Model for Inputs"""
my_data: IndicatorEntity
else:
class PytestModel(BaseModel):
"""Test Model for Inputs"""
my_data: Optional[IndicatorEntity]
self._type_validation(
PytestModel,
input_name='my_data',
input_value=input_value,
input_type='TCEntity',
expected=expected,
fail_test=fail_test,
playbook_app=playbook_app,
)
@pytest.mark.parametrize(
'input_value,expected,indicator_types,optional,fail_test',
[
#
# Pass Testing
#
# required, normal input
(
{'id': 123, 'type': 'Address', 'value': '1.1.1.1', 'rating': '5'},
{'id': 123, 'type': 'Address', 'value': '1.1.1.1', 'rating': 5},
['Address'],
False,
False,
),
#
# Fail Testing
#
(
{'id': 123, 'type': 'Address', 'value': 'bad.com', 'rating': '5'},
None,
['Host'],
False,
True,
),
],
)
def test_field_model_custom_indicator_entity_input(
self,
input_value: str,
expected: str,
indicator_types: list,
optional: bool,
fail_test: bool,
playbook_app: 'MockApp',
):
"""Test Binary field type.
Playbook Data Type: String
Validation: Not null
"""
if optional is False:
class PytestModel(BaseModel):
"""Test Model for Inputs"""
my_data: indicator_entity(indicator_types=indicator_types)
else:
class PytestModel(BaseModel):
"""Test Model for Inputs"""
my_data: Optional[indicator_entity(indicator_types=indicator_types)]
self._type_validation(
PytestModel,
input_name='my_data',
input_value=input_value,
input_type='TCEntity',
expected=expected,
fail_test=fail_test,
playbook_app=playbook_app,
)
@pytest.mark.parametrize(
'input_value,expected,optional,fail_test',
[
#
# Pass Testing
#
# required, normal input
(
{'id': 123, 'type': 'Adversary', 'value': 'adversary-001'},
{'id': 123, 'type': 'Adversary', 'value': 'adversary-001'},
False,
False,
),
# required, string in -> int out
(
{'id': '123', 'type': 'Adversary', 'value': 'adversary-001'},
{'id': 123, 'type': 'Adversary', 'value': 'adversary-001'},
False,
False,
),
# optional, None
(
None,
None,
True,
False,
),
#
# Fail Testing
#
# required, null input
(
{'id': 123, 'type': 'Adversary', 'value': None, 'rating': '5'},
None,
False,
True,
),
# required, empty input
(
{'id': 123, 'type': 'Adversary', 'value': '', 'rating': '5'},
None,
False,
True,
),
# required, wrong type
(
{'id': 123, 'type': 'Address', 'value': '1.1.1.1', 'rating': '5'},
None,
False,
True,
),
# optional, wrong type
(
{'id': 123, 'type': 'Address', 'value': '1.1.1.1', 'rating': '5'},
None,
True,
True,
),
# required, missing field
# (
# {'id': 123, 'value': '1.1.1.1', 'rating': '5'},
# None,
# False,
# True,
# ),
# optional, Invalid data
(
{'id': '123', 'type': 'Adversary', 'value': '', 'rating': '5'},
None,
True,
True,
),
],
)
def test_field_model_group_entity_input(
self,
input_value: Optional[dict],
expected: Optional[dict],
optional: bool,
fail_test: bool,
playbook_app: 'MockApp',
):
"""Test Binary field type.
Playbook Data Type: String
Validation: Not null
"""
if optional is False:
class PytestModel(BaseModel):
"""Test Model for Inputs"""
my_data: GroupEntity
else:
class PytestModel(BaseModel):
"""Test Model for Inputs"""
my_data: Optional[GroupEntity]
self._type_validation(
PytestModel,
input_name='my_data',
input_value=input_value,
input_type='TCEntity',
expected=expected,
fail_test=fail_test,
playbook_app=playbook_app,
)
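# A minimal sketch of the coercion these TCEntity tests exercise, written with plain
# pydantic v1; the real IndicatorEntity/GroupEntity types come from the library under
# test, and the model below (HypotheticalTCEntity) is an assumption for illustration only:
#
#   from typing import Optional
#   from pydantic import BaseModel, validator
#
#   class HypotheticalTCEntity(BaseModel):
#       id: int                        # '123' (string) is coerced to 123
#       type: str
#       value: str
#       rating: Optional[int] = None   # '5' is coerced to 5
#
#       @validator('value')
#       def _not_empty(cls, v):
#           if not v:
#               raise ValueError('value may not be null or empty')
#           return v
#
#   HypotheticalTCEntity(id='123', type='Address', value='1.1.1.1', rating='5')  # passes
#   HypotheticalTCEntity(id=123, type='Address', value='', rating='5')           # raises ValidationError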
|
|
import os
import re
from datetime import datetime
import configuration
import shell
import shouter
class Initializer:
def __init__(self):
config = configuration.get()
self.repoName = config.gitRepoName
self.clonedRepoName = config.clonedGitRepoName
self.author = config.user
@staticmethod
def createignore():
git_ignore = ".gitignore"
if not os.path.exists(git_ignore):
with open(git_ignore, "w") as ignore:
ignore.write(".jazz5" + '\n')
ignore.write(".metadata" + '\n')
ignore.write(".jazzShed" + '\n')
config = configuration.get()
if len(config.ignoredirectories) > 0:
ignore.write('\n' + "# directories" + '\n')
for directory in config.ignoredirectories:
ignore.write('/' + directory + '\n')
ignore.write('\n')
shell.execute("git add " + git_ignore)
shell.execute("git commit -m %s -q" % shell.quote("Add .gitignore"))
@staticmethod
def createattributes():
"""
create a .gitattributes file (if so specified and not yet present)
"""
config = configuration.get()
if len(config.gitattributes) > 0:
gitattribues = ".gitattributes"
if not os.path.exists(gitattribues):
with open(gitattribues, "w") as attributes:
for line in config.gitattributes:
attributes.write(line + '\n')
shell.execute("git add " + gitattribues)
shell.execute("git commit -m %s -q" % shell.quote("Add .gitattributes"))
def initalize(self):
self.createrepo()
self.preparerepo()
@staticmethod
def preparerepo():
Initializer.setgitconfigs()
Initializer.createignore()
Initializer.createattributes()
def createrepo(self):
shell.execute("git init --bare " + self.repoName)
shouter.shout("Repository was created in " + os.getcwd())
shell.execute("git clone " + self.repoName)
os.chdir(self.clonedRepoName)
@staticmethod
def setgitconfigs():
shell.execute("git config push.default current")
shell.execute("git config core.ignorecase false") # should be the default anyway
shouter.shout("Set core.ignorecase to false")
@staticmethod
def initialcommit():
shouter.shout("Initial git add")
shell.execute("git add -A", os.devnull)
shouter.shout("Finished initial git add, starting commit")
shell.execute("git commit -m %s -q" % shell.quote("Initial Commit"))
shouter.shout("Finished initial commit")
class Commiter:
commitcounter = 0
isattachedtoaworkitemregex = re.compile(r"^\d*:.*-")
findignorepatternregex = re.compile(r"\{([^\{\}]*)\}")
@staticmethod
def addandcommit(changeentry):
Commiter.handleignore()
Commiter.replaceauthor(changeentry.author, changeentry.email)
shell.execute("git add -A")
Commiter.handle_capitalization_filename_changes()
shell.execute(Commiter.getcommitcommand(changeentry))
Commiter.commitcounter += 1
if Commiter.commitcounter == 30:
shouter.shout("30 commits happened, pushing current branch to avoid running out of memory")
Commiter.pushbranch("")
Commiter.commitcounter = 0
shouter.shout("Committed change in local git repository")
@staticmethod
def handle_capitalization_filename_changes():
sandbox = os.path.join(configuration.get().workDirectory, configuration.get().clonedGitRepoName)
lines = shell.getoutput("git status -z", stripped=False)
for newfilerelativepath in Commiter.splitoutputofgitstatusz(lines, "A "):
directoryofnewfile = os.path.dirname(os.path.join(sandbox, newfilerelativepath))
newfilename = os.path.basename(newfilerelativepath)
cwd = os.getcwd()
os.chdir(directoryofnewfile)
files = shell.getoutput("git ls-files")
for previousFileName in files:
was_same_file_name = newfilename.lower() == previousFileName.lower()
file_was_renamed = newfilename != previousFileName
if was_same_file_name and file_was_renamed:
shell.execute("git rm --cached %s" % previousFileName)
os.chdir(cwd)
@staticmethod
def getcommitcommand(changeentry):
comment = Commiter.getcommentwithprefix(changeentry.comment)
return "git commit -m %s --date %s --author=%s" \
% (shell.quote(comment), shell.quote(changeentry.date), changeentry.getgitauthor())
@staticmethod
def getcommentwithprefix(comment):
prefix = configuration.get().commitmessageprefix
if prefix and Commiter.isattachedtoaworkitemregex.match(comment):
return prefix + comment
return comment
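# Illustrative example (hypothetical prefix): with commitmessageprefix == "RTC ", a comment
# such as "4711: fixed build - see workitem" matches isattachedtoaworkitemregex and becomes
# "RTC 4711: fixed build - see workitem"; a comment without a work item stays unchanged.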
@staticmethod
def replaceauthor(author, email):
shell.execute("git config --replace-all user.name " + shell.quote(author))
if not email:
email = Commiter.defaultemail(author)
shell.execute("git config --replace-all user.email " + email)
@staticmethod
def defaultemail(author):
if not author:
name = "default"
else:
haspoint = False
index = 0
name = ""
for c in author:
if c.isalnum() or c == "_":
name += c
else:
if index > 0 and not haspoint:
name += "."
haspoint = True
else:
name += "_"
index += 1
return name.lower() + "@rtc.to"
@staticmethod
def checkbranchname(branchname):
exitcode = shell.execute("git check-ref-format --normalize refs/heads/" + branchname)
return exitcode == 0
@staticmethod
def branch(branchname):
branchexist = shell.execute("git show-ref --verify --quiet refs/heads/" + branchname)
if branchexist == 0:
Commiter.checkout(branchname)
else:
shell.execute("git checkout -b " + branchname)
@staticmethod
def pushbranch(branchname, force=False):
if branchname:
shouter.shout("Push of branch " + branchname)
if force:
return shell.execute("git push -f origin " + branchname)
else:
return shell.execute("git push origin " + branchname)
@staticmethod
def pushmaster():
Commiter.pushbranch("master")
@staticmethod
def checkout(branchname):
shell.execute("git checkout " + branchname)
@staticmethod
def renamebranch(oldname, newname):
return shell.execute("git branch -m %s %s" % (oldname, newname))
@staticmethod
def copybranch(existingbranchname, newbranchname):
return shell.execute("git branch %s %s" % (newbranchname, existingbranchname))
@staticmethod
def promotebranchtomaster(branchname):
master = "master"
masterrename = Commiter.renamebranch(master, "masterRenamedAt_" + datetime.now().strftime('%Y%m%d_%H%M%S'))
copybranch = Commiter.copybranch(branchname, master)
if masterrename == 0 and copybranch == 0:
return Commiter.pushbranch(master, True)
else:
shouter.shout("Branch %s couldnt get renamed to master, please do that on your own" % branchname)
return 1 # branch couldnt get renamed
@staticmethod
def get_untracked_statuszlines():
return shell.getoutput("git status --untracked-files=all -z", stripped=False)
@staticmethod
def handleignore():
"""
check untracked files and handle both global and local ignores
"""
repositoryfiles = Commiter.splitoutputofgitstatusz(Commiter.get_untracked_statuszlines())
Commiter.ignoreextensions(repositoryfiles)
Commiter.ignorejazzignore(repositoryfiles)
@staticmethod
def ignoreextensions(repositoryfiles):
"""
add files with extensions to be ignored to the global .gitignore
"""
ignorefileextensions = configuration.get().ignorefileextensions
if len(ignorefileextensions) > 0:
Commiter.ignore(ExtensionFilter.match(repositoryfiles, ignorefileextensions))
@staticmethod
def ignore(filelines):
"""
append the file lines to the toplevel .gitignore
:param filelines: a list of newline terminated file names to be ignored
"""
if len(filelines) > 0:
with open(".gitignore", "a") as ignore:
ignore.writelines(filelines)
@staticmethod
def splitoutputofgitstatusz(lines, filterprefix=None):
"""
Split the output of 'git status -z' into single files
:param lines: the unstripped output line(s) from the command
:param filterprefix: if given, only the files of those entries matching the prefix will be returned
:return: a list of repository files with status changes
"""
repositoryfiles = []
for line in lines: # expect exactly one line
entries = line.split(sep='\x00') # ascii 0 is the delimiter
for entry in entries:
if len(entry) > 0:
if not filterprefix or entry.startswith(filterprefix):
start = entry.find(' ')
if 0 <= start <= 2:
repositoryfile = entry[3:] # output is formatted
else:
repositoryfile = entry # file on a single line (e.g. rename continuation)
repositoryfiles.append(repositoryfile)
return repositoryfiles
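# Illustrative example (hypothetical "git status -z" output, NUL-delimited):
#   lines = ["A  new.txt\x00 M changed.txt\x00?? untracked.txt\x00"]
#   Commiter.splitoutputofgitstatusz(lines)        -> ['new.txt', 'changed.txt', 'untracked.txt']
#   Commiter.splitoutputofgitstatusz(lines, "A ")  -> ['new.txt']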
@staticmethod
def translatejazzignore(jazzignorelines):
"""
translate the lines of a local .jazzignore file into the lines of a local .gitignore file
:param jazzignorelines: the input lines
:return: the .gitignore lines
"""
recursive = False
gitignorelines = []
for line in jazzignorelines:
if not line.startswith("#"):
line = line.strip()
if line.startswith("core.ignore"):
gitignorelines.append('\n')
recursive = line.startswith("core.ignore.recursive")
for foundpattern in Commiter.findignorepatternregex.findall(line):
gitignoreline = foundpattern + '\n'
if not recursive:
gitignoreline = '/' + gitignoreline # forward, not os.sep
gitignorelines.append(gitignoreline)
return gitignorelines
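# Illustrative translation (hypothetical .jazzignore content):
#   core.ignore.recursive = {*.class}
#   core.ignore = {bin} {target}
# yields the .gitignore lines ['\n', '*.class\n', '\n', '/bin\n', '/target\n'].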
@staticmethod
def restore_shed_gitignore(statuszlines):
"""
If a force reload of the RTC workspace sheds .gitignore files away, we need to restore them.
In this case they are marked as deletions from git.
:param statuszlines: the git status z output lines
"""
gitignore = ".gitignore"
gitignorelen = len(gitignore)
deletedfiles = Commiter.splitoutputofgitstatusz(statuszlines, " D ")
for deletedfile in deletedfiles:
if deletedfile[-gitignorelen:] == gitignore:
# only restore .gitignore if sibling .jazzignore still exists
jazzignorefile = deletedfile[:-gitignorelen] + ".jazzignore"
if os.path.exists(jazzignorefile):
shell.execute("git checkout -- %s" % deletedfile)
@staticmethod
def ignorejazzignore(repositoryfiles):
"""
If a .jazzignore file is modified or added, translate it to .gitignore,
if a .jazzignore file is deleted, delete the corresponding .gitignore file as well.
:param repositoryfiles: the modified files
"""
jazzignore = ".jazzignore"
jazzignorelen = len(jazzignore)
for repositoryfile in repositoryfiles:
if repositoryfile[-jazzignorelen:] == jazzignore:
path = repositoryfile[0:len(repositoryfile)-jazzignorelen]
gitignore = path + ".gitignore"
if os.path.exists(repositoryfile):
# update (or create) .gitignore
jazzignorelines = []
with open(repositoryfile, 'r') as jazzignorefile:
jazzignorelines = jazzignorefile.readlines()
if len(jazzignorelines) > 0:
# overwrite in any case
with open(gitignore, 'w') as gitignorefile:
gitignorefile.writelines(Commiter.translatejazzignore(jazzignorelines))
else:
# delete .gitignore
if os.path.exists(gitignore):
os.remove(gitignore)
class Differ:
@staticmethod
def has_diff():
return shell.execute("git diff --quiet") is 1
class ExtensionFilter:
@staticmethod
def match(repositoryfiles, extensions):
"""
Determine the repository files to ignore.
These filenames are returned as a list of newline terminated lines,
ready to be added to .gitignore with writelines()
:param repositoryfiles: a list of (changed) files
:param extensions the extensions to be ignored
:return: a list of newline terminated file names, possibly empty
"""
repositoryfilestoignore = []
for extension in extensions:
for repositoryfile in repositoryfiles:
extlen = len(extension)
if len(repositoryfile) >= extlen:
if repositoryfile[-extlen:] == extension:
# prepend a forward slash (treat as a non-recursive ignore),
# escape a backslash with a backslash
# append a newline
repositoryfilestoignore.append('/' + repositoryfile.replace('\\', '\\\\') + '\n')
return repositoryfilestoignore
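# Illustrative example: ExtensionFilter.match(['build/a.zip', 'notes.txt'], ['.zip'])
# returns ['/build/a.zip\n'], ready to be appended to .gitignore via Commiter.ignore().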
|
|
import datetime
import unittest2
import webtest
import json
import md5
import api_main
from google.appengine.ext import ndb
from google.appengine.ext import testbed
from consts.auth_type import AuthType
from consts.event_type import EventType
from models.account import Account
from models.api_auth_access import ApiAuthAccess
from models.award import Award
from models.event import Event
from models.event_team import EventTeam
from models.match import Match
from models.sitevar import Sitevar
from models.team import Team
class TestApiTrustedController(unittest2.TestCase):
def setUp(self):
self.testapp = webtest.TestApp(api_main.app)
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_datastore_v3_stub()
self.testbed.init_urlfetch_stub()
self.testbed.init_memcache_stub()
self.testbed.init_user_stub()
ndb.get_context().clear_cache() # Prevent data from leaking between tests
self.testbed.init_taskqueue_stub(root_path=".")
self.teams_auth = ApiAuthAccess(id='tEsT_id_0',
secret='321tEsTsEcReT',
description='test',
event_list=[ndb.Key(Event, '2014casj')],
auth_types_enum=[AuthType.EVENT_TEAMS])
self.matches_auth = ApiAuthAccess(id='tEsT_id_1',
secret='321tEsTsEcReT',
description='test',
event_list=[ndb.Key(Event, '2014casj')],
auth_types_enum=[AuthType.EVENT_MATCHES])
self.rankings_auth = ApiAuthAccess(id='tEsT_id_2',
secret='321tEsTsEcReT',
description='test',
event_list=[ndb.Key(Event, '2014casj')],
auth_types_enum=[AuthType.EVENT_RANKINGS])
self.alliances_auth = ApiAuthAccess(id='tEsT_id_3',
secret='321tEsTsEcReT',
description='test',
event_list=[ndb.Key(Event, '2014casj')],
auth_types_enum=[AuthType.EVENT_ALLIANCES])
self.awards_auth = ApiAuthAccess(id='tEsT_id_4',
secret='321tEsTsEcReT',
description='test',
event_list=[ndb.Key(Event, '2014casj')],
auth_types_enum=[AuthType.EVENT_AWARDS])
self.video_auth = ApiAuthAccess(id='tEsT_id_5',
secret='321tEsTsEcReT',
description='test',
event_list=[ndb.Key(Event, '2014casj')],
auth_types_enum=[AuthType.MATCH_VIDEO])
self.expired_auth = ApiAuthAccess(id='tEsT_id_6',
secret='321tEsTsEcReT',
description='test',
event_list=[ndb.Key(Event, '2014casj')],
auth_types_enum=[AuthType.EVENT_MATCHES],
expiration=datetime.datetime(year=1970, month=1, day=1))
self.owned_auth = ApiAuthAccess(id='tEsT_id_7',
secret='321tEsTsEcReT',
description='test',
event_list=[ndb.Key(Event, '2014casj')],
auth_types_enum=[AuthType.EVENT_MATCHES],
owner=ndb.Key(Account, "42"))
self.owned_auth_expired = ApiAuthAccess(id='tEsT_id_8',
secret='321tEsTsEcReT',
description='test',
event_list=[ndb.Key(Event, '2014casj')],
auth_types_enum=[AuthType.EVENT_MATCHES],
owner=ndb.Key(Account, "42"),
expiration=datetime.datetime(year=1970, month=1, day=1))
self.event = Event(
id='2014casj',
event_type_enum=EventType.REGIONAL,
event_short='casj',
year=2014,
)
self.event.put()
def tearDown(self):
self.testbed.deactivate()
def loginUser(self, is_admin=False):
self.testbed.setup_env(
user_email="[email protected]",
user_id="42",
user_is_admin='1' if is_admin else '0',
overwrite=True)
def test_auth(self):
request_path = '/api/trusted/v1/event/2014casj/matches/update'
request_path_caps_key = '/api/trusted/v1/event/2014CASJ/matches/update'
# Fail
response = self.testapp.post(request_path, expect_errors=True)
self.assertEqual(response.status_code, 400)
self.assertTrue('Error' in response.json)
# Fail
request_body = json.dumps([])
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_1', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 401)
self.assertTrue('Error' in response.json)
self.rankings_auth.put()
self.matches_auth.put()
# Pass
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_1', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
# Pass; all caps key
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path_caps_key, request_body)).hexdigest()
response = self.testapp.post(request_path_caps_key, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_1', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
# Fail; bad X-TBA-Auth-Id
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'badTestAuthId', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 401)
self.assertTrue('Error' in response.json)
# Fail; bad sig
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_1', 'X-TBA-Auth-Sig': '123abc'}, expect_errors=True)
self.assertEqual(response.status_code, 401)
self.assertTrue('Error' in response.json)
# Fail; bad sig due to wrong body
body2 = json.dumps([{}])
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, body2, headers={'X-TBA-Auth-Id': 'tEsT_id_1', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 401)
self.assertTrue('Error' in response.json)
# Fail; bad event
request_path2 = '/api/trusted/v1/event/2014cama/matches/update'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path2, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_1', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 401)
self.assertTrue('Error' in response.json)
# Fail; insufficient auth_types_enum
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_2', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 401)
self.assertTrue('Error' in response.json)
# Fail; expired keys
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_6', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 401)
self.assertTrue('Error' in response.json)
def test_admin_auth(self):
# Ensure that a logged-in admin user can access any event
request_path = '/api/trusted/v1/event/2014casj/matches/update'
request_body = json.dumps([])
self.loginUser(is_admin=True)
response = self.testapp.post(request_path, request_body, expect_errors=True)
self.assertEqual(response.status_code, 200)
def test_user_auth(self):
# Ensure that a logged in user can use auths granted to their account
request_path = '/api/trusted/v1/event/2014casj/matches/update'
request_body = json.dumps([])
self.owned_auth.put()
self.loginUser()
response = self.testapp.post(request_path, request_body, expect_errors=True)
self.assertEqual(response.status_code, 200)
def test_user_expired_auth(self):
# Ensure that an expired auth owned by a logged-in user does not grant access
request_path = '/api/trusted/v1/event/2014casj/matches/update'
request_body = json.dumps([])
self.owned_auth_expired.put()
self.loginUser()
# Should end up with a 400 error because the expired key didn't count and no explicit
# Auth-Id header was passed
response = self.testapp.post(request_path, request_body, expect_errors=True)
self.assertEqual(response.status_code, 400)
self.assertTrue('Error' in response.json)
def test_killswitch(self):
request_path = '/api/trusted/v1/event/2014casj/matches/update'
request_body = json.dumps([])
# Pass
self.matches_auth.put()
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_1', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
# Now, set the disable sitevar
trusted_sitevar = Sitevar(
id='trustedapi',
values_json=json.dumps({
3: False,
})
)
trusted_sitevar.put()
# Fail
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_1', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 401)
def test_alliance_selections_update(self):
self.alliances_auth.put()
alliances = [['frc971', 'frc254', 'frc1662'],
['frc1678', 'frc368', 'frc4171'],
['frc2035', 'frc192', 'frc4990'],
['frc1323', 'frc846', 'frc2135'],
['frc2144', 'frc1388', 'frc668'],
['frc1280', 'frc604', 'frc100'],
['frc114', 'frc852', 'frc841'],
['frc2473', 'frc3256', 'frc1868']]
request_body = json.dumps(alliances)
request_path = '/api/trusted/v1/event/2014casj/alliance_selections/update'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_3', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(self.event.alliance_selections), 8)
for i, selection in enumerate(self.event.alliance_selections):
self.assertEqual(alliances[i], selection['picks'])
def test_empty_alliance_selections_update(self):
self.alliances_auth.put()
alliances = [['frc971', 'frc254', 'frc1662'],
['frc1678', 'frc368', 'frc4171'],
['frc2035', 'frc192', 'frc4990'],
['frc1323', 'frc846', 'frc2135'],
[],[],[],[]]
request_body = json.dumps(alliances)
request_path = '/api/trusted/v1/event/2014casj/alliance_selections/update'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_3', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(self.event.alliance_selections), 4)
for i, selection in enumerate(self.event.alliance_selections):
self.assertEqual(alliances[i], selection['picks'])
def test_awards_update(self):
self.awards_auth.put()
awards = [{'name_str': 'Winner', 'team_key': 'frc254'},
{'name_str': 'Winner', 'team_key': 'frc604'},
{'name_str': 'Volunteer Blahblah', 'team_key': 'frc1', 'awardee': 'Bob Bobby'}]
request_body = json.dumps(awards)
request_path = '/api/trusted/v1/event/2014casj/awards/update'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_4', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
db_awards = Award.query(Award.event == self.event.key).fetch(None)
self.assertEqual(len(db_awards), 2)
self.assertTrue('2014casj_1' in [a.key.id() for a in db_awards])
self.assertTrue('2014casj_5' in [a.key.id() for a in db_awards])
awards = [{'name_str': 'Winner', 'team_key': 'frc254'},
{'name_str': 'Winner', 'team_key': 'frc604'}]
request_body = json.dumps(awards)
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_4', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
db_awards = Award.query(Award.event == self.event.key).fetch(None)
self.assertEqual(len(db_awards), 1)
self.assertTrue('2014casj_1' in [a.key.id() for a in db_awards])
def test_matches_update(self):
self.matches_auth.put()
update_request_path = '/api/trusted/v1/event/2014casj/matches/update'
delete_request_path = '/api/trusted/v1/event/2014casj/matches/delete'
delete_all_request_path = '/api/trusted/v1/event/2014casj/matches/delete_all'
# add one match
matches = [{
'comp_level': 'qm',
'set_number': 1,
'match_number': 1,
'alliances': {
'red': {'teams': ['frc1', 'frc2', 'frc3'],
'score': 25},
'blue': {'teams': ['frc4', 'frc5', 'frc6'],
'score': 26},
},
'time_string': '9:00 AM',
'time_utc': '2014-08-31T16:00:00',
}]
request_body = json.dumps(matches)
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', update_request_path, request_body)).hexdigest()
response = self.testapp.post(update_request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_1', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
db_matches = Match.query(Match.event == self.event.key).fetch(None)
self.assertEqual(len(db_matches), 1)
self.assertTrue('2014casj_qm1' in [m.key.id() for m in db_matches])
# add another match
matches = [{
'comp_level': 'f',
'set_number': 1,
'match_number': 1,
'alliances': {
'red': {'teams': ['frc1', 'frc2', 'frc3'],
'score': 250},
'blue': {'teams': ['frc4', 'frc5', 'frc6'],
'score': 260},
},
'time_string': '10:00 AM',
'time_utc': '2014-08-31T17:00:00',
}]
request_body = json.dumps(matches)
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', update_request_path, request_body)).hexdigest()
response = self.testapp.post(update_request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_1', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
db_matches = Match.query(Match.event == self.event.key).fetch(None)
self.assertEqual(len(db_matches), 2)
self.assertTrue('2014casj_qm1' in [m.key.id() for m in db_matches])
self.assertTrue('2014casj_f1m1' in [m.key.id() for m in db_matches])
# add a match and delete a match
matches = [{
'comp_level': 'f',
'set_number': 1,
'match_number': 2,
'alliances': {
'red': {'teams': ['frc1', 'frc2', 'frc3'],
'score': 250},
'blue': {'teams': ['frc4', 'frc5', 'frc6'],
'score': 260},
},
'score_breakdown': {
'red': {'auto': 20, 'assist': 40, 'truss+catch': 20, 'teleop_goal+foul': 20},
'blue': {'auto': 40, 'assist': 60, 'truss+catch': 10, 'teleop_goal+foul': 40},
},
'time_string': '11:00 AM',
'time_utc': '2014-08-31T18:00:00',
}]
request_body = json.dumps(matches)
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', update_request_path, request_body)).hexdigest()
response = self.testapp.post(update_request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_1', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
keys_to_delete = ['qm1']
request_body = json.dumps(keys_to_delete)
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', delete_request_path, request_body)).hexdigest()
response = self.testapp.post(delete_request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_1', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json['keys_deleted'], ['qm1'])
db_matches = Match.query(Match.event == self.event.key).fetch(None)
self.assertEqual(len(db_matches), 2)
self.assertTrue('2014casj_f1m1' in [m.key.id() for m in db_matches])
self.assertTrue('2014casj_f1m2' in [m.key.id() for m in db_matches])
db_matches = Match.query(Match.event == self.event.key).fetch(None)
self.assertEqual(len(db_matches), 2)
self.assertTrue('2014casj_f1m1' in [m.key.id() for m in db_matches])
self.assertTrue('2014casj_f1m2' in [m.key.id() for m in db_matches])
# verify match data
match = Match.get_by_id('2014casj_f1m2')
self.assertEqual(match.time, datetime.datetime(2014, 8, 31, 18, 0))
self.assertEqual(match.time_string, '11:00 AM')
self.assertEqual(match.alliances['red']['score'], 250)
self.assertEqual(match.score_breakdown['red']['truss+catch'], 20)
# test delete all matches
request_body = ''
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', delete_all_request_path, request_body)).hexdigest()
response = self.testapp.post(delete_all_request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_1', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 400)
request_body = '2014casj'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', delete_all_request_path, request_body)).hexdigest()
response = self.testapp.post(delete_all_request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_1', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
db_matches = Match.query(Match.event == self.event.key).fetch(None)
self.assertEqual(len(db_matches), 0)
def test_rankings_update(self):
self.rankings_auth.put()
rankings = {
'breakdowns': ['QS', 'Auton', 'Teleop', 'T&C'],
'rankings': [
{'team_key': 'frc254', 'rank': 1, 'played': 10, 'dqs': 0, 'QS': 20, 'Auton': 500, 'Teleop': 500, 'T&C': 200},
{'team_key': 'frc971', 'rank': 2, 'played': 10, 'dqs': 0, 'QS': 20, 'Auton': 500, 'Teleop': 500, 'T&C': 200}
],
}
request_body = json.dumps(rankings)
request_path = '/api/trusted/v1/event/2014casj/rankings/update'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_2', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(self.event.rankings[0], ['Rank', 'Team', 'QS', 'Auton', 'Teleop', 'T&C', 'DQ', 'Played'])
self.assertEqual(self.event.rankings[1], [1, '254', 20, 500, 500, 200, 0, 10])
def test_rankings_wlt_update(self):
self.rankings_auth.put()
rankings = {
'breakdowns': ['QS', 'Auton', 'Teleop', 'T&C', 'wins', 'losses', 'ties'],
'rankings': [
{'team_key': 'frc254', 'rank': 1, 'wins': 10, 'losses': 0, 'ties': 0, 'played': 10, 'dqs': 0, 'QS': 20, 'Auton': 500, 'Teleop': 500, 'T&C': 200},
{'team_key': 'frc971', 'rank': 2, 'wins': 10, 'losses': 0, 'ties': 0, 'played': 10, 'dqs': 0, 'QS': 20, 'Auton': 500, 'Teleop': 500, 'T&C': 200}
],
}
request_body = json.dumps(rankings)
request_path = '/api/trusted/v1/event/2014casj/rankings/update'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_2', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(self.event.rankings[0], ['Rank', 'Team', 'QS', 'Auton', 'Teleop', 'T&C', 'Record (W-L-T)', 'DQ', 'Played'])
self.assertEqual(self.event.rankings[1], [1, '254', 20, 500, 500, 200, '10-0-0', 0, 10])
def test_eventteams_update(self):
self.teams_auth.put()
team_list = ['frc254', 'frc971', 'frc604']
request_body = json.dumps(team_list)
# Insert teams into db, otherwise they won't get added (see 072058b)
Team(id='frc254', team_number=254).put()
Team(id='frc971', team_number=971).put()
Team(id='frc604', team_number=604).put()
Team(id='frc100', team_number=100).put()
request_path = '/api/trusted/v1/event/2014casj/team_list/update'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_0', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
db_eventteams = EventTeam.query(EventTeam.event == self.event.key).fetch(None)
self.assertEqual(len(db_eventteams), 3)
self.assertTrue('2014casj_frc254' in [et.key.id() for et in db_eventteams])
self.assertTrue('2014casj_frc971' in [et.key.id() for et in db_eventteams])
self.assertTrue('2014casj_frc604' in [et.key.id() for et in db_eventteams])
team_list = ['frc254', 'frc100']
request_body = json.dumps(team_list)
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_0', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
db_eventteams = EventTeam.query(EventTeam.event == self.event.key).fetch(None)
self.assertEqual(len(db_eventteams), 2)
self.assertTrue('2014casj_frc254' in [et.key.id() for et in db_eventteams])
self.assertTrue('2014casj_frc100' in [et.key.id() for et in db_eventteams])
def test_eventteams_unknown(self):
self.teams_auth.put()
team_list = ['frc254', 'frc971', 'frc604']
request_body = json.dumps(team_list)
# Insert teams into db, otherwise they won't get added (see 072058b)
Team(id='frc254', team_number=254).put()
Team(id='frc971', team_number=971).put()
request_path = '/api/trusted/v1/event/2014casj/team_list/update'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_0', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
db_eventteams = EventTeam.query(EventTeam.event == self.event.key).fetch(None)
self.assertEqual(len(db_eventteams), 2)
self.assertTrue('2014casj_frc254' in [et.key.id() for et in db_eventteams])
self.assertTrue('2014casj_frc971' in [et.key.id() for et in db_eventteams])
self.assertTrue('2014casj_frc604' not in [et.key.id() for et in db_eventteams])
team_list = ['frc254', 'frc100']
request_body = json.dumps(team_list)
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_0', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
db_eventteams = EventTeam.query(EventTeam.event == self.event.key).fetch(None)
self.assertEqual(len(db_eventteams), 1)
self.assertTrue('2014casj_frc254' in [et.key.id() for et in db_eventteams])
self.assertTrue('2014casj_frc100' not in [et.key.id() for et in db_eventteams])
def test_match_videos_add(self):
self.video_auth.put()
match1 = Match(
id="2014casj_qm1",
alliances_json="""{"blue": {"score": -1, "teams": ["frc3464", "frc20", "frc1073"]}, "red": {"score": -1, "teams": ["frc69", "frc571", "frc176"]}}""",
comp_level="qm",
event=ndb.Key(Event, '2014casj'),
year=2014,
set_number=1,
match_number=1,
team_key_names=[u'frc69', u'frc571', u'frc176', u'frc3464', u'frc20', u'frc1073'],
youtube_videos=["abcdef"]
)
match1.put()
match2 = Match(
id="2014casj_sf1m1",
alliances_json="""{"blue": {"score": -1, "teams": ["frc3464", "frc20", "frc1073"]}, "red": {"score": -1, "teams": ["frc69", "frc571", "frc176"]}}""",
comp_level="sf",
event=ndb.Key(Event, '2014casj'),
year=2014,
set_number=1,
match_number=1,
team_key_names=[u'frc69', u'frc571', u'frc176', u'frc3464', u'frc20', u'frc1073'],
)
match2.put()
match_videos = {'qm1': 'aFZy8iibMD0', 'sf1m1': 'RpSgUrsghv4'}
request_body = json.dumps(match_videos)
request_path = '/api/trusted/v1/event/2014casj/match_videos/add'
sig = md5.new('{}{}{}'.format('321tEsTsEcReT', request_path, request_body)).hexdigest()
response = self.testapp.post(request_path, request_body, headers={'X-TBA-Auth-Id': 'tEsT_id_5', 'X-TBA-Auth-Sig': sig}, expect_errors=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(set(Match.get_by_id('2014casj_qm1').youtube_videos), {'abcdef', 'aFZy8iibMD0'})
self.assertEqual(set(Match.get_by_id('2014casj_sf1m1').youtube_videos), {'RpSgUrsghv4'})
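# Signing pattern used throughout these tests: the X-TBA-Auth-Sig header is the MD5 hex digest
# of auth_secret + request_path + request_body, paired with the X-TBA-Auth-Id header, e.g.:
#   sig = md5.new('{}{}{}'.format('321tEsTsEcReT', '/api/trusted/v1/event/2014casj/matches/update',
#                                 json.dumps([]))).hexdigest()
#   headers = {'X-TBA-Auth-Id': 'tEsT_id_1', 'X-TBA-Auth-Sig': sig}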
|
|
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
import asyncio
import collections
import collections.abc
import inspect
import importlib.util
import sys
import traceback
import types
from typing import Any, Callable, Mapping, List, Dict, TYPE_CHECKING, Optional, TypeVar, Type, Union, overload
import discord
from discord import app_commands
from discord.app_commands.tree import _retrieve_guild_ids
from .core import GroupMixin
from .view import StringView
from .context import Context
from . import errors
from .help import HelpCommand, DefaultHelpCommand
from .cog import Cog
if TYPE_CHECKING:
from typing_extensions import Self
import importlib.machinery
from discord.message import Message
from discord.abc import User, Snowflake
from ._types import (
Check,
CoroFunc,
)
__all__ = (
'when_mentioned',
'when_mentioned_or',
'Bot',
'AutoShardedBot',
)
MISSING: Any = discord.utils.MISSING
T = TypeVar('T')
CFT = TypeVar('CFT', bound='CoroFunc')
CXT = TypeVar('CXT', bound='Context')
BT = TypeVar('BT', bound='Union[Bot, AutoShardedBot]')
def when_mentioned(bot: Union[Bot, AutoShardedBot], msg: Message) -> List[str]:
"""A callable that implements a command prefix equivalent to being mentioned.
These are meant to be passed into the :attr:`.Bot.command_prefix` attribute.
"""
# bot.user will never be None when this is called
return [f'<@{bot.user.id}> ', f'<@!{bot.user.id}> '] # type: ignore
def when_mentioned_or(*prefixes: str) -> Callable[[Union[Bot, AutoShardedBot], Message], List[str]]:
"""A callable that implements when mentioned or other prefixes provided.
These are meant to be passed into the :attr:`.Bot.command_prefix` attribute.
Example
--------
.. code-block:: python3
bot = commands.Bot(command_prefix=commands.when_mentioned_or('!'))
.. note::
This callable returns another callable, so if this is done inside a custom
callable, you must call the returned callable, for example:
.. code-block:: python3
async def get_prefix(bot, message):
extras = await prefixes_for(message.guild) # returns a list
return commands.when_mentioned_or(*extras)(bot, message)
See Also
----------
:func:`.when_mentioned`
"""
def inner(bot, msg):
r = list(prefixes)
r = when_mentioned(bot, msg) + r
return r
return inner
def _is_submodule(parent: str, child: str) -> bool:
return parent == child or child.startswith(parent + ".")
class _DefaultRepr:
def __repr__(self):
return '<default-help-command>'
_default = _DefaultRepr()
class BotBase(GroupMixin):
def __init__(self, command_prefix, help_command=_default, description=None, **options):
super().__init__(**options)
self.command_prefix = command_prefix
self.extra_events: Dict[str, List[CoroFunc]] = {}
# Self doesn't have the ClientT bound, but since this is a mixin it technically does
self.__tree: app_commands.CommandTree[Self] = app_commands.CommandTree(self) # type: ignore
self.__cogs: Dict[str, Cog] = {}
self.__extensions: Dict[str, types.ModuleType] = {}
self._checks: List[Check] = []
self._check_once = []
self._before_invoke = None
self._after_invoke = None
self._help_command = None
self.description = inspect.cleandoc(description) if description else ''
self.owner_id = options.get('owner_id')
self.owner_ids = options.get('owner_ids', set())
self.strip_after_prefix = options.get('strip_after_prefix', False)
if self.owner_id and self.owner_ids:
raise TypeError('Both owner_id and owner_ids are set.')
if self.owner_ids and not isinstance(self.owner_ids, collections.abc.Collection):
raise TypeError(f'owner_ids must be a collection not {self.owner_ids.__class__!r}')
if help_command is _default:
self.help_command = DefaultHelpCommand()
else:
self.help_command = help_command
# internal helpers
def dispatch(self, event_name: str, *args: Any, **kwargs: Any) -> None:
# super() will resolve to Client
super().dispatch(event_name, *args, **kwargs) # type: ignore
ev = 'on_' + event_name
for event in self.extra_events.get(ev, []):
self._schedule_event(event, ev, *args, **kwargs) # type: ignore
@discord.utils.copy_doc(discord.Client.close)
async def close(self) -> None:
for extension in tuple(self.__extensions):
try:
self.unload_extension(extension)
except Exception:
pass
for cog in tuple(self.__cogs):
try:
self.remove_cog(cog)
except Exception:
pass
await super().close() # type: ignore
async def on_command_error(self, context: Context, exception: errors.CommandError) -> None:
"""|coro|
The default command error handler provided by the bot.
By default this prints to :data:`sys.stderr` however it could be
overridden to have a different implementation.
This only fires if you do not specify any listeners for command error.
"""
if self.extra_events.get('on_command_error', None):
return
command = context.command
if command and command.has_error_handler():
return
cog = context.cog
if cog and cog.has_error_handler():
return
print(f'Ignoring exception in command {context.command}:', file=sys.stderr)
traceback.print_exception(type(exception), exception, exception.__traceback__, file=sys.stderr)
# global check registration
def check(self, func: T) -> T:
r"""A decorator that adds a global check to the bot.
A global check is similar to a :func:`.check` that is applied
on a per command basis except it is run before any command checks
have been verified and applies to every command the bot has.
.. note::
This function can either be a regular function or a coroutine.
Similar to a command :func:`.check`\, this takes a single parameter
of type :class:`.Context` and can only raise exceptions inherited from
:exc:`.CommandError`.
Example
---------
.. code-block:: python3
@bot.check
def check_commands(ctx):
return ctx.command.qualified_name in allowed_commands
"""
# T was used instead of Check to ensure the type matches on return
self.add_check(func) # type: ignore
return func
def add_check(self, func: Check, /, *, call_once: bool = False) -> None:
"""Adds a global check to the bot.
This is the non-decorator interface to :meth:`.check`
and :meth:`.check_once`.
.. versionchanged:: 2.0
``func`` parameter is now positional-only.
Parameters
-----------
func
The function that was used as a global check.
call_once: :class:`bool`
If the function should only be called once per
:meth:`.invoke` call.
"""
if call_once:
self._check_once.append(func)
else:
self._checks.append(func)
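# Illustrative usage (assuming an existing ``bot`` instance; the check below is hypothetical):
#   def globally_block_dms(ctx):
#       return ctx.guild is not None
#   bot.add_check(globally_block_dms)
#   bot.add_check(globally_block_dms, call_once=True)  # run only once per Bot.invoke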
def remove_check(self, func: Check, /, *, call_once: bool = False) -> None:
"""Removes a global check from the bot.
This function is idempotent and will not raise an exception
if the function is not in the global checks.
.. versionchanged:: 2.0
``func`` parameter is now positional-only.
Parameters
-----------
func
The function to remove from the global checks.
call_once: :class:`bool`
If the function was added with ``call_once=True`` in
the :meth:`.Bot.add_check` call or using :meth:`.check_once`.
"""
l = self._check_once if call_once else self._checks
try:
l.remove(func)
except ValueError:
pass
def check_once(self, func: CFT) -> CFT:
r"""A decorator that adds a "call once" global check to the bot.
Unlike regular global checks, this one is called only once
per :meth:`.invoke` call.
Regular global checks are called whenever a command is called
or :meth:`.Command.can_run` is called. This type of check
bypasses that and ensures that it's called only once, even inside
the default help command.
.. note::
When using this function the :class:`.Context` sent to a group subcommand
may only parse the parent command and not the subcommands due to it
being invoked once per :meth:`.Bot.invoke` call.
.. note::
This function can either be a regular function or a coroutine.
Similar to a command :func:`.check`\, this takes a single parameter
of type :class:`.Context` and can only raise exceptions inherited from
:exc:`.CommandError`.
Example
---------
.. code-block:: python3
@bot.check_once
def whitelist(ctx):
return ctx.message.author.id in my_whitelist
"""
self.add_check(func, call_once=True)
return func
async def can_run(self, ctx: Context, *, call_once: bool = False) -> bool:
data = self._check_once if call_once else self._checks
if len(data) == 0:
return True
# type-checker doesn't distinguish between functions and methods
return await discord.utils.async_all(f(ctx) for f in data) # type: ignore
async def is_owner(self, user: User) -> bool:
"""|coro|
Checks if a :class:`~discord.User` or :class:`~discord.Member` is the owner of
this bot.
If an :attr:`owner_id` is not set, it is fetched automatically
through the use of :meth:`~.Bot.application_info`.
.. versionchanged:: 1.3
The function also checks if the application is team-owned if
:attr:`owner_ids` is not set.
Parameters
-----------
user: :class:`.abc.User`
The user to check for.
Returns
--------
:class:`bool`
Whether the user is the owner.
"""
if self.owner_id:
return user.id == self.owner_id
elif self.owner_ids:
return user.id in self.owner_ids
else:
app = await self.application_info() # type: ignore
if app.team:
self.owner_ids = ids = {m.id for m in app.team.members}
return user.id in ids
else:
self.owner_id = owner_id = app.owner.id
return user.id == owner_id
def before_invoke(self, coro: CFT) -> CFT:
"""A decorator that registers a coroutine as a pre-invoke hook.
A pre-invoke hook is called directly before the command is
called. This makes it a useful function to set up database
connections or any type of set up required.
This pre-invoke hook takes a sole parameter, a :class:`.Context`.
.. note::
The :meth:`~.Bot.before_invoke` and :meth:`~.Bot.after_invoke` hooks are
only called if all checks and argument parsing procedures pass
without error. If any check or argument parsing procedures fail
then the hooks are not called.
Parameters
-----------
coro: :ref:`coroutine <coroutine>`
The coroutine to register as the pre-invoke hook.
Raises
-------
TypeError
The coroutine passed is not actually a coroutine.
"""
if not asyncio.iscoroutinefunction(coro):
raise TypeError('The pre-invoke hook must be a coroutine.')
self._before_invoke = coro
return coro
def after_invoke(self, coro: CFT) -> CFT:
r"""A decorator that registers a coroutine as a post-invoke hook.
A post-invoke hook is called directly after the command is
called. This makes it a useful function to clean-up database
connections or any type of clean up required.
This post-invoke hook takes a sole parameter, a :class:`.Context`.
.. note::
Similar to :meth:`~.Bot.before_invoke`\, this is not called unless
checks and argument parsing procedures succeed. This hook is,
however, **always** called regardless of the internal command
callback raising an error (i.e. :exc:`.CommandInvokeError`\).
This makes it ideal for clean-up scenarios.
Parameters
-----------
coro: :ref:`coroutine <coroutine>`
The coroutine to register as the post-invoke hook.
Raises
-------
TypeError
The coroutine passed is not actually a coroutine.
"""
if not asyncio.iscoroutinefunction(coro):
raise TypeError('The post-invoke hook must be a coroutine.')
self._after_invoke = coro
return coro
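# Illustrative usage (hypothetical acquire_connection helper):
#   @bot.before_invoke
#   async def open_db(ctx):
#       ctx.db = await acquire_connection()
#
#   @bot.after_invoke
#   async def close_db(ctx):
#       await ctx.db.close()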
# listener registration
def add_listener(self, func: CoroFunc, name: str = MISSING) -> None:
"""The non decorator alternative to :meth:`.listen`.
Parameters
-----------
func: :ref:`coroutine <coroutine>`
The function to call.
name: :class:`str`
The name of the event to listen for. Defaults to ``func.__name__``.
Example
--------
.. code-block:: python3
async def on_ready(): pass
async def my_message(message): pass
bot.add_listener(on_ready)
bot.add_listener(my_message, 'on_message')
"""
name = func.__name__ if name is MISSING else name
if not asyncio.iscoroutinefunction(func):
raise TypeError('Listeners must be coroutines')
if name in self.extra_events:
self.extra_events[name].append(func)
else:
self.extra_events[name] = [func]
def remove_listener(self, func: CoroFunc, name: str = MISSING) -> None:
"""Removes a listener from the pool of listeners.
Parameters
-----------
func
The function that was used as a listener to remove.
name: :class:`str`
The name of the event we want to remove. Defaults to
``func.__name__``.
"""
name = func.__name__ if name is MISSING else name
if name in self.extra_events:
try:
self.extra_events[name].remove(func)
except ValueError:
pass
def listen(self, name: str = MISSING) -> Callable[[CFT], CFT]:
"""A decorator that registers another function as an external
event listener. Basically this allows you to listen to multiple
events from different places e.g. such as :func:`.on_ready`
The functions being listened to must be a :ref:`coroutine <coroutine>`.
Example
--------
.. code-block:: python3
@bot.listen()
async def on_message(message):
print('one')
# in some other file...
@bot.listen('on_message')
async def my_message(message):
print('two')
Would print one and two in an unspecified order.
Raises
-------
TypeError
The function being listened to is not a coroutine.
"""
def decorator(func: CFT) -> CFT:
self.add_listener(func, name)
return func
return decorator
# cogs
def add_cog(
self,
cog: Cog,
/,
*,
override: bool = False,
guild: Optional[Snowflake] = MISSING,
guilds: List[Snowflake] = MISSING,
) -> None:
"""Adds a "cog" to the bot.
A cog is a class that has its own event listeners and commands.
If the cog is a :class:`.app_commands.Group` then it is added to
the bot's :class:`~discord.app_commands.CommandTree` as well.
.. versionchanged:: 2.0
:exc:`.ClientException` is raised when a cog with the same name
is already loaded.
.. versionchanged:: 2.0
``cog`` parameter is now positional-only.
Parameters
-----------
cog: :class:`.Cog`
The cog to register to the bot.
override: :class:`bool`
If a previously loaded cog with the same name should be ejected
instead of raising an error.
.. versionadded:: 2.0
guild: Optional[:class:`~discord.abc.Snowflake`]
If the cog is an application command group, then this would be the
guild where the cog group would be added to. If not given then
it becomes a global command instead.
.. versionadded:: 2.0
guilds: List[:class:`~discord.abc.Snowflake`]
If the cog is an application command group, then this would be the
guilds where the cog group would be added to. If not given then
it becomes a global command instead. Cannot be mixed with
``guild``.
.. versionadded:: 2.0
Raises
-------
TypeError
The cog does not inherit from :class:`.Cog`.
CommandError
An error happened during loading.
ClientException
A cog with the same name is already loaded.
"""
if not isinstance(cog, Cog):
raise TypeError('cogs must derive from Cog')
cog_name = cog.__cog_name__
existing = self.__cogs.get(cog_name)
if existing is not None:
if not override:
raise discord.ClientException(f'Cog named {cog_name!r} already loaded')
self.remove_cog(cog_name, guild=guild, guilds=guilds)
if isinstance(cog, app_commands.Group):
self.__tree.add_command(cog, override=override, guild=guild, guilds=guilds)
cog = cog._inject(self)
self.__cogs[cog_name] = cog
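# Illustrative usage (hypothetical cog, assuming ``from discord.ext import commands``):
#   class Greetings(commands.Cog):
#       @commands.command()
#       async def hello(self, ctx):
#           await ctx.send('Hello!')
#
#   bot.add_cog(Greetings())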
def get_cog(self, name: str, /) -> Optional[Cog]:
"""Gets the cog instance requested.
If the cog is not found, ``None`` is returned instead.
.. versionchanged:: 2.0
``name`` parameter is now positional-only.
Parameters
-----------
name: :class:`str`
The name of the cog you are requesting.
This is equivalent to the name passed via keyword
argument in class creation or the class name if unspecified.
Returns
--------
Optional[:class:`Cog`]
The cog that was requested. If not found, returns ``None``.
"""
return self.__cogs.get(name)
def remove_cog(
self,
name: str,
/,
guild: Optional[Snowflake] = MISSING,
guilds: List[Snowflake] = MISSING,
) -> Optional[Cog]:
"""Removes a cog from the bot and returns it.
All registered commands and event listeners that the
cog has registered will be removed as well.
If no cog is found then this method has no effect.
.. versionchanged:: 2.0
``name`` parameter is now positional-only.
Parameters
-----------
name: :class:`str`
The name of the cog to remove.
guild: Optional[:class:`~discord.abc.Snowflake`]
If the cog is an application command group, then this would be the
guild where the cog group would be removed from. If not given then
a global command is removed instead.
.. versionadded:: 2.0
guilds: List[:class:`~discord.abc.Snowflake`]
If the cog is an application command group, then this would be the
guilds where the cog group would be removed from. If not given then
a global command is removed instead. Cannot be mixed with
``guild``.
.. versionadded:: 2.0
Returns
-------
Optional[:class:`.Cog`]
The cog that was removed. ``None`` if not found.
"""
cog = self.__cogs.pop(name, None)
if cog is None:
return
help_command = self._help_command
if help_command and help_command.cog is cog:
help_command.cog = None
if isinstance(cog, app_commands.Group):
guild_ids = _retrieve_guild_ids(cog, guild, guilds)
if guild_ids is None:
self.__tree.remove_command(name)
else:
for guild_id in guild_ids:
self.__tree.remove_command(name, guild=discord.Object(guild_id))
cog._eject(self)
return cog
@property
def cogs(self) -> Mapping[str, Cog]:
"""Mapping[:class:`str`, :class:`Cog`]: A read-only mapping of cog name to cog."""
return types.MappingProxyType(self.__cogs)
# extensions
def _remove_module_references(self, name: str) -> None:
# find all references to the module
# remove the cogs registered from the module
for cogname, cog in self.__cogs.copy().items():
if _is_submodule(name, cog.__module__):
self.remove_cog(cogname)
# remove all the commands from the module
for cmd in self.all_commands.copy().values():
if cmd.module is not None and _is_submodule(name, cmd.module):
if isinstance(cmd, GroupMixin):
cmd.recursively_remove_all_commands()
self.remove_command(cmd.name)
# remove all the listeners from the module
for event_list in self.extra_events.copy().values():
remove = []
for index, event in enumerate(event_list):
if event.__module__ is not None and _is_submodule(name, event.__module__):
remove.append(index)
for index in reversed(remove):
del event_list[index]
def _call_module_finalizers(self, lib: types.ModuleType, key: str) -> None:
try:
func = getattr(lib, 'teardown')
except AttributeError:
pass
else:
try:
func(self)
except Exception:
pass
finally:
self.__extensions.pop(key, None)
sys.modules.pop(key, None)
name = lib.__name__
for module in list(sys.modules.keys()):
if _is_submodule(name, module):
del sys.modules[module]
def _load_from_module_spec(self, spec: importlib.machinery.ModuleSpec, key: str) -> None:
# precondition: key not in self.__extensions
lib = importlib.util.module_from_spec(spec)
sys.modules[key] = lib
try:
spec.loader.exec_module(lib) # type: ignore
except Exception as e:
del sys.modules[key]
raise errors.ExtensionFailed(key, e) from e
try:
setup = getattr(lib, 'setup')
except AttributeError:
del sys.modules[key]
raise errors.NoEntryPointError(key)
try:
setup(self)
except Exception as e:
del sys.modules[key]
self._remove_module_references(lib.__name__)
self._call_module_finalizers(lib, key)
raise errors.ExtensionFailed(key, e) from e
else:
self.__extensions[key] = lib
def _resolve_name(self, name: str, package: Optional[str]) -> str:
try:
return importlib.util.resolve_name(name, package)
except ImportError:
raise errors.ExtensionNotFound(name)
def load_extension(self, name: str, *, package: Optional[str] = None) -> None:
"""Loads an extension.
An extension is a python module that contains commands, cogs, or
listeners.
An extension must have a global function, ``setup`` defined as
the entry point on what to do when the extension is loaded. This entry
point must have a single argument, the ``bot``.
Parameters
------------
name: :class:`str`
The extension name to load. It must be dot separated like
regular Python imports if accessing a sub-module. e.g.
``foo.test`` if you want to import ``foo/test.py``.
package: Optional[:class:`str`]
The package name to resolve relative imports with.
This is required when loading an extension using a relative path, e.g ``.foo.test``.
Defaults to ``None``.
.. versionadded:: 1.7
Raises
--------
ExtensionNotFound
The extension could not be imported.
This is also raised if the name of the extension could not
be resolved using the provided ``package`` parameter.
ExtensionAlreadyLoaded
The extension is already loaded.
NoEntryPointError
The extension does not have a setup function.
ExtensionFailed
The extension or its setup function had an execution error.
"""
name = self._resolve_name(name, package)
if name in self.__extensions:
raise errors.ExtensionAlreadyLoaded(name)
spec = importlib.util.find_spec(name)
if spec is None:
raise errors.ExtensionNotFound(name)
self._load_from_module_spec(spec, name)
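# Illustrative usage (hypothetical extension module cogs/music.py defining setup(bot)):
#   bot.load_extension('cogs.music')
#   bot.load_extension('.music', package='cogs')  # equivalent, using a relative name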
def unload_extension(self, name: str, *, package: Optional[str] = None) -> None:
"""Unloads an extension.
When the extension is unloaded, all commands, listeners, and cogs are
removed from the bot and the module is un-imported.
The extension can provide an optional global function, ``teardown``,
to do miscellaneous clean-up if necessary. This function takes a single
parameter, the ``bot``, similar to ``setup`` from
:meth:`~.Bot.load_extension`.
Parameters
------------
name: :class:`str`
The extension name to unload. It must be dot separated like
regular Python imports if accessing a sub-module. e.g.
``foo.test`` if you want to import ``foo/test.py``.
package: Optional[:class:`str`]
The package name to resolve relative imports with.
            This is required when unloading an extension using a relative path, e.g. ``.foo.test``.
Defaults to ``None``.
.. versionadded:: 1.7
Raises
-------
ExtensionNotFound
The name of the extension could not
be resolved using the provided ``package`` parameter.
ExtensionNotLoaded
The extension was not loaded.
"""
name = self._resolve_name(name, package)
lib = self.__extensions.get(name)
if lib is None:
raise errors.ExtensionNotLoaded(name)
self._remove_module_references(lib.__name__)
self._call_module_finalizers(lib, name)
def reload_extension(self, name: str, *, package: Optional[str] = None) -> None:
"""Atomically reloads an extension.
This replaces the extension with the same extension, only refreshed. This is
equivalent to a :meth:`unload_extension` followed by a :meth:`load_extension`
except done in an atomic way. That is, if an operation fails mid-reload then
        the bot will roll back to the prior working state.
Parameters
------------
name: :class:`str`
The extension name to reload. It must be dot separated like
regular Python imports if accessing a sub-module. e.g.
``foo.test`` if you want to import ``foo/test.py``.
package: Optional[:class:`str`]
The package name to resolve relative imports with.
            This is required when reloading an extension using a relative path, e.g. ``.foo.test``.
Defaults to ``None``.
.. versionadded:: 1.7
Raises
-------
ExtensionNotLoaded
The extension was not loaded.
ExtensionNotFound
The extension could not be imported.
This is also raised if the name of the extension could not
be resolved using the provided ``package`` parameter.
NoEntryPointError
The extension does not have a setup function.
ExtensionFailed
The extension setup function had an execution error.
"""
name = self._resolve_name(name, package)
lib = self.__extensions.get(name)
if lib is None:
raise errors.ExtensionNotLoaded(name)
# get the previous module states from sys modules
# fmt: off
modules = {
name: module
for name, module in sys.modules.items()
if _is_submodule(lib.__name__, name)
}
# fmt: on
try:
# Unload and then load the module...
self._remove_module_references(lib.__name__)
self._call_module_finalizers(lib, name)
self.load_extension(name)
except Exception:
            # if the load failed, the remnants should have been
            # cleaned up by the load_extension call above,
            # so restore our old, already-imported module object.
lib.setup(self) # type: ignore
self.__extensions[name] = lib
# revert sys.modules back to normal and raise back to caller
sys.modules.update(modules)
raise
@property
def extensions(self) -> Mapping[str, types.ModuleType]:
"""Mapping[:class:`str`, :class:`py:types.ModuleType`]: A read-only mapping of extension name to extension."""
return types.MappingProxyType(self.__extensions)
# help command stuff
@property
def help_command(self) -> Optional[HelpCommand]:
return self._help_command
@help_command.setter
def help_command(self, value: Optional[HelpCommand]) -> None:
if value is not None:
if not isinstance(value, HelpCommand):
raise TypeError('help_command must be a subclass of HelpCommand')
if self._help_command is not None:
self._help_command._remove_from_bot(self)
self._help_command = value
value._add_to_bot(self)
elif self._help_command is not None:
self._help_command._remove_from_bot(self)
self._help_command = None
else:
self._help_command = None
# application command interop
# As mentioned above, this is a mixin so the Self type hint fails here.
# However, since the only classes that can use this are subclasses of Client
# anyway, then this is sound.
@property
def tree(self) -> app_commands.CommandTree[Self]: # type: ignore
""":class:`~discord.app_commands.CommandTree`: The command tree responsible for handling the application commands
in this bot.
.. versionadded:: 2.0
"""
return self.__tree
# command processing
async def get_prefix(self, message: Message) -> Union[List[str], str]:
"""|coro|
Retrieves the prefix the bot is listening to
with the message as a context.
Parameters
-----------
message: :class:`discord.Message`
The message context to get the prefix of.
Returns
--------
Union[List[:class:`str`], :class:`str`]
A list of prefixes or a single prefix that the bot is
listening for.
"""
prefix = ret = self.command_prefix
if callable(prefix):
ret = await discord.utils.maybe_coroutine(prefix, self, message)
if not isinstance(ret, str):
try:
ret = list(ret)
except TypeError:
# It's possible that a generator raised this exception. Don't
# replace it with our own error if that's the case.
if isinstance(ret, collections.abc.Iterable):
raise
raise TypeError(
"command_prefix must be plain string, iterable of strings, or callable "
f"returning either of these, not {ret.__class__.__name__}"
)
if not ret:
raise ValueError("Iterable command_prefix must contain at least one prefix")
return ret
@overload
async def get_context(
self,
message: Message,
) -> Context[Self]: # type: ignore
...
@overload
async def get_context(
self,
message: Message,
*,
cls: Type[CXT] = ...,
) -> CXT: # type: ignore
...
async def get_context(
self,
message: Message,
*,
cls: Type[CXT] = MISSING,
) -> Any:
r"""|coro|
Returns the invocation context from the message.
This is a more low-level counter-part for :meth:`.process_commands`
to allow users more fine grained control over the processing.
The returned context is not guaranteed to be a valid invocation
        context; :attr:`.Context.valid` must be checked to make sure it is.
If the context is not valid then it is not a valid candidate to be
invoked under :meth:`~.Bot.invoke`.
Parameters
-----------
message: :class:`discord.Message`
The message to get the invocation context from.
cls
The factory class that will be used to create the context.
By default, this is :class:`.Context`. Should a custom
class be provided, it must be similar enough to :class:`.Context`\'s
interface.
Returns
--------
:class:`.Context`
The invocation context. The type of this can change via the
``cls`` parameter.
"""
if cls is MISSING:
cls = Context # type: ignore
view = StringView(message.content)
ctx = cls(prefix=None, view=view, bot=self, message=message)
if message.author.id == self.user.id: # type: ignore
return ctx
prefix = await self.get_prefix(message)
invoked_prefix = prefix
if isinstance(prefix, str):
if not view.skip_string(prefix):
return ctx
else:
try:
# if the context class' __init__ consumes something from the view this
# will be wrong. That seems unreasonable though.
if message.content.startswith(tuple(prefix)):
invoked_prefix = discord.utils.find(view.skip_string, prefix)
else:
return ctx
except TypeError:
if not isinstance(prefix, list):
raise TypeError(
"get_prefix must return either a string or a list of string, " f"not {prefix.__class__.__name__}"
)
# It's possible a bad command_prefix got us here.
for value in prefix:
if not isinstance(value, str):
raise TypeError(
"Iterable command_prefix or list returned from get_prefix must "
f"contain only strings, not {value.__class__.__name__}"
)
# Getting here shouldn't happen
raise
if self.strip_after_prefix:
view.skip_ws()
invoker = view.get_word()
ctx.invoked_with = invoker
# type-checker fails to narrow invoked_prefix type.
ctx.prefix = invoked_prefix # type: ignore
ctx.command = self.all_commands.get(invoker)
return ctx
async def invoke(self, ctx: Context) -> None:
"""|coro|
Invokes the command given under the invocation context and
handles all the internal event dispatch mechanisms.
Parameters
-----------
ctx: :class:`.Context`
The invocation context to invoke.
"""
if ctx.command is not None:
self.dispatch('command', ctx)
try:
if await self.can_run(ctx, call_once=True):
await ctx.command.invoke(ctx)
else:
raise errors.CheckFailure('The global check once functions failed.')
except errors.CommandError as exc:
await ctx.command.dispatch_error(ctx, exc)
else:
self.dispatch('command_completion', ctx)
elif ctx.invoked_with:
exc = errors.CommandNotFound(f'Command "{ctx.invoked_with}" is not found')
self.dispatch('command_error', ctx, exc)
async def process_commands(self, message: Message) -> None:
"""|coro|
This function processes the commands that have been registered
to the bot and other groups. Without this coroutine, none of the
commands will be triggered.
By default, this coroutine is called inside the :func:`.on_message`
event. If you choose to override the :func:`.on_message` event, then
you should invoke this coroutine as well.
This is built using other low level tools, and is equivalent to a
call to :meth:`~.Bot.get_context` followed by a call to :meth:`~.Bot.invoke`.
This also checks if the message's author is a bot and doesn't
call :meth:`~.Bot.get_context` or :meth:`~.Bot.invoke` if so.
Parameters
-----------
message: :class:`discord.Message`
The message to process commands for.
"""
if message.author.bot:
return
ctx = await self.get_context(message)
await self.invoke(ctx)
async def on_message(self, message):
await self.process_commands(message)
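# A minimal standalone usage sketch (hypothetical helper, not part of this
# module). It illustrates two points documented above: command_prefix may be
# a callable, and an overridden on_message must still call process_commands
# for prefix commands to fire. The Intents/message_content setup is an
# assumption about the installed discord.py version.
def _example_prefix_and_on_message():
    import discord
    from discord.ext import commands

    intents = discord.Intents.default()
    intents.message_content = True

    async def dynamic_prefix(bot, message):
        # Prefix-less invocation in DMs, '!' everywhere else.
        return '' if message.guild is None else '!'

    bot = commands.Bot(command_prefix=dynamic_prefix, intents=intents)

    @bot.command()
    async def ping(ctx):
        await ctx.send('pong')

    @bot.event
    async def on_message(message):
        # Overriding on_message replaces the default handler, so commands
        # only run because process_commands is called explicitly here.
        await bot.process_commands(message)

    return bot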
class Bot(BotBase, discord.Client):
"""Represents a discord bot.
This class is a subclass of :class:`discord.Client` and as a result
anything that you can do with a :class:`discord.Client` you can do with
this bot.
This class also subclasses :class:`.GroupMixin` to provide the functionality
to manage commands.
Attributes
-----------
command_prefix
The command prefix is what the message content must contain initially
to have a command invoked. This prefix could either be a string to
indicate what the prefix should be, or a callable that takes in the bot
as its first parameter and :class:`discord.Message` as its second
parameter and returns the prefix. This is to facilitate "dynamic"
command prefixes. This callable can be either a regular function or
a coroutine.
An empty string as the prefix always matches, enabling prefix-less
command invocation. While this may be useful in DMs it should be avoided
in servers, as it's likely to cause performance issues and unintended
command invocations.
The command prefix could also be an iterable of strings indicating that
multiple checks for the prefix should be used and the first one to
match will be the invocation prefix. You can get this prefix via
:attr:`.Context.prefix`. To avoid confusion empty iterables are not
allowed.
.. note::
When passing multiple prefixes be careful to not pass a prefix
that matches a longer prefix occurring later in the sequence. For
example, if the command prefix is ``('!', '!?')`` the ``'!?'``
prefix will never be matched to any message as the previous one
matches messages starting with ``!?``. This is especially important
            when passing an empty string: it should always be last, as no prefix
after it will be matched.
case_insensitive: :class:`bool`
Whether the commands should be case insensitive. Defaults to ``False``. This
attribute does not carry over to groups. You must set it to every group if
you require group commands to be case insensitive as well.
description: :class:`str`
The content prefixed into the default help message.
help_command: Optional[:class:`.HelpCommand`]
The help command implementation to use. This can be dynamically
set at runtime. To remove the help command pass ``None``. For more
information on implementing a help command, see :ref:`ext_commands_help_command`.
owner_id: Optional[:class:`int`]
The user ID that owns the bot. If this is not set and is then queried via
:meth:`.is_owner` then it is fetched automatically using
:meth:`~.Bot.application_info`.
owner_ids: Optional[Collection[:class:`int`]]
        The user IDs that own the bot. This is similar to :attr:`owner_id`.
If this is not set and the application is team based, then it is
fetched automatically using :meth:`~.Bot.application_info`.
For performance reasons it is recommended to use a :class:`set`
for the collection. You cannot set both ``owner_id`` and ``owner_ids``.
.. versionadded:: 1.3
strip_after_prefix: :class:`bool`
Whether to strip whitespace characters after encountering the command
prefix. This allows for ``! hello`` and ``!hello`` to both work if
the ``command_prefix`` is set to ``!``. Defaults to ``False``.
.. versionadded:: 1.7
"""
pass
class AutoShardedBot(BotBase, discord.AutoShardedClient):
"""This is similar to :class:`.Bot` except that it is inherited from
:class:`discord.AutoShardedClient` instead.
"""
pass
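# A minimal sketch of the extension contract documented in load_extension /
# unload_extension above. The module path 'cogs.greetings' and the helper
# below are hypothetical. Note that in this snapshot the extension methods
# are synchronous; in released discord.py 2.x they are coroutines.
#
# Hypothetical contents of cogs/greetings.py:
#
#     from discord.ext import commands
#
#     @commands.command()
#     async def hello(ctx):
#         await ctx.send('hi!')
#
#     def setup(bot):
#         # Required entry point, called once when the extension is loaded.
#         bot.add_command(hello)
#
#     def teardown(bot):
#         # Optional finalizer, called when the extension is unloaded.
#         pass
def _example_extension_lifecycle(bot):
    # Load, hot-reload and unload the hypothetical extension above.
    bot.load_extension('cogs.greetings')
    bot.reload_extension('cogs.greetings')   # atomic: rolls back on failure
    bot.unload_extension('cogs.greetings')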
|
|
# Copyright (c) 2014 Openstack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from oslo_log import log as logging
import six
from neutron.agent.l3 import namespaces
from neutron.agent.linux import ip_lib
from neutron.agent.linux import iptables_manager
from neutron.agent.linux import ra
from neutron.common import constants as l3_constants
from neutron.common import exceptions as n_exc
from neutron.common import utils as common_utils
from neutron.i18n import _LW
LOG = logging.getLogger(__name__)
INTERNAL_DEV_PREFIX = namespaces.INTERNAL_DEV_PREFIX
EXTERNAL_DEV_PREFIX = namespaces.EXTERNAL_DEV_PREFIX
EXTERNAL_INGRESS_MARK_MASK = '0xffffffff'
FLOATINGIP_STATUS_NOCHANGE = object()
class RouterInfo(object):
def __init__(self,
router_id,
router,
agent_conf,
interface_driver,
use_ipv6=False):
self.router_id = router_id
self.ex_gw_port = None
self._snat_enabled = None
self._snat_action = None
self.internal_ports = []
self.floating_ips = set()
# Invoke the setter for establishing initial SNAT action
self.router = router
self.use_ipv6 = use_ipv6
self.ns_name = None
self.router_namespace = None
if agent_conf.use_namespaces:
ns = namespaces.RouterNamespace(
router_id, agent_conf, interface_driver, use_ipv6)
self.router_namespace = ns
self.ns_name = ns.name
self.iptables_manager = iptables_manager.IptablesManager(
use_ipv6=use_ipv6,
namespace=self.ns_name)
self.routes = []
self.agent_conf = agent_conf
self.driver = interface_driver
# radvd is a neutron.agent.linux.ra.DaemonMonitor
self.radvd = None
def initialize(self, process_monitor):
"""Initialize the router on the system.
This differs from __init__ in that this method actually affects the
        system, creating namespaces, starting processes, etc. The other merely
        initializes the Python object. This separates in-memory object
        initialization from methods that actually modify the system.
:param process_monitor: The agent's process monitor instance.
"""
self.process_monitor = process_monitor
self.radvd = ra.DaemonMonitor(self.router_id,
self.ns_name,
process_monitor,
self.get_internal_device_name)
if self.router_namespace:
self.router_namespace.create()
@property
def router(self):
return self._router
@router.setter
def router(self, value):
self._router = value
if not self._router:
return
# enable_snat by default if it wasn't specified by plugin
self._snat_enabled = self._router.get('enable_snat', True)
# Set a SNAT action for the router
if self._router.get('gw_port'):
self._snat_action = ('add_rules' if self._snat_enabled
else 'remove_rules')
elif self.ex_gw_port:
# Gateway port was removed, remove rules
self._snat_action = 'remove_rules'
@property
def is_ha(self):
# TODO(Carl) Refactoring should render this obsolete. Remove it.
return False
def get_internal_device_name(self, port_id):
return (INTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN]
def get_external_device_name(self, port_id):
return (EXTERNAL_DEV_PREFIX + port_id)[:self.driver.DEV_NAME_LEN]
def get_external_device_interface_name(self, ex_gw_port):
return self.get_external_device_name(ex_gw_port['id'])
def perform_snat_action(self, snat_callback, *args):
# Process SNAT rules for attached subnets
if self._snat_action:
snat_callback(self._router.get('gw_port'),
*args,
action=self._snat_action)
self._snat_action = None
def _update_routing_table(self, operation, route):
cmd = ['ip', 'route', operation, 'to', route['destination'],
'via', route['nexthop']]
ip_wrapper = ip_lib.IPWrapper(namespace=self.ns_name)
ip_wrapper.netns.execute(cmd, check_exit_code=False)
def routes_updated(self):
new_routes = self.router['routes']
old_routes = self.routes
adds, removes = common_utils.diff_list_of_dict(old_routes,
new_routes)
for route in adds:
LOG.debug("Added route entry is '%s'", route)
            # remove any replaced route from the list of routes to delete
for del_route in removes:
if route['destination'] == del_route['destination']:
removes.remove(del_route)
            # 'replace' succeeds even if there is no existing route
self._update_routing_table('replace', route)
for route in removes:
LOG.debug("Removed route entry is '%s'", route)
self._update_routing_table('delete', route)
self.routes = new_routes
def get_ex_gw_port(self):
return self.router.get('gw_port')
def get_floating_ips(self):
"""Filter Floating IPs to be hosted on this agent."""
return self.router.get(l3_constants.FLOATINGIP_KEY, [])
def floating_forward_rules(self, floating_ip, fixed_ip):
return [('PREROUTING', '-d %s -j DNAT --to %s' %
(floating_ip, fixed_ip)),
('OUTPUT', '-d %s -j DNAT --to %s' %
(floating_ip, fixed_ip)),
('float-snat', '-s %s -j SNAT --to %s' %
(fixed_ip, floating_ip))]
def process_floating_ip_nat_rules(self):
"""Configure NAT rules for the router's floating IPs.
Configures iptables rules for the floating ips of the given router
"""
# Clear out all iptables rules for floating ips
self.iptables_manager.ipv4['nat'].clear_rules_by_tag('floating_ip')
floating_ips = self.get_floating_ips()
# Loop once to ensure that floating ips are configured.
for fip in floating_ips:
# Rebuild iptables rules for the floating ip.
fixed = fip['fixed_ip_address']
fip_ip = fip['floating_ip_address']
for chain, rule in self.floating_forward_rules(fip_ip, fixed):
self.iptables_manager.ipv4['nat'].add_rule(chain, rule,
tag='floating_ip')
self.iptables_manager.apply()
def process_snat_dnat_for_fip(self):
try:
self.process_floating_ip_nat_rules()
except Exception:
# TODO(salv-orlando): Less broad catching
raise n_exc.FloatingIpSetupException(
'L3 agent failure to setup NAT for floating IPs')
def _add_fip_addr_to_device(self, fip, device):
"""Configures the floating ip address on the device.
"""
try:
ip_cidr = common_utils.ip_to_cidr(fip['floating_ip_address'])
device.addr.add(ip_cidr)
return True
except RuntimeError:
# any exception occurred here should cause the floating IP
# to be set in error state
LOG.warn(_LW("Unable to configure IP address for "
"floating IP: %s"), fip['id'])
def add_floating_ip(self, fip, interface_name, device):
raise NotImplementedError()
def remove_floating_ip(self, device, ip_cidr):
device.delete_addr_and_conntrack_state(ip_cidr)
def get_router_cidrs(self, device):
return set([addr['cidr'] for addr in device.addr.list()])
def process_floating_ip_addresses(self, interface_name):
"""Configure IP addresses on router's external gateway interface.
Ensures addresses for existing floating IPs and cleans up
        those that should no longer be configured.
"""
fip_statuses = {}
if interface_name is None:
LOG.debug('No Interface for floating IPs router: %s',
self.router['id'])
return fip_statuses
device = ip_lib.IPDevice(interface_name, namespace=self.ns_name)
existing_cidrs = self.get_router_cidrs(device)
new_cidrs = set()
floating_ips = self.get_floating_ips()
# Loop once to ensure that floating ips are configured.
for fip in floating_ips:
fip_ip = fip['floating_ip_address']
ip_cidr = common_utils.ip_to_cidr(fip_ip)
new_cidrs.add(ip_cidr)
fip_statuses[fip['id']] = l3_constants.FLOATINGIP_STATUS_ACTIVE
if ip_cidr not in existing_cidrs:
fip_statuses[fip['id']] = self.add_floating_ip(
fip, interface_name, device)
LOG.debug('Floating ip %(id)s added, status %(status)s',
{'id': fip['id'],
'status': fip_statuses.get(fip['id'])})
            # mark the status as not changed. We can't simply drop the entry,
            # because a missing key is how the caller detects that a
            # floating IP was removed.
if fip_statuses[fip['id']] == fip['status']:
fip_statuses[fip['id']] = FLOATINGIP_STATUS_NOCHANGE
fips_to_remove = (
ip_cidr for ip_cidr in existing_cidrs - new_cidrs
if common_utils.is_cidr_host(ip_cidr))
for ip_cidr in fips_to_remove:
self.remove_floating_ip(device, ip_cidr)
return fip_statuses
def configure_fip_addresses(self, interface_name):
try:
return self.process_floating_ip_addresses(interface_name)
except Exception:
# TODO(salv-orlando): Less broad catching
raise n_exc.FloatingIpSetupException('L3 agent failure to setup '
'floating IPs')
def put_fips_in_error_state(self):
fip_statuses = {}
for fip in self.router.get(l3_constants.FLOATINGIP_KEY, []):
fip_statuses[fip['id']] = l3_constants.FLOATINGIP_STATUS_ERROR
return fip_statuses
def delete(self, agent):
self.router['gw_port'] = None
self.router[l3_constants.INTERFACE_KEY] = []
self.router[l3_constants.FLOATINGIP_KEY] = []
self.process(agent)
self.disable_radvd()
if self.router_namespace:
self.router_namespace.delete()
def _internal_network_added(self, ns_name, network_id, port_id,
fixed_ips, mac_address,
interface_name, prefix):
self.driver.plug(network_id, port_id, interface_name, mac_address,
namespace=ns_name,
prefix=prefix)
ip_cidrs = common_utils.fixed_ip_cidrs(fixed_ips)
self.driver.init_l3(interface_name, ip_cidrs, namespace=ns_name)
for fixed_ip in fixed_ips:
ip_lib.send_ip_addr_adv_notif(ns_name,
interface_name,
fixed_ip['ip_address'],
self.agent_conf)
def internal_network_added(self, port):
network_id = port['network_id']
port_id = port['id']
fixed_ips = port['fixed_ips']
mac_address = port['mac_address']
interface_name = self.get_internal_device_name(port_id)
self._internal_network_added(self.ns_name,
network_id,
port_id,
fixed_ips,
mac_address,
interface_name,
INTERNAL_DEV_PREFIX)
def internal_network_removed(self, port):
interface_name = self.get_internal_device_name(port['id'])
if ip_lib.device_exists(interface_name, namespace=self.ns_name):
self.driver.unplug(interface_name, namespace=self.ns_name,
prefix=INTERNAL_DEV_PREFIX)
def _get_existing_devices(self):
ip_wrapper = ip_lib.IPWrapper(namespace=self.ns_name)
ip_devs = ip_wrapper.get_devices(exclude_loopback=True)
return [ip_dev.name for ip_dev in ip_devs]
@staticmethod
def _get_updated_ports(existing_ports, current_ports):
updated_ports = dict()
current_ports_dict = {p['id']: p for p in current_ports}
for existing_port in existing_ports:
current_port = current_ports_dict.get(existing_port['id'])
if current_port:
if sorted(existing_port['fixed_ips']) != (
sorted(current_port['fixed_ips'])):
updated_ports[current_port['id']] = current_port
return updated_ports
@staticmethod
def _port_has_ipv6_subnet(port):
if 'subnets' in port:
for subnet in port['subnets']:
if netaddr.IPNetwork(subnet['cidr']).version == 6:
return True
def enable_radvd(self, internal_ports=None):
LOG.debug('Spawning radvd daemon in router device: %s', self.router_id)
if not internal_ports:
internal_ports = self.internal_ports
self.radvd.enable(internal_ports)
def disable_radvd(self):
LOG.debug('Terminating radvd daemon in router device: %s',
self.router_id)
self.radvd.disable()
def internal_network_updated(self, interface_name, ip_cidrs):
self.driver.init_l3(interface_name, ip_cidrs=ip_cidrs,
namespace=self.ns_name)
def _process_internal_ports(self):
existing_port_ids = set(p['id'] for p in self.internal_ports)
internal_ports = self.router.get(l3_constants.INTERFACE_KEY, [])
current_port_ids = set(p['id'] for p in internal_ports
if p['admin_state_up'])
new_port_ids = current_port_ids - existing_port_ids
new_ports = [p for p in internal_ports if p['id'] in new_port_ids]
old_ports = [p for p in self.internal_ports
if p['id'] not in current_port_ids]
updated_ports = self._get_updated_ports(self.internal_ports,
internal_ports)
enable_ra = False
for p in new_ports:
self.internal_network_added(p)
self.internal_ports.append(p)
enable_ra = enable_ra or self._port_has_ipv6_subnet(p)
for p in old_ports:
self.internal_network_removed(p)
self.internal_ports.remove(p)
enable_ra = enable_ra or self._port_has_ipv6_subnet(p)
if updated_ports:
for index, p in enumerate(internal_ports):
if not updated_ports.get(p['id']):
continue
self.internal_ports[index] = updated_ports[p['id']]
interface_name = self.get_internal_device_name(p['id'])
ip_cidrs = common_utils.fixed_ip_cidrs(p['fixed_ips'])
self.internal_network_updated(interface_name, ip_cidrs)
enable_ra = enable_ra or self._port_has_ipv6_subnet(p)
# Enable RA
if enable_ra:
self.enable_radvd(internal_ports)
existing_devices = self._get_existing_devices()
current_internal_devs = set(n for n in existing_devices
if n.startswith(INTERNAL_DEV_PREFIX))
current_port_devs = set(self.get_internal_device_name(port_id)
for port_id in current_port_ids)
stale_devs = current_internal_devs - current_port_devs
for stale_dev in stale_devs:
LOG.debug('Deleting stale internal router device: %s',
stale_dev)
self.driver.unplug(stale_dev,
namespace=self.ns_name,
prefix=INTERNAL_DEV_PREFIX)
def _list_floating_ip_cidrs(self):
# Compute a list of addresses this router is supposed to have.
# This avoids unnecessarily removing those addresses and
        # causing a momentary network outage.
floating_ips = self.get_floating_ips()
return [common_utils.ip_to_cidr(ip['floating_ip_address'])
for ip in floating_ips]
def _plug_external_gateway(self, ex_gw_port, interface_name, ns_name):
self.driver.plug(ex_gw_port['network_id'],
ex_gw_port['id'],
interface_name,
ex_gw_port['mac_address'],
bridge=self.agent_conf.external_network_bridge,
namespace=ns_name,
prefix=EXTERNAL_DEV_PREFIX)
def _get_external_gw_ips(self, ex_gw_port):
gateway_ips = []
enable_ra_on_gw = False
if 'subnets' in ex_gw_port:
gateway_ips = [subnet['gateway_ip']
for subnet in ex_gw_port['subnets']
if subnet['gateway_ip']]
if self.use_ipv6 and not self.is_v6_gateway_set(gateway_ips):
# No IPv6 gateway is available, but IPv6 is enabled.
if self.agent_conf.ipv6_gateway:
# ipv6_gateway configured, use address for default route.
gateway_ips.append(self.agent_conf.ipv6_gateway)
else:
# ipv6_gateway is also not configured.
# Use RA for default route.
enable_ra_on_gw = True
return gateway_ips, enable_ra_on_gw
def _external_gateway_added(self, ex_gw_port, interface_name,
ns_name, preserve_ips):
self._plug_external_gateway(ex_gw_port, interface_name, ns_name)
# Build up the interface and gateway IP addresses that
# will be added to the interface.
ip_cidrs = common_utils.fixed_ip_cidrs(ex_gw_port['fixed_ips'])
gateway_ips, enable_ra_on_gw = self._get_external_gw_ips(ex_gw_port)
self.driver.init_l3(interface_name,
ip_cidrs,
namespace=ns_name,
gateway_ips=gateway_ips,
extra_subnets=ex_gw_port.get('extra_subnets', []),
preserve_ips=preserve_ips,
enable_ra_on_gw=enable_ra_on_gw,
clean_connections=True)
for fixed_ip in ex_gw_port['fixed_ips']:
ip_lib.send_ip_addr_adv_notif(ns_name,
interface_name,
fixed_ip['ip_address'],
self.agent_conf)
def is_v6_gateway_set(self, gateway_ips):
"""Check to see if list of gateway_ips has an IPv6 gateway.
"""
# Note - don't require a try-except here as all
# gateway_ips elements are valid addresses, if they exist.
return any(netaddr.IPAddress(gw_ip).version == 6
for gw_ip in gateway_ips)
def external_gateway_added(self, ex_gw_port, interface_name):
preserve_ips = self._list_floating_ip_cidrs()
self._external_gateway_added(
ex_gw_port, interface_name, self.ns_name, preserve_ips)
def external_gateway_updated(self, ex_gw_port, interface_name):
preserve_ips = self._list_floating_ip_cidrs()
self._external_gateway_added(
ex_gw_port, interface_name, self.ns_name, preserve_ips)
def external_gateway_removed(self, ex_gw_port, interface_name):
self.driver.unplug(interface_name,
bridge=self.agent_conf.external_network_bridge,
namespace=self.ns_name,
prefix=EXTERNAL_DEV_PREFIX)
def _process_external_gateway(self, ex_gw_port):
# TODO(Carl) Refactor to clarify roles of ex_gw_port vs self.ex_gw_port
ex_gw_port_id = (ex_gw_port and ex_gw_port['id'] or
self.ex_gw_port and self.ex_gw_port['id'])
interface_name = None
if ex_gw_port_id:
interface_name = self.get_external_device_name(ex_gw_port_id)
if ex_gw_port:
def _gateway_ports_equal(port1, port2):
def _get_filtered_dict(d, ignore):
return dict((k, v) for k, v in six.iteritems(d)
if k not in ignore)
keys_to_ignore = set(['binding:host_id'])
port1_filtered = _get_filtered_dict(port1, keys_to_ignore)
port2_filtered = _get_filtered_dict(port2, keys_to_ignore)
return port1_filtered == port2_filtered
if not self.ex_gw_port:
self.external_gateway_added(ex_gw_port, interface_name)
elif not _gateway_ports_equal(ex_gw_port, self.ex_gw_port):
self.external_gateway_updated(ex_gw_port, interface_name)
elif not ex_gw_port and self.ex_gw_port:
self.external_gateway_removed(self.ex_gw_port, interface_name)
existing_devices = self._get_existing_devices()
stale_devs = [dev for dev in existing_devices
if dev.startswith(EXTERNAL_DEV_PREFIX)
and dev != interface_name]
for stale_dev in stale_devs:
LOG.debug('Deleting stale external router device: %s', stale_dev)
self.driver.unplug(stale_dev,
bridge=self.agent_conf.external_network_bridge,
namespace=self.ns_name,
prefix=EXTERNAL_DEV_PREFIX)
# Process SNAT rules for external gateway
self.perform_snat_action(self._handle_router_snat_rules,
interface_name)
def external_gateway_nat_rules(self, ex_gw_ip, interface_name):
mark = self.agent_conf.external_ingress_mark
rules = [('POSTROUTING', '! -i %(interface_name)s '
'! -o %(interface_name)s -m conntrack ! '
'--ctstate DNAT -j ACCEPT' %
{'interface_name': interface_name}),
('snat', '-o %s -j SNAT --to-source %s' %
(interface_name, ex_gw_ip)),
('snat', '-m mark ! --mark %s '
'-m conntrack --ctstate DNAT '
'-j SNAT --to-source %s' % (mark, ex_gw_ip))]
return rules
def external_gateway_mangle_rules(self, interface_name):
mark = self.agent_conf.external_ingress_mark
rules = [('mark', '-i %s -j MARK --set-xmark %s/%s' %
(interface_name, mark, EXTERNAL_INGRESS_MARK_MASK))]
return rules
def _empty_snat_chains(self, iptables_manager):
iptables_manager.ipv4['nat'].empty_chain('POSTROUTING')
iptables_manager.ipv4['nat'].empty_chain('snat')
iptables_manager.ipv4['mangle'].empty_chain('mark')
def _add_snat_rules(self, ex_gw_port, iptables_manager,
interface_name, action):
if action == 'add_rules' and ex_gw_port:
# ex_gw_port should not be None in this case
# NAT rules are added only if ex_gw_port has an IPv4 address
for ip_addr in ex_gw_port['fixed_ips']:
ex_gw_ip = ip_addr['ip_address']
if netaddr.IPAddress(ex_gw_ip).version == 4:
rules = self.external_gateway_nat_rules(ex_gw_ip,
interface_name)
for rule in rules:
iptables_manager.ipv4['nat'].add_rule(*rule)
rules = self.external_gateway_mangle_rules(interface_name)
for rule in rules:
iptables_manager.ipv4['mangle'].add_rule(*rule)
break
def _handle_router_snat_rules(self, ex_gw_port,
interface_name, action):
self._empty_snat_chains(self.iptables_manager)
self.iptables_manager.ipv4['nat'].add_rule('snat', '-j $float-snat')
self._add_snat_rules(ex_gw_port,
self.iptables_manager,
interface_name,
action)
def process_external(self, agent):
existing_floating_ips = self.floating_ips
try:
with self.iptables_manager.defer_apply():
ex_gw_port = self.get_ex_gw_port()
self._process_external_gateway(ex_gw_port)
# TODO(Carl) Return after setting existing_floating_ips and
# still call update_fip_statuses?
if not ex_gw_port:
return
# Process SNAT/DNAT rules and addresses for floating IPs
self.process_snat_dnat_for_fip()
# Once NAT rules for floating IPs are safely in place
# configure their addresses on the external gateway port
interface_name = self.get_external_device_interface_name(
ex_gw_port)
fip_statuses = self.configure_fip_addresses(interface_name)
except (n_exc.FloatingIpSetupException,
n_exc.IpTablesApplyException) as e:
# All floating IPs must be put in error state
LOG.exception(e)
fip_statuses = self.put_fips_in_error_state()
agent.update_fip_statuses(self, existing_floating_ips, fip_statuses)
@common_utils.exception_logger()
def process(self, agent):
"""Process updates to this router
This method is the point where the agent requests that updates be
applied to this router.
:param agent: Passes the agent in order to send RPC messages.
"""
self._process_internal_ports()
self.process_external(agent)
# Process static routes for router
self.routes_updated()
# Update ex_gw_port and enable_snat on the router info cache
self.ex_gw_port = self.get_ex_gw_port()
self.snat_ports = self.router.get(
l3_constants.SNAT_ROUTER_INTF_KEY, [])
self.enable_snat = self.router.get('enable_snat')
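# A minimal usage sketch (not part of the original module): the static
# helper _port_has_ipv6_subnet can be exercised without building a full
# RouterInfo, showing how the agent decides whether radvd is needed for a
# port. The CIDRs are illustrative; netaddr (imported above) is the only
# requirement.
if __name__ == '__main__':
    _v6_port = {'subnets': [{'cidr': '2001:db8::/64'}]}
    _v4_port = {'subnets': [{'cidr': '192.0.2.0/24'}]}
    print(RouterInfo._port_has_ipv6_subnet(_v6_port))   # True
    print(RouterInfo._port_has_ipv6_subnet(_v4_port))   # None (falsy)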
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module provides the tools used to internally run the astropy test suite
from the installed astropy. It makes use of the `pytest` testing framework.
"""
import os
import sys
import types
import pickle
import warnings
import functools
from distutils.version import LooseVersion
import pytest
try:
# Import pkg_resources to prevent it from issuing warnings upon being
# imported from within py.test. See
# https://github.com/astropy/astropy/pull/537 for a detailed explanation.
import pkg_resources # pylint: disable=W0611 # noqa
except ImportError:
pass
from astropy.units import allclose as quantity_allclose # noqa
from astropy.utils.exceptions import (AstropyDeprecationWarning,
AstropyPendingDeprecationWarning)
# For backward-compatibility with affiliated packages
from .runner import TestRunner # pylint: disable=W0611 # noqa
__all__ = ['raises', 'enable_deprecations_as_exceptions', 'remote_data',
'treat_deprecations_as_exceptions', 'catch_warnings',
'assert_follows_unicode_guidelines',
'assert_quantity_allclose', 'check_pickling_recovery',
'pickle_protocol', 'generic_recursive_equality_test']
# pytest marker to mark tests which get data from the web
# This is being maintained for backwards compatibility
remote_data = pytest.mark.remote_data
# distutils expects options to be Unicode strings
def _fix_user_options(options):
def to_str_or_none(x):
if x is None:
return None
return str(x)
return [tuple(to_str_or_none(x) for x in y) for y in options]
def _save_coverage(cov, result, rootdir, testing_path):
"""
This method is called after the tests have been run in coverage mode
    to clean up and then save the coverage data and report.
"""
from astropy.utils.console import color_print
if result != 0:
return
# The coverage report includes the full path to the temporary
# directory, so we replace all the paths with the true source
# path. Note that this will not work properly for packages that still
# rely on 2to3.
try:
# Coverage 4.0: _harvest_data has been renamed to get_data, the
# lines dict is private
cov.get_data()
except AttributeError:
# Coverage < 4.0
cov._harvest_data()
lines = cov.data.lines
else:
lines = cov.data._lines
for key in list(lines.keys()):
new_path = os.path.relpath(
os.path.realpath(key),
os.path.realpath(testing_path))
new_path = os.path.abspath(
os.path.join(rootdir, new_path))
lines[new_path] = lines.pop(key)
color_print('Saving coverage data in .coverage...', 'green')
cov.save()
color_print('Saving HTML coverage report in htmlcov...', 'green')
cov.html_report(directory=os.path.join(rootdir, 'htmlcov'))
class raises:
"""
A decorator to mark that a test should raise a given exception.
Use as follows::
@raises(ZeroDivisionError)
def test_foo():
x = 1/0
    This can also be used as a context manager, in which case it is just
    an alias for the ``pytest.raises`` context manager (because the
    two have the same name, this helps avoid confusion by being
    flexible).
"""
# pep-8 naming exception -- this is a decorator class
def __init__(self, exc):
self._exc = exc
self._ctx = None
def __call__(self, func):
@functools.wraps(func)
def run_raises_test(*args, **kwargs):
pytest.raises(self._exc, func, *args, **kwargs)
return run_raises_test
def __enter__(self):
self._ctx = pytest.raises(self._exc)
return self._ctx.__enter__()
def __exit__(self, *exc_info):
return self._ctx.__exit__(*exc_info)
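# A small usage sketch (hypothetical helper, not part of astropy): the
# decorator form marks a whole test as expected to raise, while the
# context-manager form scopes the expectation to a single block.
def _example_raises_usage():
    @raises(ZeroDivisionError)
    def divide_decorated():
        1 / 0

    def divide_in_context():
        with raises(ZeroDivisionError):
            1 / 0

    divide_decorated()
    divide_in_context()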
_deprecations_as_exceptions = False
_include_astropy_deprecations = True
_modules_to_ignore_on_import = set([
r'compiler', # A deprecated stdlib module used by py.test
r'scipy',
r'pygments',
r'ipykernel',
r'IPython', # deprecation warnings for async and await
r'setuptools'])
_warnings_to_ignore_entire_module = set([])
_warnings_to_ignore_by_pyver = {
None: set([ # Python version agnostic
# https://github.com/astropy/astropy/pull/7372
(r"Importing from numpy\.testing\.decorators is deprecated, "
r"import from numpy\.testing instead\.", DeprecationWarning),
# inspect raises this slightly different warning on Python 3.6-3.7.
# Keeping it since e.g. lxml as of 3.8.0 is still calling getargspec()
(r"inspect\.getargspec\(\) is deprecated, use "
r"inspect\.signature\(\) or inspect\.getfullargspec\(\)",
DeprecationWarning),
# https://github.com/astropy/pytest-doctestplus/issues/29
(r"split\(\) requires a non-empty pattern match", FutureWarning),
# Package resolution warning that we can do nothing about
(r"can't resolve package from __spec__ or __package__, "
r"falling back on __name__ and __path__", ImportWarning)]),
(3, 7): set([
# Deprecation warning for collections.abc, fixed in Astropy but still
# used in lxml, and maybe others
(r"Using or importing the ABCs from 'collections'",
DeprecationWarning)])
}
def enable_deprecations_as_exceptions(include_astropy_deprecations=True,
modules_to_ignore_on_import=[],
warnings_to_ignore_entire_module=[],
warnings_to_ignore_by_pyver={}):
"""
Turn on the feature that turns deprecations into exceptions.
Parameters
----------
include_astropy_deprecations : bool
If set to `True`, ``AstropyDeprecationWarning`` and
``AstropyPendingDeprecationWarning`` are also turned into exceptions.
modules_to_ignore_on_import : list of str
List of additional modules that generate deprecation warnings
        on import, which are to be ignored. By default, these are already
        included: ``compiler``, ``scipy``, ``pygments``, ``ipykernel``,
        ``IPython``, and ``setuptools``.
warnings_to_ignore_entire_module : list of str
List of modules with deprecation warnings to ignore completely,
not just during import. If ``include_astropy_deprecations=True``
is given, ``AstropyDeprecationWarning`` and
``AstropyPendingDeprecationWarning`` are also ignored for the modules.
warnings_to_ignore_by_pyver : dict
Dictionary mapping tuple of ``(major, minor)`` Python version to
a list of ``(warning_message, warning_class)`` to ignore.
Python version-agnostic warnings should be mapped to `None` key.
        This is in addition to those already ignored by default
(see ``_warnings_to_ignore_by_pyver`` values).
"""
global _deprecations_as_exceptions
_deprecations_as_exceptions = True
global _include_astropy_deprecations
_include_astropy_deprecations = include_astropy_deprecations
global _modules_to_ignore_on_import
_modules_to_ignore_on_import.update(modules_to_ignore_on_import)
global _warnings_to_ignore_entire_module
_warnings_to_ignore_entire_module.update(warnings_to_ignore_entire_module)
global _warnings_to_ignore_by_pyver
for key, val in warnings_to_ignore_by_pyver.items():
if key in _warnings_to_ignore_by_pyver:
_warnings_to_ignore_by_pyver[key].update(val)
else:
_warnings_to_ignore_by_pyver[key] = set(val)
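# A minimal sketch of how a package might opt in, typically from its
# conftest.py. The module name 'h5py' and the warning pattern below are
# illustrative assumptions, not astropy defaults.
def _example_conftest_opt_in():
    enable_deprecations_as_exceptions(
        include_astropy_deprecations=False,
        modules_to_ignore_on_import=['h5py'],
        warnings_to_ignore_by_pyver={
            None: [(r"the imp module is deprecated", DeprecationWarning)],
        })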
def treat_deprecations_as_exceptions():
"""
Turn all DeprecationWarnings (which indicate deprecated uses of
Python itself or Numpy, but not within Astropy, where we use our
own deprecation warning class) into exceptions so that we find
out about them early.
This completely resets the warning filters and any "already seen"
warning state.
"""
# First, totally reset the warning state. The modules may change during
# this iteration thus we copy the original state to a list to iterate
# on. See https://github.com/astropy/astropy/pull/5513.
for module in list(sys.modules.values()):
# We don't want to deal with six.MovedModules, only "real"
        # modules. FIXME: six is no longer used, so this check may be unnecessary.
if (isinstance(module, types.ModuleType) and
hasattr(module, '__warningregistry__')):
del module.__warningregistry__
if not _deprecations_as_exceptions:
return
warnings.resetwarnings()
# Hide the next couple of DeprecationWarnings
warnings.simplefilter('ignore', DeprecationWarning)
# Here's the wrinkle: a couple of our third-party dependencies
# (py.test and scipy) are still using deprecated features
# themselves, and we'd like to ignore those. Fortunately, those
# show up only at import time, so if we import those things *now*,
# before we turn the warnings into exceptions, we're golden.
for m in _modules_to_ignore_on_import:
try:
__import__(m)
except ImportError:
pass
# Now, start over again with the warning filters
warnings.resetwarnings()
# Now, turn these warnings into exceptions
_all_warns = [DeprecationWarning, FutureWarning, ImportWarning]
# Only turn astropy deprecation warnings into exceptions if requested
if _include_astropy_deprecations:
_all_warns += [AstropyDeprecationWarning,
AstropyPendingDeprecationWarning]
for w in _all_warns:
warnings.filterwarnings("error", ".*", w)
# This ignores all specified warnings from given module(s),
# not just on import, for use of Astropy affiliated packages.
for m in _warnings_to_ignore_entire_module:
for w in _all_warns:
warnings.filterwarnings('ignore', category=w, module=m)
# This ignores only specified warnings by Python version, if applicable.
for v in _warnings_to_ignore_by_pyver:
if v is None or sys.version_info[:2] == v:
for s in _warnings_to_ignore_by_pyver[v]:
warnings.filterwarnings("ignore", s[0], s[1])
# If using Matplotlib < 3, we should ignore the following warning since
# this is beyond our control
try:
import matplotlib
except ImportError:
pass
else:
if LooseVersion(matplotlib.__version__) < '3':
warnings.filterwarnings('ignore', category=DeprecationWarning,
module='numpy.lib.type_check')
class catch_warnings(warnings.catch_warnings):
"""
A high-powered version of warnings.catch_warnings to use for testing
and to make sure that there is no dependence on the order in which
the tests are run.
This completely blitzes any memory of any warnings that have
appeared before so that all warnings will be caught and displayed.
``*args`` is a set of warning classes to collect. If no arguments are
provided, all warnings are collected.
Use as follows::
with catch_warnings(MyCustomWarning) as w:
do.something.bad()
assert len(w) > 0
"""
def __init__(self, *classes):
super().__init__(record=True)
self.classes = classes
def __enter__(self):
warning_list = super().__enter__()
treat_deprecations_as_exceptions()
if len(self.classes) == 0:
warnings.simplefilter('always')
else:
warnings.simplefilter('ignore')
for cls in self.classes:
warnings.simplefilter('always', cls)
return warning_list
def __exit__(self, type, value, traceback):
treat_deprecations_as_exceptions()
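# A usage sketch (hypothetical test, not part of astropy): collect only one
# warning class inside the block and make assertions on it afterwards.
def _example_catch_astropy_deprecation():
    with catch_warnings(AstropyDeprecationWarning) as w:
        warnings.warn("old API", AstropyDeprecationWarning)
    assert len(w) == 1
    assert issubclass(w[0].category, AstropyDeprecationWarning)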
class ignore_warnings(catch_warnings):
"""
This can be used either as a context manager or function decorator to
ignore all warnings that occur within a function or block of code.
An optional category option can be supplied to only ignore warnings of a
certain category or categories (if a list is provided).
"""
def __init__(self, category=None):
super().__init__()
if isinstance(category, type) and issubclass(category, Warning):
self.category = [category]
else:
self.category = category
def __call__(self, func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
# Originally this just reused self, but that doesn't work if the
# function is called more than once so we need to make a new
# context manager instance for each call
with self.__class__(category=self.category):
return func(*args, **kwargs)
return wrapper
def __enter__(self):
retval = super().__enter__()
if self.category is not None:
for category in self.category:
warnings.simplefilter('ignore', category)
else:
warnings.simplefilter('ignore')
return retval
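# A usage sketch (hypothetical functions, not part of astropy): as a
# decorator, a fresh context manager is built for every call so the wrapped
# function can run repeatedly; as a context manager, only the listed
# category is silenced.
@ignore_warnings(DeprecationWarning)
def _example_call_legacy_api():
    warnings.warn("legacy call", DeprecationWarning)


def _example_ignore_user_warnings():
    with ignore_warnings(UserWarning):
        warnings.warn("noisy but ignored", UserWarning)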
def assert_follows_unicode_guidelines(
x, roundtrip=None):
"""
Test that an object follows our Unicode policy. See
"Unicode guidelines" in the coding guidelines.
Parameters
----------
x : object
The instance to test
roundtrip : module, optional
When provided, this namespace will be used to evaluate
``repr(x)`` and ensure that it roundtrips. It will also
        ensure that ``__bytes__(x)`` roundtrips.
If not provided, no roundtrip testing will be performed.
"""
from astropy import conf
with conf.set_temp('unicode_output', False):
bytes_x = bytes(x)
unicode_x = str(x)
repr_x = repr(x)
assert isinstance(bytes_x, bytes)
bytes_x.decode('ascii')
assert isinstance(unicode_x, str)
unicode_x.encode('ascii')
assert isinstance(repr_x, str)
if isinstance(repr_x, bytes):
repr_x.decode('ascii')
else:
repr_x.encode('ascii')
if roundtrip is not None:
assert x.__class__(bytes_x) == x
assert x.__class__(unicode_x) == x
assert eval(repr_x, roundtrip) == x
with conf.set_temp('unicode_output', True):
bytes_x = bytes(x)
unicode_x = str(x)
repr_x = repr(x)
assert isinstance(bytes_x, bytes)
bytes_x.decode('ascii')
assert isinstance(unicode_x, str)
assert isinstance(repr_x, str)
if isinstance(repr_x, bytes):
repr_x.decode('ascii')
else:
repr_x.encode('ascii')
if roundtrip is not None:
assert x.__class__(bytes_x) == x
assert x.__class__(unicode_x) == x
assert eval(repr_x, roundtrip) == x
@pytest.fixture(params=[0, 1, -1])
def pickle_protocol(request):
"""
    Fixture to run all the tests for pickle protocols 0, 1, and -1 (the highest available).
(Originally from astropy.table.tests.test_pickle)
"""
return request.param
def generic_recursive_equality_test(a, b, class_history):
"""
Check if the attributes of a and b are equal. Then,
check if the attributes of the attributes are equal.
"""
dict_a = a.__dict__
dict_b = b.__dict__
for key in dict_a:
assert key in dict_b,\
f"Did not pickle {key}"
if hasattr(dict_a[key], '__eq__'):
eq = (dict_a[key] == dict_b[key])
if '__iter__' in dir(eq):
eq = (False not in eq)
assert eq, f"Value of {key} changed by pickling"
if hasattr(dict_a[key], '__dict__'):
if dict_a[key].__class__ in class_history:
# attempt to prevent infinite recursion
pass
else:
new_class_history = [dict_a[key].__class__]
new_class_history.extend(class_history)
generic_recursive_equality_test(dict_a[key],
dict_b[key],
new_class_history)
def check_pickling_recovery(original, protocol):
"""
Try to pickle an object. If successful, make sure
the object's attributes survived pickling and unpickling.
"""
f = pickle.dumps(original, protocol=protocol)
unpickled = pickle.loads(f)
class_history = [original.__class__]
generic_recursive_equality_test(original, unpickled,
class_history)
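# A usage sketch (hypothetical class and test, not part of astropy): paired
# with the pickle_protocol fixture above, this checks that a simple object's
# attributes survive a pickle round trip at protocols 0, 1 and -1. A real
# test would be named test_* so pytest collects it.
class _PickleExample:
    def __init__(self):
        self.value = 42
        self.name = 'example'


def _example_pickle_roundtrip(pickle_protocol):
    check_pickling_recovery(_PickleExample(), pickle_protocol)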
def assert_quantity_allclose(actual, desired, rtol=1.e-7, atol=None,
**kwargs):
"""
Raise an assertion if two objects are not equal up to desired tolerance.
This is a :class:`~astropy.units.Quantity`-aware version of
:func:`numpy.testing.assert_allclose`.
"""
import numpy as np
from astropy.units.quantity import _unquantify_allclose_arguments
np.testing.assert_allclose(*_unquantify_allclose_arguments(
actual, desired, rtol, atol), **kwargs)
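# A usage sketch (hypothetical test; astropy.units and numpy are required,
# both of which are astropy dependencies): quantities are converted to a
# common unit before comparison, so values in different but compatible units
# compare close, and the tolerance itself may carry units.
def _example_quantity_allclose():
    from astropy import units as u
    assert_quantity_allclose(1000.0 * u.m, 1.0 * u.km)
    assert_quantity_allclose(1.0 * u.km, 1000.5 * u.m, atol=1.0 * u.m)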
|
|
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Base class for RPC testing."""
import configparser
from enum import Enum
import logging
import argparse
import os
import pdb
import shutil
import sys
import tempfile
import time
from .authproxy import JSONRPCException
from . import coverage
from .test_node import TestNode
from .mininode import NetworkThread
from .util import (
MAX_NODES,
PortSeed,
assert_equal,
check_json_precision,
connect_nodes_bi,
disconnect_nodes,
get_datadir_path,
initialize_datadir,
p2p_port,
set_node_times,
sync_blocks,
sync_mempools,
)
class TestStatus(Enum):
PASSED = 1
FAILED = 2
SKIPPED = 3
TEST_EXIT_PASSED = 0
TEST_EXIT_FAILED = 1
TEST_EXIT_SKIPPED = 77
class SkipTest(Exception):
"""This exception is raised to skip a test"""
def __init__(self, message):
self.message = message
class BitcoinTestMetaClass(type):
"""Metaclass for BitcoinTestFramework.
Ensures that any attempt to register a subclass of `BitcoinTestFramework`
adheres to a standard whereby the subclass overrides `set_test_params` and
`run_test` but DOES NOT override either `__init__` or `main`. If any of
those standards are violated, a ``TypeError`` is raised."""
def __new__(cls, clsname, bases, dct):
if not clsname == 'BitcoinTestFramework':
if not ('run_test' in dct and 'set_test_params' in dct):
raise TypeError("BitcoinTestFramework subclasses must override "
"'run_test' and 'set_test_params'")
if '__init__' in dct or 'main' in dct:
raise TypeError("BitcoinTestFramework subclasses may not override "
"'__init__' or 'main'")
return super().__new__(cls, clsname, bases, dct)
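# A minimal sketch of a functional test script built on this framework. In a
# real tree this would be a separate file that imports BitcoinTestFramework
# and assert_equal from the test_framework package; the class name, two-node
# topology and mined-block count are illustrative assumptions. The metaclass
# above enforces that set_test_params and run_test are overridden while
# __init__ and main are not.
#
#     from test_framework.test_framework import BitcoinTestFramework
#     from test_framework.util import assert_equal
#
#     class ExampleBlockCountTest(BitcoinTestFramework):
#         def set_test_params(self):
#             self.num_nodes = 2
#             self.setup_clean_chain = True
#
#         def run_test(self):
#             self.log.info("Mine a block on node0 and verify propagation")
#             self.nodes[0].generate(1)
#             self.sync_all()
#             assert_equal(self.nodes[0].getblockcount(), 1)
#             assert_equal(self.nodes[1].getblockcount(), 1)
#
#     if __name__ == '__main__':
#         ExampleBlockCountTest().main()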
class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
"""Base class for a bitcoin test script.
Individual bitcoin test scripts should subclass this class and override the set_test_params() and run_test() methods.
Individual tests can also override the following methods to customize the test setup:
- add_options()
- setup_chain()
- setup_network()
- setup_nodes()
The __init__() and main() methods should not be overridden.
This class also contains various public and private helper methods."""
def __init__(self):
"""Sets test framework defaults. Do not override this method. Instead, override the set_test_params() method"""
self.setup_clean_chain = False
self.nodes = []
self.network_thread = None
self.mocktime = 0
self.rpc_timewait = 60 # Wait for up to 60 seconds for the RPC server to respond
self.supports_cli = False
self.bind_to_localhost_only = True
self.set_test_params()
assert hasattr(self, "num_nodes"), "Test must set self.num_nodes in set_test_params()"
def main(self):
"""Main function. This should not be overridden by the subclass test scripts."""
parser = argparse.ArgumentParser(usage="%(prog)s [options]")
parser.add_argument("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave bitcoinds and test.* datadir on exit or error")
parser.add_argument("--noshutdown", dest="noshutdown", default=False, action="store_true",
help="Don't stop bitcoinds after the test execution")
parser.add_argument("--cachedir", dest="cachedir", default=os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
help="Directory for caching pregenerated datadirs (default: %(default)s)")
parser.add_argument("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
parser.add_argument("-l", "--loglevel", dest="loglevel", default="INFO",
help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
parser.add_argument("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
parser.add_argument("--portseed", dest="port_seed", default=os.getpid(), type=int,
help="The seed to use for assigning port numbers (default: current process id)")
parser.add_argument("--coveragedir", dest="coveragedir",
help="Write tested RPC commands into this directory")
parser.add_argument("--configfile", dest="configfile",
default=os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../../config.ini"),
help="Location of the test framework config file (default: %(default)s)")
parser.add_argument("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true",
help="Attach a python debugger if test fails")
parser.add_argument("--usecli", dest="usecli", default=False, action="store_true",
help="use bgold-cli instead of RPC for all commands")
self.add_options(parser)
self.options = parser.parse_args()
PortSeed.n = self.options.port_seed
check_json_precision()
self.options.cachedir = os.path.abspath(self.options.cachedir)
config = configparser.ConfigParser()
config.read_file(open(self.options.configfile))
self.options.bitcoind = os.getenv("BITCOIND", default=config["environment"]["BUILDDIR"] + '/src/bgoldd' + config["environment"]["EXEEXT"])
self.options.bitcoincli = os.getenv("BITCOINCLI", default=config["environment"]["BUILDDIR"] + '/src/bgold-cli' + config["environment"]["EXEEXT"])
os.environ['PATH'] = os.pathsep.join([
os.path.join(config['environment']['BUILDDIR'], 'src'),
os.path.join(config['environment']['BUILDDIR'], 'src', 'qt'),
os.environ['PATH']
])
# Set up temp directory and start logging
if self.options.tmpdir:
self.options.tmpdir = os.path.abspath(self.options.tmpdir)
os.makedirs(self.options.tmpdir, exist_ok=False)
else:
self.options.tmpdir = tempfile.mkdtemp(prefix="test")
self._start_logging()
self.log.debug('Setting up network thread')
self.network_thread = NetworkThread()
self.network_thread.start()
success = TestStatus.FAILED
try:
if self.options.usecli and not self.supports_cli:
raise SkipTest("--usecli specified but test does not support using CLI")
self.skip_test_if_missing_module()
self.setup_chain()
self.setup_network()
self.import_deterministic_coinbase_privkeys()
self.run_test()
success = TestStatus.PASSED
except JSONRPCException as e:
self.log.exception("JSONRPC error")
except SkipTest as e:
self.log.warning("Test Skipped: %s" % e.message)
success = TestStatus.SKIPPED
except AssertionError as e:
self.log.exception("Assertion failed")
except KeyError as e:
self.log.exception("Key error")
except Exception as e:
self.log.exception("Unexpected exception caught during testing")
except KeyboardInterrupt as e:
self.log.warning("Exiting after keyboard interrupt")
if success == TestStatus.FAILED and self.options.pdbonfailure:
print("Testcase failed. Attaching python debugger. Enter ? for help")
pdb.set_trace()
self.log.debug('Closing down network thread')
self.network_thread.close()
if not self.options.noshutdown:
self.log.info("Stopping nodes")
if self.nodes:
self.stop_nodes()
else:
for node in self.nodes:
node.cleanup_on_exit = False
self.log.info("Note: bitcoinds were not stopped and may still be running")
if not self.options.nocleanup and not self.options.noshutdown and success != TestStatus.FAILED:
self.log.info("Cleaning up {} on exit".format(self.options.tmpdir))
cleanup_tree_on_exit = True
else:
self.log.warning("Not cleaning up dir %s" % self.options.tmpdir)
cleanup_tree_on_exit = False
if success == TestStatus.PASSED:
self.log.info("Tests successful")
exit_code = TEST_EXIT_PASSED
elif success == TestStatus.SKIPPED:
self.log.info("Test skipped")
exit_code = TEST_EXIT_SKIPPED
else:
self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
self.log.error("Hint: Call {} '{}' to consolidate all logs".format(os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../combine_logs.py"), self.options.tmpdir))
exit_code = TEST_EXIT_FAILED
logging.shutdown()
if cleanup_tree_on_exit:
shutil.rmtree(self.options.tmpdir)
sys.exit(exit_code)
# Methods to override in subclass test scripts.
def set_test_params(self):
"""Tests must this method to change default values for number of nodes, topology, etc"""
raise NotImplementedError
def add_options(self, parser):
"""Override this method to add command-line options to the test"""
pass
def skip_test_if_missing_module(self):
"""Override this method to skip a test if a module is not compiled"""
pass
def setup_chain(self):
"""Override this method to customize blockchain setup"""
self.log.info("Initializing test directory " + self.options.tmpdir)
if self.setup_clean_chain:
self._initialize_chain_clean()
else:
self._initialize_chain()
def setup_network(self):
"""Override this method to customize test network topology"""
self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
for i in range(self.num_nodes - 1):
connect_nodes_bi(self.nodes, i, i + 1)
self.sync_all()
def setup_nodes(self):
"""Override this method to customize test node setup"""
extra_args = None
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.add_nodes(self.num_nodes, extra_args)
self.start_nodes()
def import_deterministic_coinbase_privkeys(self):
if self.setup_clean_chain:
return
for n in self.nodes:
try:
n.getwalletinfo()
except JSONRPCException as e:
assert str(e).startswith('Method not found')
continue
n.importprivkey(n.get_deterministic_priv_key()[1])
def run_test(self):
"""Tests must override this method to define test logic"""
raise NotImplementedError
# Public helper methods. These can be accessed by the subclass test scripts.
def add_nodes(self, num_nodes, extra_args=None, *, rpchost=None, binary=None):
"""Instantiate TestNode objects"""
if self.bind_to_localhost_only:
extra_confs = [["bind=127.0.0.1"]] * num_nodes
else:
extra_confs = [[]] * num_nodes
if extra_args is None:
extra_args = [[]] * num_nodes
if binary is None:
binary = [self.options.bitcoind] * num_nodes
assert_equal(len(extra_confs), num_nodes)
assert_equal(len(extra_args), num_nodes)
assert_equal(len(binary), num_nodes)
for i in range(num_nodes):
self.nodes.append(TestNode(i, get_datadir_path(self.options.tmpdir, i), rpchost=rpchost, timewait=self.rpc_timewait, bitcoind=binary[i], bitcoin_cli=self.options.bitcoincli, mocktime=self.mocktime, coverage_dir=self.options.coveragedir, extra_conf=extra_confs[i], extra_args=extra_args[i], use_cli=self.options.usecli))
def start_node(self, i, *args, **kwargs):
"""Start a bitcoind"""
node = self.nodes[i]
node.start(*args, **kwargs)
node.wait_for_rpc_connection()
if self.options.coveragedir is not None:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def start_nodes(self, extra_args=None, *args, **kwargs):
"""Start multiple bitcoinds"""
if extra_args is None:
extra_args = [None] * self.num_nodes
assert_equal(len(extra_args), self.num_nodes)
try:
for i, node in enumerate(self.nodes):
node.start(extra_args[i], *args, **kwargs)
for node in self.nodes:
node.wait_for_rpc_connection()
except:
# If one node failed to start, stop the others
self.stop_nodes()
raise
if self.options.coveragedir is not None:
for node in self.nodes:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def stop_node(self, i, expected_stderr=''):
"""Stop a bitcoind test node"""
self.nodes[i].stop_node(expected_stderr)
self.nodes[i].wait_until_stopped()
def stop_nodes(self):
"""Stop multiple bitcoind test nodes"""
for node in self.nodes:
# Issue RPC to stop nodes
node.stop_node()
for node in self.nodes:
# Wait for nodes to stop
node.wait_until_stopped()
def restart_node(self, i, extra_args=None):
"""Stop and start a test node"""
self.stop_node(i)
self.start_node(i, extra_args)
def wait_for_node_exit(self, i, timeout):
self.nodes[i].process.wait(timeout)
def split_network(self):
"""
Split the network of four nodes into nodes 0/1 and 2/3.
"""
disconnect_nodes(self.nodes[1], 2)
disconnect_nodes(self.nodes[2], 1)
self.sync_all([self.nodes[:2], self.nodes[2:]])
def join_network(self):
"""
Join the (previously split) network halves together.
"""
connect_nodes_bi(self.nodes, 1, 2)
self.sync_all()
def sync_all(self, node_groups=None):
if not node_groups:
node_groups = [self.nodes]
for group in node_groups:
sync_blocks(group)
sync_mempools(group)
def enable_mocktime(self):
"""Enable mocktime for the script.
mocktime may be needed for scripts that use the cached version of the
blockchain. If the cached version of the blockchain is used without
mocktime then the mempools will not sync due to IBD.
For backward compatibility of the python scripts with previous
versions of the cache, this helper function sets mocktime to Jan 1,
2014 + (201 * 10 * 60)"""
self.mocktime = 1388534400 + (201 * 10 * 60)
def disable_mocktime(self):
self.mocktime = 0
# Private helper methods. These should not be accessed by the subclass test scripts.
def _start_logging(self):
# Add logger and logging handlers
self.log = logging.getLogger('TestFramework')
self.log.setLevel(logging.DEBUG)
# Create file handler to log all messages
fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log', encoding='utf-8')
fh.setLevel(logging.DEBUG)
# Create console handler to log messages to stdout. By default this logs only error messages, but can be configured with --loglevel.
ch = logging.StreamHandler(sys.stdout)
# User can provide log level as a number or string (eg DEBUG). loglevel was caught as a string, so try to convert it to an int
ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
ch.setLevel(ll)
# Format logs the same as bitcoind's debug.log with microprecision (so log files can be concatenated and sorted)
formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000Z %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%dT%H:%M:%S')
formatter.converter = time.gmtime
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
self.log.addHandler(fh)
self.log.addHandler(ch)
if self.options.trace_rpc:
rpc_logger = logging.getLogger("BitcoinRPC")
rpc_logger.setLevel(logging.DEBUG)
rpc_handler = logging.StreamHandler(sys.stdout)
rpc_handler.setLevel(logging.DEBUG)
rpc_logger.addHandler(rpc_handler)
def _initialize_chain(self):
"""Initialize a pre-mined blockchain for use by the test.
Create a cache of a 200-block-long chain for MAX_NODES
Afterward, create num_nodes copies from the cache."""
assert self.num_nodes <= MAX_NODES
create_cache = False
for i in range(MAX_NODES):
if not os.path.isdir(get_datadir_path(self.options.cachedir, i)):
create_cache = True
break
if create_cache:
self.log.debug("Creating data directories from cached datadir")
# find and delete old cache directories if any exist
for i in range(MAX_NODES):
if os.path.isdir(get_datadir_path(self.options.cachedir, i)):
shutil.rmtree(get_datadir_path(self.options.cachedir, i))
# Create cache directories, run bitcoinds:
for i in range(MAX_NODES):
datadir = initialize_datadir(self.options.cachedir, i)
args = [self.options.bitcoind, "-datadir=" + datadir, '-disablewallet']
if i > 0:
args.append("-connect=127.0.0.1:" + str(p2p_port(0)))
self.nodes.append(TestNode(i, get_datadir_path(self.options.cachedir, i), extra_conf=["bind=127.0.0.1"], extra_args=[], rpchost=None, timewait=self.rpc_timewait, bitcoind=self.options.bitcoind, bitcoin_cli=self.options.bitcoincli, mocktime=self.mocktime, coverage_dir=None))
self.nodes[i].args = args
self.start_node(i)
# Wait for RPC connections to be ready
for node in self.nodes:
node.wait_for_rpc_connection()
# Create a 200-block-long chain; each of the first 4 nodes
# gets 25 mature blocks and 25 immature.
# Note: To preserve compatibility with older versions of
# initialize_chain, only 4 nodes will generate coins.
#
# blocks are created with timestamps 10 minutes apart
# starting from 2010 minutes in the past
self.enable_mocktime()
block_time = self.mocktime - (201 * 10 * 60)
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(self.nodes, block_time)
self.nodes[peer].generatetoaddress(1, self.nodes[peer].get_deterministic_priv_key()[0])
block_time += 10 * 60
# Must sync before next peer starts generating blocks
sync_blocks(self.nodes)
# Shut them down, and clean up cache directories:
self.stop_nodes()
self.nodes = []
self.disable_mocktime()
def cache_path(n, *paths):
return os.path.join(get_datadir_path(self.options.cachedir, n), "regtest", *paths)
for i in range(MAX_NODES):
os.rmdir(cache_path(i, 'wallets')) # Remove empty wallets dir
for entry in os.listdir(cache_path(i)):
if entry not in ['chainstate', 'blocks']:
os.remove(cache_path(i, entry))
for i in range(self.num_nodes):
from_dir = get_datadir_path(self.options.cachedir, i)
to_dir = get_datadir_path(self.options.tmpdir, i)
shutil.copytree(from_dir, to_dir)
initialize_datadir(self.options.tmpdir, i) # Overwrite port/rpcport in bitcoingold.conf
def _initialize_chain_clean(self):
"""Initialize empty blockchain for use by the test.
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization."""
for i in range(self.num_nodes):
initialize_datadir(self.options.tmpdir, i)
def skip_if_no_py3_zmq(self):
"""Attempt to import the zmq package and skip the test if the import fails."""
try:
import zmq # noqa
except ImportError:
raise SkipTest("python3-zmq module not available.")
def skip_if_no_bitcoind_zmq(self):
"""Skip the running test if bitcoind has not been compiled with zmq support."""
if not self.is_zmq_compiled():
raise SkipTest("bitcoind has not been built with zmq enabled.")
def skip_if_no_wallet(self):
"""Skip the running test if wallet has not been compiled."""
if not self.is_wallet_compiled():
raise SkipTest("wallet has not been compiled.")
def skip_if_no_cli(self):
"""Skip the running test if bgold-cli has not been compiled."""
if not self.is_cli_compiled():
raise SkipTest("bgold-cli has not been compiled.")
def is_cli_compiled(self):
"""Checks whether bgold-cli was compiled."""
config = configparser.ConfigParser()
config.read_file(open(self.options.configfile))
return config["components"].getboolean("ENABLE_UTILS")
def is_wallet_compiled(self):
"""Checks whether the wallet module was compiled."""
config = configparser.ConfigParser()
config.read_file(open(self.options.configfile))
return config["components"].getboolean("ENABLE_WALLET")
def is_zmq_compiled(self):
"""Checks whether the zmq module was compiled."""
config = configparser.ConfigParser()
config.read_file(open(self.options.configfile))
return config["components"].getboolean("ENABLE_ZMQ")
|
|
import plac
import mordecai
import random
import jsonlines
from tqdm import tqdm
import re
import numpy as np
import editdistance
import pandas as pd
import os
import json
import pickle
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.optimizers import SGD
from keras.callbacks import EarlyStopping, ModelCheckpoint
import sklearn
geo = mordecai.Geoparser()
# Here's the format of the Prodigy labeled place picking data:
# ```
# {"text":"On July 15, state security services in Idleb arrested Mahmoud Barish, an opposition activist, for his dealings with the Damascus Declaration.",
# "spans":[{"start":39,"end":44}],
# "options":[
# {"id":1,"text":"\"Idlib District\", a second-order administrative division in SYR, id: 169388"},
# {"id":2,"text":"\"Idlib\", a seat of a first-order administrative division in SYR, id: 169389,
# {"id":4,"text":"None/Other/Incorrect"}],
# "_input_hash":1212285619,"_task_hash":-1410881973,
# "accept":[2],
# "answer":"accept"}
# ```
def ingest_prodigy_ranks(filename):
"""
Ingest Prodigy-labeled Mordecai data for place picking and produce training data
for Keras.
For each labeled example, match it to the output of Mordecai, and make sure there's an accepted answer
from Prodigy.
Parameters
----------
filename: filepath, location of Prodigy data
Returns
-------
X: list of matrices, Mordecai features.
Each element in the list is a matrix of features for ranking (so 5 rows)
Y: list of arrays of length 5, indicating correct location.
"""
with jsonlines.open(filename) as reader:
X = []
Y = []
i = 0
accumulate = []
for obj in reader:
i = i+1
if i % 250 == 0:
print(i)
# run the text through mordecai
proced = geo.geoparse(obj['text'], verbose = True,)
for proc in proced:
# for each result, see if the spans overlap the labeled spans
if proc['spans'][0]['start'] != obj['spans'][0]['start']:
# make sure we have the right entity
continue
ent_word = proc['word']
if not ent_word:
continue
# if it all works, take the results.
results = geo.query_geonames_country(ent_word, proc['country_predicted'])
if obj['answer'] == 'accept':
#start_char = obj['spans']['start']
# get the geonames ids of the options
geoids = [re.findall("id: (.+)", i['text']) for i in obj['options']]
geoids = [i[0] for i in geoids if i]
# get the correct id, if any
try:
correct = obj['accept'][0]
correct_id = str(geoids[correct - 1])
except (KeyError, IndexError):
continue
elif obj['answer'] != 'accept':
correct_id = 4
try:
fl, meta = geo.features_for_rank(proc, results)
# just look at the top 4 results by deterministic rule
# This matches what went into the annotation task
choices, sorted_meta, fl_subset = geo.format_for_prodigy(fl, meta, ent_word, return_feature_subset=True)
result_ids = np.asarray([m['geonameid'] for m in sorted_meta])
if obj['answer'] == 'accept':
labels = result_ids == correct_id
elif obj['answer'] == 'reject':
# give rejects their own special category
# reject means the country was right but none of the options were.
labels = np.asarray([0, 0, 0, 0, 1])
else:
# skip ignores
continue
#print(labels)
if labels.sum() == 0:
#print("No correct labels")
pass
# if fewer than 4 options were presented for tagging,
# pad it out with 0s to length 4 + 1 (1 for the all wrong reject answer)
labels = np.pad(labels, (0, 5 - len(labels)), 'constant')
# pad the matrix with empty rows
fl_pad = np.pad(fl_subset, ((0, 5 - fl_subset.shape[0]), (0, 0)), 'constant')
# turn the matrix into a vector
fl_unwrap = fl_pad.flatten()
Y.append(labels)
X.append(fl_unwrap)
except Exception as e:
print(e)
#print(meta)
continue
return X, Y
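def _example_pad_and_flatten():
    """Illustrative sketch (not used by the pipeline): demonstrates the padding
    and flattening applied to each example in ingest_prodigy_ranks. A
    hypothetical 3x2 feature matrix is padded to 5 rows and flattened into a
    fixed-length vector, and a 3-element label vector is padded to length 5."""
    fl_subset = np.ones((3, 2))          # stand-in for the ranking feature subset
    labels = np.asarray([0, 1, 0])       # stand-in for the matched-geonameid labels
    labels = np.pad(labels, (0, 5 - len(labels)), 'constant')
    fl_pad = np.pad(fl_subset, ((0, 5 - fl_subset.shape[0]), (0, 0)), 'constant')
    return labels, fl_pad.flatten()      # shapes: (5,) and (10,)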
def prep_data(X, Y, train_split):
X_stack = np.vstack(X)
X_stack.shape
Y_stack = np.vstack(Y)
Y_stack = Y_stack.astype(int)
Y_stack.shape
X_df = pd.DataFrame(X_stack)
print("Using a cutpoint of ", train_split)
np.random.seed(73071)
msk = np.random.rand(len(X_df)) < train_split
X_train = X_df[msk].as_matrix()
X_test = X_df[~msk].as_matrix()
y_train = Y_stack[msk]
y_test = Y_stack[~msk]
for i in [X_train, X_test, y_train, y_test]:
print(i.shape)
return X_train, X_test, y_train, y_test
def train_model(X_train, X_test, y_train, y_test, save_file):
model = Sequential()
model.add(Dense(128, activation='relu', input_shape = (X_train.shape[1],)))
model.add(Dropout(0.3))
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(y_train.shape[1], activation='softmax'))
#sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
callbacks = [EarlyStopping(monitor='val_loss', patience=50)]
save_model = ModelCheckpoint(save_file, monitor='val_loss',
verbose=0, save_best_only=True,
save_weights_only=False)
callbacks.append(save_model)
model.fit(X_train, y_train,
epochs=100,
validation_split=0.2,
callbacks = callbacks,
batch_size=16)
return model
@plac.annotations(
input_file=("Location of Prodigy labeled output", "option", "i", str),
train_split=("Fraction of data to use for training vs. validation", "option", "s", float),
use_cache=("Use cached data?", "flag", "c"))
def main(input_file, train_split, use_cache):
save_file = "rank_model_new.h5"
if use_cache:
print("Using saved data...")
with open("ranker_X.pkl", "rb") as f:
X = pickle.load(f)
with open("ranker_y.pkl", "rb") as f:
Y = pickle.load(f)
else:
print("Recalculating data...")
X, Y = ingest_prodigy_ranks(input_file)
#print("X.shape:", X.shape)
#print("Y.shape:", Y.shape)
with open("ranker_X.pkl", "wb") as f:
pickle.dump(X, f)
with open("ranker_Y.pkl", "wb") as f:
pickle.dump(Y, f)
X_train, X_test, y_train, y_test = prep_data(X, Y, train_split)
model = train_model(X_train, X_test, y_train, y_test, save_file)
score = model.evaluate(X_test, y_test)
print(score)
y_predicted = model.predict(X_test)
print(sklearn.metrics.classification_report(y_pred = y_predicted>0.5, y_true = y_test))
#model.save()
if __name__ == '__main__':
plac.call(main)
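# Illustrative usage (assumed invocation; the script and data file names are
# hypothetical placeholders, and the short flags come from the plac
# annotations on main() above):
#
#     python train_ranker.py -i prodigy_labels.jsonl -s 0.7
#     python train_ranker.py -c -s 0.7    # reuse cached ranker_X.pkl / ranker_y.pkl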
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
While SQLAlchemy/sqlalchemy-migrate should abstract this correctly,
there are known issues with these libraries so SQLite and non-SQLite
migrations must be done separately.
"""
import copy
import migrate
import sqlalchemy
import glance.common.utils
meta = sqlalchemy.MetaData()
def upgrade(migrate_engine):
"""
Call the correct dialect-specific upgrade.
"""
meta.bind = migrate_engine
t_images = _get_table('images', meta)
t_image_members = _get_table('image_members', meta)
t_image_properties = _get_table('image_properties', meta)
if migrate_engine.url.get_dialect().name == "sqlite":
_upgrade_sqlite(t_images, t_image_members, t_image_properties)
else:
_upgrade_other(t_images, t_image_members, t_image_properties)
_update_all_ids_to_uuids(t_images, t_image_members, t_image_properties)
def downgrade(migrate_engine):
"""
Call the correct dialect-specific downgrade.
"""
meta.bind = migrate_engine
t_images = _get_table('images', meta)
t_image_members = _get_table('image_members', meta)
t_image_properties = _get_table('image_properties', meta)
if migrate_engine.url.get_dialect().name == "sqlite":
_downgrade_sqlite(t_images, t_image_members, t_image_properties)
else:
_downgrade_other(t_images, t_image_members, t_image_properties)
_update_all_uuids_to_ids(t_images, t_image_members, t_image_properties)
def _upgrade_sqlite(t_images, t_image_members, t_image_properties):
"""
Upgrade 011 -> 012 with special SQLite-compatible logic.
"""
t_images.c.id.alter(sqlalchemy.Column("id",
sqlalchemy.String(36),
primary_key=True))
sql_commands = [
"""CREATE TABLE image_members_backup (
id INTEGER NOT NULL,
image_id VARCHAR(36) NOT NULL,
member VARCHAR(255) NOT NULL,
can_share BOOLEAN NOT NULL,
created_at DATETIME NOT NULL,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN NOT NULL,
PRIMARY KEY (id),
UNIQUE (image_id, member),
CHECK (can_share IN (0, 1)),
CHECK (deleted IN (0, 1)),
FOREIGN KEY(image_id) REFERENCES images (id)
);""",
"""INSERT INTO image_members_backup
SELECT * FROM image_members;""",
"""CREATE TABLE image_properties_backup (
id INTEGER NOT NULL,
image_id VARCHAR(36) NOT NULL,
name VARCHAR(255) NOT NULL,
value TEXT,
created_at DATETIME NOT NULL,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN NOT NULL,
PRIMARY KEY (id),
CHECK (deleted IN (0, 1)),
UNIQUE (image_id, name),
FOREIGN KEY(image_id) REFERENCES images (id)
);""",
"""INSERT INTO image_properties_backup
SELECT * FROM image_properties;""",
]
for command in sql_commands:
meta.bind.execute(command)
_sqlite_table_swap(t_image_members, t_image_properties)
def _downgrade_sqlite(t_images, t_image_members, t_image_properties):
"""
Downgrade 012 -> 011 with special SQLite-compatible logic.
"""
t_images.c.id.alter(sqlalchemy.Column("id",
sqlalchemy.Integer(),
primary_key=True))
sql_commands = [
"""CREATE TABLE image_members_backup (
id INTEGER NOT NULL,
image_id INTEGER NOT NULL,
member VARCHAR(255) NOT NULL,
can_share BOOLEAN NOT NULL,
created_at DATETIME NOT NULL,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN NOT NULL,
PRIMARY KEY (id),
UNIQUE (image_id, member),
CHECK (can_share IN (0, 1)),
CHECK (deleted IN (0, 1)),
FOREIGN KEY(image_id) REFERENCES images (id)
);""",
"""INSERT INTO image_members_backup
SELECT * FROM image_members;""",
"""CREATE TABLE image_properties_backup (
id INTEGER NOT NULL,
image_id INTEGER NOT NULL,
name VARCHAR(255) NOT NULL,
value TEXT,
created_at DATETIME NOT NULL,
updated_at DATETIME,
deleted_at DATETIME,
deleted BOOLEAN NOT NULL,
PRIMARY KEY (id),
CHECK (deleted IN (0, 1)),
UNIQUE (image_id, name),
FOREIGN KEY(image_id) REFERENCES images (id)
);""",
"""INSERT INTO image_properties_backup
SELECT * FROM image_properties;""",
]
for command in sql_commands:
meta.bind.execute(command)
_sqlite_table_swap(t_image_members, t_image_properties)
def _upgrade_other(t_images, t_image_members, t_image_properties):
"""
Upgrade 011 -> 012 with logic for non-SQLite databases.
"""
foreign_keys = _get_foreign_keys(t_images,
t_image_members,
t_image_properties)
for fk in foreign_keys:
fk.drop()
t_images.c.id.alter(sqlalchemy.String(36), primary_key=True)
t_image_members.c.image_id.alter(sqlalchemy.String(36))
t_image_properties.c.image_id.alter(sqlalchemy.String(36))
_update_all_ids_to_uuids(t_images, t_image_members, t_image_properties)
for fk in foreign_keys:
fk.create()
def _downgrade_other(t_images, t_image_members, t_image_properties):
"""
Downgrade 012 -> 011 with logic for non-SQLite databases.
"""
foreign_keys = _get_foreign_keys(t_images,
t_image_members,
t_image_properties)
for fk in foreign_keys:
fk.drop()
t_images.c.id.alter(sqlalchemy.Integer(), primary_key=True)
t_image_members.c.image_id.alter(sqlalchemy.Integer())
t_image_properties.c.image_id.alter(sqlalchemy.Integer())
_update_all_uuids_to_ids(t_images, t_image_members, t_image_properties)
for fk in foreign_keys:
fk.create()
def _sqlite_table_swap(t_image_members, t_image_properties):
t_image_members.drop()
t_image_properties.drop()
meta.bind.execute("ALTER TABLE image_members_backup "
"RENAME TO image_members")
meta.bind.execute("ALTER TABLE image_properties_backup "
"RENAME TO image_properties")
for index in t_image_members.indexes.union(t_image_properties.indexes):
index.create()
def _get_table(table_name, metadata):
"""Return a sqlalchemy Table definition with associated metadata."""
return sqlalchemy.Table(table_name, metadata, autoload=True)
def _get_foreign_keys(t_images, t_image_members, t_image_properties):
"""Retrieve and return foreign keys for members/properties tables."""
image_members_fk_name = list(t_image_members.foreign_keys)[0].name
image_properties_fk_name = list(t_image_properties.foreign_keys)[0].name
fk1 = migrate.ForeignKeyConstraint([t_image_members.c.image_id],
[t_images.c.id],
name=image_members_fk_name)
fk2 = migrate.ForeignKeyConstraint([t_image_properties.c.image_id],
[t_images.c.id],
name=image_properties_fk_name)
return fk1, fk2
def _update_all_ids_to_uuids(t_images, t_image_members, t_image_properties):
"""Transition from INTEGER id to VARCHAR(36) id."""
images = list(t_images.select().execute())
for image in images:
old_id = image["id"]
new_id = glance.common.utils.generate_uuid()
t_images.update().\
where(t_images.c.id == old_id).\
values(id=new_id).execute()
t_image_members.update().\
where(t_image_members.c.image_id == old_id).\
values(image_id=new_id).execute()
t_image_properties.update().\
where(t_image_properties.c.image_id == old_id).\
values(image_id=new_id).execute()
def _update_all_uuids_to_ids(t_images, t_image_members, t_image_properties):
"""Transition from VARCHAR(36) id to INTEGER id."""
images = list(t_images.select().execute())
new_id = 0
for image in images:
old_id = image["id"]
t_images.update().\
where(t_images.c.id == old_id).\
values(id=new_id).execute()
t_image_members.update().\
where(t_image_members.c.image_id == old_id).\
values(image_id=new_id).execute()
t_image_properties.update().\
where(t_image_properties.c.image_id == old_id).\
values(image_id=new_id).execute()
new_id += 1
|
|
# coding: utf-8
"""Flask-SQLAlchemy-Cache
A SQLAlchemy CachingQuery implementation for Flask, using Flask-Cache.
It is based in SQLAlchemy docs example:
http://docs.sqlalchemy.org/en/latest/orm/examples.html#module-examples.dogpile_caching
"""
from hashlib import md5
from sqlalchemy.orm.interfaces import MapperOption
from flask_sqlalchemy import BaseQuery
class CachingQuery(BaseQuery):
"""
A Query subclass which optionally loads full results from cache.
The CachingQuery optionally stores additional state that allows it to
consult a cache before accessing the database, in the form of a FromCache
or RelationshipCache object.
Each of these objects refers to a cache. When such an object has associated
itself with the CachingQuery, it is used to locate a cached result.
If none is present, then the Query is invoked normally, the results
being cached.
The FromCache and RelationshipCache mapper options below represent
the "public" method of configuring this state upon the CachingQuery.
"""
def __iter__(self):
"""
Override __iter__ to pull results from cache if particular
attributes have been configured.
This approach does *not* detach the loaded objects from the current
session. If the cache backend is an in-process cache (like "memory")
and lives beyond the scope of the current session's transaction, those
objects may be expired.
The method here can be modified to first expunge() each loaded item
from the current session before returning the list of items, so that
the items in the cache are not the same ones in the current Session.
"""
if hasattr(self, '_cache'):
func = lambda: list(BaseQuery.__iter__(self))
return iter(self.get_value(createfunc=func))
else:
return BaseQuery.__iter__(self)
def _get_cache_plus_key(self):
"""Return a cache region plus key."""
key = getattr(self, '_cache_key', self.key_from_query())
return self._cache.cache, key
def invalidate(self):
"""Invalidate the cache value represented by this Query."""
cache, cache_key = self._get_cache_plus_key()
cache.delete(cache_key)
def get_value(self, merge=True, createfunc=None,
expiration_time=None, ignore_expiration=False):
"""
Return the value from the cache for this query.
"""
cache, cache_key = self._get_cache_plus_key()
# ignore_expiration means, if the value is in the cache
# but is expired, return it anyway. This doesn't make sense
# with createfunc, which says, if the value is expired, generate
# a new value.
assert not ignore_expiration or not createfunc, \
"Can't ignore expiration and also provide createfunc"
if ignore_expiration or not createfunc:
cached_value = cache.get(cache_key,
expiration_time=expiration_time,
ignore_expiration=ignore_expiration)
else:
cached_value = cache.get(cache_key)
if not cached_value:
cached_value = createfunc()
cache.set(cache_key, cached_value, timeout=expiration_time)
if cached_value and merge:
cached_value = self.merge_result(cached_value, load=False)
return cached_value
def set_value(self, value):
"""Set the value in the cache for this query."""
cache, cache_key = self._get_cache_plus_key()
cache.set(cache_key, value)
def key_from_query(self, qualifier=None):
"""
Given a Query, create a cache key.
There are many approaches to this; here we use the simplest, which is
to create an md5 hash of the text of the SQL statement, combined with
stringified versions of all the bound parameters within it.
There's a bit of a performance hit with compiling out "query.statement"
here; other approaches include setting up an explicit cache key with a
particular Query, then combining that with the bound parameter values.
"""
stmt = self.with_labels().statement
compiled = stmt.compile()
params = compiled.params
values = [str(compiled)]
for k in sorted(params):
values.append(repr(params[k]))
key = u" ".join(values)
return md5(key.encode('utf8')).hexdigest()
class _CacheableMapperOption(MapperOption):
def __init__(self, cache, cache_key=None):
"""
Construct a new `_CacheableMapperOption`.
:param cache: the cache. Should be a Flask-Cache instance.
:param cache_key: optional. A string cache key that will serve as
the key to the query. Use this if your query has a huge amount of
parameters (such as when using in_()) which correspond more simply to
some other identifier.
"""
self.cache = cache
self.cache_key = cache_key
def __getstate__(self):
"""
Flask-Cache instance is not picklable because it has references
to Flask.app. Also, I don't want it cached.
"""
d = self.__dict__.copy()
d.pop('cache', None)
return d
class FromCache(_CacheableMapperOption):
"""Specifies that a Query should load results from a cache."""
propagate_to_loaders = False
def process_query(self, query):
"""Process a Query during normal loading operation."""
query._cache = self
class RelationshipCache(_CacheableMapperOption):
"""
Specifies that a Query as called within a "lazy load" should load
results from a cache.
"""
propagate_to_loaders = True
def __init__(self, attribute, cache, cache_key=None):
"""
Construct a new RelationshipCache.
:param attribute: A Class.attribute which indicates a particular
class relationship() whose lazy loader should be pulled from the cache.
:param cache_key: optional. A string cache key that will serve as the
key to the query, bypassing the usual means of forming a key from the
Query itself.
"""
super(RelationshipCache, self).__init__(cache, cache_key)
self._relationship_options = {
(attribute.property.parent.class_, attribute.property.key): self
}
def process_query_conditionally(self, query):
"""
Process a Query that is used within a lazy loader.
(the process_query_conditionally() method is a SQLAlchemy
hook invoked only within lazyload.)
"""
if query._current_path:
mapper, prop = query._current_path[-2:]
for cls in mapper.class_.__mro__:
k = (cls, prop.key)
relationship_option = self._relationship_options.get(k)
if relationship_option:
query._cache = relationship_option
break
def and_(self, option):
"""
Chain another RelationshipCache option to this one.
While many RelationshipCache objects can be specified on a single
Query separately, chaining them together allows for a more efficient
lookup during load.
"""
self._relationship_options.update(option._relationship_options)
return self
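# Illustrative usage sketch (not part of this module): install CachingQuery as
# the session's query class and attach FromCache to individual queries. The
# app/model names are hypothetical and the exact import paths depend on the
# installed Flask-Cache / Flask-SQLAlchemy versions.
#
#     from flask import Flask
#     from flask_cache import Cache
#     from flask_sqlalchemy import SQLAlchemy
#
#     app = Flask(__name__)
#     cache = Cache(app)
#     db = SQLAlchemy(app, session_options={'query_cls': CachingQuery})
#
#     # First call runs the query and stores the result; later calls hit the cache:
#     users = User.query.options(FromCache(cache)).filter_by(active=True).all()
#
#     # Drop the cached result when the underlying rows change:
#     User.query.options(FromCache(cache)).filter_by(active=True).invalidate()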
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Building Blocks of TensorFlow Debugger Command-Line Interface."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import os
import re
import sre_constants
import traceback
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python import pywrap_tensorflow_internal
from tensorflow.python.platform import gfile
HELP_INDENT = " "
EXPLICIT_USER_EXIT = "explicit_user_exit"
REGEX_MATCH_LINES_KEY = "regex_match_lines"
INIT_SCROLL_POS_KEY = "init_scroll_pos"
MAIN_MENU_KEY = "mm:"
class CommandLineExit(Exception):
def __init__(self, exit_token=None):
Exception.__init__(self)
self._exit_token = exit_token
@property
def exit_token(self):
return self._exit_token
class RichLine(object):
"""Rich single-line text.
Attributes:
text: A plain string, the raw text represented by this object. Should not
contain newlines.
font_attr_segs: A list of (start, end, font attribute) triples, representing
richness information applied to substrings of text.
"""
def __init__(self, text="", font_attr=None):
"""Construct a RichLine with no rich attributes or a single attribute.
Args:
text: Raw text string
font_attr: If specified, a single font attribute to be applied to the
entire text. Extending this object via concatenation allows creation
of text with varying attributes.
"""
# TODO(ebreck) Make .text and .font_attr protected members when we no
# longer need public access.
self.text = text
if font_attr:
self.font_attr_segs = [(0, len(text), font_attr)]
else:
self.font_attr_segs = []
def __add__(self, other):
"""Concatenate two chunks of maybe rich text to make a longer rich line.
Does not modify self.
Args:
other: Another piece of text to concatenate with this one.
If it is a plain str, it will be appended to this string with no
attributes. If it is a RichLine, it will be appended to this string
with its attributes preserved.
Returns:
A new RichLine comprising both chunks of text, with appropriate
attributes applied to the corresponding substrings.
"""
ret = RichLine()
if isinstance(other, six.string_types):
ret.text = self.text + other
ret.font_attr_segs = self.font_attr_segs[:]
return ret
elif isinstance(other, RichLine):
ret.text = self.text + other.text
ret.font_attr_segs = self.font_attr_segs[:]
old_len = len(self.text)
for start, end, font_attr in other.font_attr_segs:
ret.font_attr_segs.append((old_len + start, old_len + end, font_attr))
return ret
else:
raise TypeError("%r cannot be concatenated with a RichLine" % other)
def __len__(self):
return len(self.text)
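# Illustrative sketch (not used elsewhere in this module): building a rich line
# by concatenation. The "bold" attribute value is an arbitrary example.
def _example_rich_line():
    line = RichLine("Tensor ") + RichLine("t0", "bold") + " is ready"
    # line.text == "Tensor t0 is ready"
    # line.font_attr_segs == [(7, 9, "bold")]
    return line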
def rich_text_lines_from_rich_line_list(rich_text_list, annotations=None):
"""Convert a list of RichLine objects or strings to a RichTextLines object.
Args:
rich_text_list: a list of RichLine objects or strings
annotations: annotations for the resultant RichTextLines object.
Returns:
A corresponding RichTextLines object.
"""
lines = []
font_attr_segs = {}
for i, rl in enumerate(rich_text_list):
if isinstance(rl, RichLine):
lines.append(rl.text)
if rl.font_attr_segs:
font_attr_segs[i] = rl.font_attr_segs
else:
lines.append(rl)
return RichTextLines(lines, font_attr_segs, annotations=annotations)
def get_tensorflow_version_lines(include_dependency_versions=False):
"""Generate RichTextLines with TensorFlow version info.
Args:
include_dependency_versions: Include the version of TensorFlow's key
dependencies, such as numpy.
Returns:
A formatted, multi-line `RichTextLines` object.
"""
lines = ["TensorFlow version: %s" % pywrap_tensorflow_internal.__version__]
lines.append("")
if include_dependency_versions:
lines.append("Dependency version(s):")
lines.append(" numpy: %s" % np.__version__)
lines.append("")
return RichTextLines(lines)
class RichTextLines(object):
"""Rich multi-line text.
Line-by-line text output, with font attributes (e.g., color) and annotations
(e.g., indices in a multi-dimensional tensor). Used as the text output of CLI
commands. Can be rendered on terminal environments such as curses.
This is not to be confused with Rich Text Format (RTF). This class is for text
lines only.
"""
def __init__(self, lines, font_attr_segs=None, annotations=None):
"""Constructor of RichTextLines.
Args:
lines: A list of str or a single str, representing text output to
screen. The latter case is for convenience when the text output is
single-line.
font_attr_segs: A map from 0-based row index to a list of 3-tuples.
It lists segments in each row that have special font attributes, such
as colors, that are not the default attribute. For example:
{1: [(0, 3, "red"), (4, 7, "green")], 2: [(10, 20, "yellow")]}
In each tuple, the 1st element is the start index of the segment. The
2nd element is the end index, in an "open interval" fashion. The 3rd
element is an object or a list of objects that represents the font
attribute. Colors are represented as strings as in the examples above.
annotations: A map from 0-based row index to any object for annotating
the row. A typical use example is annotating rows of the output as
indices in a multi-dimensional tensor. For example, consider the
following text representation of a 3x2x2 tensor:
[[[0, 0], [0, 0]],
[[0, 0], [0, 0]],
[[0, 0], [0, 0]]]
The annotation can indicate the indices of the first element shown in
each row, i.e.,
{0: [0, 0, 0], 1: [1, 0, 0], 2: [2, 0, 0]}
This information can make display of tensors on screen clearer and can
help the user navigate (scroll) to the desired location in a large
tensor.
Raises:
ValueError: If lines is of invalid type.
"""
if isinstance(lines, list):
self._lines = lines
elif isinstance(lines, six.string_types):
self._lines = [lines]
else:
raise ValueError("Unexpected type in lines: %s" % type(lines))
self._font_attr_segs = font_attr_segs
if not self._font_attr_segs:
self._font_attr_segs = {}
# TODO(cais): Refactor to collections.defaultdict(list) to simplify code.
self._annotations = annotations
if not self._annotations:
self._annotations = {}
# TODO(cais): Refactor to collections.defaultdict(list) to simplify code.
@property
def lines(self):
return self._lines
@property
def font_attr_segs(self):
return self._font_attr_segs
@property
def annotations(self):
return self._annotations
def num_lines(self):
return len(self._lines)
def slice(self, begin, end):
"""Slice a RichTextLines object.
The object itself is not changed. A sliced instance is returned.
Args:
begin: (int) Beginning line index (inclusive). Must be >= 0.
end: (int) Ending line index (exclusive). Must be >= 0.
Returns:
(RichTextLines) Sliced output instance of RichTextLines.
Raises:
ValueError: If begin or end is negative.
"""
if begin < 0 or end < 0:
raise ValueError("Encountered negative index.")
# Copy lines.
lines = self.lines[begin:end]
# Slice font attribute segments.
font_attr_segs = {}
for key in self.font_attr_segs:
if key >= begin and key < end:
font_attr_segs[key - begin] = self.font_attr_segs[key]
# Slice annotations.
annotations = {}
for key in self.annotations:
if not isinstance(key, int):
# Annotations can contain keys that are not line numbers.
annotations[key] = self.annotations[key]
elif key >= begin and key < end:
annotations[key - begin] = self.annotations[key]
return RichTextLines(
lines, font_attr_segs=font_attr_segs, annotations=annotations)
def extend(self, other):
"""Extend this instance of RichTextLines with another instance.
The extension takes effect on the text lines, the font attribute segments,
as well as the annotations. The line indices in the font attribute
segments and the annotations are adjusted to account for the existing
lines. If there are duplicate, non-line-index fields in the annotations,
the value from the input argument "other" will override that in this
instance.
Args:
other: (RichTextLines) The other RichTextLines instance to be appended at
the end of this instance.
"""
orig_num_lines = self.num_lines() # Record original number of lines.
# Merge the lines.
self._lines.extend(other.lines)
# Merge the font_attr_segs.
for line_index in other.font_attr_segs:
self._font_attr_segs[orig_num_lines + line_index] = (
other.font_attr_segs[line_index])
# Merge the annotations.
for key in other.annotations:
if isinstance(key, int):
self._annotations[orig_num_lines + key] = (other.annotations[key])
else:
self._annotations[key] = other.annotations[key]
def _extend_before(self, other):
"""Add another RichTextLines object to the front.
Args:
other: (RichTextLines) The other object to add to the front to this
object.
"""
other_num_lines = other.num_lines() # Record original number of lines.
# Merge the lines.
self._lines = other.lines + self._lines
# Merge the font_attr_segs.
new_font_attr_segs = {}
for line_index in self.font_attr_segs:
new_font_attr_segs[other_num_lines + line_index] = (
self.font_attr_segs[line_index])
new_font_attr_segs.update(other.font_attr_segs)
self._font_attr_segs = new_font_attr_segs
# Merge the annotations.
new_annotations = {}
for key in self._annotations:
if isinstance(key, int):
new_annotations[other_num_lines + key] = (self.annotations[key])
else:
new_annotations[key] = other.annotations[key]
new_annotations.update(other.annotations)
self._annotations = new_annotations
def append(self, line, font_attr_segs=None):
"""Append a single line of text.
Args:
line: (str) The text to be added to the end.
font_attr_segs: (list of tuples) Font attribute segments of the appended
line.
"""
self._lines.append(line)
if font_attr_segs:
self._font_attr_segs[len(self._lines) - 1] = font_attr_segs
def append_rich_line(self, rich_line):
self.append(rich_line.text, rich_line.font_attr_segs)
def prepend(self, line, font_attr_segs=None):
"""Prepend (i.e., add to the front) a single line of text.
Args:
line: (str) The text to be added to the front.
font_attr_segs: (list of tuples) Font attribute segments of the prepended
line.
"""
other = RichTextLines(line)
if font_attr_segs:
other.font_attr_segs[0] = font_attr_segs
self._extend_before(other)
def write_to_file(self, file_path):
"""Write the object itself to file, in a plain format.
The font_attr_segs and annotations are ignored.
Args:
file_path: (str) path of the file to write to.
"""
with gfile.Open(file_path, "w") as f:
for line in self._lines:
f.write(line + "\n")
# TODO(cais): Add a method to allow appending to a line in RichTextLines with
# both text and font_attr_segs.
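# Illustrative sketch of the constructor arguments described above; the font
# attribute names and annotation values are arbitrary examples.
def _example_rich_text_lines():
    out = RichTextLines(
        ["Tensor a:0", "  [[1, 2], [3, 4]]"],
        font_attr_segs={0: [(0, 6, "bold")]},
        annotations={1: [0, 0]})
    out.append("Done.", font_attr_segs=[(0, 5, "green")])
    return out.num_lines()  # 3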
def regex_find(orig_screen_output, regex, font_attr):
"""Perform regex match in rich text lines.
Produces a new RichTextLines object with font_attr_segs containing highlighted
regex matches.
Example use cases include:
1) search for specific items in a large list of items, and
2) search for specific numerical values in a large tensor.
Args:
orig_screen_output: The original RichTextLines, in which the regex find
is to be performed.
regex: The regex used for matching.
font_attr: Font attribute used for highlighting the found result.
Returns:
A modified copy of orig_screen_output.
Raises:
ValueError: If input str regex is not a valid regular expression.
"""
new_screen_output = RichTextLines(
orig_screen_output.lines,
font_attr_segs=copy.deepcopy(orig_screen_output.font_attr_segs),
annotations=orig_screen_output.annotations)
try:
re_prog = re.compile(regex)
except sre_constants.error:
raise ValueError("Invalid regular expression: \"%s\"" % regex)
regex_match_lines = []
for i in xrange(len(new_screen_output.lines)):
line = new_screen_output.lines[i]
find_it = re_prog.finditer(line)
match_segs = []
for match in find_it:
match_segs.append((match.start(), match.end(), font_attr))
if match_segs:
if i not in new_screen_output.font_attr_segs:
new_screen_output.font_attr_segs[i] = match_segs
else:
new_screen_output.font_attr_segs[i].extend(match_segs)
new_screen_output.font_attr_segs[i] = sorted(
new_screen_output.font_attr_segs[i], key=lambda x: x[0])
regex_match_lines.append(i)
new_screen_output.annotations[REGEX_MATCH_LINES_KEY] = regex_match_lines
return new_screen_output
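# Illustrative sketch: highlighting regex matches in a RichTextLines object.
# The attribute name "yellow" is an arbitrary example value.
def _example_regex_find():
    screen = RichTextLines(["loss = 0.25", "step = 100"])
    highlighted = regex_find(screen, r"[0-9.]+", "yellow")
    # highlighted.font_attr_segs[0] == [(7, 11, "yellow")]
    # highlighted.annotations[REGEX_MATCH_LINES_KEY] == [0, 1]
    return highlighted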
def wrap_rich_text_lines(inp, cols):
"""Wrap RichTextLines according to maximum number of columns.
Produces a new RichTextLines object with the text lines, font_attr_segs and
annotations properly wrapped. This ought to be used sparingly, as in most
cases, command handlers producing RichTextLines outputs should know the
screen/panel width via the screen_info kwarg and should produce properly
length-limited lines in the output accordingly.
Args:
inp: Input RichTextLines object.
cols: Number of columns, as an int.
Returns:
1) A new instance of RichTextLines, with line lengths limited to cols.
2) A list of new (wrapped) line index. For example, if the original input
consists of three lines and only the second line is wrapped, and it's
wrapped into two lines, this return value will be: [0, 1, 3].
Raises:
ValueError: If inputs have invalid types.
"""
new_line_indices = []
if not isinstance(inp, RichTextLines):
raise ValueError("Invalid type of input screen_output")
if not isinstance(cols, int):
raise ValueError("Invalid type of input cols")
out = RichTextLines([])
row_counter = 0 # Counter for new row index
for i in xrange(len(inp.lines)):
new_line_indices.append(out.num_lines())
line = inp.lines[i]
if i in inp.annotations:
out.annotations[row_counter] = inp.annotations[i]
if len(line) <= cols:
# No wrapping.
out.lines.append(line)
if i in inp.font_attr_segs:
out.font_attr_segs[row_counter] = inp.font_attr_segs[i]
row_counter += 1
else:
# Wrap.
wlines = [] # Wrapped lines.
osegs = []
if i in inp.font_attr_segs:
osegs = inp.font_attr_segs[i]
idx = 0
while idx < len(line):
if idx + cols > len(line):
rlim = len(line)
else:
rlim = idx + cols
wlines.append(line[idx:rlim])
for seg in osegs:
if (seg[0] < rlim) and (seg[1] >= idx):
# Calculate left bound within wrapped line.
if seg[0] >= idx:
lb = seg[0] - idx
else:
lb = 0
# Calculate right bound within wrapped line.
if seg[1] < rlim:
rb = seg[1] - idx
else:
rb = rlim - idx
if rb > lb: # Omit zero-length segments.
wseg = (lb, rb, seg[2])
if row_counter not in out.font_attr_segs:
out.font_attr_segs[row_counter] = [wseg]
else:
out.font_attr_segs[row_counter].append(wseg)
idx += cols
row_counter += 1
out.lines.extend(wlines)
# Copy over keys of annotation that are not row indices.
for key in inp.annotations:
if not isinstance(key, int):
out.annotations[key] = inp.annotations[key]
return out, new_line_indices
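# Illustrative sketch: wrapping a two-line RichTextLines object at 10 columns.
def _example_wrap():
    inp = RichTextLines(["short", "a line that needs wrapping"])
    out, new_line_indices = wrap_rich_text_lines(inp, 10)
    # The 26-character second line is split into 10-, 10- and 6-character
    # pieces, so new_line_indices == [0, 1] and out.num_lines() == 4.
    return out, new_line_indices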
class CommandHandlerRegistry(object):
"""Registry of command handlers for CLI.
Handler methods (callables) for user commands can be registered with this
class, which then is able to dispatch commands to the correct handlers and
retrieve the RichTextLines output.
For example, suppose you have the following handler defined:
def echo(argv, screen_info=None):
return RichTextLines(["arguments = %s" % " ".join(argv),
"screen_info = " + repr(screen_info)])
you can register the handler with the command prefix "echo" and alias "e":
registry = CommandHandlerRegistry()
registry.register_command_handler("echo", echo,
"Echo arguments, along with screen info", prefix_aliases=["e"])
then to invoke this command handler with some arguments and screen_info, do:
registry.dispatch_command("echo", ["foo", "bar"], screen_info={"cols": 80})
or with the prefix alias:
registry.dispatch_command("e", ["foo", "bar"], screen_info={"cols": 80})
The call will return a RichTextLines object which can be rendered by a CLI.
"""
HELP_COMMAND = "help"
HELP_COMMAND_ALIASES = ["h"]
VERSION_COMMAND = "version"
VERSION_COMMAND_ALIASES = ["ver"]
def __init__(self):
# A dictionary from command prefix to handler.
self._handlers = {}
# A dictionary from prefix alias to prefix.
self._alias_to_prefix = {}
# A dictionary from prefix to aliases.
self._prefix_to_aliases = {}
# A dictionary from command prefix to help string.
self._prefix_to_help = {}
# Introductory text to help information.
self._help_intro = None
# Register a default handler for the command "help".
self.register_command_handler(
self.HELP_COMMAND,
self._help_handler,
"Print this help message.",
prefix_aliases=self.HELP_COMMAND_ALIASES)
# Register a default handler for the command "version".
self.register_command_handler(
self.VERSION_COMMAND,
self._version_handler,
"Print the versions of TensorFlow and its key dependencies.",
prefix_aliases=self.VERSION_COMMAND_ALIASES)
def register_command_handler(self,
prefix,
handler,
help_info,
prefix_aliases=None):
"""Register a callable as a command handler.
Args:
prefix: Command prefix, i.e., the first word in a command, e.g.,
"print" as in "print tensor_1".
handler: A callable of the following signature:
foo_handler(argv, screen_info=None),
where argv is the argument vector (excluding the command prefix) and
screen_info is a dictionary containing information about the screen,
such as number of columns, e.g., {"cols": 100}.
The callable should return:
1) a RichTextLines object representing the screen output.
The callable can also raise an exception of the type CommandLineExit,
which if caught by the command-line interface, will lead to its exit.
The exception can optionally carry an exit token of arbitrary type.
help_info: A help string.
prefix_aliases: Aliases for the command prefix, as a list of str. E.g.,
shorthands for the command prefix: ["p", "pr"]
Raises:
ValueError: If
1) the prefix is empty, or
2) handler is not callable, or
3) a handler is already registered for the prefix, or
4) elements in prefix_aliases clash with existing aliases.
5) help_info is not a str.
"""
if not prefix:
raise ValueError("Empty command prefix")
if prefix in self._handlers:
raise ValueError(
"A handler is already registered for command prefix \"%s\"" % prefix)
# Make sure handler is callable.
if not callable(handler):
raise ValueError("handler is not callable")
# Make sure that help info is a string.
if not isinstance(help_info, six.string_types):
raise ValueError("help_info is not a str")
# Process prefix aliases.
if prefix_aliases:
for alias in prefix_aliases:
if self._resolve_prefix(alias):
raise ValueError(
"The prefix alias \"%s\" clashes with existing prefixes or "
"aliases." % alias)
self._alias_to_prefix[alias] = prefix
self._prefix_to_aliases[prefix] = prefix_aliases
# Store handler.
self._handlers[prefix] = handler
# Store help info.
self._prefix_to_help[prefix] = help_info
def dispatch_command(self, prefix, argv, screen_info=None):
"""Handles a command by dispatching it to a registered command handler.
Args:
prefix: Command prefix, as a str, e.g., "print".
argv: Command argument vector, excluding the command prefix, represented
as a list of str, e.g.,
["tensor_1"]
screen_info: A dictionary containing screen info, e.g., {"cols": 100}.
Returns:
An instance of RichTextLines or None. If any exception is caught during
the invocation of the command handler, the RichTextLines will wrap the
error type and message.
Raises:
ValueError: If
1) prefix is empty, or
2) no command handler is registered for the command prefix, or
3) the handler is found for the prefix, but it fails to return a
RichTextLines or raise any exception.
CommandLineExit:
If the command handler raises this type of exception, this method will
simply pass it along.
"""
if not prefix:
raise ValueError("Prefix is empty")
resolved_prefix = self._resolve_prefix(prefix)
if not resolved_prefix:
raise ValueError("No handler is registered for command prefix \"%s\"" %
prefix)
handler = self._handlers[resolved_prefix]
try:
output = handler(argv, screen_info=screen_info)
except CommandLineExit as e:
raise e
except SystemExit as e:
# Special case for syntax errors caught by argparse.
lines = ["Syntax error for command: %s" % prefix,
"For help, do \"help %s\"" % prefix]
output = RichTextLines(lines)
except BaseException as e: # pylint: disable=broad-except
lines = ["Error occurred during handling of command: %s %s:" %
(resolved_prefix, " ".join(argv)), "%s: %s" % (type(e), str(e))]
# Include traceback of the exception.
lines.append("")
lines.extend(traceback.format_exc().split("\n"))
output = RichTextLines(lines)
if not isinstance(output, RichTextLines) and output is not None:
raise ValueError(
"Return value from command handler %s is not None or a RichTextLines "
"instance" % str(handler))
return output
def is_registered(self, prefix):
"""Test if a command prefix or its alias is has a registered handler.
Args:
prefix: A prefix or its alias, as a str.
Returns:
True iff a handler is registered for prefix.
"""
return self._resolve_prefix(prefix) is not None
def get_help(self, cmd_prefix=None):
"""Compile help information into a RichTextLines object.
Args:
cmd_prefix: Optional command prefix. As the prefix itself or one of its
aliases.
Returns:
A RichTextLines object containing the help information. If cmd_prefix
is None, the return value will be the full command-line help. Otherwise,
it will be the help information for the specified command.
"""
if not cmd_prefix:
# Print full help information, in sorted order of the command prefixes.
help_info = RichTextLines([])
if self._help_intro:
# If help intro is available, show it at the beginning.
help_info.extend(self._help_intro)
sorted_prefixes = sorted(self._handlers)
for cmd_prefix in sorted_prefixes:
lines = self._get_help_for_command_prefix(cmd_prefix)
lines.append("")
lines.append("")
help_info.extend(RichTextLines(lines))
return help_info
else:
return RichTextLines(self._get_help_for_command_prefix(cmd_prefix))
def set_help_intro(self, help_intro):
"""Set an introductory message to help output.
Args:
help_intro: (RichTextLines) Rich text lines appended to the
beginning of the output of the command "help", as introductory
information.
"""
self._help_intro = help_intro
def _help_handler(self, args, screen_info=None):
"""Command handler for "help".
"help" is a common command that merits built-in support from this class.
Args:
args: Command line arguments to "help" (not including "help" itself).
screen_info: (dict) Information regarding the screen, e.g., the screen
width in characters: {"cols": 80}
Returns:
(RichTextLines) Screen text output.
"""
_ = screen_info # Unused currently.
if not args:
return self.get_help()
elif len(args) == 1:
return self.get_help(args[0])
else:
return RichTextLines(["ERROR: help takes only 0 or 1 input argument."])
def _version_handler(self, args, screen_info=None):
del args # Unused currently.
del screen_info # Unused currently.
return get_tensorflow_version_lines(include_dependency_versions=True)
def _resolve_prefix(self, token):
"""Resolve command prefix from the prefix itself or its alias.
Args:
token: a str to be resolved.
Returns:
If resolvable, the resolved command prefix.
If not resolvable, None.
"""
if token in self._handlers:
return token
elif token in self._alias_to_prefix:
return self._alias_to_prefix[token]
else:
return None
def _get_help_for_command_prefix(self, cmd_prefix):
"""Compile the help information for a given command prefix.
Args:
cmd_prefix: Command prefix, as the prefix itself or one of its
aliases.
Returns:
A list of str as the help information for cmd_prefix. If the cmd_prefix
does not exist, the returned list of str will indicate that.
"""
lines = []
resolved_prefix = self._resolve_prefix(cmd_prefix)
if not resolved_prefix:
lines.append("Invalid command prefix: \"%s\"" % cmd_prefix)
return lines
lines.append(resolved_prefix)
if resolved_prefix in self._prefix_to_aliases:
lines.append(HELP_INDENT + "Aliases: " + ", ".join(
self._prefix_to_aliases[resolved_prefix]))
lines.append("")
help_lines = self._prefix_to_help[resolved_prefix].split("\n")
for line in help_lines:
lines.append(HELP_INDENT + line)
return lines
class TabCompletionRegistry(object):
"""Registry for tab completion responses."""
def __init__(self):
self._comp_dict = {}
# TODO(cais): Rename method names with "comp" to "*completion*" to avoid
# confusion.
def register_tab_comp_context(self, context_words, comp_items):
"""Register a tab-completion context.
Register that, for each word in context_words, the potential tab-completions
are the words in comp_items.
A context word is a pre-existing, completed word in the command line that
determines how tab-completion works for another, incomplete word in the same
command line.
Completion items consist of potential candidates for the incomplete word.
To give a general example, a context word can be "drink", and the completion
items can be ["coffee", "tea", "water"]
Note: A context word can be empty, in which case the context is for the
top-level commands.
Args:
context_words: A list of context words belonging to the context being
registered. It is a list of str, instead of a single string, to support
synonym words triggering the same tab-completion context, e.g.,
both "drink" and the short-hand "dr" can trigger the same context.
comp_items: A list of completion items, as a list of str.
Raises:
TypeError: if the input arguments are not all of the correct types.
"""
if not isinstance(context_words, list):
raise TypeError("Incorrect type in context_list: Expected list, got %s" %
type(context_words))
if not isinstance(comp_items, list):
raise TypeError("Incorrect type in comp_items: Expected list, got %s" %
type(comp_items))
# Sort the completion items on registration, so that later during
# get_completions calls, no sorting will be necessary.
sorted_comp_items = sorted(comp_items)
for context_word in context_words:
self._comp_dict[context_word] = sorted_comp_items
def deregister_context(self, context_words):
"""Deregister a list of context words.
Args:
context_words: A list of context words to deregister, as a list of str.
Raises:
KeyError: if there are word(s) in context_words that do not correspond
to any registered contexts.
"""
for context_word in context_words:
if context_word not in self._comp_dict:
raise KeyError("Cannot deregister unregistered context word \"%s\"" %
context_word)
for context_word in context_words:
del self._comp_dict[context_word]
def extend_comp_items(self, context_word, new_comp_items):
"""Add a list of completion items to a completion context.
Args:
context_word: A single completion word as a string. The extension will
also apply to all other context words of the same context.
new_comp_items: (list of str) New completion items to add.
Raises:
KeyError: if the context word has not been registered.
"""
if context_word not in self._comp_dict:
raise KeyError("Context word \"%s\" has not been registered" %
context_word)
self._comp_dict[context_word].extend(new_comp_items)
self._comp_dict[context_word] = sorted(self._comp_dict[context_word])
def remove_comp_items(self, context_word, comp_items):
"""Remove a list of completion items from a completion context.
Args:
context_word: A single completion word as a string. The removal will
also apply to all other context words of the same context.
comp_items: Completion items to remove.
Raises:
KeyError: if the context word has not been registered.
"""
if context_word not in self._comp_dict:
raise KeyError("Context word \"%s\" has not been registered" %
context_word)
for item in comp_items:
self._comp_dict[context_word].remove(item)
def get_completions(self, context_word, prefix):
"""Get the tab completions given a context word and a prefix.
Args:
context_word: The context word.
prefix: The prefix of the incomplete word.
Returns:
(1) None if no registered context matches the context_word.
A list of str for the matching completion items. Can be an empty list
if a matching context exists, but no completion item matches the
prefix.
(2) Common prefix of all the words in the first return value. If the
first return value is None, this return value will be None, too. If
the first return value is not None, i.e., a list, this return value
will be a str, which can be an empty str if there is no common
prefix among the items of the list.
"""
if context_word not in self._comp_dict:
return None, None
comp_items = self._comp_dict[context_word]
comp_items = sorted(
[item for item in comp_items if item.startswith(prefix)])
return comp_items, self._common_prefix(comp_items)
def _common_prefix(self, m):
"""Given a list of str, returns the longest common prefix.
Args:
m: (list of str) A list of strings.
Returns:
(str) The longest common prefix.
"""
if not m:
return ""
s1 = min(m)
s2 = max(m)
for i, c in enumerate(s1):
if c != s2[i]:
return s1[:i]
return s1
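# Illustrative usage sketch, not part of the original module: it exercises the
# tab-completion registry defined above. The registration method is taken to be
# register_tab_comp_context, matching the first docstring in this excerpt (its
# def line sits above the excerpt), and registry is assumed to be an instance
# of the enclosing class.
def _tab_completion_registry_example(registry):
  """Register a context and query completions for the prefix "t"."""
  # "drink" and its shorthand "dr" share one completion context.
  registry.register_tab_comp_context(["drink", "dr"],
                                     ["coffee", "tea", "water"])
  # Returns (["tea"], "tea"); an unregistered context word returns (None, None).
  return registry.get_completions("drink", "t")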
class CommandHistory(object):
"""Keeps command history and supports lookup."""
_HISTORY_FILE_NAME = ".tfdbg_history"
def __init__(self, limit=100, history_file_path=None):
"""CommandHistory constructor.
Args:
limit: Maximum number of the most recent commands that this instance
keeps track of, as an int.
history_file_path: (str) Manually specified path to history file. Used in
testing.
"""
self._commands = []
self._limit = limit
self._history_file_path = (
history_file_path or self._get_default_history_file_path())
self._load_history_from_file()
def _load_history_from_file(self):
if os.path.isfile(self._history_file_path):
try:
with open(self._history_file_path, "rt") as history_file:
commands = history_file.readlines()
self._commands = [command.strip() for command in commands
if command.strip()]
# Limit the size of the history file.
if len(self._commands) > self._limit:
self._commands = self._commands[-self._limit:]
with open(self._history_file_path, "wt") as history_file:
for command in self._commands:
history_file.write(command + "\n")
except IOError:
print("WARNING: writing history file failed.")
def _add_command_to_history_file(self, command):
try:
with open(self._history_file_path, "at") as history_file:
history_file.write(command + "\n")
except IOError:
pass
@classmethod
def _get_default_history_file_path(cls):
return os.path.join(os.path.expanduser("~"), cls._HISTORY_FILE_NAME)
def add_command(self, command):
"""Add a command to the command history.
Args:
command: The history command, as a str.
Raises:
TypeError: if command is not a str.
"""
if self._commands and command == self._commands[-1]:
# Ignore repeating commands in a row.
return
if not isinstance(command, six.string_types):
raise TypeError("Attempt to enter non-str entry to command history")
self._commands.append(command)
if len(self._commands) > self._limit:
self._commands = self._commands[-self._limit:]
self._add_command_to_history_file(command)
def most_recent_n(self, n):
"""Look up the n most recent commands.
Args:
n: Number of most recent commands to look up.
Returns:
A list of n most recent commands, or all available most recent commands,
if n exceeds size of the command history, in chronological order.
"""
return self._commands[-n:]
def lookup_prefix(self, prefix, n):
"""Look up the n most recent commands that starts with prefix.
Args:
prefix: The prefix to lookup.
n: Number of most recent commands to look up.
Returns:
A list of n most recent commands that have the specified prefix, or all
available most recent commands that have the prefix, if n exceeds the
number of history commands with the prefix.
"""
commands = [cmd for cmd in self._commands if cmd.startswith(prefix)]
return commands[-n:]
# TODO(cais): Lookup by regex.
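# Illustrative usage sketch, not part of the original module: exercising the
# CommandHistory API above against a throwaway history file (the temp path
# below is only for illustration).
def _command_history_example():
  """Add a few commands, then look them up by recency and by prefix."""
  import tempfile
  history_path = os.path.join(tempfile.mkdtemp(), ".tfdbg_history_example")
  history = CommandHistory(limit=10, history_file_path=history_path)
  history.add_command("help")
  history.add_command("print_tensor a")
  history.add_command("print_tensor b")
  recent_two = history.most_recent_n(2)                # last two, oldest first
  prefixed = history.lookup_prefix("print_tensor", 10)
  return recent_two, prefixed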
class MenuItem(object):
"""A class for an item in a text-based menu."""
def __init__(self, caption, content, enabled=True):
"""Menu constructor.
TODO(cais): Nested menu is currently not supported. Support it.
Args:
caption: (str) caption of the menu item.
content: Content of the menu item. For a menu item that triggers
a command, for example, content is the command string.
enabled: (bool) whether this menu item is enabled.
"""
self._caption = caption
self._content = content
self._enabled = enabled
@property
def caption(self):
return self._caption
@property
def content(self):
return self._content
def is_enabled(self):
return self._enabled
def disable(self):
self._enabled = False
def enable(self):
self._enabled = True
class Menu(object):
"""A class for text-based menu."""
def __init__(self, name=None):
"""Menu constructor.
Args:
name: (str or None) name of this menu.
"""
self._name = name
self._items = []
def append(self, item):
"""Append an item to the Menu.
Args:
item: (MenuItem) the item to be appended.
"""
self._items.append(item)
def insert(self, index, item):
self._items.insert(index, item)
def num_items(self):
return len(self._items)
def captions(self):
return [item.caption for item in self._items]
def caption_to_item(self, caption):
"""Get a MenuItem from the caption.
Args:
caption: (str) The caption to look up.
Returns:
(MenuItem) The first-match menu item with the caption, if any.
Raises:
LookupError: If a menu item with the caption does not exist.
"""
captions = self.captions()
if caption not in captions:
raise LookupError("There is no menu item with the caption \"%s\"" %
caption)
return self._items[captions.index(caption)]
def format_as_single_line(self,
prefix=None,
divider=" | ",
enabled_item_attrs=None,
disabled_item_attrs=None):
"""Format the menu as a single-line RichTextLines object.
Args:
prefix: (str) String added to the beginning of the line.
divider: (str) The dividing string between the menu items.
enabled_item_attrs: (list or str) Attributes applied to each enabled
menu item, e.g., ["bold", "underline"].
disabled_item_attrs: (list or str) Attributes applied to each
disabled menu item, e.g., ["red"].
Returns:
(RichTextLines) A single-line output representing the menu, with
font_attr_segs marking the individual menu items.
"""
if (enabled_item_attrs is not None and
not isinstance(enabled_item_attrs, list)):
enabled_item_attrs = [enabled_item_attrs]
if (disabled_item_attrs is not None and
not isinstance(disabled_item_attrs, list)):
disabled_item_attrs = [disabled_item_attrs]
menu_line = prefix if prefix is not None else ""
attr_segs = []
for item in self._items:
menu_line += item.caption
item_name_begin = len(menu_line) - len(item.caption)
if item.is_enabled():
final_attrs = [item]
if enabled_item_attrs:
final_attrs.extend(enabled_item_attrs)
attr_segs.append((item_name_begin, len(menu_line), final_attrs))
else:
if disabled_item_attrs:
attr_segs.append(
(item_name_begin, len(menu_line), disabled_item_attrs))
menu_line += divider
return RichTextLines(menu_line, font_attr_segs={0: attr_segs})
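# Illustrative usage sketch, not part of the original module: building a small
# Menu out of MenuItem objects and rendering it. The attribute names ("bold",
# "dim") are arbitrary examples, and RichTextLines is assumed to be defined
# earlier in this module, since format_as_single_line above returns one.
def _menu_example():
  """Build a two-item menu and format it as a single line."""
  menu = Menu(name="example")
  menu.append(MenuItem("home", "home_command"))
  menu.append(MenuItem("help", "help_command", enabled=False))
  return menu.format_as_single_line(
      prefix="| ", divider=" | ",
      enabled_item_attrs=["bold"], disabled_item_attrs=["dim"])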
|
|
#!/usr/bin/env python
from compliance_checker.suite import CheckSuite
from compliance_checker.cf import CFBaseCheck, BaseCheck, dimless_vertical_coordinates
from compliance_checker.cf.util import is_vertical_coordinate, is_time_variable, units_convertible, units_temporal
from compliance_checker.base import DSPair
from wicken.netcdf_dogma import NetCDFDogma
from netCDF4 import Dataset
from tempfile import gettempdir
from pkg_resources import resource_filename
import unittest
import os
import re
static_files = {
'rutgers' : resource_filename('compliance_checker', 'tests/data/ru07-20130824T170228_rt0.nc'),
'conv_multi' : resource_filename('compliance_checker', 'tests/data/conv_multi.nc'),
'conv_bad' : resource_filename('compliance_checker', 'tests/data/conv_bad.nc'),
'example-grid' : resource_filename('compliance_checker', 'tests/data/example-grid.nc'),
'badname' : resource_filename('compliance_checker', 'tests/data/non-comp/badname.netcdf'),
'bad' : resource_filename('compliance_checker', 'tests/data/non-comp/bad.nc'),
'dimensionless' : resource_filename('compliance_checker', 'tests/data/dimensionless.nc'),
'2dim' : resource_filename('compliance_checker', 'tests/data/2dim-grid.nc'),
'bad2dim' : resource_filename('compliance_checker', 'tests/data/non-comp/bad2dim.nc'),
'rhgrid' : resource_filename('compliance_checker', 'tests/data/rhgrid.nc'),
'bad-rhgrid' : resource_filename('compliance_checker', 'tests/data/non-comp/bad-rhgrid.nc'),
'bad_data_type' : resource_filename('compliance_checker', 'tests/data/bad_data_type.nc'),
'mapping' : resource_filename('compliance_checker', 'tests/data/mapping.nc'),
'bad_region' : resource_filename('compliance_checker', 'tests/data/bad_region.nc'),
'featureType' : resource_filename('compliance_checker', 'tests/data/example-grid.nc'),
'cont_ragged' : resource_filename('compliance_checker', 'tests/data/cont_ragged.nc'),
'index_ragged' : resource_filename('compliance_checker', 'tests/data/index_ragged.nc'),
'bad_missing_data' : resource_filename('compliance_checker', 'tests/data/bad_missing_data.nc'),
'self-referencing-var' : resource_filename('compliance_checker', 'tests/data/self-referencing-var.nc'),
'scalar_coordinate_variable' : resource_filename('compliance_checker', 'tests/data/scalar_coordinate_variable.nc'),
'coordinates_and_metadata' : resource_filename('compliance_checker', 'tests/data/coordinates_and_metadata.nc'),
'ints64' : resource_filename('compliance_checker', 'tests/data/ints64.nc'),
'units_check' : resource_filename('compliance_checker', 'tests/data/units_check.nc'),
'self_referencing' : resource_filename('compliance_checker', 'tests/data/non-comp/self_referencing.nc'),
'time_units' : resource_filename('compliance_checker', 'tests/data/non-comp/time_units.nc'),
}
class MockVariable(object):
'''
For mocking a dataset variable
'''
pass
class TestCF(unittest.TestCase):
# @see
# http://www.saltycrane.com/blog/2012/07/how-prevent-nose-unittest-using-docstring-when-verbosity-2/
def shortDescription(self):
return None
# override __str__ and __repr__ behavior to show a copy-pastable nosetest name for ion tests
# ion.module:TestClassName.test_function_name
def __repr__(self):
name = self.id()
name = name.split('.')
if name[0] not in ["ion", "pyon"]:
return "%s (%s)" % (name[-1], '.'.join(name[:-1]))
else:
return "%s ( %s )" % (name[-1], '.'.join(name[:-2]) + ":" + '.'.join(name[-2:]))
__str__ = __repr__
def setUp(self):
'''
Initialize the dataset
'''
self.cf = CFBaseCheck()
#--------------------------------------------------------------------------------
# Helper Methods
#--------------------------------------------------------------------------------
def new_nc_file(self):
'''
Make a new temporary netCDF file for the scope of the test
'''
nc_file_path = os.path.join(gettempdir(), 'example.nc')
if os.path.exists(nc_file_path):
raise IOError('File Exists: %s' % nc_file_path)
nc = Dataset(nc_file_path, 'w')
self.addCleanup(os.remove, nc_file_path)
self.addCleanup(nc.close)
return nc
def get_pair(self, nc_dataset):
'''
Return a pairwise object for the dataset
'''
if isinstance(nc_dataset, basestring):
nc_dataset = Dataset(nc_dataset, 'r')
self.addCleanup(nc_dataset.close)
dogma = NetCDFDogma('nc', self.cf.beliefs(), nc_dataset)
pair = DSPair(nc_dataset, dogma)
return pair
def get_results(self, results):
'''
Returns a tuple of the value scored, possible, and a list of messages
in the result set.
'''
out_of = 0
scored = 0
for r in results:
if isinstance(r.value, tuple):
out_of += r.value[1]
scored += r.value[0]
else:
out_of += 1
scored += int(r.value)
# Store the messages
messages = []
for r in results:
messages.extend(r.msgs)
return scored, out_of, messages
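    # Illustrative sketch, not part of the original tests: shows how get_results
    # above aggregates boolean and (scored, possible) tuple values. The stub
    # class below is an assumption for illustration, not the compliance_checker
    # Result class; it only carries the attributes that get_results reads.
    def example_get_results_aggregation(self):
        class FakeResult(object):
            def __init__(self, value, msgs):
                self.value = value
                self.msgs = msgs
        results = [
            FakeResult(True, []),            # boolean result: counts as 1 of 1
            FakeResult((3, 4), ['msg A']),   # tuple result: counts as 3 of 4
        ]
        scored, out_of, messages = self.get_results(results)
        # scored == 4, out_of == 5, messages == ['msg A']
        return scored, out_of, messages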
#--------------------------------------------------------------------------------
# Compliance Tests
#--------------------------------------------------------------------------------
def test_check_data_types(self):
"""
2.2 The netCDF data types char, byte, short, int, float or real, and double are all acceptable
"""
dataset = self.get_pair(static_files['rutgers'])
result = self.cf.check_data_types(dataset)
self.assertTrue(result.value)
dpair = self.get_pair(static_files['bad_data_type'])
result = self.cf.check_data_types(dpair)
assert result.value == (5, 6)
def test_naming_conventions(self):
'''
Section 2.3 Naming Conventions
Variable, dimension and attribute names should begin with a letter and be composed of letters, digits, and underscores.
'''
dataset = self.get_pair(static_files['rutgers'])
result = self.cf.check_naming_conventions(dataset)
num_var = len(dataset.dataset.variables)
expected = (num_var,) * 2
self.assertEquals(result.value, expected)
dataset = self.get_pair(static_files['bad'])
result = self.cf.check_naming_conventions(dataset)
num_var = len(dataset.dataset.variables)
expected = (num_var-1, num_var)
self.assertEquals(result.value, expected)
        assert '_poor_dim' in result.msgs[0]
def test_check_names_unique(self):
"""
2.3 names should not be distinguished purely by case, i.e., if case is disregarded, no two names should be the same.
"""
dataset = self.get_pair(static_files['rutgers'])
result = self.cf.check_names_unique(dataset)
num_var = len(dataset.dataset.variables)
expected = (num_var,) * 2
self.assertEquals(result.value, expected)
#TODO: Add bad unique names to bad.nc
def test_check_dimension_names(self):
"""
2.4 A variable may have any number of dimensions, including zero, and the dimensions must all have different names.
"""
dataset = self.get_pair(static_files['bad_data_type'])
result = self.cf.check_dimension_names(dataset)
assert result.value == (5, 6)
def test_check_dimension_order(self):
"""
2.4 If any or all of the dimensions of a variable have the interpretations of "date or time" (T), "height or depth" (Z),
"latitude" (Y), or "longitude" (X) then we recommend, those dimensions to appear in the relative order T, then Z, then Y,
then X in the CDL definition corresponding to the file. All other dimensions should, whenever possible, be placed to the
left of the spatiotemporal dimensions.
"""
dataset = self.get_pair(static_files['bad_data_type'])
result = self.cf.check_dimension_order(dataset)
assert result.value == (11, 12)
def test_check_fill_value_outside_valid_range(self):
"""
2.5.1 The _FillValue should be outside the range specified by valid_range (if used) for a variable.
"""
dataset = self.get_pair(static_files['bad_data_type'])
results = self.cf.check_fill_value_outside_valid_range(dataset)
assert sum((result.value for result in results)) == 1
assert len(results) == 2
def test_check_conventions_are_cf_16(self):
"""
2.6.1 the NUG defined global attribute Conventions to the string value "CF-1.6"
"""
# :Conventions = "CF-1.6"
dataset = self.get_pair(static_files['rutgers'])
result = self.cf.check_conventions_are_cf_16(dataset)
self.assertTrue(result.value)
# :Conventions = "CF-1.6 ,ACDD" ;
dataset = self.get_pair(static_files['conv_multi'])
result = self.cf.check_conventions_are_cf_16(dataset)
self.assertTrue(result.value)
# :Conventions = "NoConvention"
dataset = self.get_pair(static_files['conv_bad'])
result = self.cf.check_conventions_are_cf_16(dataset)
self.assertFalse(result.value)
def test_check_convention_globals(self):
"""
        2.6.2 The title/history global attributes must be strings; they do not need to exist.
"""
#check for pass
dataset = self.get_pair(static_files['rutgers'])
result = self.cf.check_convention_globals(dataset)
for each in result:
self.assertTrue(each.value)
        # check that we still pass when the attributes do not exist
dataset = self.get_pair(static_files['bad_data_type'])
result = self.cf.check_convention_globals(dataset)
for each in result:
self.assertTrue(each.value)
def test_check_convention_possibly_var_attrs(self):
"""
3.1 The units attribute is required for all variables that represent dimensional quantities
(except for boundary variables defined in Section 7.1, "Cell Boundaries" and climatology variables
defined in Section 7.4, "Climatological Statistics").
Units are not required for dimensionless quantities. A variable with no units attribute is assumed
to be dimensionless. However, a units attribute specifying a dimensionless unit may optionally be
included.
- units required
- type must be recognized by udunits
- if std name specified, must be consistent with standard name table, must also be consistent with a
specified cell_methods attribute if present
"""
dataset = self.get_pair(static_files['rutgers'])
result = self.cf.check_convention_possibly_var_attrs(dataset)
for each in result:
self.assertTrue(each.value)
dataset = self.get_pair(static_files['bad_data_type'])
result = self.cf.check_convention_possibly_var_attrs(dataset)
for each in result:
self.assertFalse(each.value)
def test_check_standard_name(self):
"""
3.3 A standard name is associated with a variable via the attribute standard_name which takes a
string value comprised of a standard name optionally followed by one or more blanks and a
standard name modifier
"""
dataset = self.get_pair(static_files['2dim'])
result = self.cf.check_standard_name(dataset)
for each in result:
self.assertTrue(each.value)
dataset = self.get_pair(static_files['bad_data_type'])
result = self.cf.check_standard_name(dataset)
for each in result:
self.assertFalse(each.value)
def test_check_units(self):
dataset = self.get_pair(static_files['2dim'])
result = self.cf.check_units(dataset)
for each in result:
self.assertTrue(each.value)
dataset = self.get_pair(static_files['bad_data_type'])
result = self.cf.check_units(dataset)
for each in result:
self.assertFalse(each.value)
def test_coordinate_types(self):
'''
Section 4 Coordinate Types
We strongly recommend that coordinate variables be used for all coordinate types whenever they are applicable.
'''
dataset = self.get_pair(static_files['bad_data_type'])
result = self.cf.check_coordinate_vars_for_all_coordinate_types(dataset)
for each in result:
self.assertTrue(each.value)
def test_check_coordinate_axis_attr(self):
dataset = self.get_pair(static_files['2dim'])
result = self.cf.check_coordinate_axis_attr(dataset)
for each in result:
self.assertTrue(each.value)
dataset = self.get_pair(static_files['bad_data_type'])
result = self.cf.check_coordinate_axis_attr(dataset)
for each in result:
if each.name[1] in ['time', 'latitude']:
self.assertTrue(each.value)
if each.name[1] in ['salinity']:
if each.name[2] not in ['does_not_depend_on_mult_coord_vars']:
self.assertFalse(each.value)
def test_latitude(self):
'''
Section 4.1 Latitude Coordinate
'''
# Check compliance
dataset = self.get_pair(static_files['example-grid'])
results = self.cf.check_latitude(dataset)
for r in results:
if isinstance(r.value, tuple):
self.assertEquals(r.value[0], r.value[1])
else:
self.assertTrue(r.value)
# Verify non-compliance
dataset = self.get_pair(static_files['bad'])
results = self.cf.check_latitude(dataset)
scored, out_of, messages = self.get_results(results)
assert 'lat does not have units attribute' in messages
assert 'lat_uv units are acceptable, but not recommended' in messages
assert 'lat_like does not have units attribute' in messages
assert scored == 5
assert out_of == 12
def test_longitude(self):
'''
Section 4.2 Longitude Coordinate
'''
# Check compliance
dataset = self.get_pair(static_files['example-grid'])
results = self.cf.check_longitude(dataset)
for r in results:
if isinstance(r.value, tuple):
self.assertEquals(r.value[0], r.value[1])
else:
self.assertTrue(r.value)
# Verify non-compliance
dataset = self.get_pair(static_files['bad'])
results = self.cf.check_longitude(dataset)
scored, out_of, messages = self.get_results(results)
assert 'lon does not have units attribute' in messages
assert 'lon_uv units are acceptable, but not recommended' in messages
assert 'lon_like does not have units attribute' in messages
assert scored == 5
assert out_of == 12
def test_is_vertical_coordinate(self):
'''
Section 4.3 Qualifiers for Vertical Coordinate
NOTE: The standard doesn't explicitly say that vertical coordinates must be a
coordinate type.
'''
# Make something that I can attach attrs to
mock_variable = MockVariable
# Proper name/standard_name
known_name = mock_variable()
known_name.standard_name = 'depth'
self.assertTrue(is_vertical_coordinate('not_known', known_name))
# Proper Axis
axis_set = mock_variable()
axis_set.axis = 'Z'
self.assertTrue(is_vertical_coordinate('not_known', axis_set))
# Proper units
units_set = mock_variable()
units_set.units = 'dbar'
self.assertTrue(is_vertical_coordinate('not_known', units_set))
# Proper units/positive
positive = mock_variable()
positive.units = 'm'
positive.positive = 'up'
self.assertTrue(is_vertical_coordinate('not_known', positive))
def test_vertical_coordinate(self):
'''
Section 4.3 Vertical (Height or Depth) coordinate
'''
# Check compliance
dataset = self.get_pair(static_files['example-grid'])
results = self.cf.check_vertical_coordinate(dataset)
for r in results:
self.assertTrue(r.value)
# Check non-compliance
dataset = self.get_pair(static_files['bad'])
results = self.cf.check_vertical_coordinate(dataset)
scored, out_of, messages = self.get_results(results)
assert 'height does not have units' in messages
        assert 'vertical variable depth needs to define positive attribute' in messages
        assert 'vertical variable depth2 needs to define positive attribute' in messages
def test_vertical_dimension(self):
'''
Section 4.3.1 Dimensional Vertical Coordinate
'''
# Check for compliance
dataset = self.get_pair(static_files['example-grid'])
results = self.cf.check_dimensional_vertical_coordinate(dataset)
for r in results:
self.assertTrue(r.value)
# Check for non-compliance
dataset = self.get_pair(static_files['bad'])
results = self.cf.check_dimensional_vertical_coordinate(dataset)
for r in results:
self.assertFalse(r.value)
def test_appendix_d(self):
'''
CF 1.6
Appendix D
The definitions given here allow an application to compute dimensional
coordinate values from the dimensionless ones and associated variables.
The formulas are expressed for a gridpoint (n,k,j,i) where i and j are
the horizontal indices, k is the vertical index and n is the time index.
A coordinate variable is associated with its definition by the value of
the standard_name attribute. The terms in the definition are associated
with file variables by the formula_terms attribute. The formula_terms
attribute takes a string value, the string being comprised of
blank-separated elements of the form "term: variable", where term is a
keyword that represents one of the terms in the definition, and variable
is the name of the variable in a netCDF file that contains the values
for that term. The order of elements is not significant.
'''
dimless = dict(dimless_vertical_coordinates)
def verify(std_name, test_str):
regex_matches = re.match(dimless[std_name], test_str)
self.assertIsNotNone(regex_matches)
# For each of the listed dimensionless vertical coordinates,
# verify that the formula_terms match the provided regex
verify('atmosphere_ln_pressure_coordinate',
"p0: var1 lev: var2")
verify('atmosphere_sigma_coordinate',
"sigma: var1 ps: var2 ptop: var3")
verify('atmosphere_hybrid_sigma_pressure_coordinate',
"a: var1 b: var2 ps: var3 p0: var4")
verify('atmosphere_hybrid_height_coordinate',
"a: var1 b: var2 orog: var3")
verify('atmosphere_sleve_coordinate',
"a: var1 b1: var2 b2: var3 ztop: var4 zsurf1: var5 zsurf2: var6")
verify('ocean_sigma_coordinate',
"sigma: var1 eta: var2 depth: var3")
verify('ocean_s_coordinate',
"s: var1 eta: var2 depth: var3 a: var4 b: var5 depth_c: var6")
verify('ocean_sigma_z_coordinate',
"sigma: var1 eta: var2 depth: var3 depth_c: var4 nsigma: var5 zlev: var6")
verify('ocean_double_sigma_coordinate',
"sigma: var1 depth: var2 z1: var3 z2: var4 a: var5 href: var6 k_c: var7")
def test_dimensionless_vertical(self):
'''
Section 4.3.2
'''
# Check affirmative compliance
dataset = self.get_pair(static_files['dimensionless'])
results = self.cf.check_dimensionless_vertical_coordinate(dataset)
for r in results:
self.assertTrue(r.value)
# Check negative compliance
dataset = self.get_pair(static_files['bad'])
results = self.cf.check_dimensionless_vertical_coordinate(dataset)
scored, out_of, messages = self.get_results(results)
assert u'formula_terms missing from dimensionless coordinate lev1' in messages
assert u'formula_terms not defined for dimensionless coordinate lev1' in messages
assert u'var1 missing for dimensionless coordinate lev2' in messages
assert u'var2 missing for dimensionless coordinate lev2' in messages
assert u'var3 missing for dimensionless coordinate lev2' in messages
assert scored == 1
assert out_of == 4
def test_is_time_variable(self):
var1 = MockVariable()
var1.standard_name = 'time'
self.assertTrue(is_time_variable('not_time', var1))
var2 = MockVariable()
self.assertTrue(is_time_variable('time', var2))
self.assertFalse(is_time_variable('not_time', var2))
var3 = MockVariable()
var3.axis = 'T'
self.assertTrue(is_time_variable('maybe_time', var3))
var4 = MockVariable()
var4.units = 'seconds since 1900-01-01'
self.assertTrue(is_time_variable('maybe_time', var4))
def test_check_time_coordinate(self):
dataset = self.get_pair(static_files['example-grid'])
results = self.cf.check_time_coordinate(dataset)
for r in results:
self.assertTrue(r.value)
dataset = self.get_pair(static_files['bad'])
results = self.cf.check_time_coordinate(dataset)
scored, out_of, messages = self.get_results(results)
assert u'bad_time_1 does not have units' in messages
assert u'bad_time_2 doesn not have correct time units' in messages
assert scored == 1
assert out_of == 3
def test_check_calendar(self):
dataset = self.get_pair(static_files['example-grid'])
results = self.cf.check_calendar(dataset)
for r in results:
self.assertTrue(r.value)
dataset = self.get_pair(static_files['bad'])
results = self.cf.check_calendar(dataset)
scored, out_of, messages = self.get_results(results)
assert u'Variable bad_time_1 should have a calendar attribute' in messages
assert u"Variable bad_time_2 should have a valid calendar: 'nope' is not a valid calendar" in messages
def test_self_referencing(self):
'''
This test captures a check where a coordinate has circular references
'''
dataset = self.get_pair(static_files['self_referencing'])
results = self.cf.check_two_dimensional(dataset)
scored, out_of, messages = self.get_results(results)
assert u"Variable TEMP_H's coordinate references itself" in messages
assert scored == 0
assert out_of == 44
def test_check_independent_axis_dimensions(self):
dataset = self.get_pair(static_files['example-grid'])
results = self.cf.check_independent_axis_dimensions(dataset)
for r in results:
self.assertTrue(r.value)
dataset = self.get_pair(static_files['bad'])
results = self.cf.check_independent_axis_dimensions(dataset)
scored, out_of, messages = self.get_results(results)
assert u'The lev dimension for the variable lev1 does not have an associated coordinate variable, but is a Lat/Lon/Time/Height dimension.' \
in messages
assert u'The lev dimension for the variable lev2 does not have an associated coordinate variable, but is a Lat/Lon/Time/Height dimension.' \
in messages
assert u'The time dimension for the variable bad_time_1 does not have an associated coordinate variable, but is a Lat/Lon/Time/Height dimension.' \
in messages
assert u'The time dimension for the variable bad_time_2 does not have an associated coordinate variable, but is a Lat/Lon/Time/Height dimension.' \
in messages
assert u'The time dimension for the variable column_temp does not have an associated coordinate variable, but is a Lat/Lon/Time/Height dimension.' \
in messages
assert scored == 6
assert out_of == 11
def test_check_two_dimensional(self):
dataset = self.get_pair(static_files['2dim'])
results = self.cf.check_two_dimensional(dataset)
for r in results:
self.assertTrue(r.value)
# Need the bad testing
dataset = self.get_pair(static_files['bad2dim'])
results = self.cf.check_two_dimensional(dataset)
self.assertTrue(results[0].value)
self.assertFalse(results[1].value)
self.assertFalse(results[2].value)
self.assertTrue(results[3].value)
self.assertFalse(results[4].value)
self.assertTrue(results[5].value)
# Test the self referencing variables
dataset = self.get_pair(static_files['self-referencing-var'])
try:
results = self.cf.check_two_dimensional(dataset)
self.assertFalse(results[0].value)
        except Exception:
            self.fail('check_two_dimensional raised an exception for the self-referencing variable')
def test_check_reduced_horizontal_grid(self):
dataset = self.get_pair(static_files['rhgrid'])
results = self.cf.check_reduced_horizontal_grid(dataset)
rd = { r.name[1] : r.value for r in results }
self.assertTrue(rd['PS'])
dataset = self.get_pair(static_files['bad-rhgrid'])
results = self.cf.check_reduced_horizontal_grid(dataset)
rd = { r.name[1] : (r.value, r.msgs) for r in results }
for name, (value, msg) in rd.iteritems():
self.assertFalse(value)
self.assertIn('Coordinate longitude is not a proper variable', rd['PSa'][1])
self.assertIn("Coordinate latitude's dimension, latdim, is not a dimension of PSb", rd['PSb'][1])
assert 'PSc' not in rd.keys()
def test_check_horz_crs_grid_mappings_projections(self):
dataset = self.get_pair(static_files['mapping'])
results = self.cf.check_horz_crs_grid_mappings_projections(dataset)
rd = { r.name[1] : r.value for r in results }
assert rd['wgs84'] == (3, 3)
assert rd['epsg'] == (7, 8)
def test_check_scalar_coordinate_system(self):
dataset = self.get_pair(static_files['scalar_coordinate_variable'])
results = self.cf.check_scalar_coordinate_system(dataset)
self.assertEqual(len(results), 2)
for r in results:
if r.name[1] == 'HEIGHT':
self.assertEqual(r.value, (0, 1))
elif r.name[1] == 'DEPTH':
self.assertEqual(r.value, (2, 2))
else:
self.assertTrue(False, 'Unexpected variable in results of check_scalar_coordinate_system')
def test_check_geographic_region(self):
dataset = self.get_pair(static_files['bad_region'])
results = self.cf.check_geographic_region(dataset)
self.assertFalse(results[0].value)
self.assertTrue(results[1].value)
def test_check_alternative_coordinates(self):
dataset = self.get_pair(static_files['bad_data_type'])
results = self.cf.check_alternative_coordinates(dataset)
self.assertTrue(results[0].value)
#def test_check_cell_boundaries(self):
# dataset = self.get_pair(static_files['bad_data_type'])
# results = self.cf.check_cell_boundaries(dataset)
# print results
# self.assertTrue(results[0].value)
def test_check_packed_data(self):
dataset = self.get_pair(static_files['bad_data_type'])
results = self.cf.check_packed_data(dataset)
self.assertEqual(len(results), 4)
self.assertFalse(results[0].value)
self.assertTrue(results[1].value)
self.assertTrue(results[2].value)
self.assertFalse(results[3].value)
def test_check_compression(self):
dataset = self.get_pair(static_files['bad_data_type'])
results = self.cf.check_compression(dataset)
assert results[0].value == (2,2)
assert results[1].value == (0,2)
def test_check_all_features_are_same_type(self):
dataset = self.get_pair(static_files['rutgers'])
results = self.cf.check_all_features_are_same_type(dataset)
        assert results is None
dataset = self.get_pair(static_files['featureType'])
results = self.cf.check_all_features_are_same_type(dataset)
self.assertTrue(results.value)
dataset = self.get_pair(static_files['bad_data_type'])
results = self.cf.check_all_features_are_same_type(dataset)
self.assertFalse(results.value)
def test_check_orthogonal_multidim_array(self):
dataset = self.get_pair(static_files['rutgers'])
results = self.cf.check_orthogonal_multidim_array(dataset)
for each in results:
self.assertTrue(each.value)
def test_check_incomplete_multidim_array(self):
dataset = self.get_pair(static_files['bad_data_type'])
results = self.cf.check_incomplete_multidim_array(dataset)
for each in results:
self.assertTrue(each.value)
def test_check_contiguous_ragged_array(self):
dataset = self.get_pair(static_files['cont_ragged'])
results = self.cf.check_contiguous_ragged_array(dataset)
for each in results:
self.assertTrue(each.value)
def test_check_indexed_ragged_array(self):
dataset = self.get_pair(static_files['index_ragged'])
results = self.cf.check_indexed_ragged_array(dataset)
for each in results:
self.assertTrue(each.value)
def test_check_feature_type(self):
dataset = self.get_pair(static_files['index_ragged'])
results = self.cf.check_feature_type(dataset)
self.assertTrue(results.value)
dataset = self.get_pair(static_files['bad_data_type'])
results = self.cf.check_feature_type(dataset)
self.assertFalse(results.value)
def test_check_coordinates_and_metadata(self):
dataset = self.get_pair(static_files['bad_data_type'])
results = self.cf.check_coordinates_and_metadata(dataset)
self.assertFalse(results[0].value)
self.assertTrue(results[1].value)
self.assertFalse(results[2].value)
dataset = self.get_pair(static_files['index_ragged'])
results = self.cf.check_coordinates_and_metadata(dataset)
self.assertTrue(results[-1].value)
dataset = self.get_pair(static_files['coordinates_and_metadata'])
results = self.cf.check_coordinates_and_metadata(dataset)
self.assertTrue(len(results) == 2)
self.assertFalse(results[0].value)
self.assertFalse(results[1].value)
def test_check_missing_data(self):
dataset = self.get_pair(static_files['index_ragged'])
results = self.cf.check_missing_data(dataset)
for each in results:
self.assertTrue(each.value)
dataset = self.get_pair(static_files['bad_missing_data'])
results = self.cf.check_missing_data(dataset)
for each in results:
self.assertFalse(each.value)
    def test_check_units_skips_container_variables(self):
'''
Ensure that container variables are not checked for units but geophysical variables are
'''
dataset = self.get_pair(static_files['units_check'])
results = self.cf.check_units(dataset)
# We don't keep track of the variables names for checks that passed, so
# we can make a strict assertion about how many checks were performed
# and if there were errors, which there shouldn't be.
scored, out_of, messages = self.get_results(results)
assert scored == 4
assert out_of == 4
assert messages == []
def test_64bit(self):
dataset = self.get_pair(static_files['ints64'])
suite = CheckSuite()
suite.checkers = {
'cf' : CFBaseCheck
}
suite.run(dataset, 'cf')
def test_time_units(self):
dataset = self.get_pair(static_files['time_units'])
results = self.cf.check_units(dataset)
scored, out_of, messages = self.get_results(results)
assert u'units are days since 1970-01-01, standard_name units should be K' in messages
assert scored == 1
assert out_of == 2
#--------------------------------------------------------------------------------
# Utility Method Tests
#--------------------------------------------------------------------------------
def test_temporal_unit_conversion(self):
self.assertTrue(units_convertible('hours', 'seconds'))
self.assertFalse(units_convertible('hours', 'hours since 2000-01-01'))
def test_units_temporal(self):
self.assertTrue(units_temporal('hours since 2000-01-01'))
self.assertFalse(units_temporal('hours'))
self.assertFalse(units_temporal('days since the big bang'))
def breakpoint(scope=None, global_scope=None):
import traceback
from IPython.config.loader import Config
ipy_config = Config()
ipy_config.PromptManager.in_template = '><> '
ipy_config.PromptManager.in2_template = '... '
ipy_config.PromptManager.out_template = '--> '
ipy_config.InteractiveShellEmbed.confirm_exit = False
# First import the embeddable shell class
from IPython.frontend.terminal.embed import InteractiveShellEmbed
from mock import patch
if scope is not None:
locals().update(scope)
if global_scope is not None:
globals().update(global_scope)
# Update namespace of interactive shell
# TODO: Cleanup namespace even further
# Now create an instance of the embeddable shell. The first argument is a
# string with options exactly as you would type them if you were starting
# IPython at the system command line. Any parameters you want to define for
# configuration can thus be specified here.
with patch("IPython.core.interactiveshell.InteractiveShell.init_virtualenv"):
ipshell = InteractiveShellEmbed(config=ipy_config,
banner1="Entering Breakpoint Shell",
exit_msg = 'Returning...')
stack = traceback.extract_stack(limit=2)
message = 'File %s, line %s, in %s' % stack[0][:-1]
try:
import growl
growl.growl('breakpoint', 'Ready')
except:
pass
ipshell('(%s) Breakpoint @ %s' % ('breakpoint', message))
|
|
from random import randrange
import string
import math
import logging
class CaesarCipher(object):
def __init__(self, message=None, encode=False, decode=False, offset=False,
crack=None, verbose=None, alphabet=None):
"""
A class that encodes, decodes and cracks strings using the Caesar shift
cipher.
Accepts messages in a string and encodes or decodes by shifting the
value of the letter by an arbitrary integer to a different letter in
the alphabet provided.
http://en.wikipedia.org/wiki/Caesar_cipher
Do not ever use this for real communication, but definitely use it for
fun events like the Hacker Olympics.
Attributes:
message: The string you wish to encode.
            encode: A boolean indicating the desire to encode the string, used
                as a command line script flag.
            decode: A boolean indicating the desire to decode the string, used
                as a command line script flag.
            crack: A boolean indicating the desire to crack the string, used
                as a command line script flag.
            verbose: A boolean indicating the desire to turn on debug output,
                used as a command line script flag.
            offset: Integer by which you want to shift the value of a letter.
            alphabet: A tuple containing the ASCII alphabet in lowercase.
Examples:
            Encode a string with a random letter offset (the exact output
            depends on the randomly chosen offset; offset 14 is shown here).
>>> cipher = CaesarCipher('I want to encode this string.')
>>> cipher.encoded
'W kobh hc sbqcrs hvwg ghfwbu.'
Encode a string with a specific letter offset.
>>> cipher = CaesarCipher('I want to encode this string.',
... offset=14)
>>> cipher.encoded
'W kobh hc sbqcrs hvwg ghfwbu.'
Decode a string with a specific letter offset.
>>> cipher = CaesarCipher('W kobh hc sbqcrs hvwg ghfwbu.',
... offset=14)
>>> cipher.decoded
'I want to encode this string.'
Crack a string of ciphertext without knowing the letter offset.
>>> cipher = CaesarCipher('W kobh hc sbqcrs hvwg ghfwbu.')
>>> cipher.cracked
'I want to encode this string.'
"""
self.message = message
self.encode = encode
self.decode = decode
self.offset = offset
self.verbose = verbose
self.crack = crack
self.alphabet = alphabet
# Frequency of letters used in English, taken from Wikipedia.
# http://en.wikipedia.org/wiki/Letter_frequency
self.frequency = {
'a': 0.08167,
'b': 0.01492,
'c': 0.02782,
'd': 0.04253,
'e': 0.130001,
'f': 0.02228,
'g': 0.02015,
'h': 0.06094,
'i': 0.06966,
'j': 0.00153,
'k': 0.00772,
'l': 0.04025,
'm': 0.02406,
'n': 0.06749,
'o': 0.07507,
'p': 0.01929,
'q': 0.00095,
'r': 0.05987,
's': 0.06327,
't': 0.09056,
'u': 0.02758,
'v': 0.00978,
'w': 0.02360,
'x': 0.00150,
'y': 0.01974,
'z': 0.00074}
# Get ASCII alphabet if one is not provided by the user.
if alphabet is None:
self.alphabet = tuple(string.ascii_lowercase)
def cipher(self):
"""Applies the Caesar shift cipher.
Based on the attributes of the object, applies the Caesar shift cipher
to the message attribute. Accepts positive and negative integers as
offsets.
Required attributes:
message
offset
Returns:
String with cipher applied.
"""
# If no offset is selected, pick random one with sufficient distance
# from original.
if self.offset is False:
self.offset = randrange(5, 25)
logging.info("Random offset selected: {0}".format(self.offset))
logging.debug("Offset set: {0}".format(self.offset))
# Cipher
ciphered_message_list = list(self.message)
for i, letter in enumerate(ciphered_message_list):
if letter.isalpha():
# Use default upper and lower case characters if alphabet
# not supplied by user.
if letter.isupper():
alphabet = [character.upper()
for character in self.alphabet]
else:
alphabet = self.alphabet
logging.debug("Letter: {0}".format(letter))
logging.debug("Alphabet: {0}".format(alphabet))
value = alphabet.index(letter)
cipher_value = value + self.offset
if cipher_value > 25 or cipher_value < 0:
cipher_value = cipher_value % 26
logging.debug("Cipher value: {0}".format(cipher_value))
ciphered_message_list[i] = alphabet[cipher_value]
logging.debug("Ciphered letter: {0}".format(letter))
self.message = ''.join(ciphered_message_list)
return self.message
def calculate_entropy(self, entropy_string):
"""Calculates the entropy of a string based on known frequency of
English letters.
Args:
entropy_string: A str representing the string to calculate.
Returns:
            A positive float giving the total entropy of the string (lower
            values indicate text that is more likely to be English).
"""
total = 0
for char in entropy_string:
if char.isalpha():
prob = self.frequency[char.lower()]
total += - math.log(prob) / math.log(2)
logging.debug("Entropy score: {0}".format(total))
return total
@property
def cracked(self):
"""Attempts to crack ciphertext using frequency of letters in English.
Returns:
String of most likely message.
"""
logging.info("Cracking message: {0}".format(self.message))
entropy_values = {}
attempt_cache = {}
message = self.message
        for i in range(26):
self.message = message
self.offset = i * -1
logging.debug("Attempting crack with offset: "
"{0}".format(self.offset))
test_cipher = self.cipher()
logging.debug("Attempting plaintext: {0}".format(test_cipher))
entropy_values[i] = self.calculate_entropy(test_cipher)
attempt_cache[i] = test_cipher
sorted_by_entropy = sorted(entropy_values, key=entropy_values.get)
self.offset = sorted_by_entropy[0] * -1
cracked_text = attempt_cache[sorted_by_entropy[0]]
self.message = cracked_text
logging.debug("Entropy scores: {0}".format(entropy_values))
logging.debug("Lowest entropy score: "
"{0}".format(str(entropy_values[sorted_by_entropy[0]])))
logging.debug("Most likely offset: {0}".format(self.offset))
logging.debug("Most likely message: {0}".format(cracked_text))
return cracked_text
@property
def encoded(self):
"""Encodes message using Caesar shift cipher
Returns:
String encoded with cipher.
"""
logging.info("Encoding message: {0}".format(self.message))
return self.cipher()
@property
def decoded(self):
"""Decodes message using Caesar shift cipher
Inverse operation of encoding, applies negative offset to Caesar shift
cipher.
Returns:
String decoded with cipher.
"""
logging.info("Decoding message: {0}".format(self.message))
self.offset = self.offset * -1
return self.cipher()
class CaesarCipherError(Exception):
def __init__(self, message):
logging.error("ERROR: {0}".format(message))
logging.error("Try running with --help for more information.")
|
|
# Copyright 2016-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from botocore.exceptions import ClientError
from concurrent.futures import as_completed
from c7n.actions import BaseAction
from c7n.filters import FilterRegistry
from c7n import query
from c7n.manager import resources
from c7n.tags import TagDelayedAction, RemoveTag, TagActionFilter, Tag
from c7n.utils import (
local_session, get_retry, chunks, type_schema)
filters = FilterRegistry('dynamodb-table.filters')
filters.register('marked-for-op', TagActionFilter)
@resources.register('dynamodb-table')
class Table(query.QueryResourceManager):
class resource_type(object):
service = 'dynamodb'
type = 'table'
enum_spec = ('list_tables', 'TableNames', None)
detail_spec = ("describe_table", "TableName", None, "Table")
id = 'Table'
filter_name = None
name = 'TableName'
date = 'CreationDateTime'
dimension = 'TableName'
config_type = 'AWS::DynamoDB::Table'
filter_registry = filters
retry = staticmethod(get_retry(('Throttled',)))
        permissions = ('dynamodb:ListTagsOfResource',)
def get_source(self, source_type):
if source_type == 'describe':
return DescribeTable(self)
elif source_type == 'config':
return query.ConfigSource(self)
raise ValueError('invalid source %s' % source_type)
class DescribeTable(query.DescribeSource):
def augment(self, tables):
resources = super(DescribeTable, self).augment(tables)
return list(filter(None, _dynamodb_table_tags(
self.manager.get_model(),
resources,
self.manager.session_factory,
self.manager.executor_factory,
self.manager.retry,
self.manager.log)))
def _dynamodb_table_tags(
model, tables, session_factory, executor_factory, retry, log):
""" Augment DynamoDB tables with their respective tags
"""
def process_tags(table):
client = local_session(session_factory).client('dynamodb')
arn = table['TableArn']
try:
tag_list = retry(
client.list_tags_of_resource,
ResourceArn=arn)['Tags']
except ClientError as e:
log.warning("Exception getting DynamoDB tags \n %s", e)
return None
table['Tags'] = tag_list or []
return table
with executor_factory(max_workers=2) as w:
return list(w.map(process_tags, tables))
class StatusFilter(object):
"""Filter tables by status"""
valid_states = ()
def filter_table_state(self, tables, states=None):
states = states or self.valid_states
orig_count = len(tables)
result = [t for t in tables if t['TableStatus'] in states]
self.log.info("%s %d of %d tables" % (
self.__class__.__name__, len(result), orig_count))
return result
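# Illustrative sketch, not part of the original module: StatusFilter is used as
# a mixin by the actions below, so the stand-alone subclass and logger here are
# assumptions purely for demonstrating filter_table_state.
def _status_filter_example():
    import logging

    class DemoStatusFilter(StatusFilter):
        valid_states = ('ACTIVE',)
        log = logging.getLogger('custodian.dynamodb.demo')

    tables = [
        {'TableName': 'orders', 'TableStatus': 'ACTIVE'},
        {'TableName': 'users', 'TableStatus': 'DELETING'},
    ]
    # Keeps only the ACTIVE table and logs how many were kept.
    return DemoStatusFilter().filter_table_state(tables)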
@Table.action_registry.register('mark-for-op')
class TagDelayedAction(TagDelayedAction):
"""Action to specify an action to occur at a later date
:example:
    .. code-block:: yaml
policies:
- name: dynamo-mark-tag-compliance
resource: dynamodb-table
filters:
- "tag:custodian_cleanup": absent
- "tag:OwnerName": absent
actions:
- type: mark-for-op
tag: custodian_cleanup
msg: "Cluster does not have valid OwnerName tag: {op}@{action_date}"
op: delete
days: 7
"""
    permissions = ('dynamodb:TagResource',)
batch_size = 1
def process_resource_set(self, tables, tags):
client = local_session(self.manager.session_factory).client(
'dynamodb')
for t in tables:
arn = t['TableArn']
client.tag_resource(ResourceArn=arn, Tags=tags)
@Table.action_registry.register('tag')
class TagTable(Tag):
"""Action to create tag(s) on a resource
:example:
    .. code-block:: yaml
policies:
- name: dynamodb-tag-table
resource: dynamodb-table
filters:
- "tag:target-tag": absent
actions:
- type: tag
key: target-tag
value: target-tag-value
"""
permissions = ('dynamodb:TagResource',)
batch_size = 1
def process_resource_set(self, tables, tags):
client = local_session(self.manager.session_factory).client('dynamodb')
for t in tables:
arn = t['TableArn']
client.tag_resource(ResourceArn=arn, Tags=tags)
@Table.action_registry.register('remove-tag')
class UntagTable(RemoveTag):
"""Action to remove tag(s) on a resource
:example:
    .. code-block:: yaml
policies:
- name: dynamodb-remove-tag
resource: dynamodb-table
filters:
- "tag:OutdatedTag": present
actions:
- type: remove-tag
tags: ["OutdatedTag"]
"""
concurrency = 2
batch_size = 5
permissions = ('dynamodb:UntagResource',)
def process_resource_set(self, tables, tag_keys):
client = local_session(
self.manager.session_factory).client('dynamodb')
for t in tables:
arn = t['TableArn']
client.untag_resource(
ResourceArn=arn, TagKeys=tag_keys)
@Table.action_registry.register('delete')
class DeleteTable(BaseAction, StatusFilter):
"""Action to delete dynamodb tables
:example:
    .. code-block:: yaml
policies:
- name: delete-empty-tables
resource: dynamodb-table
filters:
- TableSizeBytes: 0
actions:
- delete
"""
valid_status = ('ACTIVE',)
schema = type_schema('delete')
permissions = ("dynamodb:DeleteTable",)
def delete_table(self, table_set):
client = local_session(self.manager.session_factory).client('dynamodb')
for t in table_set:
client.delete_table(TableName=t['TableName'])
def process(self, resources):
resources = self.filter_table_state(
resources, self.valid_status)
if not len(resources):
return
for table_set in chunks(resources, 20):
with self.executor_factory(max_workers=3) as w:
futures = []
futures.append(w.submit(self.delete_table, table_set))
for f in as_completed(futures):
if f.exception():
self.log.error(
"Exception deleting dynamodb table set \n %s" % (
f.exception()))
@resources.register('dynamodb-stream')
class Stream(query.QueryResourceManager):
# Note stream management takes place on the table resource
class resource_type(object):
service = 'dynamodbstreams'
# Note max rate of 5 calls per second
enum_spec = ('list_streams', 'Streams', None)
# Note max rate of 10 calls per second.
detail_spec = (
"describe_stream", "StreamArn", "StreamArn", "StreamDescription")
id = 'StreamArn'
# TODO, we default to filtering by id, but the api takes table names, which
# require additional client side filtering as multiple streams may be present
# per table.
# filter_name = 'TableName'
filter_name = None
name = 'TableName'
date = 'CreationDateTime'
dimension = 'TableName'
|
|
#!/usr/bin/env python
# Copyright 2017 The PDFium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Compares the performance of two versions of the pdfium code."""
import argparse
import functools
import glob
import json
import multiprocessing
import os
import re
import shutil
import subprocess
import sys
import tempfile
# pylint: disable=relative-import
from common import GetBooleanGnArg
from common import PrintErr
from common import RunCommandPropagateErr
from githelper import GitHelper
from safetynet_conclusions import ComparisonConclusions
from safetynet_conclusions import PrintConclusionsDictHumanReadable
from safetynet_conclusions import RATING_IMPROVEMENT
from safetynet_conclusions import RATING_REGRESSION
from safetynet_image import ImageComparison
def RunSingleTestCaseParallel(this, run_label, build_dir, test_case):
result = this.RunSingleTestCase(run_label, build_dir, test_case)
return (test_case, result)
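# Illustrative sketch, not part of the original script: RunSingleTestCaseParallel
# is kept at module level so multiprocessing can pickle it. The pool/worker
# wiring below is an assumption showing how a caller might fan out test cases,
# not necessarily how this script dispatches its runs.
def _parallel_measure_sketch(
    compare_run, run_label, build_dir, test_cases, num_workers):
  """Return a dict mapping each test case to its measured result."""
  pool = multiprocessing.Pool(num_workers)
  worker = functools.partial(
      RunSingleTestCaseParallel, compare_run, run_label, build_dir)
  try:
    # imap yields (test_case, result) tuples as the workers finish them.
    return dict(pool.imap(worker, test_cases))
  finally:
    pool.close()
    pool.join()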
class CompareRun(object):
"""A comparison between two branches of pdfium."""
def __init__(self, args):
self.git = GitHelper()
self.args = args
self._InitPaths()
def _InitPaths(self):
if self.args.this_repo:
self.safe_script_dir = self.args.build_dir
else:
self.safe_script_dir = os.path.join('testing', 'tools')
self.safe_measure_script_path = os.path.abspath(
os.path.join(self.safe_script_dir, 'safetynet_measure.py'))
input_file_re = re.compile('^.+[.]pdf$')
self.test_cases = []
for input_path in self.args.input_paths:
if os.path.isfile(input_path):
self.test_cases.append(input_path)
elif os.path.isdir(input_path):
for file_dir, _, filename_list in os.walk(input_path):
for input_filename in filename_list:
if input_file_re.match(input_filename):
file_path = os.path.join(file_dir, input_filename)
if os.path.isfile(file_path):
self.test_cases.append(file_path)
self.after_build_dir = self.args.build_dir
if self.args.build_dir_before:
self.before_build_dir = self.args.build_dir_before
else:
self.before_build_dir = self.after_build_dir
def Run(self):
"""Runs comparison by checking out branches, building and measuring them.
Returns:
Exit code for the script.
"""
if self.args.this_repo:
self._FreezeMeasureScript()
if self.args.branch_after:
if self.args.this_repo:
before, after = self._ProfileTwoOtherBranchesInThisRepo(
self.args.branch_before, self.args.branch_after)
else:
before, after = self._ProfileTwoOtherBranches(self.args.branch_before,
self.args.branch_after)
elif self.args.branch_before:
if self.args.this_repo:
before, after = self._ProfileCurrentAndOtherBranchInThisRepo(
self.args.branch_before)
else:
before, after = self._ProfileCurrentAndOtherBranch(
self.args.branch_before)
else:
if self.args.this_repo:
before, after = self._ProfileLocalChangesAndCurrentBranchInThisRepo()
else:
before, after = self._ProfileLocalChangesAndCurrentBranch()
conclusions = self._DrawConclusions(before, after)
conclusions_dict = conclusions.GetOutputDict()
conclusions_dict.setdefault('metadata', {})['profiler'] = self.args.profiler
self._PrintConclusions(conclusions_dict)
self._CleanUp(conclusions)
if self.args.png_dir:
image_comparison = ImageComparison(
self.after_build_dir, self.args.png_dir, ('before', 'after'),
self.args.num_workers, self.args.png_threshold)
image_comparison.Run(open_in_browser=not self.args.machine_readable)
return 0
def _FreezeMeasureScript(self):
"""Freezes a version of the measuring script.
This is needed to make sure we are comparing the pdfium library changes and
not script changes that may happen between the two branches.
"""
self.__FreezeFile(os.path.join('testing', 'tools', 'safetynet_measure.py'))
self.__FreezeFile(os.path.join('testing', 'tools', 'common.py'))
def __FreezeFile(self, filename):
RunCommandPropagateErr(['cp', filename, self.safe_script_dir],
exit_status_on_error=1)
def _ProfileTwoOtherBranchesInThisRepo(self, before_branch, after_branch):
"""Profiles two branches that are not the current branch.
This is done in the local repository and changes may not be restored if the
script fails or is interrupted.
    after_branch does not need to descend from before_branch; they will be
    measured the same way.
Args:
before_branch: One branch to profile.
after_branch: Other branch to profile.
Returns:
A tuple (before, after), where each of before and after is a dict
mapping a test case name to the profiling values for that test case
in the given branch.
"""
branch_to_restore = self.git.GetCurrentBranchName()
self._StashLocalChanges()
self._CheckoutBranch(after_branch)
self._BuildCurrentBranch(self.after_build_dir)
after = self._MeasureCurrentBranch('after', self.after_build_dir)
self._CheckoutBranch(before_branch)
self._BuildCurrentBranch(self.before_build_dir)
before = self._MeasureCurrentBranch('before', self.before_build_dir)
self._CheckoutBranch(branch_to_restore)
self._RestoreLocalChanges()
return before, after
def _ProfileTwoOtherBranches(self, before_branch, after_branch):
"""Profiles two branches that are not the current branch.
This is done in new, cloned repositories, therefore it is safer but slower
and requires downloads.
    after_branch does not need to descend from before_branch; they will be
    measured the same way.
Args:
before_branch: One branch to profile.
after_branch: Other branch to profile.
Returns:
A tuple (before, after), where each of before and after is a dict
mapping a test case name to the profiling values for that test case
in the given branch.
"""
after = self._ProfileSeparateRepo('after', self.after_build_dir,
after_branch)
before = self._ProfileSeparateRepo('before', self.before_build_dir,
before_branch)
return before, after
def _ProfileCurrentAndOtherBranchInThisRepo(self, other_branch):
"""Profiles the current branch (with uncommitted changes) and another one.
This is done in the local repository and changes may not be restored if the
script fails or is interrupted.
The current branch does not need to descend from other_branch.
Args:
other_branch: Other branch to profile that is not the current.
Returns:
A tuple (before, after), where each of before and after is a dict
mapping a test case name to the profiling values for that test case
in the given branch. The current branch is considered to be "after" and
the other branch is considered to be "before".
"""
branch_to_restore = self.git.GetCurrentBranchName()
self._BuildCurrentBranch(self.after_build_dir)
after = self._MeasureCurrentBranch('after', self.after_build_dir)
self._StashLocalChanges()
self._CheckoutBranch(other_branch)
self._BuildCurrentBranch(self.before_build_dir)
before = self._MeasureCurrentBranch('before', self.before_build_dir)
self._CheckoutBranch(branch_to_restore)
self._RestoreLocalChanges()
return before, after
def _ProfileCurrentAndOtherBranch(self, other_branch):
"""Profiles the current branch (with uncommitted changes) and another one.
This is done in new, cloned repositories, therefore it is safer but slower
and requires downloads.
The current branch does not need to descend from other_branch.
Args:
other_branch: Other branch to profile that is not the current. None will
compare to the same branch.
Returns:
A tuple (before, after), where each of before and after is a dict
mapping a test case name to the profiling values for that test case
in the given branch. The current branch is considered to be "after" and
the other branch is considered to be "before".
"""
self._BuildCurrentBranch(self.after_build_dir)
after = self._MeasureCurrentBranch('after', self.after_build_dir)
before = self._ProfileSeparateRepo('before', self.before_build_dir,
other_branch)
return before, after
def _ProfileLocalChangesAndCurrentBranchInThisRepo(self):
"""Profiles the current branch with and without uncommitted changes.
This is done in the local repository and changes may not be restored if the
script fails or is interrupted.
Returns:
A tuple (before, after), where each of before and after is a dict
mapping a test case name to the profiling values for that test case
using the given version. The current branch without uncommitted changes is
considered to be "before" and with uncommitted changes is considered to be
"after".
"""
self._BuildCurrentBranch(self.after_build_dir)
after = self._MeasureCurrentBranch('after', self.after_build_dir)
pushed = self._StashLocalChanges()
if not pushed and not self.args.build_dir_before:
PrintErr('Warning: No local changes to compare')
before_build_dir = self.before_build_dir
self._BuildCurrentBranch(before_build_dir)
before = self._MeasureCurrentBranch('before', before_build_dir)
self._RestoreLocalChanges()
return before, after
def _ProfileLocalChangesAndCurrentBranch(self):
"""Profiles the current branch with and without uncommitted changes.
    This is done in new, cloned repositories, so it is safer but slower and
    requires downloads.
Returns:
A tuple (before, after), where each of before and after is a dict
mapping a test case name to the profiling values for that test case
using the given version. The current branch without uncommitted changes is
considered to be "before" and with uncommitted changes is considered to be
"after".
"""
return self._ProfileCurrentAndOtherBranch(other_branch=None)
def _ProfileSeparateRepo(self, run_label, relative_build_dir, branch):
"""Profiles a branch in a a temporary git repository.
Args:
run_label: String to differentiate this version of the code in output
files from other versions.
relative_build_dir: Path to the build dir in the current working dir to
clone build args from.
branch: Branch to checkout in the new repository. None will
profile the same branch checked out in the original repo.
Returns:
A dict mapping each test case name to the profiling values for that
test case.
"""
build_dir = self._CreateTempRepo('repo_%s' % run_label, relative_build_dir,
branch)
self._BuildCurrentBranch(build_dir)
return self._MeasureCurrentBranch(run_label, build_dir)
def _CreateTempRepo(self, dir_name, relative_build_dir, branch):
"""Clones a temporary git repository out of the current working dir.
Args:
dir_name: Name for the temporary repository directory
relative_build_dir: Path to the build dir in the current working dir to
clone build args from.
branch: Branch to checkout in the new repository. None will keep checked
out the same branch as the local repo.
Returns:
Path to the build directory of the new repository.
"""
cwd = os.getcwd()
repo_dir = tempfile.mkdtemp(suffix='-%s' % dir_name)
src_dir = os.path.join(repo_dir, 'pdfium')
self.git.CloneLocal(os.getcwd(), src_dir)
if branch is not None:
os.chdir(src_dir)
self.git.Checkout(branch)
os.chdir(repo_dir)
PrintErr('Syncing...')
cmd = [
'gclient', 'config', '--unmanaged',
'https://pdfium.googlesource.com/pdfium.git'
]
if self.args.cache_dir:
cmd.append('--cache-dir=%s' % self.args.cache_dir)
RunCommandPropagateErr(cmd, exit_status_on_error=1)
RunCommandPropagateErr(['gclient', 'sync', '--force'],
exit_status_on_error=1)
PrintErr('Done.')
build_dir = os.path.join(src_dir, relative_build_dir)
os.makedirs(build_dir)
os.chdir(src_dir)
source_gn_args = os.path.join(cwd, relative_build_dir, 'args.gn')
dest_gn_args = os.path.join(build_dir, 'args.gn')
shutil.copy(source_gn_args, dest_gn_args)
RunCommandPropagateErr(['gn', 'gen', relative_build_dir],
exit_status_on_error=1)
os.chdir(cwd)
return build_dir
def _CheckoutBranch(self, branch):
PrintErr("Checking out branch '%s'" % branch)
self.git.Checkout(branch)
def _StashLocalChanges(self):
PrintErr('Stashing local changes')
return self.git.StashPush()
def _RestoreLocalChanges(self):
PrintErr('Restoring local changes')
self.git.StashPopAll()
def _BuildCurrentBranch(self, build_dir):
"""Synchronizes and builds the current version of pdfium.
Args:
build_dir: String with path to build directory
"""
PrintErr('Syncing...')
RunCommandPropagateErr(['gclient', 'sync', '--force'],
exit_status_on_error=1)
PrintErr('Done.')
PrintErr('Building...')
cmd = ['ninja', '-C', build_dir, 'pdfium_test']
if GetBooleanGnArg('use_goma', build_dir):
cmd.extend(['-j', '250'])
RunCommandPropagateErr(cmd, stdout_has_errors=True, exit_status_on_error=1)
PrintErr('Done.')
def _MeasureCurrentBranch(self, run_label, build_dir):
PrintErr('Measuring...')
if self.args.num_workers > 1 and len(self.test_cases) > 1:
results = self._RunAsync(run_label, build_dir)
else:
results = self._RunSync(run_label, build_dir)
PrintErr('Done.')
return results
def _RunSync(self, run_label, build_dir):
"""Profiles the test cases synchronously.
Args:
run_label: String to differentiate this version of the code in output
files from other versions.
build_dir: String with path to build directory
Returns:
A dict mapping each test case name to the profiling values for that
test case.
"""
results = {}
for test_case in self.test_cases:
result = self.RunSingleTestCase(run_label, build_dir, test_case)
if result is not None:
results[test_case] = result
return results
def _RunAsync(self, run_label, build_dir):
"""Profiles the test cases asynchronously.
Uses as many workers as configured by --num-workers.
Args:
run_label: String to differentiate this version of the code in output
files from other versions.
build_dir: String with path to build directory
Returns:
A dict mapping each test case name to the profiling values for that
test case.
"""
results = {}
pool = multiprocessing.Pool(self.args.num_workers)
worker_func = functools.partial(RunSingleTestCaseParallel, self, run_label,
build_dir)
try:
# The timeout is a workaround for http://bugs.python.org/issue8296
# which prevents KeyboardInterrupt from working.
one_year_in_seconds = 3600 * 24 * 365
worker_results = (
pool.map_async(worker_func, self.test_cases).get(one_year_in_seconds))
for worker_result in worker_results:
test_case, result = worker_result
if result is not None:
results[test_case] = result
except KeyboardInterrupt:
pool.terminate()
sys.exit(1)
else:
pool.close()
pool.join()
return results
def RunSingleTestCase(self, run_label, build_dir, test_case):
"""Profiles a single test case.
Args:
run_label: String to differentiate this version of the code in output
files from other versions.
build_dir: String with path to build directory
test_case: Path to the test case.
Returns:
The measured profiling value for that test case.
"""
command = [
self.safe_measure_script_path, test_case,
'--build-dir=%s' % build_dir
]
if self.args.interesting_section:
command.append('--interesting-section')
if self.args.profiler:
command.append('--profiler=%s' % self.args.profiler)
profile_file_path = self._GetProfileFilePath(run_label, test_case)
if profile_file_path:
command.append('--output-path=%s' % profile_file_path)
if self.args.png_dir:
command.append('--png')
if self.args.pages:
command.extend(['--pages', self.args.pages])
output = RunCommandPropagateErr(command)
if output is None:
return None
if self.args.png_dir:
self._MoveImages(test_case, run_label)
# Get the time number as output, making sure it's just a number
output = output.strip()
if re.match('^[0-9]+$', output):
return int(output)
return None
def _MoveImages(self, test_case, run_label):
png_dir = os.path.join(self.args.png_dir, run_label)
if not os.path.exists(png_dir):
os.makedirs(png_dir)
test_case_dir, test_case_filename = os.path.split(test_case)
test_case_png_matcher = '%s.*.png' % test_case_filename
for output_png in glob.glob(
os.path.join(test_case_dir, test_case_png_matcher)):
shutil.move(output_png, png_dir)
def _GetProfileFilePath(self, run_label, test_case):
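    # Profile output files are written into --output-dir and named
    # "callgrind.out.<test case path with '/' replaced by '_'>.<run_label>".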
if self.args.output_dir:
output_filename = (
'callgrind.out.%s.%s' % (test_case.replace('/', '_'), run_label))
return os.path.join(self.args.output_dir, output_filename)
else:
return None
def _DrawConclusions(self, times_before_branch, times_after_branch):
"""Draws conclusions comparing results of test runs in two branches.
Args:
times_before_branch: A dict mapping each test case name to the
profiling values for that test case in the branch to be considered
as the baseline.
times_after_branch: A dict mapping each test case name to the
profiling values for that test case in the branch to be considered
as the new version.
Returns:
ComparisonConclusions with all test cases processed.
"""
conclusions = ComparisonConclusions(self.args.threshold_significant)
for test_case in sorted(self.test_cases):
before = times_before_branch.get(test_case)
after = times_after_branch.get(test_case)
conclusions.ProcessCase(test_case, before, after)
return conclusions
def _PrintConclusions(self, conclusions_dict):
"""Prints the conclusions as the script output.
    Depending on the script args, this can output a human-readable or a
    machine-readable version of the conclusions.
Args:
conclusions_dict: Dict to print returned from
ComparisonConclusions.GetOutputDict().
"""
if self.args.machine_readable:
print json.dumps(conclusions_dict)
else:
PrintConclusionsDictHumanReadable(
conclusions_dict, colored=True, key=self.args.case_order)
def _CleanUp(self, conclusions):
"""Removes profile output files for uninteresting cases.
    Cases without significant regressions or improvements are considered
    uninteresting.
Args:
conclusions: A ComparisonConclusions.
"""
if not self.args.output_dir:
return
if self.args.profiler != 'callgrind':
return
for case_result in conclusions.GetCaseResults().values():
if case_result.rating not in [RATING_REGRESSION, RATING_IMPROVEMENT]:
self._CleanUpOutputFile('before', case_result.case_name)
self._CleanUpOutputFile('after', case_result.case_name)
def _CleanUpOutputFile(self, run_label, case_name):
"""Removes one profile output file.
If the output file does not exist, fails silently.
Args:
run_label: String to differentiate a version of the code in output
files from other versions.
case_name: String identifying test case for which to remove the output
file.
"""
try:
os.remove(self._GetProfileFilePath(run_label, case_name))
except OSError:
pass
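# A sketch of one way to invoke this script (file, directory and branch names
# below are illustrative): compare the current working tree against another
# branch inside this checkout and keep callgrind profiles for significant
# changes:
#
#   safetynet_compare.py my_pdfs/ --this-repo --branch-before origin/main \
#       --output-dir=/tmp/profiles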
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'input_paths',
nargs='+',
help='pdf files or directories to search for pdf files '
'to run as test cases')
parser.add_argument(
'--branch-before',
help='git branch to use as "before" for comparison. '
'Omitting this will use the current branch '
'without uncommitted changes as the baseline.')
parser.add_argument(
'--branch-after',
help='git branch to use as "after" for comparison. '
'Omitting this will use the current branch '
'with uncommitted changes.')
parser.add_argument(
'--build-dir',
default=os.path.join('out', 'Release'),
help='relative path from the base source directory '
'to the build directory')
parser.add_argument(
'--build-dir-before',
help='relative path from the base source directory '
'to the build directory for the "before" branch, if '
'different from the build directory for the '
'"after" branch')
parser.add_argument(
'--cache-dir',
default=None,
help='directory with a new or preexisting cache for '
'downloads. Default is to not use a cache.')
parser.add_argument(
'--this-repo',
action='store_true',
      help='use the repository where the script is instead of '
      'checking out a temporary one. This is faster and '
      'does not require downloads. Although it restores '
      'the state of the local repo, if the script is '
      'killed or crashes the changes can remain stashed '
      'and you may be left on another branch.')
parser.add_argument(
'--profiler',
default='callgrind',
help='which profiler to use. Supports callgrind, '
'perfstat, and none. Default is callgrind.')
parser.add_argument(
'--interesting-section',
action='store_true',
help='whether to measure just the interesting section or '
'the whole test harness. Limiting to only the '
'interesting section does not work on Release since '
'the delimiters are optimized out')
parser.add_argument(
'--pages',
help='selects some pages to be rendered. Page numbers '
'are 0-based. "--pages A" will render only page A. '
'"--pages A-B" will render pages A to B '
'(inclusive).')
parser.add_argument(
'--num-workers',
default=multiprocessing.cpu_count(),
type=int,
help='run NUM_WORKERS jobs in parallel')
parser.add_argument(
'--output-dir', help='directory to write the profile data output files')
parser.add_argument(
'--png-dir',
default=None,
      help='outputs pngs to the specified directory so they can '
      'be compared against a generated static html page. Will '
      'affect performance measurements.')
parser.add_argument(
'--png-threshold',
default=0.0,
type=float,
help='Requires --png-dir. Threshold above which a png '
'is considered to have changed.')
parser.add_argument(
'--threshold-significant',
default=0.02,
type=float,
help='variations in performance above this factor are '
'considered significant')
parser.add_argument(
'--machine-readable',
action='store_true',
help='whether to get output for machines. If enabled the '
'output will be a json with the format specified in '
'ComparisonConclusions.GetOutputDict(). Default is '
'human-readable.')
parser.add_argument(
'--case-order',
default=None,
help='what key to use when sorting test cases in the '
'output. Accepted values are "after", "before", '
'"ratio" and "rating". Default is sorting by test '
'case path.')
args = parser.parse_args()
  # Always start at the pdfium src dir, which is assumed to be two levels above
# this script.
pdfium_src_dir = os.path.join(
os.path.dirname(__file__), os.path.pardir, os.path.pardir)
os.chdir(pdfium_src_dir)
git = GitHelper()
if args.branch_after and not args.branch_before:
PrintErr('--branch-after requires --branch-before to be specified.')
return 1
if args.branch_after and not git.BranchExists(args.branch_after):
PrintErr('Branch "%s" does not exist' % args.branch_after)
return 1
if args.branch_before and not git.BranchExists(args.branch_before):
PrintErr('Branch "%s" does not exist' % args.branch_before)
return 1
if args.output_dir:
args.output_dir = os.path.expanduser(args.output_dir)
if not os.path.isdir(args.output_dir):
PrintErr('"%s" is not a directory' % args.output_dir)
return 1
if args.png_dir:
args.png_dir = os.path.expanduser(args.png_dir)
if not os.path.isdir(args.png_dir):
PrintErr('"%s" is not a directory' % args.png_dir)
return 1
if args.threshold_significant <= 0.0:
PrintErr('--threshold-significant should receive a positive float')
return 1
if args.png_threshold:
if not args.png_dir:
PrintErr('--png-threshold requires --png-dir to be specified.')
return 1
if args.png_threshold <= 0.0:
PrintErr('--png-threshold should receive a positive float')
return 1
if args.pages:
if not re.match(r'^\d+(-\d+)?$', args.pages):
PrintErr('Supported formats for --pages are "--pages 7" and '
'"--pages 3-6"')
return 1
run = CompareRun(args)
return run.Run()
if __name__ == '__main__':
sys.exit(main())
|
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import math
import numpy as np
from collections import defaultdict
import ray
from ray.rllib.evaluation.metrics import LEARNER_STATS_KEY
from ray.rllib.policy.tf_policy import TFPolicy
from ray.rllib.optimizers.policy_optimizer import PolicyOptimizer
from ray.rllib.optimizers.multi_gpu_impl import LocalSyncParallelOptimizer
from ray.rllib.optimizers.rollout import collect_samples
from ray.rllib.utils.annotations import override
from ray.rllib.utils.timer import TimerStat
from ray.rllib.policy.sample_batch import SampleBatch, DEFAULT_POLICY_ID, \
MultiAgentBatch
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
logger = logging.getLogger(__name__)
class LocalMultiGPUOptimizer(PolicyOptimizer):
"""A synchronous optimizer that uses multiple local GPUs.
Samples are pulled synchronously from multiple remote workers,
concatenated, and then split across the memory of multiple local GPUs.
A number of SGD passes are then taken over the in-memory data. For more
details, see `multi_gpu_impl.LocalSyncParallelOptimizer`.
    This optimizer is TensorFlow-specific and requires the underlying
    Policy to be a TFPolicy instance that supports `.copy()`.
Note that all replicas of the TFPolicy will merge their
extra_compute_grad and apply_grad feed_dicts and fetches. This
may result in unexpected behavior.
"""
def __init__(self,
workers,
sgd_batch_size=128,
num_sgd_iter=10,
sample_batch_size=200,
num_envs_per_worker=1,
train_batch_size=1024,
num_gpus=0,
standardize_fields=[],
shuffle_sequences=True):
"""Initialize a synchronous multi-gpu optimizer.
Arguments:
workers (WorkerSet): all workers
sgd_batch_size (int): SGD minibatch size within train batch size
num_sgd_iter (int): number of passes to learn on per train batch
sample_batch_size (int): size of batches to sample from workers
num_envs_per_worker (int): num envs in each rollout worker
train_batch_size (int): size of batches to learn on
num_gpus (int): number of GPUs to use for data-parallel SGD
standardize_fields (list): list of fields in the training batch
to normalize
shuffle_sequences (bool): whether to shuffle the train batch prior
to SGD to break up correlations
"""
PolicyOptimizer.__init__(self, workers)
self.batch_size = sgd_batch_size
self.num_sgd_iter = num_sgd_iter
self.num_envs_per_worker = num_envs_per_worker
self.sample_batch_size = sample_batch_size
self.train_batch_size = train_batch_size
self.shuffle_sequences = shuffle_sequences
if not num_gpus:
self.devices = ["/cpu:0"]
else:
self.devices = [
"/gpu:{}".format(i) for i in range(int(math.ceil(num_gpus)))
]
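        # Round the SGD minibatch size down to a multiple of the device count
        # so each tower receives an equal share of every minibatch.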
self.batch_size = int(sgd_batch_size / len(self.devices)) * len(
self.devices)
assert self.batch_size % len(self.devices) == 0
assert self.batch_size >= len(self.devices), "batch size too small"
self.per_device_batch_size = int(self.batch_size / len(self.devices))
self.sample_timer = TimerStat()
self.load_timer = TimerStat()
self.grad_timer = TimerStat()
self.update_weights_timer = TimerStat()
self.standardize_fields = standardize_fields
logger.info("LocalMultiGPUOptimizer devices {}".format(self.devices))
self.policies = dict(self.workers.local_worker()
.foreach_trainable_policy(lambda p, i: (i, p)))
logger.debug("Policies to train: {}".format(self.policies))
for policy_id, policy in self.policies.items():
if not isinstance(policy, TFPolicy):
raise ValueError(
"Only TF graph policies are supported with multi-GPU. "
"Try setting `simple_optimizer=True` instead.")
# per-GPU graph copies created below must share vars with the policy
# reuse is set to AUTO_REUSE because Adam nodes are created after
# all of the device copies are created.
self.optimizers = {}
with self.workers.local_worker().tf_sess.graph.as_default():
with self.workers.local_worker().tf_sess.as_default():
for policy_id, policy in self.policies.items():
with tf.variable_scope(policy_id, reuse=tf.AUTO_REUSE):
if policy._state_inputs:
rnn_inputs = policy._state_inputs + [
policy._seq_lens
]
else:
rnn_inputs = []
self.optimizers[policy_id] = (
LocalSyncParallelOptimizer(
policy._optimizer, self.devices,
[v
for _, v in policy._loss_inputs], rnn_inputs,
self.per_device_batch_size, policy.copy))
self.sess = self.workers.local_worker().tf_sess
self.sess.run(tf.global_variables_initializer())
@override(PolicyOptimizer)
def step(self):
with self.update_weights_timer:
if self.workers.remote_workers():
weights = ray.put(self.workers.local_worker().get_weights())
for e in self.workers.remote_workers():
e.set_weights.remote(weights)
with self.sample_timer:
if self.workers.remote_workers():
samples = collect_samples(
self.workers.remote_workers(), self.sample_batch_size,
self.num_envs_per_worker, self.train_batch_size)
if samples.count > self.train_batch_size * 2:
logger.info(
"Collected more training samples than expected "
"(actual={}, train_batch_size={}). ".format(
samples.count, self.train_batch_size) +
"This may be because you have many workers or "
"long episodes in 'complete_episodes' batch mode.")
else:
samples = []
while sum(s.count for s in samples) < self.train_batch_size:
samples.append(self.workers.local_worker().sample())
samples = SampleBatch.concat_samples(samples)
# Handle everything as if multiagent
if isinstance(samples, SampleBatch):
samples = MultiAgentBatch({
DEFAULT_POLICY_ID: samples
}, samples.count)
for policy_id, policy in self.policies.items():
if policy_id not in samples.policy_batches:
continue
batch = samples.policy_batches[policy_id]
for field in self.standardize_fields:
value = batch[field]
standardized = (value - value.mean()) / max(1e-4, value.std())
batch[field] = standardized
num_loaded_tuples = {}
with self.load_timer:
for policy_id, batch in samples.policy_batches.items():
if policy_id not in self.policies:
continue
policy = self.policies[policy_id]
policy._debug_vars()
tuples = policy._get_loss_inputs_dict(
batch, shuffle=self.shuffle_sequences)
data_keys = [ph for _, ph in policy._loss_inputs]
if policy._state_inputs:
state_keys = policy._state_inputs + [policy._seq_lens]
else:
state_keys = []
num_loaded_tuples[policy_id] = (
self.optimizers[policy_id].load_data(
self.sess, [tuples[k] for k in data_keys],
[tuples[k] for k in state_keys]))
fetches = {}
with self.grad_timer:
for policy_id, tuples_per_device in num_loaded_tuples.items():
optimizer = self.optimizers[policy_id]
num_batches = max(
1,
int(tuples_per_device) // int(self.per_device_batch_size))
logger.debug("== sgd epochs for {} ==".format(policy_id))
for i in range(self.num_sgd_iter):
iter_extra_fetches = defaultdict(list)
permutation = np.random.permutation(num_batches)
for batch_index in range(num_batches):
batch_fetches = optimizer.optimize(
self.sess, permutation[batch_index] *
self.per_device_batch_size)
for k, v in batch_fetches[LEARNER_STATS_KEY].items():
iter_extra_fetches[k].append(v)
logger.debug("{} {}".format(i,
_averaged(iter_extra_fetches)))
fetches[policy_id] = _averaged(iter_extra_fetches)
self.num_steps_sampled += samples.count
self.num_steps_trained += tuples_per_device * len(self.devices)
self.learner_stats = fetches
return fetches
@override(PolicyOptimizer)
def stats(self):
return dict(
PolicyOptimizer.stats(self), **{
"sample_time_ms": round(1000 * self.sample_timer.mean, 3),
"load_time_ms": round(1000 * self.load_timer.mean, 3),
"grad_time_ms": round(1000 * self.grad_timer.mean, 3),
"update_time_ms": round(1000 * self.update_weights_timer.mean,
3),
"learner": self.learner_stats,
})
def _averaged(kv):
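    # Averages each key's list of per-batch values, skipping keys whose values
    # are None or nested dicts (judged by the first entry).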
out = {}
for k, v in kv.items():
if v[0] is not None and not isinstance(v[0], dict):
out[k] = np.mean(v)
return out
|
|
# Copyright (c) 2014 Hitachi Data Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
iSCSI Cinder Volume driver for Hitachi Unified Storage (HUS-HNAS) platform.
"""
from xml.etree import ElementTree as ETree
from oslo.config import cfg
from cinder import exception
from cinder.i18n import _
from cinder.openstack.common import excutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import units
from cinder.volume import driver
from cinder.volume.drivers.hds.hnas_backend import HnasBackend
from cinder.volume import utils
HDS_HNAS_ISCSI_VERSION = '1.0.0'
LOG = logging.getLogger(__name__)
iSCSI_OPTS = [
cfg.StrOpt('hds_hnas_iscsi_config_file',
default='/opt/hds/hnas/cinder_iscsi_conf.xml',
help='Configuration file for HDS iSCSI cinder plugin')]
CONF = cfg.CONF
CONF.register_opts(iSCSI_OPTS)
HNAS_DEFAULT_CONFIG = {'hnas_cmd': 'ssc', 'chap_enabled': 'True'}
def factory_bend(type):
return HnasBackend()
def _loc_info(loc):
"""Parse info from location string."""
LOG.info("Parse_loc: %s" % loc)
info = {}
tup = loc.split(',')
if len(tup) < 5:
info['id_lu'] = tup[0].split('.')
return info
info['id_lu'] = tup[2].split('.')
info['tgt'] = tup
return info
def _xml_read(root, element, check=None):
"""Read an xml element."""
try:
val = root.findtext(element)
LOG.info(_("%(element)s: %(val)s"),
{'element': element,
'val': val if element != 'password' else '***'})
if val:
return val.strip()
if check:
raise exception.ParameterNotFound(param=element)
return None
except ETree.ParseError:
if check:
with excutils.save_and_reraise_exception():
LOG.error(_("XML exception reading parameter: %s") % element)
else:
LOG.info(_("XML exception reading parameter: %s") % element)
return None
def _read_config(xml_config_file):
"""Read hds driver specific xml config file."""
try:
root = ETree.parse(xml_config_file).getroot()
except Exception:
raise exception.NotFound(message='config file not found: '
+ xml_config_file)
# mandatory parameters
config = {}
arg_prereqs = ['mgmt_ip0', 'username', 'password']
for req in arg_prereqs:
config[req] = _xml_read(root, req, 'check')
# optional parameters
for opt in ['hnas_cmd', 'chap_enabled']:
config[opt] = _xml_read(root, opt) or\
HNAS_DEFAULT_CONFIG[opt]
config['hdp'] = {}
config['services'] = {}
# min one needed
for svc in ['svc_0', 'svc_1', 'svc_2', 'svc_3']:
if _xml_read(root, svc) is None:
continue
service = {'label': svc}
        # none of these are optional
for arg in ['volume_type', 'hdp', 'iscsi_ip']:
service[arg] = _xml_read(root, svc + '/' + arg, 'check')
config['services'][service['volume_type']] = service
config['hdp'][service['hdp']] = service['hdp']
# at least one service required!
    if not config['services']:
raise exception.ParameterNotFound(param="No service found")
return config
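# Illustrative sketch of the XML layout _read_config() expects (tag values are
# placeholders; the root tag name itself is not checked):
#
#   <config>
#     <mgmt_ip0>10.0.0.1</mgmt_ip0>
#     <username>user</username>
#     <password>secret</password>
#     <chap_enabled>True</chap_enabled>
#     <svc_0>
#       <volume_type>default</volume_type>
#       <hdp>fs01</hdp>
#       <iscsi_ip>10.0.0.2</iscsi_ip>
#     </svc_0>
#   </config>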
class HDSISCSIDriver(driver.ISCSIDriver):
"""HDS HNAS volume driver."""
def __init__(self, *args, **kwargs):
"""Initialize, read different config parameters."""
super(HDSISCSIDriver, self).__init__(*args, **kwargs)
self.driver_stats = {}
self.context = {}
self.configuration.append_config_values(iSCSI_OPTS)
self.config = _read_config(
self.configuration.hds_hnas_iscsi_config_file)
self.type = 'HNAS'
self.platform = self.type.lower()
LOG.info(_("Backend type: %s") % self.type)
self.bend = factory_bend(self.type)
def _array_info_get(self):
"""Get array parameters."""
out = self.bend.get_version(self.config['hnas_cmd'],
HDS_HNAS_ISCSI_VERSION,
self.config['mgmt_ip0'],
self.config['username'],
self.config['password'])
inf = out.split()
return inf[1], 'hnas_' + inf[1], inf[6]
def _get_iscsi_info(self):
"""Validate array iscsi parameters."""
out = self.bend.get_iscsi_info(self.config['hnas_cmd'],
self.config['mgmt_ip0'],
self.config['username'],
self.config['password'])
lines = out.split('\n')
# dict based on iSCSI portal ip addresses
conf = {}
for line in lines:
# only record up links
if 'CTL' in line and 'Up' in line:
inf = line.split()
(ctl, port, ip, ipp) = (inf[1], inf[3], inf[5], inf[7])
conf[ip] = {}
conf[ip]['ctl'] = ctl
conf[ip]['port'] = port
conf[ip]['iscsi_port'] = ipp
msg = ('portal: %(ip)s:%(ipp)s, CTL: %(ctl)s, port: %(port)s')
LOG.debug(msg
% {'ip': ip,
'ipp': ipp,
'ctl': ctl,
'port': port})
return conf
def _get_service(self, volume):
"""Get the available service parameters for a given volume using
its type.
:param volume: dictionary volume reference
"""
label = None
if volume['volume_type']:
label = volume['volume_type']['name']
label = label or 'default'
if label not in self.config['services'].keys():
# default works if no match is found
label = 'default'
LOG.info(_("Using default: instead of %s") % label)
LOG.info(_("Available services: %s")
% self.config['services'].keys())
if label in self.config['services'].keys():
svc = self.config['services'][label]
# HNAS - one time lookup
# see if the client supports CHAP authentication and if
# iscsi_secret has already been set, retrieve the secret if
# available, otherwise generate and store
if self.config['chap_enabled'] == 'True':
# it may not exist, create and set secret
if 'iscsi_secret' not in svc:
LOG.info(_("Retrieving secret for service: %s")
% label)
out = self.bend.get_targetsecret(self.config['hnas_cmd'],
self.config['mgmt_ip0'],
self.config['username'],
self.config['password'],
'cinder-' + label,
svc['hdp'])
svc['iscsi_secret'] = out
if svc['iscsi_secret'] == "":
svc['iscsi_secret'] = utils.generate_password()[0:15]
self.bend.set_targetsecret(self.config['hnas_cmd'],
self.config['mgmt_ip0'],
self.config['username'],
self.config['password'],
svc['iscsi_target'],
svc['hdp'],
svc['iscsi_secret'])
LOG.info("Set tgt CHAP secret for service: %s"
% (label))
else:
# We set blank password when the client does not
# support CHAP. Later on, if the client tries to create a new
                # target that does not exist in the backend, we check for this
# value and use a temporary dummy password.
if 'iscsi_secret' not in svc:
                    # Warn only the first time
LOG.info("CHAP authentication disabled")
svc['iscsi_secret'] = ""
if 'iscsi_target' not in svc:
LOG.info(_("Retrieving target for service: %s") % label)
out = self.bend.get_targetiqn(self.config['hnas_cmd'],
self.config['mgmt_ip0'],
self.config['username'],
self.config['password'],
'cinder-' + label,
svc['hdp'],
svc['iscsi_secret'])
svc['iscsi_target'] = out
self.config['services'][label] = svc
service = (svc['iscsi_ip'], svc['iscsi_port'], svc['ctl'],
svc['port'], svc['hdp'], svc['iscsi_target'],
svc['iscsi_secret'])
else:
LOG.info(_("Available services: %s")
% self.config['services'].keys())
LOG.error(_("No configuration found for service: %s")
% label)
raise exception.ParameterNotFound(param=label)
return service
def _get_stats(self):
"""Get HDP stats from HNAS."""
total_cap = 0
total_used = 0
out = self.bend.get_hdp_info(self.config['hnas_cmd'],
self.config['mgmt_ip0'],
self.config['username'],
self.config['password'])
for line in out.split('\n'):
if 'HDP' in line:
(hdp, size, _ign, used) = line.split()[1:5] # in MB
LOG.debug("stats: looking for: %s", hdp)
if int(hdp) >= units.Ki: # HNAS fsid
hdp = line.split()[11]
if hdp in self.config['hdp'].keys():
total_cap += int(size)
total_used += int(used)
LOG.info("stats: total: %d used: %d" % (total_cap, total_used))
hnas_stat = {}
hnas_stat['total_capacity_gb'] = int(total_cap / units.Ki) # in GB
hnas_stat['free_capacity_gb'] = \
int((total_cap - total_used) / units.Ki)
be_name = self.configuration.safe_get('volume_backend_name')
hnas_stat["volume_backend_name"] = be_name or 'HDSISCSIDriver'
hnas_stat["vendor_name"] = 'HDS'
hnas_stat["driver_version"] = HDS_HNAS_ISCSI_VERSION
hnas_stat["storage_protocol"] = 'iSCSI'
hnas_stat['QoS_support'] = False
hnas_stat['reserved_percentage'] = 0
LOG.info(_("stats: stats: %s") % hnas_stat)
return hnas_stat
def _get_hdp_list(self):
"""Get HDPs from HNAS."""
out = self.bend.get_hdp_info(self.config['hnas_cmd'],
self.config['mgmt_ip0'],
self.config['username'],
self.config['password'])
hdp_list = []
for line in out.split('\n'):
if 'HDP' in line:
inf = line.split()
if int(inf[1]) >= units.Ki:
# HDP fsids start at units.Ki (1024)
hdp_list.append(inf[11])
else:
# HDP pools are 2-digits max
hdp_list.extend(inf[1:2])
# returns a list of HDP IDs
LOG.info(_("HDP list: %s") % hdp_list)
return hdp_list
def _check_hdp_list(self):
"""Verify HDPs in HNAS array.
Verify that all HDPs specified in the configuration files actually
        exist on the storage.
"""
hdpl = self._get_hdp_list()
lst = self.config['hdp'].keys()
for hdp in lst:
if hdp not in hdpl:
LOG.error(_("HDP not found: %s") % hdp)
err = "HDP not found: " + hdp
raise exception.ParameterNotFound(param=err)
# status, verify corresponding status is Normal
def _id_to_vol(self, volume_id):
"""Given the volume id, retrieve the volume object from database.
:param volume_id: volume id string
"""
vol = self.db.volume_get(self.context, volume_id)
return vol
def _update_vol_location(self, volume_id, loc):
"""Update the provider location.
:param volume_id: volume id string
:param loc: string provider location value
"""
update = {'provider_location': loc}
self.db.volume_update(self.context, volume_id, update)
def check_for_setup_error(self):
"""Returns an error if prerequisites aren't met."""
pass
def do_setup(self, context):
"""Setup and verify HDS HNAS storage connection."""
self.context = context
(self.arid, self.hnas_name, self.lumax) = self._array_info_get()
self._check_hdp_list()
iscsi_info = self._get_iscsi_info()
LOG.info(_("do_setup: %s") % iscsi_info)
for svc in self.config['services'].keys():
svc_ip = self.config['services'][svc]['iscsi_ip']
if svc_ip in iscsi_info.keys():
LOG.info(_("iSCSI portal found for service: %s") % svc_ip)
self.config['services'][svc]['port'] = \
iscsi_info[svc_ip]['port']
self.config['services'][svc]['ctl'] = iscsi_info[svc_ip]['ctl']
self.config['services'][svc]['iscsi_port'] = \
iscsi_info[svc_ip]['iscsi_port']
else: # config iscsi address not found on device!
LOG.error(_("iSCSI portal not found for service: %s") % svc_ip)
raise exception.ParameterNotFound(param=svc_ip)
def ensure_export(self, context, volume):
pass
def create_export(self, context, volume):
"""Create an export. Moved to initialize_connection.
:param context:
:param volume: volume reference
"""
name = volume['name']
LOG.debug("create_export %(name)s" % {'name': name})
pass
def remove_export(self, context, volume):
"""Disconnect a volume from an attached instance.
:param context: context
        :param volume: dictionary volume reference
"""
provider = volume['provider_location']
name = volume['name']
LOG.debug("remove_export provider %(provider)s on %(name)s"
% {'provider': provider,
'name': name})
pass
def create_volume(self, volume):
"""Create a LU on HNAS.
        :param volume: dictionary volume reference
"""
service = self._get_service(volume)
(_ip, _ipp, _ctl, _port, hdp, target, secret) = service
out = self.bend.create_lu(self.config['hnas_cmd'],
self.config['mgmt_ip0'],
self.config['username'],
self.config['password'],
hdp,
'%s' % (int(volume['size']) * units.Ki),
volume['name'])
LOG.info(_("create_volume: create_lu returns %s") % out)
lun = self.arid + '.' + out.split()[1]
sz = int(out.split()[5])
# Example: 92210013.volume-44d7e29b-2aa4-4606-8bc4-9601528149fd
LOG.info(_("LUN %(lun)s of size %(sz)s MB is created.")
% {'lun': lun, 'sz': sz})
return {'provider_location': lun}
def create_cloned_volume(self, dst, src):
"""Create a clone of a volume.
        :param dst: dictionary destination volume reference
        :param src: dictionary source volume reference
"""
if src['size'] != dst['size']:
msg = 'clone volume size mismatch'
raise exception.VolumeBackendAPIException(data=msg)
service = self._get_service(dst)
(_ip, _ipp, _ctl, _port, hdp, target, secret) = service
size = int(src['size']) * units.Ki
source_vol = self._id_to_vol(src['id'])
(arid, slun) = _loc_info(source_vol['provider_location'])['id_lu']
out = self.bend.create_dup(self.config['hnas_cmd'],
self.config['mgmt_ip0'],
self.config['username'],
self.config['password'],
slun, hdp, '%s' % size,
dst['name'])
lun = self.arid + '.' + out.split()[1]
size = int(out.split()[5])
LOG.debug("LUN %(lun)s of size %(size)s MB is cloned."
% {'lun': lun,
'size': size})
return {'provider_location': lun}
def extend_volume(self, volume, new_size):
"""Extend an existing volume.
:param volume: dictionary volume reference
:param new_size: int size in GB to extend
"""
service = self._get_service(volume)
(_ip, _ipp, _ctl, _port, hdp, target, secret) = service
(arid, lun) = _loc_info(volume['provider_location'])['id_lu']
self.bend.extend_vol(self.config['hnas_cmd'],
self.config['mgmt_ip0'],
self.config['username'],
self.config['password'],
hdp, lun,
'%s' % (new_size * units.Ki),
volume['name'])
LOG.info(_("LUN %(lun)s extended to %(size)s GB.")
% {'lun': lun, 'size': new_size})
def delete_volume(self, volume):
"""Delete an LU on HNAS.
:param volume: dictionary volume reference
"""
prov_loc = volume['provider_location']
if prov_loc is None:
LOG.error("delete_vol: provider location empty.")
return
info = _loc_info(prov_loc)
(arid, lun) = info['id_lu']
if 'tgt' in info.keys(): # connected?
LOG.info("delete lun loc %s" % info['tgt'])
# loc = id.lun
(_portal, iqn, loc, ctl, port, hlun) = info['tgt']
self.bend.del_iscsi_conn(self.config['hnas_cmd'],
self.config['mgmt_ip0'],
self.config['username'],
self.config['password'],
ctl, iqn, hlun)
name = self.hnas_name
LOG.debug("delete lun %(lun)s on %(name)s"
% {'lun': lun,
'name': name})
service = self._get_service(volume)
(_ip, _ipp, _ctl, _port, hdp, target, secret) = service
self.bend.delete_lu(self.config['hnas_cmd'],
self.config['mgmt_ip0'],
self.config['username'],
self.config['password'],
hdp, lun)
def initialize_connection(self, volume, connector):
"""Map the created volume to connector['initiator'].
:param volume: dictionary volume reference
:param connector: dictionary connector reference
"""
LOG.info("initialize volume %s connector %s" % (volume, connector))
        # connector contains ip, host, wwnns, initiator, wwpns
service = self._get_service(volume)
(ip, ipp, ctl, port, _hdp, target, secret) = service
info = _loc_info(volume['provider_location'])
if 'tgt' in info.keys(): # spurious repeat connection
LOG.debug("initiate_conn: tgt already set %s" % info['tgt'])
(arid, lun) = info['id_lu']
loc = arid + '.' + lun
# sps, use target if provided
iqn = target
out = self.bend.add_iscsi_conn(self.config['hnas_cmd'],
self.config['mgmt_ip0'],
self.config['username'],
self.config['password'],
lun, _hdp, port, iqn,
connector['initiator'])
hnas_portal = ip + ':' + ipp
# sps need hlun, fulliqn
hlun = out.split()[1]
fulliqn = out.split()[13]
tgt = hnas_portal + ',' + iqn + ',' + loc + ',' + ctl + ','
tgt += port + ',' + hlun
LOG.info("initiate: connection %s" % tgt)
properties = {}
properties['provider_location'] = tgt
self._update_vol_location(volume['id'], tgt)
properties['target_discovered'] = False
properties['target_portal'] = hnas_portal
properties['target_iqn'] = fulliqn
properties['target_lun'] = hlun
properties['volume_id'] = volume['id']
properties['auth_username'] = connector['initiator']
if self.config['chap_enabled'] == 'True':
properties['auth_method'] = 'CHAP'
properties['auth_password'] = secret
return {'driver_volume_type': 'iscsi', 'data': properties}
def terminate_connection(self, volume, connector, **kwargs):
"""Terminate a connection to a volume.
:param volume: dictionary volume reference
:param connector: dictionary connector reference
"""
info = _loc_info(volume['provider_location'])
if 'tgt' not in info.keys(): # spurious disconnection
LOG.warn("terminate_conn: provider location empty.")
return
(arid, lun) = info['id_lu']
(_portal, iqn, loc, ctl, port, hlun) = info['tgt']
LOG.info("terminate: connection %s" % volume['provider_location'])
self.bend.del_iscsi_conn(self.config['hnas_cmd'],
self.config['mgmt_ip0'],
self.config['username'],
self.config['password'],
ctl, iqn, hlun)
self._update_vol_location(volume['id'], loc)
return {'provider_location': loc}
def create_volume_from_snapshot(self, volume, snapshot):
"""Create a volume from a snapshot.
:param volume: dictionary volume reference
:param snapshot: dictionary snapshot reference
"""
size = int(snapshot['volume_size']) * units.Ki
(arid, slun) = _loc_info(snapshot['provider_location'])['id_lu']
service = self._get_service(volume)
(_ip, _ipp, _ctl, _port, hdp, target, secret) = service
out = self.bend.create_dup(self.config['hnas_cmd'],
self.config['mgmt_ip0'],
self.config['username'],
self.config['password'],
slun, hdp, '%s' % (size),
volume['name'])
lun = self.arid + '.' + out.split()[1]
sz = int(out.split()[5])
LOG.debug("LUN %(lun)s of size %(sz)s MB is created from snapshot."
% {'lun': lun, 'sz': sz})
return {'provider_location': lun}
def create_snapshot(self, snapshot):
"""Create a snapshot.
:param snapshot: dictionary snapshot reference
"""
source_vol = self._id_to_vol(snapshot['volume_id'])
service = self._get_service(source_vol)
(_ip, _ipp, _ctl, _port, hdp, target, secret) = service
size = int(snapshot['volume_size']) * units.Ki
(arid, slun) = _loc_info(source_vol['provider_location'])['id_lu']
out = self.bend.create_dup(self.config['hnas_cmd'],
self.config['mgmt_ip0'],
self.config['username'],
self.config['password'],
slun, hdp,
'%s' % (size),
snapshot['name'])
lun = self.arid + '.' + out.split()[1]
size = int(out.split()[5])
LOG.debug("LUN %(lun)s of size %(size)s MB is created."
% {'lun': lun, 'size': size})
return {'provider_location': lun}
def delete_snapshot(self, snapshot):
"""Delete a snapshot.
:param snapshot: dictionary snapshot reference
"""
loc = snapshot['provider_location']
# to take care of spurious input
if loc is None:
# which could cause exception.
return
(arid, lun) = loc.split('.')
source_vol = self._id_to_vol(snapshot['volume_id'])
service = self._get_service(source_vol)
(_ip, _ipp, _ctl, _port, hdp, target, secret) = service
myid = self.arid
if arid != myid:
LOG.error(_('Array mismatch %(myid)s vs %(arid)s')
% {'myid': myid,
'arid': arid})
msg = 'Array id mismatch in delete snapshot'
raise exception.VolumeBackendAPIException(data=msg)
self.bend.delete_lu(self.config['hnas_cmd'],
self.config['mgmt_ip0'],
self.config['username'],
self.config['password'],
hdp, lun)
LOG.debug("LUN %s is deleted.", lun)
return
def get_volume_stats(self, refresh=False):
"""Get volume stats. If 'refresh', run update the stats first."""
if refresh:
self.driver_stats = self._get_stats()
return self.driver_stats
|
|
# -*- coding:utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import pymongo
import json
import time
import os
import gettext
import re
# put your localpath here
localpath = 'var/www/antchain.org/web/'
zh_trans = gettext.translation('lang', localpath+'locale', languages=['zh_CN'])
en_trans = gettext.translation('lang', localpath+'locale', languages=['en_US'])
#from block import *
import block
import tx
import asset
import ads
import rank
import api
#DEFINE
BLOCK_PER_PAGE = 50
TX_PER_PAGE = 50
ADS_PER_PAGE = 50
ASSET_PER_PAGE = 20
ASSET_ADS_PER_PAGE = 50
def ENUM(**enums):
return type('Enum', (), enums)
CoinState = ENUM( Unconfirmed=0, Confirmed=1<<0, Spent=1<<1, Vote=1<<2, Claimed=1<<3, Locked=1<<4, Frozen=1<<5, WatchOnly=1<<6 )
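# ENUM() builds a bare class whose attributes carry the given values, so e.g.
# CoinState.Confirmed == 1 and CoinState.Spent == 2; the power-of-two values
# allow the states to be combined as bit flags.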
##################################################
###
### functions
###
##################################################
def GetLogo() :
file_logo = open(localpath+'/logo.html')
try:
html_logo = file_logo.read()
finally:
file_logo.close()
return html_logo
def GetLocalTime(times) :
x = time.localtime(times)
return time.strftime('%Y-%m-%d %H:%M:%S',x)
def GetLanguageByRequest() :
supported_languages = ["zh_CN", "zh", "en"]
lang = request.accept_languages.best_match(supported_languages)
if lang == "zh_CN" or lang == "zh" :
return "zh-CN"
else :
return "en"
def InstallLanguages() :
lang = GetLanguageByRequest()
if lang == "zh-CN" :
zh_trans.install()
else :
en_trans.install()
def GetHeader(name) :
InstallLanguages()
html = '<html>\n'
html = html + '<head>\n'
html = html + '<meta charset="utf-8">\n'
html = html + '<meta http-equiv="X-UA-Compatible" content="IE=edge">\n'
html = html + '<meta name="viewport" content="width=device-width, initial-scale=1">\n'
html = html + '<title>'+ _("Antshares Blockchain Explorer") +'</title>\n'
html = html + '<link rel="shortcut icon" href="/static/images/logo.png" media="screen" />\n'
html = html + '<link rel="stylesheet" href="/static/css/fonts.css">\n'
html = html + '<link rel="stylesheet" href="/static/css/normalize.css">'
html = html + '<link rel="stylesheet" href="/static/css/milligram.min.css">'
html = html + '<style type="text/css">\n'
html = html + 'html,body,td,th{height: 100%;font-size:12px;font-family:"Roboto"}\n'
html = html + 'body{}\n'
html = html + '.column {text-align:left}\n'
html = html + 'td {text-overflow:ellipsis; white-space:nowrap; overflow:hidden;}\n'
html = html + 'a{text-decoration:none;}\n'
html = html + '</style>\n'
html = html + '<script>\n'
html = html + ' var _hmt = _hmt || [];\n'
html = html + ' (function() {\n'
html = html + ' var hm = document.createElement("script");\n'
html = html + ' hm.src = "https://hm.baidu.com/hm.js?8a4cd1b36cec648c82133995fa7f0f39";\n'
html = html + ' var s = document.getElementsByTagName("script")[0]; \n'
html = html + ' s.parentNode.insertBefore(hm, s);\n'
html = html + ' })();\n'
html = html + '</script>\n'
html = html + '</head>\n'
html = html + '<body>\n'
html = html + '<div align="center">\n'
html = html + '[ '+ _("Antshares Blockchain Explorer") +' antchain.org ]<br/>\n'
if name == "index" :
html = html + '<a href="/"><b>' + _("Index") + '</b></a>  \n'
else :
html = html + '<a href="/">' + _("Index") + '</a>  \n'
if name == "block" :
html = html + '<a href="/block/"><b>' + _("Block") + '</b></a>  \n'
else :
html = html + '<a href="/block/">' + _("Block") + '</a>  \n'
if name == "tx" :
html = html + '<a href="/tx/"><b>' + _("Transaction") + '</b></a>  \n'
else :
html = html + '<a href="/tx/">' + _("Transaction") + '</a>  \n'
if name == "address" :
html = html + '<a href="/address/"><b>' + _("Address") + '</b></a>  \n'
else :
html = html + '<a href="/address/">' + _("Address") + '</a>  \n'
if name == "asset" :
html = html + '<a href="/asset/"><b>' + _("Asset") + '</b></a>  \n'
else :
html = html + '<a href="/asset/">' + _("Asset") + '</a>  \n'
if name == "rank" :
html = html + '<a href="/rank/"><b>' + _("Rank") + '</b></a>  \n'
else :
html = html + '<a href="/rank/">' + _("Rank") + '</a>  \n'
if name == "api" :
html = html + '<a href="/api/"><b>' + "API" + '</b></a>  \n'
else :
html = html + '<a href="/api/">' + "API" + '</a>  \n'
html = html + '<br/><br/>\n'
html = html + '</div>\n'
html = html + '<div class="container">\n'
html = html + '<form action="/search" method="post">\n'
html = html + ' <fieldset>\n'
html = html + '<div class="row">\n'
html = html + '<div class="column column-30"></div>\n'
html = html + '<div class="column column-30">\n'
html = html + ' <input type="text" placeholder="' + _('height/address/hash/txid') + '" name="searchdata" id="searchdata">\n'
html = html + '</div>\n'
html = html + '<div class="column column-10"><input class="button" type="submit" value="'+ _('Search') +'"></div>\n'
html = html + '<div class="column column-30"></div>\n'
html = html + ' </fieldset>\n'
html = html + '</form>\n'
html = html + '<br/>\n'
html = html + '</div>\n'
html = html + '<div class="column column-30"></div>\n'
html = html + '</div>\n'
return html
def GetFooter() :
html = "<br/><hr>\n"
html = html + "<div align = 'center'>\n"
html = html + "<br/>antchain.org (c) 2016-2017\n"
html = html + "</div><br/>\n"
html = html + "</body>\n"
html = html + "</html>\n"
return html
def GetAssetName(txid) :
result = collection_txs.find_one({"txid":txid})
asset = result['asset']
return GetAssetNameByAsset(asset)
def GetAssetNameByAsset(asset) :
lang = GetLanguageByRequest()
for assetname in asset['name'] :
if assetname['lang'] == lang :
return assetname['name']
return asset['name'][0]['name']
def GetAssetByTxid(txid) :
result = collection_txs.find_one({"txid":txid})
asset = result['asset']
return asset
def GetAssetAmount(amount) :
if amount == "-0.00000001" :
amount = _("No limit")
return str(amount)
##################################################
###
### import
###
##################################################
from flask import Flask
from flask import request
from werkzeug.routing import BaseConverter
class RegexConverter(BaseConverter):
def __init__(self, map, *args):
self.map = map
self.regex = args[0]
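# Registering this converter lets the routes below match URLs against custom
# regexes, e.g. /tx/<regex("[a-zA-Z0-9]{64}"):txid> for 64-character txids.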
app = Flask(__name__)
app.url_map.converters['regex'] = RegexConverter
client = pymongo.MongoClient("localhost", 27017)
db = client.antchain_main
collection_blocks = db.blocks
collection_txs = db.txs
collection_coins = db.coins
collection_ads = db.ads
@app.route("/")
def index():
html = GetHeader("index")
html = html + GetLogo()
html = html + '<div name="block" align="center">'
html = html + '<br/><br/>'
html = html + '<h2>' + _("Block Information") + '</h2><a href="/block/">[' + _("More") + ']</a>'
html = html + block.GetblockInternal(1,20)
html = html + '</div>'
html = html + '<div name="tx" align="center">'
html = html + '<br/><br/>'
html = html + '<h2>'+ _("Transaction Information") +'</h2><a href="/tx/">[' + _("More") + ']</a>'
html = html + tx.GetTxInternal(None,1,20)
html = html + '</div>'
html = html + '<div name="address" align="center">'
html = html + '<br/><br/>'
html = html + '<h2>'+ _("Address Information") +'</h2><a href="/address/">[' + _("More") + ']</a>'
html = html + ads.GetAddressInternal(None,1,20)
html = html + '</div>'
html = html + GetFooter()
return html
##################################################
###
### search
###
##################################################
@app.route('/search', methods=['GET','POST'])
def Search():
if request.method == 'POST':
data = request.form['searchdata']
# find address
matchObj = re.match( '[A][a-zA-Z0-9]{33}', data)
if matchObj:
m = matchObj.group()
result = collection_ads.find_one({"address":m})
if result :
html = '<meta http-equiv="refresh" content="0;url=/address/' + m + '"> '
return html
# find block hash or txid
matchObj = re.match( '[a-zA-Z0-9]{64}', data )
if matchObj:
m = matchObj.group()
result = collection_txs.find_one({"txid":m})
if result :
html = '<meta http-equiv="refresh" content="0;url=/tx/' + m + '"> '
return html
result = collection_blocks.find_one({"hash":m})
if result :
html = '<meta http-equiv="refresh" content="0;url=/block/' + m + '"> '
return html
# find block height
matchObj = re.match( '[0-9]{1,12}', data )
if matchObj:
m = matchObj.group()
result = collection_blocks.find_one({"height":int(m)})
if result :
html = '<meta http-equiv="refresh" content="0;url=/block/' + str(int(m)) + '"> '
return html
# not found!
html = GetHeader("index")
html = html + '<div class="container">\n'
html = html + '<div class="row">\n'
html = html + data + ' Not Found.'
html = html + '</div>\n'
html = html + '</div>\n'
html = html + GetFooter()
return html
else :
html = GetHeader("index")
html = html + GetFooter()
return html
##################################################
###
### block
###
##################################################
@app.route('/block/')
def Getblock():
return block.GetblockPage(1)
@app.route('/block/page/<int:page>')
def GetblockPages(page):
return block.GetblockPage(page)
@app.route('/block/<blockhash>')
def GetblockByHash(blockhash):
return block.GetblockByHashInternal(blockhash)
@app.route('/block/<int:block_height>')
def GetblockByHeight(block_height):
return block.GetblockByHeightInternal(block_height)
##################################################
###
### tx
###
##################################################
@app.route('/tx/')
def GetTx():
return tx.GetTxPage(None,1)
@app.route('/tx/page/<int:page>')
def GetTxPages(page):
return tx.GetTxPage(None,page)
# TransactionType
@app.route('/tx/<regex("[a-zA-Z]{10,30}"):txtype>')
def GetTxByType(txtype):
return tx.GetTxPage(txtype,1)
@app.route('/tx/<regex("[a-zA-Z]{10,30}"):txtype>/page/<int:page>')
def GetTxByTypePages(txtype,page):
return tx.GetTxPage(txtype,page)
@app.route('/tx/<regex("[a-zA-Z0-9]{64}"):txid>')
def GetTxByHash(txid):
return tx.GetTxByHashInternal(txid)
##################################################
###
### address
###
##################################################
@app.route('/address/')
def GetAds() :
return ads.GetAddressPage(None,1)
@app.route('/address/page/<int:page>')
def GetAdsPages(page) :
return ads.GetAddressPage(None,page)
@app.route('/address/<regex("[a-zA-Z0-9]{34}"):address>')
def GetAdsByAddress(address) :
return ads.GetAdsByAddressPagesInternal(address,None,1)
@app.route('/address/<regex("[a-zA-Z0-9]{34}"):address>/page/<int:page>')
def GetAdsByAddressPages(address,page) :
return ads.GetAdsByAddressPagesInternal(address,None,page)
@app.route('/address/<regex("[a-zA-Z0-9]{64}"):assetid>')
def GetAssetAds(assetid) :
return ads.GetAddressPage(assetid,1)
@app.route('/address/<regex("[a-zA-Z0-9]{64}"):assetid>/page/<int:page>')
def GetAssetAdsPages(assetid,page) :
return ads.GetAddressPage(assetid,page)
@app.route('/address/<regex("[a-zA-Z0-9]{34}"):address>/<regex("[a-zA-Z0-9]{64}"):assetid>')
def GetAdsAssetPages(address,assetid) :
return ads.GetAdsByAddressPagesInternal(address,assetid,1)
##################################################
###
### asset
###
##################################################
@app.route('/asset/')
def GetAsset() :
return asset.GetAssetPage(1)
@app.route('/asset/<assetid>')
def GetAssetByHash(assetid):
return asset.GetAssetByHashPagesInternal(assetid,1)
@app.route('/asset/<assetid>/page/<int:page>')
def GetAssetByHashPages(assetid,page):
return asset.GetAssetByHashPagesInternal(assetid,page)
##################################################
###
### rank
###
##################################################
@app.route('/rank/')
def GetRank() :
return rank.GetRankByHashInternal("c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b",100)
@app.route('/rank/<assetid>')
def GetRankByHash(assetid) :
return rank.GetRankByHashInternal(assetid,100)
##################################################
###
### api
###
##################################################
@app.route('/api/')
def GetApi() :
return api.GetApi()
@app.route('/api/v1/address/get_value/<regex("[a-zA-Z0-9]{34}"):address>')
def Api_V1_Address_Get_Value(address) :
return api.Api_V1_Address_Get_Value(address), {'content-type':'application/json'}
@app.route('/api/v1/block/get_current_height')
def Api_V1_Block_Get_Current_Height() :
return api.Api_V1_Block_Get_Current_Height(), {'content-type':'application/json'}
@app.route('/api/v1/block/get_current_block')
def Api_V1_Block_Get_Current_Block() :
return api.Api_V1_Block_Get_Current_Block(), {'content-type':'application/json'}
@app.route('/api/v1/block/get_block/<int:height>')
def Api_V1_Block_Get_Block_By_Height(height) :
return api.Api_V1_Block_Get_Block(height,None), {'content-type':'application/json'}
@app.route('/api/v1/block/get_block/<regex("[a-zA-Z0-9]{64}"):hash>')
def Api_V1_Block_Get_Block_By_Hash(hash) :
return api.Api_V1_Block_Get_Block(None,hash), {'content-type':'application/json'}
@app.route('/api/v1/tx/get_tx/<regex("[a-zA-Z0-9]{64}"):txid>')
def Api_V1_Tx_Get_Tx(txid) :
return api.Api_V1_Tx_Get_Tx(txid), {'content-type':'application/json'}
##################################################
###
### main
###
##################################################
if __name__ == "__main__":
app.run()
|
|
# This file is part of Tryton & Nereid. The COPYRIGHT file at the top level of
# this repository contains the full copyright notices and license terms.
import re
import os
import sys
import time
import unittest
import ConfigParser
from setuptools import setup, Command
class SQLiteTest(Command):
"""
Run the tests on SQLite
"""
description = "Run tests on SQLite"
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
if self.distribution.tests_require:
self.distribution.fetch_build_eggs(self.distribution.tests_require)
os.environ['TRYTOND_DATABASE_URI'] = 'sqlite://'
os.environ['DB_NAME'] = ':memory:'
from tests import suite
test_result = unittest.TextTestRunner(verbosity=3).run(suite())
if test_result.wasSuccessful():
sys.exit(0)
sys.exit(-1)
class PostgresTest(Command):
"""
Run the tests on Postgres.
"""
description = "Run tests on Postgresql"
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
if self.distribution.tests_require:
self.distribution.fetch_build_eggs(self.distribution.tests_require)
os.environ['TRYTOND_DATABASE_URI'] = 'postgresql://'
os.environ['DB_NAME'] = 'test_' + str(int(time.time()))
from tests import suite
test_result = unittest.TextTestRunner(verbosity=3).run(suite())
if test_result.wasSuccessful():
sys.exit(0)
sys.exit(-1)
class RunAudit(Command):
"""Audits source code using PyFlakes for following issues:
- Names which are used but not defined or used before they are defined.
- Names which are redefined without having been used.
"""
description = "Audit source code with PyFlakes"
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
import sys
try:
import pyflakes.scripts.pyflakes as flakes
except ImportError:
print "Audit requires PyFlakes installed in your system."
sys.exit(-1)
warns = 0
# Define top-level directories
        dirs = ('.',)
for dir in dirs:
for root, _, files in os.walk(dir):
if root.startswith(('./build')):
continue
for file in files:
if file != '__init__.py' and file.endswith('.py'):
warns += flakes.checkPath(os.path.join(root, file))
if warns > 0:
print "Audit finished with total %d warnings." % warns
else:
print "No problems found in sourcecode."
config = ConfigParser.ConfigParser()
config.readfp(open('trytond_nereid/tryton.cfg'))
info = dict(config.items('tryton'))
for key in ('depends', 'extras_depend', 'xml'):
if key in info:
info[key] = info[key].strip().splitlines()
major_version, minor_version, _ = info.get('version', '0.0.1').split('.', 2)
major_version = int(major_version)
minor_version = int(minor_version)
install_requires = [
'pytz',
'flask>=0.10',
'flask-wtf',
'babel==2.0', # TODO: unfreeze, when stable version is released
'blinker',
'speaklater',
'Flask-Babel>=0.9',
'Flask-Login',
]
MODULE2PREFIX = {
'email_queue': 'fio',
}
for dep in info.get('depends', []):
if not re.match(r'(ir|res|webdav)(\W|$)', dep):
install_requires.append(
'%s_%s >= %s.%s, < %s.%s' % (
MODULE2PREFIX.get(dep, 'trytond'), dep, major_version,
minor_version, major_version, minor_version + 1
)
)
install_requires.append(
'trytond >= %s.%s, < %s.%s' %
(major_version, minor_version, major_version, minor_version + 1)
)
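# e.g. with version 3.4.x and depends of ['email_queue', 'party'] (illustrative
# module names), the loop above yields 'fio_email_queue >= 3.4, < 3.5' and
# 'trytond_party >= 3.4, < 3.5', plus the 'trytond >= 3.4, < 3.5' pin appended
# just above.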
# Testing dependencies
tests_require = [
'mock',
'pycountry',
]
setup(
name='trytond_nereid',
version=info.get('version'),
url='http://www.fulfil.io',
license='BSD',
author='Fulfil.IO',
author_email='[email protected]',
description='Tryton - Web Framework',
long_description=open('README.rst').read(),
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Tryton',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules',
],
install_requires=install_requires,
packages=[
'nereid',
'nereid.contrib',
'nereid.tests',
'trytond.modules.nereid',
'trytond.modules.nereid.tests',
'trytond.modules.nereid_test',
],
package_dir={
'nereid': 'nereid',
'nereid.contrib': 'nereid/contrib',
'nereid.tests': 'nereid/tests',
'trytond.modules.nereid': 'trytond_nereid',
'trytond.modules.nereid.tests': 'trytond_nereid/tests',
'trytond.modules.nereid_test': 'nereid_test_module',
},
package_data={
'trytond.modules.nereid': info.get('xml', []) +
['tryton.cfg', 'view/*.xml', 'locale/*.po', 'tests/*.rst'] +
['i18n/*.pot', 'i18n/pt_BR/LC_MESSAGES/*'] +
['templates/*.*', 'templates/tests/*.*'],
'trytond.modules.nereid_test': ['*.xml'] +
['tryton.cfg', 'locale/*.po', 'tests/*.rst'] +
['templates/*.*', 'templates/tests/*.*'],
},
zip_safe=False,
platforms='any',
entry_points="""
[trytond.modules]
nereid = trytond.modules.nereid
nereid_test = trytond.modules.nereid_test
""",
test_suite='tests.suite',
test_loader='trytond.test_loader:Loader',
tests_require=tests_require,
cmdclass={
'audit': RunAudit,
'test': SQLiteTest,
'test_on_postgres': PostgresTest,
},
)
|
|
# Copyright 2017, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from opencensus.trace.propagation import trace_context_http_header_format
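# These tests exercise the W3C Trace Context headers:
#   traceparent = version "-" trace-id (32 hex) "-" parent-id (16 hex) "-" trace-flags (2 hex)
#   tracestate  = comma-separated list of key=value members (the spec caps the list at 32 members)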
class TestTraceContextPropagator(unittest.TestCase):
def test_from_headers_none(self):
from opencensus.trace.span_context import SpanContext
propagator = trace_context_http_header_format.\
TraceContextPropagator()
span_context = propagator.from_headers(None)
self.assertTrue(isinstance(span_context, SpanContext))
def test_from_headers_empty(self):
from opencensus.trace.span_context import SpanContext
propagator = trace_context_http_header_format.\
TraceContextPropagator()
span_context = propagator.from_headers({})
self.assertTrue(isinstance(span_context, SpanContext))
def test_from_headers_with_tracestate(self):
from opencensus.trace.span_context import SpanContext
propagator = trace_context_http_header_format.\
TraceContextPropagator()
span_context = propagator.from_headers({
'traceparent':
'00-12345678901234567890123456789012-1234567890123456-00',
'tracestate':
'foo=1,bar=2,baz=3',
})
self.assertTrue(isinstance(span_context, SpanContext))
self.assertTrue(span_context.tracestate)
def test_from_headers_tracestate_limit(self):
propagator = trace_context_http_header_format.\
TraceContextPropagator()
span_context = propagator.from_headers({
'traceparent':
'00-12345678901234567890123456789012-1234567890123456-00',
'tracestate':
','.join([
'a00=0,a01=1,a02=2,a03=3,a04=4,a05=5,a06=6,a07=7,a08=8,a09=9',
'b00=0,b01=1,b02=2,b03=3,b04=4,b05=5,b06=6,b07=7,b08=8,b09=9',
'c00=0,c01=1,c02=2,c03=3,c04=4,c05=5,c06=6,c07=7,c08=8,c09=9',
'd00=0,d01=1,d02=2',
]),
})
self.assertFalse(span_context.tracestate)
def test_from_headers_tracestate_duplicated_keys(self):
propagator = trace_context_http_header_format.\
TraceContextPropagator()
span_context = propagator.from_headers({
'traceparent':
'00-12345678901234567890123456789012-1234567890123456-00',
'tracestate':
'foo=1,bar=2,foo=3',
})
self.assertFalse(span_context.tracestate)
def test_header_all_zero(self):
propagator = trace_context_http_header_format. \
TraceContextPropagator()
trace_id = '00000000000000000000000000000000'
span_context = propagator.from_headers({
'traceparent':
'00-00000000000000000000000000000000-1234567890123456-00',
})
self.assertNotEqual(span_context.trace_id, trace_id)
span_id = '0000000000000000'
span_context = propagator.from_headers({
'traceparent':
'00-12345678901234567890123456789012-0000000000000000-00',
})
self.assertNotEqual(span_context.span_id, span_id)
def test_header_version_not_supported(self):
propagator = trace_context_http_header_format. \
TraceContextPropagator()
trace_id = '12345678901234567890123456789012'
span_context = propagator.from_headers({
'traceparent':
'ff-12345678901234567890123456789012-1234567890123456-00',
})
self.assertNotEqual(span_context.trace_id, trace_id)
span_context = propagator.from_headers({
'traceparent':
'00-12345678901234567890123456789012-1234567890123456-00-residue',
})
self.assertNotEqual(span_context.trace_id, trace_id)
def test_header_match(self):
propagator = trace_context_http_header_format.\
TraceContextPropagator()
trace_id = '12345678901234567890123456789012'
span_id = '1234567890123456'
# Trace option is not enabled.
span_context = propagator.from_headers({
'traceparent':
'00-12345678901234567890123456789012-1234567890123456-00',
})
self.assertEqual(span_context.trace_id, trace_id)
self.assertEqual(span_context.span_id, span_id)
self.assertFalse(span_context.trace_options.enabled)
# Trace option is enabled.
span_context = propagator.from_headers({
'traceparent':
'00-12345678901234567890123456789012-1234567890123456-01',
})
self.assertEqual(span_context.trace_id, trace_id)
self.assertEqual(span_context.span_id, span_id)
self.assertTrue(span_context.trace_options.enabled)
def test_header_not_match(self):
propagator = trace_context_http_header_format.\
TraceContextPropagator()
trace_id = 'invalid_trace_id'
span_context = propagator.from_headers({
'traceparent':
'00-invalid_trace_id-66666-00',
})
self.assertNotEqual(span_context.trace_id, trace_id)
def test_to_headers_without_tracestate(self):
from opencensus.trace import span_context
from opencensus.trace import trace_options
propagator = trace_context_http_header_format.\
TraceContextPropagator()
trace_id = '12345678901234567890123456789012'
span_id_hex = '1234567890123456'
span_context = span_context.SpanContext(
trace_id=trace_id,
span_id=span_id_hex,
trace_options=trace_options.TraceOptions('1'))
headers = propagator.to_headers(span_context)
self.assertTrue('traceparent' in headers)
self.assertEqual(headers['traceparent'], '00-{}-{}-01'.format(
trace_id, span_id_hex))
self.assertFalse('tracestate' in headers)
def test_to_headers_with_empty_tracestate(self):
from opencensus.trace import span_context
from opencensus.trace import trace_options
from opencensus.trace.tracestate import Tracestate
propagator = trace_context_http_header_format.\
TraceContextPropagator()
trace_id = '12345678901234567890123456789012'
span_id_hex = '1234567890123456'
span_context = span_context.SpanContext(
trace_id=trace_id,
span_id=span_id_hex,
tracestate=Tracestate(),
trace_options=trace_options.TraceOptions('1'))
headers = propagator.to_headers(span_context)
self.assertTrue('traceparent' in headers)
self.assertEqual(headers['traceparent'], '00-{}-{}-01'.format(
trace_id, span_id_hex))
self.assertFalse('tracestate' in headers)
def test_to_headers_with_tracestate(self):
from opencensus.trace import span_context
from opencensus.trace import trace_options
from opencensus.trace.tracestate import Tracestate
propagator = trace_context_http_header_format.\
TraceContextPropagator()
trace_id = '12345678901234567890123456789012'
span_id_hex = '1234567890123456'
span_context = span_context.SpanContext(
trace_id=trace_id,
span_id=span_id_hex,
tracestate=Tracestate(foo="xyz"),
trace_options=trace_options.TraceOptions('1'))
headers = propagator.to_headers(span_context)
self.assertTrue('traceparent' in headers)
self.assertEqual(headers['traceparent'], '00-{}-{}-01'.format(
trace_id, span_id_hex))
self.assertTrue('tracestate' in headers)
self.assertEqual(headers['tracestate'], 'foo=xyz')
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This script can move pages.
These command line parameters can be used to specify which pages to work on:
¶ms;
Furthermore, the following command line parameters are supported:
-from and -to The page to move from and the page to move to.
-noredirect Leave no redirect behind.
-notalkpage Do not move this page's talk page (if it exists)
-prefix Move pages by adding a namespace prefix to the names of the
pages. (Will remove the old namespace prefix if any)
Argument can also be given as "-prefix:namespace:".
-always Don't prompt to make changes, just do them.
-skipredirects Skip redirect pages (Warning: increases server load)
-summary Prompt for a custom summary, bypassing the predefined message
texts. Argument can also be given as "-summary:XYZ".
-pairsfile Read pairs of file names from a file. The file must be in a
format [[frompage]] [[topage]] [[frompage]] [[topage]] ...
Argument can also be given as "-pairsfile:filename"
"""
#
# (C) Leonardo Gregianin, 2006
# (C) Andreas J. Schwab, 2007
# (C) Pywikibot team, 2006-2017
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
import re
import pywikibot
from pywikibot.exceptions import ArgumentDeprecationWarning
from pywikibot.tools import issue_deprecation_warning
from pywikibot import i18n, pagegenerators
from pywikibot.bot import MultipleSitesBot
# This is required for the text that is shown when you run this script
# with the parameter -help.
docuReplacements = {
'¶ms;': pagegenerators.parameterHelp,
}
class MovePagesBot(MultipleSitesBot):
"""Page move bot."""
def __init__(self, generator, **kwargs):
"""Constructor."""
self.availableOptions.update({
'prefix': None,
'noredirect': False,
'movetalkpage': True,
'skipredirects': False,
'summary': None,
})
super(MovePagesBot, self).__init__(**kwargs)
self.generator = generator
self.appendAll = False
self.regexAll = False
self.noNamespace = False
def moveOne(self, page, newPageTitle):
"""Move on page to newPageTitle."""
try:
msg = self.getOption('summary')
if not msg:
msg = i18n.twtranslate(page.site, 'movepages-moving')
pywikibot.output(u'Moving page %s to [[%s]]'
% (page.title(asLink=True),
newPageTitle))
page.move(newPageTitle, reason=msg, movetalkpage=self.getOption('movetalkpage'),
deleteAndMove=self.getOption('noredirect'))
except pywikibot.PageRelatedError as error:
pywikibot.output(error)
def treat(self, page):
"""Treat a single page."""
self.current_page = page
if self.getOption('skipredirects') and page.isRedirectPage():
pywikibot.output(u'Page %s is a redirect; skipping.' % page.title())
return
pagetitle = page.title(withNamespace=False)
namesp = page.site.namespace(page.namespace())
if self.appendAll:
newPageTitle = (u'%s%s%s'
% (self.pagestart, pagetitle, self.pageend))
if not self.noNamespace and namesp:
newPageTitle = (u'%s:%s' % (namesp, newPageTitle))
elif self.regexAll:
newPageTitle = self.regex.sub(self.replacePattern, pagetitle)
if not self.noNamespace and namesp:
newPageTitle = (u'%s:%s' % (namesp, newPageTitle))
if self.getOption('prefix'):
newPageTitle = (u'%s%s' % (self.getOption('prefix'), pagetitle))
if self.getOption('prefix') or self.appendAll or self.regexAll:
if self.user_confirm('Change the page title to "%s"?'
% newPageTitle):
self.moveOne(page, newPageTitle)
else:
choice = pywikibot.input_choice(u'What do you want to do?',
[('change page name', 'c'),
('append to page name', 'a'),
('use a regular expression', 'r'),
('next page', 'n')])
if choice == 'c':
newPageTitle = pywikibot.input(u'New page name:')
self.moveOne(page, newPageTitle)
elif choice == 'a':
self.pagestart = pywikibot.input(u'Append this to the start:')
self.pageend = pywikibot.input(u'Append this to the end:')
newPageTitle = (u'%s%s%s'
% (self.pagestart, pagetitle, self.pageend))
if namesp:
if pywikibot.input_yn(u'Do you want to remove the '
'namespace prefix "%s:"?' % namesp,
automatic_quit=False):
self.noNamespace = True
else:
newPageTitle = (u'%s:%s' % (namesp, newPageTitle))
choice2 = pywikibot.input_choice(
u'Change the page title to "%s"?'
% newPageTitle, [('yes', 'y'), ('no', 'n'), ('all', 'a')])
if choice2 == 'y':
self.moveOne(page, newPageTitle)
elif choice2 == 'a':
self.appendAll = True
self.moveOne(page, newPageTitle)
elif choice == 'r':
searchPattern = pywikibot.input(u'Enter the search pattern:')
self.replacePattern = pywikibot.input(
u'Enter the replace pattern:')
self.regex = re.compile(searchPattern)
if page.title() == page.title(withNamespace=False):
newPageTitle = self.regex.sub(self.replacePattern,
page.title())
else:
if pywikibot.input_yn(u'Do you want to remove the '
'namespace prefix "%s:"?' % namesp,
automatic_quit=False):
newPageTitle = self.regex.sub(
self.replacePattern, page.title(withNamespace=False))
self.noNamespace = True
else:
newPageTitle = self.regex.sub(self.replacePattern,
page.title())
choice2 = pywikibot.input_choice(
u'Change the page title to "%s"?'
% newPageTitle, [('yes', 'y'), ('no', 'n'), ('all', 'a')])
if choice2 == 'y':
self.moveOne(page, newPageTitle)
elif choice2 == 'a':
self.regexAll = True
self.moveOne(page, newPageTitle)
def main(*args):
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
@param args: command line arguments
@type args: list of unicode
"""
oldName = None
options = {}
fromToPairs = []
# Process global args and prepare generator args parser
local_args = pywikibot.handle_args(args)
genFactory = pagegenerators.GeneratorFactory()
for arg in local_args:
if arg == '-pairs' or arg.startswith('-pairs:'):
issue_deprecation_warning(
'-pairs',
'-pairsfile',
2, ArgumentDeprecationWarning)
elif arg.startswith('-pairsfile'):
if len(arg) == len('-pairsfile'):
filename = pywikibot.input(
u'Enter the name of the file containing pairs:')
else:
filename = arg[len('-pairsfile:'):]
oldName1 = None
for page in pagegenerators.TextfilePageGenerator(filename):
if oldName1:
fromToPairs.append([oldName1, page.title()])
oldName1 = None
else:
oldName1 = page.title()
if oldName1:
pywikibot.warning(
u'file %s contains odd number of links' % filename)
elif arg == '-noredirect':
options['noredirect'] = True
elif arg == '-notalkpage':
options['movetalkpage'] = False
elif arg == '-always':
options['always'] = True
elif arg == '-skipredirects':
options['skipredirects'] = True
elif arg.startswith('-from:'):
if oldName:
pywikibot.warning(u'-from:%s without -to:' % oldName)
oldName = arg[len('-from:'):]
elif arg.startswith('-to:'):
if oldName:
fromToPairs.append([oldName, arg[len('-to:'):]])
oldName = None
else:
pywikibot.warning(u'%s without -from' % arg)
elif arg.startswith('-prefix'):
if len(arg) == len('-prefix'):
options['prefix'] = pywikibot.input(u'Enter the prefix:')
else:
options['prefix'] = arg[8:]
elif arg.startswith('-summary'):
if len(arg) == len('-summary'):
options['summary'] = pywikibot.input(u'Enter the summary:')
else:
options['summary'] = arg[9:]
else:
genFactory.handleArg(arg)
if oldName:
pywikibot.warning(u'-from:%s without -to:' % oldName)
site = pywikibot.Site()
for pair in fromToPairs:
page = pywikibot.Page(site, pair[0])
bot = MovePagesBot(None, **options)
bot.moveOne(page, pair[1])
gen = genFactory.getCombinedGenerator(preload=True)
if gen:
bot = MovePagesBot(gen, **options)
bot.run()
return True
if not fromToPairs:
pywikibot.bot.suggest_help(missing_generator=True)
return False
else:
return True
if __name__ == '__main__':
main()
|
|
import tensorflow as tf
import numpy as np
from tensorflow.python.ops.rnn_cell import LSTMStateTuple
from memory import Memory
import utility
import os
class DNC:
def __init__(self, controller_class, input_size, output_size, max_sequence_length=100,
memory_words_num = 256, memory_word_size = 64, memory_read_heads = 4,
batch_size = 1,hidden_controller_dim=256, use_emb=True,
use_mem=True, decoder_mode=False, emb_size=64,
write_protect=False, dual_controller=False, dual_emb=True,
use_teacher=False, attend_dim=0, persist_mode=False):
"""
constructs a complete DNC architecture as described in the DNC paper
http://www.nature.com/nature/journal/vaop/ncurrent/full/nature20101.html
Parameters:
-----------
controller_class: BaseController
a concrete implementation of the BaseController class
input_size: int
the size of the input vector
output_size: int
the size of the output vector
max_sequence_length: int
the maximum length of an input sequence
memory_words_num: int
the number of words that can be stored in memory
memory_word_size: int
the size of an individual word in memory
memory_read_heads: int
the number of read heads in the memory
batch_size: int
the size of the data batch
"""
saved_args = locals()
print("saved_args is", saved_args)
self.input_size = input_size
self.output_size = output_size
self.max_sequence_length = max_sequence_length
self.words_num = memory_words_num
self.word_size = memory_word_size
self.read_heads = memory_read_heads
self.batch_size = batch_size
self.unpacked_input_data = None
self.packed_output = None
self.packed_memory_view = None
self.decoder_mode = decoder_mode
self.decoder_point = tf.placeholder(tf.int32, name='decoder_point')
self.emb_size = emb_size
self.emb_size2 = emb_size
self.dual_emb = dual_emb
self.use_mem = use_mem
self.use_emb = use_emb
self.hidden_controller_dim = hidden_controller_dim
self.attend_dim = attend_dim
self.use_teacher = use_teacher
self.teacher_force = tf.placeholder(tf.bool,[None], name='teacher')
self.persist_mode = persist_mode
self.clear_mem = tf.placeholder(tf.bool,None, name='clear_mem')
if self.use_emb is False:
self.emb_size=input_size
if self.use_emb is False:
self.emb_size2=output_size
if self.attend_dim>0:
self.W_a = tf.get_variable('W_a', [hidden_controller_dim, self.attend_dim],
initializer=tf.random_normal_initializer(stddev=0.1))
self.U_a = tf.get_variable('U_a', [hidden_controller_dim, self.attend_dim],
initializer=tf.random_normal_initializer(stddev=0.1))
self.v_a = tf.get_variable('v_a', [self.attend_dim],
initializer=tf.random_normal_initializer(stddev=0.1))
# DNC (or NTM) should be structurized into 2 main modules:
# all the graph is setup inside these twos:
self.W_emb_encoder = tf.get_variable('embe_w', [self.input_size, self.emb_size],
initializer=tf.random_uniform_initializer(minval=-1, maxval=1))
self.W_emb_decoder = tf.get_variable('embd_w', [self.output_size, self.emb_size],
initializer=tf.random_uniform_initializer(minval=-1, maxval=1))
self.memory = Memory(self.words_num, self.word_size, self.read_heads, self.batch_size)
self.controller = controller_class(self.emb_size, self.output_size, self.read_heads,
self.word_size, self.batch_size, use_mem, hidden_dim=hidden_controller_dim)
self.dual_controller = dual_controller
if self.dual_controller:
with tf.variable_scope('controller2_scope'):
if attend_dim==0:
self.controller2 = controller_class(self.emb_size2, self.output_size, self.read_heads,
self.word_size, self.batch_size, use_mem, hidden_dim=hidden_controller_dim)
else:
self.controller2 = controller_class(self.emb_size2+hidden_controller_dim, self.output_size, self.read_heads,
self.word_size, self.batch_size, use_mem,
hidden_dim=hidden_controller_dim)
self.write_protect = write_protect
# input data placeholders
self.input_data = tf.placeholder(tf.float32, [batch_size, None, input_size], name='input')
self.target_output = tf.placeholder(tf.float32, [batch_size, None, output_size], name='targets')
self.mask = tf.placeholder(tf.bool, [batch_size, None], name='mask')
self.sequence_length = tf.placeholder(tf.int32, name='sequence_length')# variant length?
if persist_mode:
self.cur_c = tf.get_variable('cur_c', [self.batch_size, hidden_controller_dim],
trainable=False)
self.assign_op_cur_c = self.cur_c.assign(np.ones([self.batch_size, hidden_controller_dim]) * 1e-6)
self.cur_h = tf.get_variable('cur_h', [self.batch_size, hidden_controller_dim],
trainable=False)
self.assign_op_cur_h = self.cur_h.assign(np.ones([self.batch_size, hidden_controller_dim]) * 1e-6)
self.cur_mem_content = tf.get_variable('cur_mc', [self.batch_size, self.words_num, self.word_size],trainable=False)
self.assign_op_cur_mem = self.cur_mem_content.assign(np.ones([self.batch_size, self.words_num, self.word_size])*1e-6)
self.cur_u = tf.get_variable('cur_u', [self.batch_size, self.words_num],trainable=False) # initial usage vector u
self.assign_op_cur_u = self.cur_u.assign(np.zeros([self.batch_size, self.words_num]))
self.cur_p = tf.get_variable('cur_p',[self.batch_size, self.words_num], trainable=False) # initial precedence vector p
self.assign_op_cur_p = self.cur_p.assign(np.zeros([self.batch_size, self.words_num]))
self.cur_L = tf.get_variable('cur_L',[self.batch_size, self.words_num, self.words_num], trainable=False) # initial link matrix L
self.assign_op_cur_L = self.cur_L.assign(np.ones([self.batch_size, self.words_num, self.words_num])*1e-6)
self.cur_ww = tf.get_variable('cur_ww',[self.batch_size, self.words_num], trainable=False) # initial write weighting
self.assign_op_cur_ww = self.cur_ww.assign(np.ones([self.batch_size, self.words_num])*1e-6)
self.cur_rw = tf.get_variable('cur_rw',[self.batch_size, self.words_num, self.read_heads], trainable=False) # initial read weightings
self.assign_op_cur_rw = self.cur_rw.assign(np.ones([self.batch_size, self.words_num, self.read_heads])*1e-6)
self.cur_rv = tf.get_variable('cur_rv',[self.batch_size, self.word_size, self.read_heads], trainable=False) # initial read vectors
self.assign_op_cur_rv = self.cur_rv.assign(np.ones([self.batch_size, self.word_size, self.read_heads])*1e-6)
self.build_graph()
# The nature of DNC is to process data step by step and remember data at each time step when necessary
# If input has sequence format --> suitable with RNN core controller --> each time step in RNN equals 1 time step in DNC
# or just feed input to MLP --> each feed is 1 time step
def _step_op(self, time, step, memory_state, controller_state=None, controller_hiddens=None):
"""
performs a step operation on the input step data
Parameters:
----------
step: Tensor (batch_size, input_size)
memory_state: Tuple
a tuple of current memory parameters
controller_state: Tuple
the state of the controller if it's recurrent
Returns: Tuple
output: Tensor (batch_size, output_size)
memory_view: dict
"""
last_read_vectors = memory_state[6] # read values from memory
pre_output, interface, nn_state = None, None, None
# compute outputs from controller
if self.controller.has_recurrent_nn:
# controller state is the rnn cell state pass through each time step
def c1():
if not self.use_emb:
step2 = tf.reshape(step, [-1, self.input_size])
return self.controller.process_input(step2, last_read_vectors, controller_state)
else:
return self.controller.process_input(step, last_read_vectors, controller_state)
def c2():
if not self.use_emb:
step2=tf.reshape(step,[-1,self.output_size])
else:
step2=step
#attention
if controller_hiddens:
# there is some bug with the tensor array gather???
# that's why I have to make this part so complicated
lll = self.decoder_point
controller_hiddens2 = tf.TensorArray(tf.float32, lll, clear_after_read=False)
def fn(tt, cc, dd):
cc = cc.write(tt, dd.read(tt))
return tt + 1, cc, dd
ltime = tf.constant(0, dtype=tf.int32)
tt, cc, dd = tf.while_loop(
cond=lambda ltime, *_: ltime < lll,
body=fn,
loop_vars=(
ltime, controller_hiddens2, controller_hiddens
), # do not need to provide initial values, the initial value lies in the variables themselves
parallel_iterations=1,
swap_memory=True,
)
values = utility.pack_into_tensor2(cc, axis=1)
# values=controller_hiddens.gather(tf.range(0,self.decoder_point))
encoder_outputs =\
tf.reshape(values,[self.batch_size,-1,self.hidden_controller_dim]) # bs x Lin x h
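# Bahdanau-style additive attention over the encoder hidden states:
#   e_tj = v_a . tanh(U_a h_j + W_a s_t), alpha = softmax(e), att = sum_j alpha_j h_j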
v = tf.tanh(
tf.reshape(tf.matmul(tf.reshape(encoder_outputs, [-1, self.hidden_controller_dim]), self.U_a),
[self.batch_size, -1, self.attend_dim])
+ tf.reshape(
tf.matmul(tf.reshape(controller_state[1], [-1, self.hidden_controller_dim]), self.W_a),
[self.batch_size, 1, self.attend_dim])) # bs.Lin x h_att
v = tf.reshape(v, [-1, self.attend_dim])
eijs = tf.matmul(v, tf.expand_dims(self.v_a,1)) # bs.Lin x 1
eijs = tf.reshape(eijs,[self.batch_size,-1])# bs x Lin
exps = tf.exp(eijs)
alphas = exps / tf.reshape(tf.reduce_sum(exps, 1), [-1, 1]) # bs x Lin
att = tf.reduce_sum(encoder_outputs*tf.expand_dims(alphas,2), 1) # bs x h x 1
att = tf.reshape(att,[self.batch_size, self.hidden_controller_dim]) # bs x h
step2=tf.concat([step2,att], axis=-1) #bs x (decoder_is + h)
return self.controller2.process_input(step2, last_read_vectors, controller_state)
if self.dual_controller:
pre_output, interface, nn_state = tf.cond(time>=self.decoder_point, c2, c1)
else:
pre_output, interface, nn_state = self.controller.process_input(step, last_read_vectors, controller_state)
else:
pre_output, interface = self.controller.process_input(step, last_read_vectors)
# memory_matrix is the copy of memory for the reading process later
# do the write first
def fn1():
return self.memory.write(
memory_state[0], memory_state[1], memory_state[5],
memory_state[4], memory_state[2], memory_state[3],
interface['write_key'],
interface['write_strength'],
interface['free_gates'],
interface['allocation_gate'],
interface['write_gate'],
interface['write_vector'],
interface['erase_vector'],
)
def fn2():
return memory_state[1], memory_state[4], memory_state[0], memory_state[3], memory_state[2]
if self.write_protect:
usage_vector, write_weighting, memory_matrix, link_matrix, precedence_vector\
= tf.cond(time>=self.decoder_point, fn2, fn1)
else:
usage_vector, write_weighting, memory_matrix, link_matrix, precedence_vector = self.memory.write(
memory_state[0], memory_state[1], memory_state[5],
memory_state[4], memory_state[2], memory_state[3],
interface['write_key'],
interface['write_strength'],
interface['free_gates'],
interface['allocation_gate'],
interface['write_gate'],
interface['write_vector'],
interface['erase_vector']
)
# then do the read; read after write because the write weighting is needed to produce the temporal linkage that guides the reading
read_weightings, read_vectors = self.memory.read(
memory_matrix,
memory_state[5],
interface['read_keys'],
interface['read_strengths'],
link_matrix,
interface['read_modes'],
)
return [
# report new memory state to be updated outside the condition branch
memory_matrix, #0
# necessary for the next step to compute memory state
usage_vector, #1
precedence_vector, #2
link_matrix, #3
write_weighting, #4
read_weightings, #5
read_vectors, #6
# the final output of dnc
self.controller.final_output(pre_output, read_vectors), #7
# the values public info to outside
interface['read_modes'], #8
interface['allocation_gate'], #9
interface['write_gate'], #10
# report new state of RNN if it exists, necessary for the next step to compute inner controller state
nn_state[0] if nn_state is not None else tf.zeros(1), #11
nn_state[1] if nn_state is not None else tf.zeros(1) #12
]
'''
THIS WRAPPER FOR ONE STEP OF COMPUTATION --> INTERFACE FOR SCAN/WHILE LOOP
'''
def _loop_body(self, time, memory_state, outputs, free_gates, allocation_gates, write_gates,
read_weightings, write_weightings, usage_vectors, controller_state,
outputs_cache, controller_hiddens):
"""
the body of the DNC sequence processing loop
Parameters:
----------
time: Tensor
outputs: TensorArray
memory_state: Tuple
free_gates: TensorArray
allocation_gates: TensorArray
write_gates: TensorArray
read_weightings: TensorArray,
write_weightings: TensorArray,
usage_vectors: TensorArray,
controller_state: Tuple
Returns: Tuple containing all updated arguments
"""
# dynamic tensor array input
def fn1():
return tf.matmul(self.unpacked_input_data.read(time), self.W_emb_encoder)
def fn2():
def fn2_1():
return self.target_output[:,time-1,:]
def fn2_2():
return tf.one_hot(tf.argmax(outputs_cache.read(time - 1), axis=-1), depth=self.output_size)
if self.use_teacher:
feed_value=tf.cond(self.teacher_force[time-1],fn2_1,fn2_2)
else:
feed_value=fn2_2()
if not self.use_emb:
#return outputs_cache.read(time - 1)
r = feed_value
r = tf.reshape(r,[self.batch_size,self.output_size])
print(r.shape)
return r
elif self.dual_emb:
return tf.matmul(feed_value, self.W_emb_decoder)
else:
return tf.matmul(feed_value, self.W_emb_encoder)
# if self.dual_emb:
# return tf.matmul(tf.nn.softmax(outputs_cache.read(time-1),dim=-1), self.W_emb_decoder)
# else:
# return tf.matmul(tf.nn.softmax(outputs_cache.read(time - 1),dim=-1), self.W_emb_encoder)
if self.decoder_mode:
step_input = tf.cond(time>=self.decoder_point, fn2, fn1)
else:
if self.use_emb:
step_input = tf.matmul(self.unpacked_input_data.read(time), self.W_emb_encoder)
else:
step_input = self.unpacked_input_data.read(time)
# compute one step of controller
if self.attend_dim>0:
output_list = self._step_op(time, step_input, memory_state, controller_state, controller_hiddens)
else:
output_list = self._step_op(time, step_input, memory_state, controller_state)
# update memory parameters
# new_controller_state = tf.zeros(1)
new_memory_state = tuple(output_list[0:7])
new_controller_state = LSTMStateTuple(output_list[11], output_list[12]) # state hidden values
controller_hiddens = controller_hiddens.write(time, output_list[12])
outputs = outputs.write(time, output_list[7])# new output is updated
outputs_cache = outputs_cache.write(time, output_list[7])# new output is updated
# collecting memory view for the current step
free_gates = free_gates.write(time, output_list[8])
allocation_gates = allocation_gates.write(time, output_list[9])
write_gates = write_gates.write(time, output_list[10])
read_weightings = read_weightings.write(time, output_list[5])
write_weightings = write_weightings.write(time, output_list[4])
usage_vectors = usage_vectors.write(time, output_list[1])
# all variables have been updated should be return for next step reference
return (
time + 1, #0
new_memory_state, #1
outputs, #2
free_gates,allocation_gates, write_gates, #3 4 5
read_weightings, write_weightings, usage_vectors, #6 7 8
new_controller_state, #9
outputs_cache, #10
controller_hiddens, #11
)
def build_graph(self):
"""
builds the computational graph that performs a step-by-step evaluation
of the input data batches
"""
# make dynamic time step length tensor
self.unpacked_input_data = utility.unpack_into_tensorarray(self.input_data, 1, self.sequence_length)
# want to store all time step values of these variables
outputs = tf.TensorArray(tf.float32, self.sequence_length)
outputs_cache = tf.TensorArray(tf.float32, self.sequence_length)
free_gates = tf.TensorArray(tf.float32, self.sequence_length)
allocation_gates = tf.TensorArray(tf.float32, self.sequence_length)
write_gates = tf.TensorArray(tf.float32, self.sequence_length)
read_weightings = tf.TensorArray(tf.float32, self.sequence_length)
write_weightings = tf.TensorArray(tf.float32, self.sequence_length)
usage_vectors = tf.TensorArray(tf.float32, self.sequence_length)
controller_hiddens = tf.TensorArray(tf.float32, self.sequence_length, clear_after_read=False)
# inital state for RNN controller
controller_state = self.controller.get_state() if self.controller.has_recurrent_nn else (tf.zeros(1), tf.zeros(1))
print(controller_state)
memory_state = self.memory.init_memory()
if self.persist_mode:
def p1():
return memory_state, controller_state
def p2():
return (self.cur_mem_content, self.cur_u, self.cur_p,
self.cur_L, self.cur_ww, self.cur_rw, self.cur_rv), LSTMStateTuple(self.cur_c, self.cur_h)
memory_state, controller_state=tf.cond(self.clear_mem, p1, p2)
if not isinstance(controller_state, LSTMStateTuple):
try:
controller_state = LSTMStateTuple(controller_state[0], controller_state[1])
print('wrapped controller state into an LSTMStateTuple')
except (TypeError, IndexError):
print('controller state is not a (c, h) pair; leaving it unchanged')
# final_results = None
with tf.variable_scope("sequence_loop"):
time = tf.constant(0, dtype=tf.int32)
# use while instead of scan --> suitable with dynamic time step
final_results = tf.while_loop(
cond=lambda time, *_: time < self.sequence_length,
body=self._loop_body,
loop_vars=(
time, memory_state, outputs,
free_gates, allocation_gates, write_gates,
read_weightings, write_weightings,
usage_vectors, controller_state,
outputs_cache, controller_hiddens
), # do not need to provide initial values, the initial value lies in the variables themselves
parallel_iterations=1,
swap_memory=True
)
self.cur_mem_content, self.cur_u, self.cur_p, \
self.cur_L, self.cur_ww, self.cur_rw, self.cur_rv = final_results[1]
self.cur_c = final_results[9][0]
self.cur_h = final_results[9][1]
dependencies = []
if self.controller.has_recurrent_nn:
# tensor array of pair of hidden and state values of rnn
dependencies.append(self.controller.update_state(final_results[9]))
with tf.control_dependencies(dependencies):
# convert output tensor array to normal tensor
self.packed_output = utility.pack_into_tensor(final_results[2], axis=1)
self.packed_memory_view = {
'free_gates': utility.pack_into_tensor(final_results[3], axis=1),
'allocation_gates': utility.pack_into_tensor(final_results[4], axis=1),
'write_gates': utility.pack_into_tensor(final_results[5], axis=1),
'read_weightings': utility.pack_into_tensor(final_results[6], axis=1),
'write_weightings': utility.pack_into_tensor(final_results[7], axis=1),
'usage_vectors': utility.pack_into_tensor(final_results[8], axis=1),
'final_controller_ch':final_results[9],
}
def get_outputs(self):
"""
returns the graph nodes for the output and memory view
Returns: Tuple
outputs: Tensor (batch_size, time_steps, output_size)
memory_view: dict
"""
return self.packed_output, self.packed_memory_view
def assign_pretrain_emb_encoder(self, sess, lookup_mat):
assign_op_W_emb_encoder = self.W_emb_encoder.assign(lookup_mat)
sess.run([assign_op_W_emb_encoder])
def assign_pretrain_emb_decoder(self, sess, lookup_mat):
assign_op_W_emb_decoder = self.W_emb_decoder.assign(lookup_mat)
sess.run([assign_op_W_emb_decoder])
def build_loss_function_multiple(self, optimizer=None, output_sizes=[]):
print('build loss....')
if optimizer is None:
optimizer = tf.train.AdamOptimizer()
output, _ = self.get_outputs()
target = tf.slice(self.target_output, [0, 0, 0],
[self.batch_size, self.sequence_length, output_sizes[0]])
subout = tf.slice(output, [0, 0, 0],
[self.batch_size, self.sequence_length, output_sizes[0]])
prob = tf.nn.softmax(subout, dim=-1)
probs = [prob]
subouts=[subout]
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
labels=tf.slice(target, [0, self.decoder_point, 0],
[self.batch_size, self.sequence_length - self.decoder_point, output_sizes[0]]),
logits=tf.slice(subout, [0, self.decoder_point, 0],  # pass raw logits, not the softmax output
[self.batch_size, self.sequence_length - self.decoder_point, output_sizes[0]]), dim=-1)
)
for ii,si in enumerate(output_sizes[1:]):
target = tf.slice(self.target_output, [0, 0, output_sizes[ii]],
[self.batch_size, self.sequence_length, si])
subout = tf.slice(output, [0, 0, output_sizes[ii]],
[self.batch_size, self.sequence_length, si])
prob = tf.nn.softmax(subout, dim=-1)
probs += [prob]
subouts+=[subout]
loss += tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
labels=tf.slice(target, [0, self.decoder_point, 0],
[self.batch_size, self.sequence_length - self.decoder_point, si]),
logits=tf.slice(subout, [0, self.decoder_point, 0],  # pass raw logits, not the softmax output
[self.batch_size, self.sequence_length - self.decoder_point, si]), dim=-1)
)
gradients = optimizer.compute_gradients(loss)
for i, (grad, var) in enumerate(gradients):
if grad is not None:
gradients[i] = (tf.clip_by_value(grad, -10, 10), var)
apply_gradients = optimizer.apply_gradients(gradients)
return subouts, probs, loss, apply_gradients
def build_loss_function(self, optimizer=None, clip_s=10):
print('build loss....')
if optimizer is None:
optimizer = tf.train.AdamOptimizer()
output, _ = self.get_outputs()
prob = tf.nn.softmax(output, dim=-1)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
labels=tf.slice(self.target_output, [0, self.decoder_point, 0],
[self.batch_size, self.sequence_length - self.decoder_point, self.output_size]),
logits=tf.slice(output, [0, self.decoder_point, 0],
[self.batch_size, self.sequence_length - self.decoder_point, self.output_size]), dim=-1)
)
gradients = optimizer.compute_gradients(loss)
for i, (grad, var) in enumerate(gradients):
if grad is not None:
gradients[i] = (tf.clip_by_value(grad, -clip_s, clip_s), var)
apply_gradients = optimizer.apply_gradients(gradients)
return output, prob, loss, apply_gradients
def build_loss_function_multi_label(self, optimizer=None, clip_s=10):
print('build loss....')
if optimizer is None:
optimizer = tf.train.AdamOptimizer()
output, _ = self.get_outputs()
prob = tf.nn.sigmoid(output)
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
labels=tf.slice(self.target_output, [0, self.decoder_point, 0],
[self.batch_size, self.sequence_length - self.decoder_point, self.output_size]),
logits=tf.slice(output, [0, self.decoder_point, 0],
[self.batch_size, self.sequence_length - self.decoder_point, self.output_size]))
)
gradients = optimizer.compute_gradients(loss)
for i, (grad, var) in enumerate(gradients):
if grad is not None:
gradients[i] = (tf.clip_by_value(grad, -clip_s, clip_s), var)
apply_gradients = optimizer.apply_gradients(gradients)
return output, prob, loss, apply_gradients
def build_loss_function_mask(self, optimizer=None, clip_s=10):
print('build loss mask....')
if optimizer is None:
optimizer = tf.train.AdamOptimizer()
output, _ = self.get_outputs()
prob = tf.nn.softmax(output, dim=-1)
score=tf.nn.softmax_cross_entropy_with_logits(
labels=self.target_output,
logits=output, dim=-1)
score_flatten=tf.reshape(score,[-1])
mask_flatten=tf.reshape(self.mask,[-1])
mask_score=tf.boolean_mask(score_flatten, mask_flatten)
loss = tf.reduce_mean(mask_score)
gradients = optimizer.compute_gradients(loss)
for i, (grad, var) in enumerate(gradients):
if grad is not None:
gradients[i] = (tf.clip_by_value(grad, -clip_s, clip_s), var)
apply_gradients = optimizer.apply_gradients(gradients)
return output, prob, loss, apply_gradients
def print_config(self):
return '{}_{}_{}_{}_{}_{}_{}_{}_{}'.format(self.use_mem,
self.decoder_mode,
self.dual_controller,
self.write_protect,
self.words_num,
self.word_size,
self.use_teacher,
self.attend_dim,
self.persist_mode)
@staticmethod
def save(session, ckpts_dir, name):
"""
saves the current values of the model's parameters to a checkpoint
Parameters:
----------
session: tf.Session
the tensorflow session to save
ckpts_dir: string
the path to the checkpoints directories
name: string
the name of the checkpoint subdirectory
"""
checkpoint_dir = os.path.join(ckpts_dir, name)
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
tf.train.Saver(tf.trainable_variables()).save(session, os.path.join(checkpoint_dir, 'model.ckpt'))
def clear_current_mem(self,sess):
if self.persist_mode:
sess.run([self.assign_op_cur_mem, self.assign_op_cur_u, self.assign_op_cur_p,
self.assign_op_cur_L, self.assign_op_cur_ww, self.assign_op_cur_rw, self.assign_op_cur_rv])
sess.run([self.assign_op_cur_c, self.assign_op_cur_h])
@staticmethod
def restore(session, ckpts_dir, name):
"""
session: tf.Session
the tensorflow session to restore into
ckpts_dir: string
the path to the checkpoints directories
name: string
the name of the checkpoint subdirectory
"""
tf.train.Saver(tf.trainable_variables()).restore(session, os.path.join(ckpts_dir, name, 'model.ckpt'))
@staticmethod
def get_bool_rand(size_seq, prob_true=0.1):
ret = []
for i in range(size_seq):
if np.random.rand() < prob_true:
ret.append(True)
else:
ret.append(False)
return np.asarray(ret)
@staticmethod
def get_bool_rand_incremental(size_seq, prob_true_min=0, prob_true_max=0.25):
ret = []
for i in range(size_seq):
prob_true=(prob_true_max-prob_true_min)/size_seq*i
if np.random.rand() < prob_true:
ret.append(True)
else:
ret.append(False)
return np.asarray(ret)
@staticmethod
def get_bool_rand_curriculum(size_seq, epoch, k=0.99, type='exp'):
if type == 'exp':
prob_true = k**epoch
elif type == 'sig':
prob_true = k / (k + np.exp(epoch / k))
else:
raise ValueError("unknown curriculum type: %s" % type)
ret = []
for i in range(size_seq):
if np.random.rand() < prob_true:
ret.append(True)
else:
ret.append(False)
return np.asarray(ret)
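if __name__ == "__main__":
    # Minimal sanity check of the teacher-forcing schedule helpers above
    # (a sketch only; building the full DNC graph additionally requires a
    # concrete BaseController implementation passed as controller_class).
    print(DNC.get_bool_rand(10, prob_true=0.3))
    print(DNC.get_bool_rand_incremental(10))
    print(DNC.get_bool_rand_curriculum(10, epoch=5, k=0.9, type='exp'))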
|
|
"""ThreatConnect TI Adversary """
# standard library
from urllib.parse import quote_plus
from .mappings import Mappings
class Task(Mappings):
"""Unique API calls for Tasks API Endpoints"""
def __init__(self, tcex, **kwargs):
"""Initialize Class Properties.
Valid status:
+ Not Started
+ In Progress
+ Completed
+ Waiting on Someone
+ Deferred
Args:
tcex (TcEx): An instantiated instance of TcEx object.
name (str, kwargs): [Required for Create] The name for this Task.
owner (str, kwargs): The owner for this Task. Defaults to the default Org when not provided.
status (str, kwargs): Not started, In Progress, Completed, Waiting on Someone, Deferred
due_date (str, kwargs): Converted to %Y-%m-%dT%H:%M:%SZ date format
reminder_date (str, kwargs): Converted to %Y-%m-%dT%H:%M:%SZ date format
escalation_date (str, kwargs): Converted to %Y-%m-%dT%H:%M:%SZ date format
"""
super().__init__(
tcex,
main_type='Task',
api_type='tasks',
sub_type=None,
api_entity='task',
api_branch=None,
owner=kwargs.pop('owner', None),
)
for arg, value in kwargs.items():
self.add_key_value(arg, value)
def _set_unique_id(self, json_response):
"""Set the unique id of the Group."""
self.unique_id = json_response.get('id', '')
@property
def _metadata_map(self):
"""Return metadata map for Group objects."""
return {
'due_date': 'dueDate',
'reminder_date': 'reminderDate',
'escalation_date': 'escalationDate',
}
@property
def as_entity(self):
"""Return the entity representation of the Task."""
return {
'type': 'Task',
'value': self.name,
'id': int(self.unique_id) if self.unique_id else None,
}
@property
def name(self):
"""Return Task name."""
return self._data.get('name')
@name.setter
def name(self, name):
"""Set the Group name."""
self._data['name'] = name
@staticmethod
def is_task():
"""Return True if object is a task."""
return True
def add_assignee(self, assignee):
"""Add the desired assignee from the Task.
Args:
assignee (str): The assignee username
Return:
obj: The response of the POST.
"""
return self.assignee(assignee)
def add_key_value(self, key, value):
"""Convert the value and adds it as a data field.
Args:
key:
value:
"""
key = self._metadata_map.get(key, key)
if key in ['unique_id', 'id']:
self._unique_id = quote_plus(str(value))
elif key in ['dueDate', 'reminderDate', 'escalationDate']:
self._data[key] = self._utils.datetime.format_datetime(
value, date_format='%Y-%m-%dT%H:%M:%SZ'
)
else:
self._data[key] = value
def add_escalatee(self, escalatee):
"""Add the desired escalatee from the Task.
Args:
escalatee (str): The escalatee username
Return:
obj: The response of the POST.
"""
return self.escalatee(escalatee)
def assignee(self, assignee, action='ADD'):
"""General method to perform actions on assignees
Valid Actions:
+ ADD
+ GET
+ DELETE
Args:
assignee (str): The username of the assignee.
action: [ADD, DELETE, GET] the action to be done on the assignee. Defaults to
ADD if not provided.
"""
if not self.can_update():
self._tcex.handle_error(910, [self.type])
return self.tc_requests.assignee(self.api_type, self.unique_id, assignee, action=action)
def assignees(self):
"""Yield the Assignee Users"""
if not self.can_update():
self._tcex.handle_error(910, [self.type])
return self.tc_requests.assignees(self.api_type, self.unique_id)
def can_create(self):
"""Return True if the Object can be created.
Return:
bool: Boolean value indicating whether the object can be created.
"""
if self.data.get('name'):
return True
return False
def delete_assignee(self, assignee):
"""Delete the desired assignee from the Task.
Args:
assignee (str): The assignee username
Return:
obj: The response of the DELETE.
"""
return self.assignee(assignee, action='DELETE')
def delete_escalatee(self, escalatee):
"""Delete the desired escalatee from the Task.
Args:
escalatee (str): The escalatee username
Return:
obj: The response of the DELETE.
"""
return self.escalatee(escalatee, action='DELETE')
def due_date(self, due_date):
"""Update the task due_date
Args:
due_date: Converted to %Y-%m-%dT%H:%M:%SZ date format
"""
if not self.can_update():
self._tcex.handle_error(910, [self.type])
due_date = self._utils.datetime.format_datetime(due_date, date_format='%Y-%m-%dT%H:%M:%SZ')
self._data['dueDate'] = due_date
request = {'dueDate': due_date}
return self.tc_requests.update(self.api_type, self.api_sub_type, self.unique_id, request)
def escalatee(self, escalatee, action='ADD'):
"""General method to perform actions on escalatees
Valid Actions:
+ ADD
+ GET
+ DELETE
Args:
escalatee (str): The username of the escalatee.
action: [ADD, DELETE, GET] the action to be done on the escalatee. Defaults to
ADD if not provided.
"""
if not self.can_update():
self._tcex.handle_error(910, [self.type])
return self.tc_requests.escalatee(self.api_type, self.unique_id, escalatee, action=action)
def escalatees(self):
"""Yield the Escalatees Users"""
if not self.can_update():
self._tcex.handle_error(910, [self.type])
return self.tc_requests.escalatees(self.api_type, self.unique_id)
def escalation_date(self, escalation_date):
"""Update the task escalation_date
Args:
escalation_date: Converted to %Y-%m-%dT%H:%M:%SZ date format
"""
if not self.can_update():
self._tcex.handle_error(910, [self.type])
escalation_date = self._utils.datetime.format_datetime(
escalation_date, date_format='%Y-%m-%dT%H:%M:%SZ'
)
self._data['escalationDate'] = escalation_date
request = {'escalationDate': escalation_date}
return self.tc_requests.update(self.api_type, self.api_sub_type, self.unique_id, request)
def get_assignee(self, assignee):
"""Retrieve the desired assignee from the Task.
Args:
assignee (str): The assignee username
Return:
obj: The response of the GET.
"""
return self.assignee(assignee, action='GET')
def get_escalatee(self, escalatee):
"""Retrieve the desired escalatee from the Task.
Args:
escalatee (str): The escalatee username
Return:
obj: The response of the GET.
"""
return self.escalatee(escalatee, action='GET')
def reminder_date(self, reminder_date):
"""Update the task reminder_date
Args:
reminder_date: Converted to %Y-%m-%dT%H:%M:%SZ date format
"""
if not self.can_update():
self._tcex.handle_error(910, [self.type])
reminder_date = self._utils.datetime.format_datetime(
reminder_date, date_format='%Y-%m-%dT%H:%M:%SZ'
)
self._data['reminderDate'] = reminder_date
request = {'reminderDate': reminder_date}
return self.tc_requests.update(self.api_type, self.api_sub_type, self.unique_id, request)
def status(self, status):
"""Update the Task Status
Valid status:
+ Not Started
+ In Progress
+ Completed
+ Waiting on Someone
+ Deferred
Args:
status: Not Started, In Progress, Completed, Waiting on Someone, Deferred
"""
if not self.can_update():
self._tcex.handle_error(910, [self.type])
self._data['status'] = status
request = {'status': status}
return self.tc_requests.update(self.api_type, self.api_sub_type, self.unique_id, request)
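def _example_usage(tcex):
    """Illustrative sketch only (not part of the API).

    Assumes an instantiated TcEx object; the name, status and date below are
    hypothetical. Server-side operations such as assignee() or status()
    require the Task to already exist (i.e. have a unique id), so only the
    local construction is shown here.
    """
    task = Task(tcex, name='Review escalated indicators', status='In Progress',
                due_date='2021-01-01T00:00:00Z')
    return task.as_entity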
|
|
from toee import *
from utilities import *
from Co8 import *
## Written By Cerulean the Blue
# Modified by Sitra Achara 04-2011
# Miniature Chest internal flags:
# obj_f_item_pad_i_2 - miniature chest ID
# obj_f_item_pad_i_3 - indicates whether the chest can be summoned ("chest is in the Ethereal Plane (1) or in the Prime Plane (0) ")
# obj_f_item_pad_i_4 - Map # where the chest was summoned
def OnBeginSpellCast( spell ):
print " Extraplanar Chest OnBeginSpellCast"
print "spell.target_list=", spell.target_list
print "spell.caster=", spell.caster, " caster.level= ", spell.caster_level
game.particles( "sp-conjuration-conjure", spell.caster )
def OnSpellEffect ( spell ):
print " Extraplanar Chest OnSpellEffect"
spell.duration = 1
bgfilename = 'modules\\ToEE\\Bag_of_Holding.mes'
proto = 1113
mini = spell.caster.item_find(12105)
if mini == OBJ_HANDLE_NULL: # Caster hasn't used spell before. Create miniature chest and subtract 5050 gold from caster
if spell.caster.money_get() >= 505000:
mini = create_item_in_inventory( 12105, spell.caster )
set_flag(mini, 0) # sets flag for chest on the Prime Material Plane
mini.item_flag_set( OIF_IDENTIFIED ) # Makes the mini identified.
spell.caster.money_adj(-505000)
chest = game.obj_create(proto, spell.caster.location)
create_item_in_inventory(11300, chest) # Create Leomund's Secret Chest note in new chest
#game.particles( 'Orb-Summon-Balor', chest )
bagnum = boh_newbag() # boh-newbag() is in Co8.py
Set_ID(mini, chest, bagnum)
else:
game.particles( 'Fizzle', spell.caster )
elif (Get_ID_Mini(mini) == 0): # Miniature found but never used before
set_flag(mini, 0) # sets flag for chest on the Prime Material Plane
bagnum = boh_newbag() # boh-newbag() is in Co8.py
chest = game.obj_create(proto, spell.caster.location)
create_item_in_inventory(11300, chest) # Create Leomund's Secret Chest note in new chest
#game.particles( 'Orb-Summon-Balor', chest ) # causes crashes with saddlebags
Set_ID(mini, chest, bagnum)
set_mini_map(mini, chest) # record map where the chest was summoned - for forgetful players
else:
# Mini found and has been used before.
chest_here = 0 # flag for whether or not the right chest is in the caster's vicinity
for chest in game.obj_list_vicinity( spell.caster.location, OLC_CONTAINER ):
# Find correct chest for that mini
if (chest.name == 1113 and Compare_ID(mini, chest)):
chest_here = 1
cxx, cyy = location_to_axis(chest.location)
#game.particles( 'Orb-Summon-Balor', chest )
allBagDict = readMes(bgfilename) # readMes is in Co8.py.
bagnum = boh_newbag() # boh-newbag() is in Co8.py
Set_ID_Mini(mini, bagnum)
contents = GetContents(chest)
allBagDict[bagnum] = contents
writeMes(bgfilename, allBagDict) # writeMes is in Co8.py
set_flag(mini, 1) # Sets flag for chest on the Ethereal Plane
# Time event added for chest destruction to allow time for game particles to fire
Timed_Destroy(chest, 500) # 500 = 1/2 second
if ( not (chest_here) and get_flag(mini) ): # Chest not on this plane: create chest and fill it.
chest = game.obj_create(proto, spell.caster.location)
bagnum = Get_ID_Mini(mini)
Set_ID(mini, chest, bagnum)
set_flag(mini, 0) # sets flag for chest on the Prime Material Plane
set_mini_map(mini, chest) # record map where the chest was summoned - for forgetful players
#game.particles( 'Orb-Summon-Balor', chest )
contents = boh_getContents(bagnum) # boh_getContents is in Co8.py
Create_Contents(contents, chest)
elif ( not (chest_here) and not get_flag(mini) ):
miniature_chest_map_number = get_mini_map(mini) # retrieve map where the chest was summoned - for forgetful players
spell.caster.float_mesfile_line( 'mes\\spell.mes', 16015, 1 ) # "Chest left at:"
spell.caster.float_mesfile_line( 'mes\\map_names.mes', miniature_chest_map_number, 1 )
spell.caster.float_mesfile_line( 'mes\\map_numbers.mes', miniature_chest_map_number, 1 )
# failsafe:
# if you are on the same map, and your X,Y coordinates are close enough to where the chest was supposed to be, yet no chest was found, then it's probably a bug - reset the miniature to "Ethereal Plane"
# likewise, if for some reason the chest has no recorded X,Y at all, allow respawning it (mainly catering to r0gershrubber here :) )
pxx, pyy = location_to_axis(spell.caster.location)
cxx, cyy = get_mini_xy(mini)
if ( ((pxx-cxx)**2) + ( (pyy-cyy)**2 )< 81 or cxx == 0 ) and game.leader.map == miniature_chest_map_number:
set_flag(mini, 1)
# error try again message
spell.caster.float_mesfile_line( 'mes\\spell.mes', 16017, 1 ) # "Failsafe activated!"
spell.caster.float_mesfile_line( 'mes\\spell.mes', 16018, 1 ) # "Try again."
elif miniature_chest_map_number in range(5070, 5079):
# lastly, if you lost it in the wilds, here's your second chance (exploitable, but it's easy enough to cheat in this game anyway)
spell.caster.float_mesfile_line( 'mes\\spell.mes', 16016, 1 ) # "LOST IN WILDERNESS!"
if not game.leader.map == miniature_chest_map_number:
set_flag(mini, 1)
spell.caster.float_mesfile_line( 'mes\\spell.mes', 16017, 1 ) # "Failsafe activated!"
spell.caster.float_mesfile_line( 'mes\\spell.mes', 16018, 1 ) # "Try again."
game.particles( 'Fizzle', spell.caster )
else:
game.particles( 'Fizzle', spell.caster )
End_Spell(spell)
spell.spell_end(spell.id, 1) # fix - adding the endDespiteTargetList flag to force the spell_end and prevent the spell trigger from going on indefinitely
def OnBeginRound( spell ):
print " Extraplanar Chest OnBeginRound"
return
def OnEndSpellCast( spell ):
print " Extraplanar Chest OnEndSpellCast"
def GetContents(chest):
# Gets contents of the chest by proto number and adds them to an ordered list. The list format is dynamic.
# Reads Exclude_List.mes. Items in the exclude list are not preserved in the contents of the chest.
# This is to prevent charged magic items from being recharged by being in the chest. Such items are lost if sent to the Ethereal Plane in the chest.
# Excluded list is not decoded because we only care about the dictionary keys (proto numbers). There is no need to decode the descriptions in the dictionary entries.
ExcludeDict = readMes('modules\\ToEE\\Exclude_List.mes')
exclude_list = ExcludeDict.keys()
contents = []
# Look for proto number 4000-12999. These are the proto numbers for items that could be in the chest.
num = 4000
while num <= 12999 :
# Check if proto number is on the exclude list
if num not in exclude_list:
item = chest.item_find_by_proto(num)
# Loop finding the item to check for multiple occurrences of the same item
while (item != OBJ_HANDLE_NULL):
# add the item to the list of contents
contents.append(num)
# check if item is stackable, and if so get the quantity stacked
quantity = 0
type = item.type
if (type == obj_t_ammo):
quantity = item.obj_get_int(obj_f_ammo_quantity)
else:
quantity = item.obj_get_int(obj_f_item_quantity)
# if there is more than one in the stack, add the quantity to the contents list. Max quantity 3999 to keep quantity from matching any proto number in the list.
if ( quantity > 1 ):
if quantity >= 4000:
quantity = 3999
contents.append(quantity)
# check to see if item is identified. If so, add the identified flag 1 to the list.
FLAGS = item.item_flags_get()
if (FLAGS & OIF_IDENTIFIED):
contents.append(1)
# destroy the item and check if there is another with the same proto number
item.destroy()
item = chest.item_find_by_proto(num)
num += 1
# add "end of list" number to the end of the list.
contents.append(99999)
return contents
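# Example of the resulting list format (illustrative proto numbers):
#   [8014, 1, 4096, 20, 99999]
# reads as: one identified item with proto 8014; a stack of 20 unidentified
# items with proto 4096; 99999 terminates the list.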
def Create_Contents(contents, chest):
# Recreates the contents of the chest from the ordered list.
# Uses a "while" statement rather than a "for" statement in order to be able to step through the dynamic list.
i = 0
while i in range(len(contents)):
# check to make sure we are looking at a proto number and not a quantity, identified flag, or end of list marker.
if (contents[i] >= 4000 and contents[i] != 99999):
#create item in chest
item = create_item_in_inventory( contents[i], chest )
# step to the next number on the list
i += 1
if i in range(len(contents)): # probably not necessary here, but keeps us safe.
# check to see if next number on the list is a quantity or identified flag
if contents[i] < 4000:
quantity = contents[i]
# check if item is ammo
if item.type == obj_t_ammo:
# check if "quantity" is actually a quantity and not an identified flag
if quantity > 1:
# "quantity" is a quantity, Set item quantity and step to next number on the list
item.obj_set_int(obj_f_ammo_quantity, quantity)
i += 1
else:
# "quantity" is an identified flag. Set item quantity to 1.
item.obj_set_int(obj_f_ammo_quantity, 1)
# check if item is a potion, scroll or other stackable item.
else:
# check if "quantity" is actually a quantity and not an identified flag
if quantity > 1:
# "quantity" is a quantity, Set item quantity and step to next number on the list
item.obj_set_int(obj_f_item_quantity, quantity)
i += 1
else:
# "quantity" is an identified flag. Set item quantity to 1.
item.obj_set_int(obj_f_item_quantity, 1)
if i in range(len(contents)): # is necessary here
# check if contents[i] is an identified flag.
if contents[i] == 1:
# flag item as identified and step to next number on the list.
item.item_flag_set( OIF_IDENTIFIED )
i += 1
else:
i += 1
return
def Set_ID(mini, chest, num): # Stores the given bag ID in a field of both the miniature and the chest.
# ID_number = game.random_range( 1,2147483647 )
# ID_number = ID_number^game.random_range( 1,2147483647 )#xor with next "random" number in line, should be more random
mini.obj_set_int(obj_f_item_pad_i_2, num)
chest.obj_set_int(obj_f_container_pad_i_1, num)
return num
def Set_ID_Mini(mini, num):
mini.obj_set_int(obj_f_item_pad_i_2, num)
return mini.obj_get_int(obj_f_item_pad_i_2)
def Get_ID_Mini(mini): # Reads the ID number of the miniature chest.
return mini.obj_get_int(obj_f_item_pad_i_2)
def Compare_ID(mini, chest): # Compares the ID number of the large chest and the miniature chest. Returns 1 if they match, otherwise returns 0.
if (mini.obj_get_int(obj_f_item_pad_i_2) == chest.obj_get_int(obj_f_container_pad_i_1)):
return 1
else:
return 0
def set_flag(mini, x): # Store a flag in a field of the miniature chest. 1 means on the Ethereal Plane, 0 means on the Prime Material Plane.
mini.obj_set_int( obj_f_item_pad_i_3, x )
return mini.obj_get_int( obj_f_item_pad_i_3 )
def get_flag(mini): # Reads a flag from a field of the miniature chest.
return mini.obj_get_int( obj_f_item_pad_i_3 )
def set_mini_map(mini, chest):
cxx, cyy = location_to_axis(chest.location)
mini.obj_set_int( obj_f_item_pad_i_4, mini.map )
mini.obj_set_int( obj_f_item_pad_i_5, (cxx + (cyy << 10) ) )
return mini.obj_get_int( obj_f_item_pad_i_4 )
def get_mini_map(mini):
return mini.obj_get_int( obj_f_item_pad_i_4 )
def get_mini_xy(mini):
return ( mini.obj_get_int( obj_f_item_pad_i_5 )>>10 , mini.obj_get_int( obj_f_item_pad_i_5 ) & ((2**10) -1) )
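# Worked example for the packing used by set_mini_map/get_mini_xy above
# (illustrative, with made-up coordinates): for chest axis coordinates
# cxx = 500, cyy = 480 the stored value is 500 + (480 << 10) = 492020.
# get_mini_xy then returns (492020 >> 10, 492020 & 1023) == (480, 500),
# i.e. the tuple comes back in (cyy, cxx) order, and the low field only
# round-trips correctly while cxx < 1024.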
|
|
"""Test Google Smart Home."""
from unittest.mock import Mock, patch
import pytest
from homeassistant.components import camera
from homeassistant.components.climate.const import (
ATTR_MAX_TEMP,
ATTR_MIN_TEMP,
HVAC_MODE_HEAT,
)
from homeassistant.components.demo.binary_sensor import DemoBinarySensor
from homeassistant.components.demo.cover import DemoCover
from homeassistant.components.demo.light import DemoLight
from homeassistant.components.demo.media_player import AbstractDemoPlayer
from homeassistant.components.demo.switch import DemoSwitch
from homeassistant.components.google_assistant import (
EVENT_COMMAND_RECEIVED,
EVENT_QUERY_RECEIVED,
EVENT_SYNC_RECEIVED,
const,
smart_home as sh,
trait,
)
from homeassistant.const import ATTR_UNIT_OF_MEASUREMENT, TEMP_CELSIUS, __version__
from homeassistant.core import EVENT_CALL_SERVICE, State
from homeassistant.helpers import device_registry
from homeassistant.setup import async_setup_component
from . import BASIC_CONFIG, MockConfig
from tests.common import (
mock_area_registry,
mock_coro,
mock_device_registry,
mock_registry,
)
REQ_ID = "ff36a3cc-ec34-11e6-b1a0-64510650abcf"
@pytest.fixture
def registries(hass):
"""Registry mock setup."""
from types import SimpleNamespace
ret = SimpleNamespace()
ret.entity = mock_registry(hass)
ret.device = mock_device_registry(hass)
ret.area = mock_area_registry(hass)
return ret
async def test_sync_message(hass):
"""Test a sync message."""
light = DemoLight(None, "Demo Light", state=False, hs_color=(180, 75))
light.hass = hass
light.entity_id = "light.demo_light"
await light.async_update_ha_state()
# This should not show up in the sync request
hass.states.async_set("sensor.no_match", "something")
# Excluded via config
hass.states.async_set("light.not_expose", "on")
config = MockConfig(
should_expose=lambda state: state.entity_id != "light.not_expose",
entity_config={
"light.demo_light": {
const.CONF_ROOM_HINT: "Living Room",
const.CONF_ALIASES: ["Hello", "World"],
}
},
)
events = []
hass.bus.async_listen(EVENT_SYNC_RECEIVED, events.append)
result = await sh.async_handle_message(
hass,
config,
"test-agent",
{"requestId": REQ_ID, "inputs": [{"intent": "action.devices.SYNC"}]},
const.SOURCE_CLOUD,
)
assert result == {
"requestId": REQ_ID,
"payload": {
"agentUserId": "test-agent",
"devices": [
{
"id": "light.demo_light",
"name": {
"name": "Demo Light",
"nicknames": ["Demo Light", "Hello", "World"],
},
"traits": [
trait.TRAIT_BRIGHTNESS,
trait.TRAIT_ONOFF,
trait.TRAIT_COLOR_SETTING,
],
"type": const.TYPE_LIGHT,
"willReportState": False,
"attributes": {
"colorModel": "hsv",
"colorTemperatureRange": {
"temperatureMinK": 2000,
"temperatureMaxK": 6535,
},
},
"roomHint": "Living Room",
}
],
},
}
await hass.async_block_till_done()
assert len(events) == 1
assert events[0].event_type == EVENT_SYNC_RECEIVED
assert events[0].data == {"request_id": REQ_ID, "source": "cloud"}
# pylint: disable=redefined-outer-name
async def test_sync_in_area(hass, registries):
"""Test a sync message where room hint comes from area."""
area = registries.area.async_create("Living Room")
device = registries.device.async_get_or_create(
config_entry_id="1234",
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
registries.device.async_update_device(device.id, area_id=area.id)
entity = registries.entity.async_get_or_create(
"light", "test", "1235", suggested_object_id="demo_light", device_id=device.id
)
light = DemoLight(None, "Demo Light", state=False, hs_color=(180, 75))
light.hass = hass
light.entity_id = entity.entity_id
await light.async_update_ha_state()
config = MockConfig(should_expose=lambda _: True, entity_config={})
events = []
hass.bus.async_listen(EVENT_SYNC_RECEIVED, events.append)
result = await sh.async_handle_message(
hass,
config,
"test-agent",
{"requestId": REQ_ID, "inputs": [{"intent": "action.devices.SYNC"}]},
const.SOURCE_CLOUD,
)
assert result == {
"requestId": REQ_ID,
"payload": {
"agentUserId": "test-agent",
"devices": [
{
"id": "light.demo_light",
"name": {"name": "Demo Light"},
"traits": [
trait.TRAIT_BRIGHTNESS,
trait.TRAIT_ONOFF,
trait.TRAIT_COLOR_SETTING,
],
"type": const.TYPE_LIGHT,
"willReportState": False,
"attributes": {
"colorModel": "hsv",
"colorTemperatureRange": {
"temperatureMinK": 2000,
"temperatureMaxK": 6535,
},
},
"roomHint": "Living Room",
}
],
},
}
await hass.async_block_till_done()
assert len(events) == 1
assert events[0].event_type == EVENT_SYNC_RECEIVED
assert events[0].data == {"request_id": REQ_ID, "source": "cloud"}
async def test_query_message(hass):
    """Test a query message."""
light = DemoLight(None, "Demo Light", state=False, hs_color=(180, 75))
light.hass = hass
light.entity_id = "light.demo_light"
await light.async_update_ha_state()
light2 = DemoLight(
None, "Another Light", state=True, hs_color=(180, 75), ct=400, brightness=78
)
light2.hass = hass
light2.entity_id = "light.another_light"
await light2.async_update_ha_state()
events = []
hass.bus.async_listen(EVENT_QUERY_RECEIVED, events.append)
result = await sh.async_handle_message(
hass,
BASIC_CONFIG,
"test-agent",
{
"requestId": REQ_ID,
"inputs": [
{
"intent": "action.devices.QUERY",
"payload": {
"devices": [
{"id": "light.demo_light"},
{"id": "light.another_light"},
{"id": "light.non_existing"},
]
},
}
],
},
const.SOURCE_CLOUD,
)
assert result == {
"requestId": REQ_ID,
"payload": {
"devices": {
"light.non_existing": {"online": False},
"light.demo_light": {"on": False, "online": True, "brightness": 0},
"light.another_light": {
"on": True,
"online": True,
"brightness": 30,
"color": {
"spectrumHsv": {
"hue": 180,
"saturation": 0.75,
"value": 0.3058823529411765,
},
"temperatureK": 2500,
},
},
}
},
}
assert len(events) == 3
assert events[0].event_type == EVENT_QUERY_RECEIVED
assert events[0].data == {
"request_id": REQ_ID,
"entity_id": "light.demo_light",
"source": "cloud",
}
assert events[1].event_type == EVENT_QUERY_RECEIVED
assert events[1].data == {
"request_id": REQ_ID,
"entity_id": "light.another_light",
"source": "cloud",
}
assert events[2].event_type == EVENT_QUERY_RECEIVED
assert events[2].data == {
"request_id": REQ_ID,
"entity_id": "light.non_existing",
"source": "cloud",
}
async def test_execute(hass):
"""Test an execute command."""
await async_setup_component(hass, "light", {"light": {"platform": "demo"}})
await hass.services.async_call(
"light", "turn_off", {"entity_id": "light.ceiling_lights"}, blocking=True
)
events = []
hass.bus.async_listen(EVENT_COMMAND_RECEIVED, events.append)
service_events = []
hass.bus.async_listen(EVENT_CALL_SERVICE, service_events.append)
result = await sh.async_handle_message(
hass,
BASIC_CONFIG,
None,
{
"requestId": REQ_ID,
"inputs": [
{
"intent": "action.devices.EXECUTE",
"payload": {
"commands": [
{
"devices": [
{"id": "light.non_existing"},
{"id": "light.ceiling_lights"},
],
"execution": [
{
"command": "action.devices.commands.OnOff",
"params": {"on": True},
},
{
"command": "action.devices.commands.BrightnessAbsolute",
"params": {"brightness": 20},
},
],
}
]
},
}
],
},
const.SOURCE_CLOUD,
)
assert result == {
"requestId": REQ_ID,
"payload": {
"commands": [
{
"ids": ["light.non_existing"],
"status": "ERROR",
"errorCode": "deviceOffline",
},
{
"ids": ["light.ceiling_lights"],
"status": "SUCCESS",
"states": {
"on": True,
"online": True,
"brightness": 20,
"color": {
"spectrumHsv": {
"hue": 56,
"saturation": 0.86,
"value": 0.2,
},
"temperatureK": 2631,
},
},
},
]
},
}
assert len(events) == 4
assert events[0].event_type == EVENT_COMMAND_RECEIVED
assert events[0].data == {
"request_id": REQ_ID,
"entity_id": "light.non_existing",
"execution": {
"command": "action.devices.commands.OnOff",
"params": {"on": True},
},
"source": "cloud",
}
assert events[1].event_type == EVENT_COMMAND_RECEIVED
assert events[1].data == {
"request_id": REQ_ID,
"entity_id": "light.non_existing",
"execution": {
"command": "action.devices.commands.BrightnessAbsolute",
"params": {"brightness": 20},
},
"source": "cloud",
}
assert events[2].event_type == EVENT_COMMAND_RECEIVED
assert events[2].data == {
"request_id": REQ_ID,
"entity_id": "light.ceiling_lights",
"execution": {
"command": "action.devices.commands.OnOff",
"params": {"on": True},
},
"source": "cloud",
}
assert events[3].event_type == EVENT_COMMAND_RECEIVED
assert events[3].data == {
"request_id": REQ_ID,
"entity_id": "light.ceiling_lights",
"execution": {
"command": "action.devices.commands.BrightnessAbsolute",
"params": {"brightness": 20},
},
"source": "cloud",
}
assert len(service_events) == 2
assert service_events[0].data == {
"domain": "light",
"service": "turn_on",
"service_data": {"entity_id": "light.ceiling_lights"},
}
assert service_events[0].context == events[2].context
assert service_events[1].data == {
"domain": "light",
"service": "turn_on",
"service_data": {"brightness_pct": 20, "entity_id": "light.ceiling_lights"},
}
assert service_events[1].context == events[2].context
assert service_events[1].context == events[3].context
async def test_raising_error_trait(hass):
"""Test raising an error while executing a trait command."""
hass.states.async_set(
"climate.bla",
HVAC_MODE_HEAT,
{ATTR_MIN_TEMP: 15, ATTR_MAX_TEMP: 30, ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS},
)
events = []
hass.bus.async_listen(EVENT_COMMAND_RECEIVED, events.append)
await hass.async_block_till_done()
result = await sh.async_handle_message(
hass,
BASIC_CONFIG,
"test-agent",
{
"requestId": REQ_ID,
"inputs": [
{
"intent": "action.devices.EXECUTE",
"payload": {
"commands": [
{
"devices": [{"id": "climate.bla"}],
"execution": [
{
"command": "action.devices.commands."
"ThermostatTemperatureSetpoint",
"params": {"thermostatTemperatureSetpoint": 10},
}
],
}
]
},
}
],
},
const.SOURCE_CLOUD,
)
assert result == {
"requestId": REQ_ID,
"payload": {
"commands": [
{
"ids": ["climate.bla"],
"status": "ERROR",
"errorCode": "valueOutOfRange",
}
]
},
}
assert len(events) == 1
assert events[0].event_type == EVENT_COMMAND_RECEIVED
assert events[0].data == {
"request_id": REQ_ID,
"entity_id": "climate.bla",
"execution": {
"command": "action.devices.commands.ThermostatTemperatureSetpoint",
"params": {"thermostatTemperatureSetpoint": 10},
},
"source": "cloud",
}
async def test_serialize_input_boolean(hass):
"""Test serializing an input boolean entity."""
state = State("input_boolean.bla", "on")
# pylint: disable=protected-access
entity = sh.GoogleEntity(hass, BASIC_CONFIG, state)
result = await entity.sync_serialize(None)
assert result == {
"id": "input_boolean.bla",
"attributes": {},
"name": {"name": "bla"},
"traits": ["action.devices.traits.OnOff"],
"type": "action.devices.types.SWITCH",
"willReportState": False,
}
async def test_unavailable_state_does_sync(hass):
"""Test that an unavailable entity does sync over."""
light = DemoLight(None, "Demo Light", state=False, hs_color=(180, 75))
light.hass = hass
light.entity_id = "light.demo_light"
light._available = False # pylint: disable=protected-access
await light.async_update_ha_state()
events = []
hass.bus.async_listen(EVENT_SYNC_RECEIVED, events.append)
result = await sh.async_handle_message(
hass,
BASIC_CONFIG,
"test-agent",
{"requestId": REQ_ID, "inputs": [{"intent": "action.devices.SYNC"}]},
const.SOURCE_CLOUD,
)
assert result == {
"requestId": REQ_ID,
"payload": {
"agentUserId": "test-agent",
"devices": [
{
"id": "light.demo_light",
"name": {"name": "Demo Light"},
"traits": [
trait.TRAIT_BRIGHTNESS,
trait.TRAIT_ONOFF,
trait.TRAIT_COLOR_SETTING,
],
"type": const.TYPE_LIGHT,
"willReportState": False,
"attributes": {
"colorModel": "hsv",
"colorTemperatureRange": {
"temperatureMinK": 2000,
"temperatureMaxK": 6535,
},
},
}
],
},
}
await hass.async_block_till_done()
assert len(events) == 1
assert events[0].event_type == EVENT_SYNC_RECEIVED
assert events[0].data == {"request_id": REQ_ID, "source": "cloud"}
@pytest.mark.parametrize(
"device_class,google_type",
[
("non_existing_class", "action.devices.types.SWITCH"),
("switch", "action.devices.types.SWITCH"),
("outlet", "action.devices.types.OUTLET"),
],
)
async def test_device_class_switch(hass, device_class, google_type):
    """Test that a switch entity syncs to the correct device type."""
sensor = DemoSwitch(
None,
"Demo Sensor",
state=False,
icon="mdi:switch",
assumed=False,
device_class=device_class,
)
sensor.hass = hass
sensor.entity_id = "switch.demo_sensor"
await sensor.async_update_ha_state()
result = await sh.async_handle_message(
hass,
BASIC_CONFIG,
"test-agent",
{"requestId": REQ_ID, "inputs": [{"intent": "action.devices.SYNC"}]},
const.SOURCE_CLOUD,
)
assert result == {
"requestId": REQ_ID,
"payload": {
"agentUserId": "test-agent",
"devices": [
{
"attributes": {},
"id": "switch.demo_sensor",
"name": {"name": "Demo Sensor"},
"traits": ["action.devices.traits.OnOff"],
"type": google_type,
"willReportState": False,
}
],
},
}
@pytest.mark.parametrize(
"device_class,google_type",
[
("door", "action.devices.types.DOOR"),
("garage_door", "action.devices.types.GARAGE"),
("lock", "action.devices.types.SENSOR"),
("opening", "action.devices.types.SENSOR"),
("window", "action.devices.types.SENSOR"),
],
)
async def test_device_class_binary_sensor(hass, device_class, google_type):
    """Test that a binary sensor entity syncs to the correct device type."""
sensor = DemoBinarySensor(
None, "Demo Sensor", state=False, device_class=device_class
)
sensor.hass = hass
sensor.entity_id = "binary_sensor.demo_sensor"
await sensor.async_update_ha_state()
result = await sh.async_handle_message(
hass,
BASIC_CONFIG,
"test-agent",
{"requestId": REQ_ID, "inputs": [{"intent": "action.devices.SYNC"}]},
const.SOURCE_CLOUD,
)
assert result == {
"requestId": REQ_ID,
"payload": {
"agentUserId": "test-agent",
"devices": [
{
"attributes": {"queryOnlyOpenClose": True},
"id": "binary_sensor.demo_sensor",
"name": {"name": "Demo Sensor"},
"traits": ["action.devices.traits.OpenClose"],
"type": google_type,
"willReportState": False,
}
],
},
}
@pytest.mark.parametrize(
"device_class,google_type",
[
("non_existing_class", "action.devices.types.BLINDS"),
("door", "action.devices.types.DOOR"),
("garage", "action.devices.types.GARAGE"),
],
)
async def test_device_class_cover(hass, device_class, google_type):
    """Test that a cover entity syncs to the correct device type."""
sensor = DemoCover(None, hass, "Demo Sensor", device_class=device_class)
sensor.hass = hass
sensor.entity_id = "cover.demo_sensor"
await sensor.async_update_ha_state()
result = await sh.async_handle_message(
hass,
BASIC_CONFIG,
"test-agent",
{"requestId": REQ_ID, "inputs": [{"intent": "action.devices.SYNC"}]},
const.SOURCE_CLOUD,
)
assert result == {
"requestId": REQ_ID,
"payload": {
"agentUserId": "test-agent",
"devices": [
{
"attributes": {},
"id": "cover.demo_sensor",
"name": {"name": "Demo Sensor"},
"traits": ["action.devices.traits.OpenClose"],
"type": google_type,
"willReportState": False,
}
],
},
}
@pytest.mark.parametrize(
"device_class,google_type",
[
("non_existing_class", "action.devices.types.SWITCH"),
("tv", "action.devices.types.TV"),
],
)
async def test_device_media_player(hass, device_class, google_type):
    """Test that a media player entity syncs to the correct device type."""
sensor = AbstractDemoPlayer("Demo", device_class=device_class)
sensor.hass = hass
sensor.entity_id = "media_player.demo"
await sensor.async_update_ha_state()
result = await sh.async_handle_message(
hass,
BASIC_CONFIG,
"test-agent",
{"requestId": REQ_ID, "inputs": [{"intent": "action.devices.SYNC"}]},
const.SOURCE_CLOUD,
)
assert result == {
"requestId": REQ_ID,
"payload": {
"agentUserId": "test-agent",
"devices": [
{
"attributes": {},
"id": sensor.entity_id,
"name": {"name": sensor.name},
"traits": ["action.devices.traits.OnOff"],
"type": google_type,
"willReportState": False,
}
],
},
}
async def test_query_disconnect(hass):
"""Test a disconnect message."""
config = MockConfig(hass=hass)
config.async_enable_report_state()
assert config._unsub_report_state is not None
with patch.object(
config, "async_disconnect_agent_user", side_effect=mock_coro
) as mock_disconnect:
result = await sh.async_handle_message(
hass,
config,
"test-agent",
{"inputs": [{"intent": "action.devices.DISCONNECT"}], "requestId": REQ_ID},
const.SOURCE_CLOUD,
)
assert result is None
assert len(mock_disconnect.mock_calls) == 1
async def test_trait_execute_adding_query_data(hass):
"""Test a trait execute influencing query data."""
hass.config.api = Mock(base_url="http://1.1.1.1:8123")
hass.states.async_set(
"camera.office", "idle", {"supported_features": camera.SUPPORT_STREAM}
)
with patch(
"homeassistant.components.camera.async_request_stream",
return_value=mock_coro("/api/streams/bla"),
):
result = await sh.async_handle_message(
hass,
BASIC_CONFIG,
None,
{
"requestId": REQ_ID,
"inputs": [
{
"intent": "action.devices.EXECUTE",
"payload": {
"commands": [
{
"devices": [{"id": "camera.office"}],
"execution": [
{
"command": "action.devices.commands.GetCameraStream",
"params": {
"StreamToChromecast": True,
"SupportedStreamProtocols": [
"progressive_mp4",
"hls",
"dash",
"smooth_stream",
],
},
}
],
}
]
},
}
],
},
const.SOURCE_CLOUD,
)
assert result == {
"requestId": REQ_ID,
"payload": {
"commands": [
{
"ids": ["camera.office"],
"status": "SUCCESS",
"states": {
"online": True,
"cameraStreamAccessUrl": "http://1.1.1.1:8123/api/streams/bla",
},
}
]
},
}
async def test_identify(hass):
"""Test identify message."""
user_agent_id = "mock-user-id"
proxy_device_id = user_agent_id
result = await sh.async_handle_message(
hass,
BASIC_CONFIG,
user_agent_id,
{
"requestId": REQ_ID,
"inputs": [
{
"intent": "action.devices.IDENTIFY",
"payload": {
"device": {
"mdnsScanData": {
"additionals": [
{
"type": "TXT",
"class": "IN",
"name": "devhome._home-assistant._tcp.local",
"ttl": 4500,
"data": [
"version=0.101.0.dev0",
"base_url=http://192.168.1.101:8123",
"requires_api_password=true",
],
}
]
}
},
"structureData": {},
},
}
],
"devices": [
{
"id": "light.ceiling_lights",
"customData": {
"httpPort": 8123,
"httpSSL": False,
"proxyDeviceId": proxy_device_id,
"webhookId": "dde3b9800a905e886cc4d38e226a6e7e3f2a6993d2b9b9f63d13e42ee7de3219",
},
}
],
},
const.SOURCE_CLOUD,
)
assert result == {
"requestId": REQ_ID,
"payload": {
"device": {
"id": proxy_device_id,
"isLocalOnly": True,
"isProxy": True,
"deviceInfo": {
"hwVersion": "UNKNOWN_HW_VERSION",
"manufacturer": "Home Assistant",
"model": "Home Assistant",
"swVersion": __version__,
},
}
},
}
async def test_reachable_devices(hass):
"""Test REACHABLE_DEVICES intent."""
# Matching passed in device.
hass.states.async_set("light.ceiling_lights", "on")
# Unsupported entity
hass.states.async_set("not_supported.entity", "something")
# Excluded via config
hass.states.async_set("light.not_expose", "on")
# Not passed in as google_id
hass.states.async_set("light.not_mentioned", "on")
# Has 2FA
hass.states.async_set("lock.has_2fa", "on")
config = MockConfig(
should_expose=lambda state: state.entity_id != "light.not_expose",
)
user_agent_id = "mock-user-id"
proxy_device_id = user_agent_id
result = await sh.async_handle_message(
hass,
config,
user_agent_id,
{
"requestId": REQ_ID,
"inputs": [
{
"intent": "action.devices.REACHABLE_DEVICES",
"payload": {
"device": {
"proxyDevice": {
"id": proxy_device_id,
"customData": "{}",
"proxyData": "{}",
}
},
"structureData": {},
},
}
],
"devices": [
{
"id": "light.ceiling_lights",
"customData": {
"httpPort": 8123,
"httpSSL": False,
"proxyDeviceId": proxy_device_id,
"webhookId": "dde3b9800a905e886cc4d38e226a6e7e3f2a6993d2b9b9f63d13e42ee7de3219",
},
},
{
"id": "light.not_expose",
"customData": {
"httpPort": 8123,
"httpSSL": False,
"proxyDeviceId": proxy_device_id,
"webhookId": "dde3b9800a905e886cc4d38e226a6e7e3f2a6993d2b9b9f63d13e42ee7de3219",
},
},
{
"id": "lock.has_2fa",
"customData": {
"httpPort": 8123,
"httpSSL": False,
"proxyDeviceId": proxy_device_id,
"webhookId": "dde3b9800a905e886cc4d38e226a6e7e3f2a6993d2b9b9f63d13e42ee7de3219",
},
},
{"id": proxy_device_id, "customData": {}},
],
},
const.SOURCE_CLOUD,
)
assert result == {
"requestId": REQ_ID,
"payload": {"devices": [{"verificationId": "light.ceiling_lights"}]},
}
|
|
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from tensorflow import nest
from autokeras.blocks import basic
from autokeras.blocks import preprocessing
from autokeras.blocks import reduction
from autokeras.engine import block as block_module
BLOCK_TYPE = "block_type"
RESNET = "resnet"
XCEPTION = "xception"
VANILLA = "vanilla"
EFFICIENT = "efficient"
NORMALIZE = "normalize"
AUGMENT = "augment"
TRANSFORMER = "transformer"
MAX_TOKENS = "max_tokens"
NGRAM = "ngram"
BERT = "bert"
class ImageBlock(block_module.Block):
"""Block for image data.
    The image block is a block choosing from ResNetBlock, XceptionBlock, ConvBlock
    and EfficientNetBlock, which is controlled by the hyperparameter 'block_type'.
    # Arguments
        block_type: String. 'resnet', 'xception', 'vanilla' or 'efficient'. The type
            of Block to use. If unspecified, it will be tuned automatically.
normalize: Boolean. Whether to channel-wise normalize the images.
If unspecified, it will be tuned automatically.
augment: Boolean. Whether to do image augmentation. If unspecified,
it will be tuned automatically.
"""
def __init__(
self,
block_type: Optional[str] = None,
normalize: Optional[bool] = None,
augment: Optional[bool] = None,
**kwargs
):
super().__init__(**kwargs)
self.block_type = block_type
self.normalize = normalize
self.augment = augment
def get_config(self):
config = super().get_config()
config.update(
{
BLOCK_TYPE: self.block_type,
NORMALIZE: self.normalize,
AUGMENT: self.augment,
}
)
return config
def _build_block(self, hp, output_node, block_type):
if block_type == RESNET:
return basic.ResNetBlock().build(hp, output_node)
elif block_type == XCEPTION:
return basic.XceptionBlock().build(hp, output_node)
elif block_type == VANILLA:
return basic.ConvBlock().build(hp, output_node)
elif block_type == EFFICIENT:
return basic.EfficientNetBlock().build(hp, output_node)
def build(self, hp, inputs=None):
input_node = nest.flatten(inputs)[0]
output_node = input_node
if self.normalize is None and hp.Boolean(NORMALIZE):
with hp.conditional_scope(NORMALIZE, [True]):
output_node = preprocessing.Normalization().build(hp, output_node)
elif self.normalize:
output_node = preprocessing.Normalization().build(hp, output_node)
if self.augment is None and hp.Boolean(AUGMENT):
with hp.conditional_scope(AUGMENT, [True]):
output_node = preprocessing.ImageAugmentation().build(
hp, output_node
)
elif self.augment:
output_node = preprocessing.ImageAugmentation().build(hp, output_node)
if self.block_type is None:
block_type = hp.Choice(
BLOCK_TYPE, [RESNET, XCEPTION, VANILLA, EFFICIENT]
)
with hp.conditional_scope(BLOCK_TYPE, [block_type]):
output_node = self._build_block(hp, output_node, block_type)
else:
output_node = self._build_block(hp, output_node, self.block_type)
return output_node
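# Illustrative usage sketch, not part of this module. It assumes the public
# autokeras API (ak.ImageInput, ak.ClassificationHead, ak.AutoModel) and
# hypothetical training data x_train/y_train:
#
#   import autokeras as ak
#
#   input_node = ak.ImageInput()
#   output_node = ak.ImageBlock(
#       block_type="resnet", normalize=True, augment=False
#   )(input_node)
#   output_node = ak.ClassificationHead()(output_node)
#   auto_model = ak.AutoModel(inputs=input_node, outputs=output_node, max_trials=1)
#   auto_model.fit(x_train, y_train, epochs=2)
#
# Fixing block_type, normalize and augment removes those choices from the search
# space; leaving any of them as None lets the tuner pick a value per trial.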
class TextBlock(block_module.Block):
"""Block for text data.
# Arguments
        block_type: String. 'vanilla', 'transformer', 'ngram' or 'bert'. The type of
            Block to use. 'vanilla' and 'transformer' use a TextToIntSequence
            vectorizer, 'ngram' uses TextToNgramVector, and 'bert' uses a BertBlock.
            If unspecified, it will be tuned automatically.
max_tokens: Int. The maximum size of the vocabulary.
If left unspecified, it will be tuned automatically.
        pretraining: String. 'random' (use random weights instead of any pretrained
            model), 'glove', 'fasttext' or 'word2vec'. Use pretrained word embeddings.
            If left unspecified, it will be tuned automatically.
"""
def __init__(
self,
block_type: Optional[str] = None,
max_tokens: Optional[int] = None,
pretraining: Optional[str] = None,
**kwargs
):
super().__init__(**kwargs)
self.block_type = block_type
self.max_tokens = max_tokens
self.pretraining = pretraining
def get_config(self):
config = super().get_config()
config.update(
{
BLOCK_TYPE: self.block_type,
MAX_TOKENS: self.max_tokens,
"pretraining": self.pretraining,
}
)
return config
def build(self, hp, inputs=None):
input_node = nest.flatten(inputs)[0]
output_node = input_node
if self.block_type is None:
block_type = hp.Choice(BLOCK_TYPE, [VANILLA, TRANSFORMER, NGRAM, BERT])
with hp.conditional_scope(BLOCK_TYPE, [block_type]):
output_node = self._build_block(hp, output_node, block_type)
else:
output_node = self._build_block(hp, output_node, self.block_type)
return output_node
def _build_block(self, hp, output_node, block_type):
max_tokens = self.max_tokens or hp.Choice(
MAX_TOKENS, [500, 5000, 20000], default=5000
)
if block_type == NGRAM:
output_node = preprocessing.TextToNgramVector(
max_tokens=max_tokens
).build(hp, output_node)
return basic.DenseBlock().build(hp, output_node)
if block_type == BERT:
output_node = basic.BertBlock().build(hp, output_node)
else:
output_node = preprocessing.TextToIntSequence(
max_tokens=max_tokens
).build(hp, output_node)
if block_type == TRANSFORMER:
output_node = basic.Transformer(
max_features=max_tokens + 1,
pretraining=self.pretraining,
).build(hp, output_node)
else:
output_node = basic.Embedding(
max_features=max_tokens + 1,
pretraining=self.pretraining,
).build(hp, output_node)
output_node = basic.ConvBlock().build(hp, output_node)
output_node = reduction.SpatialReduction().build(hp, output_node)
output_node = basic.DenseBlock().build(hp, output_node)
return output_node
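# Illustrative usage sketch, not part of this module. It assumes the public
# autokeras API (ak.TextInput, ak.ClassificationHead, ak.AutoModel):
#
#   import autokeras as ak
#
#   input_node = ak.TextInput()
#   output_node = ak.TextBlock(block_type="ngram", max_tokens=5000)(input_node)
#   output_node = ak.ClassificationHead()(output_node)
#   auto_model = ak.AutoModel(inputs=input_node, outputs=output_node, max_trials=1)
#
# With block_type="ngram" the text goes through TextToNgramVector into a
# DenseBlock; "vanilla" and "transformer" go through TextToIntSequence instead,
# and "bert" uses a BertBlock directly.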
class StructuredDataBlock(block_module.Block):
"""Block for structured data.
# Arguments
categorical_encoding: Boolean. Whether to use the CategoricalToNumerical to
encode the categorical features to numerical features. Defaults to True.
normalize: Boolean. Whether to normalize the features.
If unspecified, it will be tuned automatically.
seed: Int. Random seed.
"""
def __init__(
self,
categorical_encoding: bool = True,
normalize: Optional[bool] = None,
seed: Optional[int] = None,
**kwargs
):
super().__init__(**kwargs)
self.categorical_encoding = categorical_encoding
self.normalize = normalize
self.seed = seed
self.column_types = None
self.column_names = None
@classmethod
def from_config(cls, config):
column_types = config.pop("column_types")
column_names = config.pop("column_names")
instance = cls(**config)
instance.column_types = column_types
instance.column_names = column_names
return instance
def get_config(self):
config = super().get_config()
config.update(
{
"categorical_encoding": self.categorical_encoding,
"normalize": self.normalize,
"seed": self.seed,
"column_types": self.column_types,
"column_names": self.column_names,
}
)
return config
def build(self, hp, inputs=None):
input_node = nest.flatten(inputs)[0]
output_node = input_node
if self.categorical_encoding:
block = preprocessing.CategoricalToNumerical()
block.column_types = self.column_types
block.column_names = self.column_names
output_node = block.build(hp, output_node)
if self.normalize is None and hp.Boolean(NORMALIZE):
with hp.conditional_scope(NORMALIZE, [True]):
output_node = preprocessing.Normalization().build(hp, output_node)
elif self.normalize:
output_node = preprocessing.Normalization().build(hp, output_node)
output_node = basic.DenseBlock().build(hp, output_node)
return output_node
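# Illustrative sketch, not part of this module: get_config()/from_config() round-trip
# the column metadata that build() relies on. The column names/types shown here are
# made up; in a real pipeline they are typically filled in by the surrounding
# StructuredDataInput/AutoModel machinery before the block is built.
#
#   block = StructuredDataBlock(categorical_encoding=True)
#   block.column_names = ["age", "city"]
#   block.column_types = {"age": "numerical", "city": "categorical"}
#   restored = StructuredDataBlock.from_config(block.get_config())
#   assert restored.column_types == block.column_types
#   assert restored.column_names == block.column_names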
class TimeseriesBlock(block_module.Block):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def get_config(self):
return super().get_config()
def build(self, hp, inputs=None):
input_node = nest.flatten(inputs)[0]
output_node = input_node
output_node = basic.RNNBlock().build(hp, output_node)
return output_node
class GeneralBlock(block_module.Block):
"""A general neural network block when the input type is unknown.
    When the input type is unknown, the GeneralBlock searches a large space
    for a good model.
# Arguments
name: String.
"""
def build(self, hp, inputs=None):
raise NotImplementedError
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Iterator ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import session_run_hook
def make_saveable_from_iterator(iterator):
  """Returns a SaveableObject for saving/restoring iterator state using Saver.
Args:
iterator: Iterator.
For example:
```python
with tf.Graph().as_default():
ds = tf.data.Dataset.range(10)
iterator = ds.make_initializable_iterator()
# Build the iterator SaveableObject.
saveable_obj = tf.contrib.data.make_saveable_from_iterator(iterator)
# Add the SaveableObject to the SAVEABLE_OBJECTS collection so
# it can be automatically saved using Saver.
tf.add_to_collection(tf.GraphKeys.SAVEABLE_OBJECTS, saveable_obj)
saver = tf.train.Saver()
while continue_training:
... Perform training ...
if should_save_checkpoint:
saver.save()
```
Note: When restoring the iterator, the existing iterator state is completely
discarded. This means that any changes you may have made to the Dataset
graph will be discarded as well! This includes the new Dataset graph
that you may have built during validation. So, while running validation,
make sure to run the initializer for the validation input pipeline after
restoring the checkpoint.
Note: Not all iterators support checkpointing yet. Attempting to save the
state of an unsupported iterator will throw an error.
"""
return _Saveable(iterator._iterator_resource) # pylint: disable=protected-access
class _Saveable(saver_lib.BaseSaverBuilder.SaveableObject):
"""SaveableObject for saving/restoring iterator state."""
def __init__(self, iterator_resource):
serialized_iterator = gen_dataset_ops.serialize_iterator(iterator_resource)
specs = [
saver_lib.BaseSaverBuilder.SaveSpec(serialized_iterator, "",
iterator_resource.name + "-state")
]
super(_Saveable, self).__init__(iterator_resource, specs,
iterator_resource.name)
def restore(self, restored_tensors, unused_restored_shapes):
with ops.colocate_with(self.op):
return gen_dataset_ops.deserialize_iterator(self.op, restored_tensors[0])
class CheckpointInputPipelineHook(session_run_hook.SessionRunHook):
"""Checkpoints input pipeline state every N steps or seconds.
This hook saves the state of the iterators in the `Graph` so that when
training is resumed the input pipeline continues from where it left off.
This could potentially avoid overfitting in certain pipelines where the
  number of training steps per eval is small compared to the dataset
size or if the training pipeline is pre-empted.
Differences from `CheckpointSaverHook`:
1. Saves only the input pipelines in the "iterators" collection and not the
global variables or other saveable objects.
2. Does not write the `GraphDef` and `MetaGraphDef` to the summary.
Example of checkpointing the training pipeline:
```python
est = tf.estimator.Estimator(model_fn)
while True:
est.train(
train_input_fn,
hooks=[tf.contrib.data.CheckpointInputPipelineHook(est)],
steps=train_steps_per_eval)
# Note: We do not pass the hook here.
metrics = est.evaluate(eval_input_fn)
if should_stop_the_training(metrics):
break
```
This hook should be used if the input pipeline state needs to be saved
separate from the model checkpoint. Doing so may be useful for a few reasons:
1. The input pipeline checkpoint may be large, if there are large shuffle
or prefetch buffers for instance, and may bloat the checkpoint size.
2. If the input pipeline is shared between training and validation, restoring
the checkpoint during validation may override the validation input
pipeline.
For saving the input pipeline checkpoint alongside the model weights use
@{tf.contrib.data.make_saveable_from_iterator} directly to create a
`SaveableObject` and add to the `SAVEABLE_OBJECTS` collection. Note, however,
that you will need to be careful not to restore the training iterator during
eval. You can do that by not adding the iterator to the SAVEABLE_OBJECTS
  collection when building the eval graph.
"""
def __init__(self, estimator):
"""Initializes a `CheckpointInputPipelineHook`.
Args:
estimator: Estimator.
Raises:
ValueError: One of `save_steps` or `save_secs` should be set.
ValueError: At most one of saver or scaffold should be set.
"""
# `checkpoint_basename` is "input.ckpt" for non-distributed pipelines or
# of the form "input_<task_type>_<task_id>.ckpt" for distributed pipelines.
# Note: The default `checkpoint_basename` used by `CheckpointSaverHook` is
# "model.ckpt". We intentionally choose the input pipeline checkpoint prefix
# to be different to avoid conflicts with the model checkpoint.
# pylint: disable=protected-access
checkpoint_prefix = "input"
if estimator._config.num_worker_replicas > 1:
# Distributed setting.
suffix = "_{}_{}".format(estimator._config.task_type,
estimator._config.task_id)
checkpoint_prefix += suffix
# pylint: enable=protected-access
# We use a composition paradigm instead of inheriting from
# `CheckpointSaverHook` because `Estimator` does an `isinstance` check
# to check whether a `CheckpointSaverHook` is already present in the list
# of hooks and if not, adds one. Inheriting from `CheckpointSaverHook`
# would thwart this behavior. This hook checkpoints *only the iterators*
# and not the graph variables.
self._checkpoint_saver_hook = basic_session_run_hooks.CheckpointSaverHook(
estimator.model_dir,
save_secs=estimator._config.save_checkpoints_secs, # pylint: disable=protected-access
save_steps=estimator._config.save_checkpoints_steps, # pylint: disable=protected-access
checkpoint_basename=checkpoint_prefix + ".ckpt")
# Name for the protocol buffer file that will contain the list of most
# recent checkpoints stored as a `CheckpointState` protocol buffer.
# This file, kept in the same directory as the checkpoint files, is
# automatically managed by the `Saver` to keep track of recent checkpoints.
# The default name used by the `Saver` for this file is "checkpoint". Here
# we use the name "checkpoint_<checkpoint_prefix>" so that in case the
# `checkpoint_dir` is the same as the model checkpoint directory, there are
# no conflicts during restore.
self._latest_filename = "checkpoint_" + checkpoint_prefix
self._first_run = True
def begin(self):
# Build a Saver that saves all iterators in the `GLOBAL_ITERATORS`
# collection if no `Saver` or `Scaffold` is provided.
# pylint: disable=protected-access
if (self._checkpoint_saver_hook._saver is None and
self._checkpoint_saver_hook._scaffold is None):
iterators = ops.get_collection(iterator_ops.GLOBAL_ITERATORS)
saveables = [_Saveable(i) for i in iterators]
self._checkpoint_saver_hook._saver = _CustomSaver(saveables,
self._latest_filename)
# pylint: enable=protected-access
self._checkpoint_saver_hook.begin()
def _restore_or_save_initial_ckpt(self, session):
# Ideally this should be run in after_create_session but is not for the
# following reason:
# Currently there is no way of enforcing an order of running the
# `SessionRunHooks`. Hence it is possible that the `_DatasetInitializerHook`
# is run *after* this hook. That is troublesome because
# 1. If a checkpoint exists and this hook restores it, the initializer hook
# will override it.
# 2. If no checkpoint exists, this hook will try to save an initialized
# iterator which will result in an exception.
#
# As a temporary fix we enter the following implicit contract between this
# hook and the _DatasetInitializerHook.
# 1. The _DatasetInitializerHook initializes the iterator in the call to
# after_create_session.
# 2. This hook saves the iterator on the first call to `before_run()`, which
# is guaranteed to happen after `after_create_session()` of all hooks
# have been run.
# Check if there is an existing checkpoint. If so, restore from it.
# pylint: disable=protected-access
latest_checkpoint_path = checkpoint_management.latest_checkpoint(
self._checkpoint_saver_hook._checkpoint_dir,
latest_filename=self._latest_filename)
if latest_checkpoint_path:
self._checkpoint_saver_hook._get_saver().restore(session,
latest_checkpoint_path)
else:
# The checkpoint saved here is the state at step "global_step".
# Note: We do not save the GraphDef or MetaGraphDef here.
global_step = session.run(self._checkpoint_saver_hook._global_step_tensor)
self._checkpoint_saver_hook._save(session, global_step)
self._checkpoint_saver_hook._timer.update_last_triggered_step(global_step)
# pylint: enable=protected-access
def before_run(self, run_context):
if self._first_run:
self._restore_or_save_initial_ckpt(run_context.session)
self._first_run = False
return self._checkpoint_saver_hook.before_run(run_context)
def after_run(self, run_context, run_values):
self._checkpoint_saver_hook.after_run(run_context, run_values)
def end(self, session):
self._checkpoint_saver_hook.end(session)
class _CustomSaver(saver_lib.Saver):
"""`Saver` with a different default `latest_filename`.
This is used in the `CheckpointInputPipelineHook` to avoid conflicts with
the model ckpt saved by the `CheckpointSaverHook`.
"""
def __init__(self, var_list, latest_filename):
super(_CustomSaver, self).__init__(var_list)
self._latest_filename = latest_filename
def save(self,
sess,
save_path,
global_step=None,
latest_filename=None,
meta_graph_suffix="meta",
write_meta_graph=True,
write_state=True,
strip_default_attrs=False):
return super(_CustomSaver, self).save(
sess, save_path, global_step, latest_filename or self._latest_filename,
meta_graph_suffix, write_meta_graph, write_state, strip_default_attrs)
|
|
import os
import re
import sys
import textwrap
import pytest
from _pytest.monkeypatch import MonkeyPatch
@pytest.fixture
def mp():
cwd = os.getcwd()
sys_path = list(sys.path)
yield MonkeyPatch()
sys.path[:] = sys_path
os.chdir(cwd)
def test_setattr():
class A:
x = 1
monkeypatch = MonkeyPatch()
pytest.raises(AttributeError, monkeypatch.setattr, A, "notexists", 2)
monkeypatch.setattr(A, "y", 2, raising=False)
assert A.y == 2
monkeypatch.undo()
assert not hasattr(A, "y")
monkeypatch = MonkeyPatch()
monkeypatch.setattr(A, "x", 2)
assert A.x == 2
monkeypatch.setattr(A, "x", 3)
assert A.x == 3
monkeypatch.undo()
assert A.x == 1
A.x = 5
monkeypatch.undo() # double-undo makes no modification
assert A.x == 5
class TestSetattrWithImportPath:
def test_string_expression(self, monkeypatch):
monkeypatch.setattr("os.path.abspath", lambda x: "hello2")
assert os.path.abspath("123") == "hello2"
def test_string_expression_class(self, monkeypatch):
monkeypatch.setattr("_pytest.config.Config", 42)
import _pytest
assert _pytest.config.Config == 42
def test_unicode_string(self, monkeypatch):
monkeypatch.setattr("_pytest.config.Config", 42)
import _pytest
assert _pytest.config.Config == 42
monkeypatch.delattr("_pytest.config.Config")
def test_wrong_target(self, monkeypatch):
pytest.raises(TypeError, lambda: monkeypatch.setattr(None, None))
def test_unknown_import(self, monkeypatch):
pytest.raises(ImportError, lambda: monkeypatch.setattr("unkn123.classx", None))
def test_unknown_attr(self, monkeypatch):
pytest.raises(
AttributeError, lambda: monkeypatch.setattr("os.path.qweqwe", None)
)
def test_unknown_attr_non_raising(self, monkeypatch):
# https://github.com/pytest-dev/pytest/issues/746
monkeypatch.setattr("os.path.qweqwe", 42, raising=False)
assert os.path.qweqwe == 42
def test_delattr(self, monkeypatch):
monkeypatch.delattr("os.path.abspath")
assert not hasattr(os.path, "abspath")
monkeypatch.undo()
assert os.path.abspath
def test_delattr():
class A:
x = 1
monkeypatch = MonkeyPatch()
monkeypatch.delattr(A, "x")
assert not hasattr(A, "x")
monkeypatch.undo()
assert A.x == 1
monkeypatch = MonkeyPatch()
monkeypatch.delattr(A, "x")
pytest.raises(AttributeError, monkeypatch.delattr, A, "y")
monkeypatch.delattr(A, "y", raising=False)
monkeypatch.setattr(A, "x", 5, raising=False)
assert A.x == 5
monkeypatch.undo()
assert A.x == 1
def test_setitem():
d = {"x": 1}
monkeypatch = MonkeyPatch()
monkeypatch.setitem(d, "x", 2)
monkeypatch.setitem(d, "y", 1700)
monkeypatch.setitem(d, "y", 1700)
assert d["x"] == 2
assert d["y"] == 1700
monkeypatch.setitem(d, "x", 3)
assert d["x"] == 3
monkeypatch.undo()
assert d["x"] == 1
assert "y" not in d
d["x"] = 5
monkeypatch.undo()
assert d["x"] == 5
def test_setitem_deleted_meanwhile():
d = {}
monkeypatch = MonkeyPatch()
monkeypatch.setitem(d, "x", 2)
del d["x"]
monkeypatch.undo()
assert not d
@pytest.mark.parametrize("before", [True, False])
def test_setenv_deleted_meanwhile(before):
key = "qwpeoip123"
if before:
os.environ[key] = "world"
monkeypatch = MonkeyPatch()
monkeypatch.setenv(key, "hello")
del os.environ[key]
monkeypatch.undo()
if before:
assert os.environ[key] == "world"
del os.environ[key]
else:
assert key not in os.environ
def test_delitem():
d = {"x": 1}
monkeypatch = MonkeyPatch()
monkeypatch.delitem(d, "x")
assert "x" not in d
monkeypatch.delitem(d, "y", raising=False)
pytest.raises(KeyError, monkeypatch.delitem, d, "y")
assert not d
monkeypatch.setitem(d, "y", 1700)
assert d["y"] == 1700
d["hello"] = "world"
monkeypatch.setitem(d, "x", 1500)
assert d["x"] == 1500
monkeypatch.undo()
assert d == {"hello": "world", "x": 1}
def test_setenv():
monkeypatch = MonkeyPatch()
with pytest.warns(pytest.PytestWarning):
monkeypatch.setenv("XYZ123", 2)
import os
assert os.environ["XYZ123"] == "2"
monkeypatch.undo()
assert "XYZ123" not in os.environ
def test_delenv():
name = "xyz1234"
assert name not in os.environ
monkeypatch = MonkeyPatch()
pytest.raises(KeyError, monkeypatch.delenv, name, raising=True)
monkeypatch.delenv(name, raising=False)
monkeypatch.undo()
os.environ[name] = "1"
try:
monkeypatch = MonkeyPatch()
monkeypatch.delenv(name)
assert name not in os.environ
monkeypatch.setenv(name, "3")
assert os.environ[name] == "3"
monkeypatch.undo()
assert os.environ[name] == "1"
finally:
if name in os.environ:
del os.environ[name]
class TestEnvironWarnings:
"""
os.environ keys and values should be native strings, otherwise it will cause problems with other modules (notably
subprocess). On Python 2 os.environ accepts anything without complaining, while Python 3 does the right thing
and raises an error.
"""
VAR_NAME = "PYTEST_INTERNAL_MY_VAR"
def test_setenv_non_str_warning(self, monkeypatch):
value = 2
msg = (
"Value of environment variable PYTEST_INTERNAL_MY_VAR type should be str, "
"but got 2 (type: int); converted to str implicitly"
)
with pytest.warns(pytest.PytestWarning, match=re.escape(msg)):
monkeypatch.setenv(str(self.VAR_NAME), value)
def test_setenv_prepend():
import os
monkeypatch = MonkeyPatch()
with pytest.warns(pytest.PytestWarning):
monkeypatch.setenv("XYZ123", 2, prepend="-")
assert os.environ["XYZ123"] == "2"
with pytest.warns(pytest.PytestWarning):
monkeypatch.setenv("XYZ123", 3, prepend="-")
assert os.environ["XYZ123"] == "3-2"
monkeypatch.undo()
assert "XYZ123" not in os.environ
def test_monkeypatch_plugin(testdir):
reprec = testdir.inline_runsource(
"""
def test_method(monkeypatch):
assert monkeypatch.__class__.__name__ == "MonkeyPatch"
"""
)
res = reprec.countoutcomes()
assert tuple(res) == (1, 0, 0), res
def test_syspath_prepend(mp):
old = list(sys.path)
mp.syspath_prepend("world")
mp.syspath_prepend("hello")
assert sys.path[0] == "hello"
assert sys.path[1] == "world"
mp.undo()
assert sys.path == old
mp.undo()
assert sys.path == old
def test_syspath_prepend_double_undo(mp):
old_syspath = sys.path[:]
try:
mp.syspath_prepend("hello world")
mp.undo()
sys.path.append("more hello world")
mp.undo()
assert sys.path[-1] == "more hello world"
finally:
sys.path[:] = old_syspath
def test_chdir_with_path_local(mp, tmpdir):
mp.chdir(tmpdir)
assert os.getcwd() == tmpdir.strpath
def test_chdir_with_str(mp, tmpdir):
mp.chdir(tmpdir.strpath)
assert os.getcwd() == tmpdir.strpath
def test_chdir_undo(mp, tmpdir):
cwd = os.getcwd()
mp.chdir(tmpdir)
mp.undo()
assert os.getcwd() == cwd
def test_chdir_double_undo(mp, tmpdir):
mp.chdir(tmpdir.strpath)
mp.undo()
tmpdir.chdir()
mp.undo()
assert os.getcwd() == tmpdir.strpath
def test_issue185_time_breaks(testdir):
testdir.makepyfile(
"""
import time
def test_m(monkeypatch):
def f():
raise Exception
monkeypatch.setattr(time, "time", f)
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(
"""
*1 passed*
"""
)
def test_importerror(testdir):
p = testdir.mkpydir("package")
p.join("a.py").write(
textwrap.dedent(
"""\
import doesnotexist
x = 1
"""
)
)
testdir.tmpdir.join("test_importerror.py").write(
textwrap.dedent(
"""\
def test_importerror(monkeypatch):
monkeypatch.setattr('package.a.x', 2)
"""
)
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(
"""
*import error in package.a: No module named 'doesnotexist'*
"""
)
class SampleNew:
@staticmethod
def hello():
return True
class SampleNewInherit(SampleNew):
pass
class SampleOld:
# oldstyle on python2
@staticmethod
def hello():
return True
class SampleOldInherit(SampleOld):
pass
@pytest.mark.parametrize(
"Sample",
[SampleNew, SampleNewInherit, SampleOld, SampleOldInherit],
ids=["new", "new-inherit", "old", "old-inherit"],
)
def test_issue156_undo_staticmethod(Sample):
monkeypatch = MonkeyPatch()
monkeypatch.setattr(Sample, "hello", None)
assert Sample.hello is None
monkeypatch.undo()
assert Sample.hello()
def test_undo_class_descriptors_delattr():
class SampleParent:
@classmethod
def hello(_cls):
pass
@staticmethod
def world():
pass
class SampleChild(SampleParent):
pass
monkeypatch = MonkeyPatch()
original_hello = SampleChild.hello
original_world = SampleChild.world
monkeypatch.delattr(SampleParent, "hello")
monkeypatch.delattr(SampleParent, "world")
assert getattr(SampleParent, "hello", None) is None
assert getattr(SampleParent, "world", None) is None
monkeypatch.undo()
assert original_hello == SampleChild.hello
assert original_world == SampleChild.world
def test_issue1338_name_resolving():
pytest.importorskip("requests")
monkeypatch = MonkeyPatch()
try:
monkeypatch.delattr("requests.sessions.Session.request")
finally:
monkeypatch.undo()
def test_context():
monkeypatch = MonkeyPatch()
import functools
import inspect
with monkeypatch.context() as m:
m.setattr(functools, "partial", 3)
assert not inspect.isclass(functools.partial)
assert inspect.isclass(functools.partial)
def test_syspath_prepend_with_namespace_packages(testdir, monkeypatch):
for dirname in "hello", "world":
d = testdir.mkdir(dirname)
ns = d.mkdir("ns_pkg")
ns.join("__init__.py").write(
"__import__('pkg_resources').declare_namespace(__name__)"
)
lib = ns.mkdir(dirname)
lib.join("__init__.py").write("def check(): return %r" % dirname)
monkeypatch.syspath_prepend("hello")
import ns_pkg.hello
assert ns_pkg.hello.check() == "hello"
with pytest.raises(ImportError):
import ns_pkg.world
# Prepending should call fixup_namespace_packages.
monkeypatch.syspath_prepend("world")
import ns_pkg.world
assert ns_pkg.world.check() == "world"
# Should invalidate caches via importlib.invalidate_caches.
tmpdir = testdir.tmpdir
modules_tmpdir = tmpdir.mkdir("modules_tmpdir")
monkeypatch.syspath_prepend(str(modules_tmpdir))
modules_tmpdir.join("main_app.py").write("app = True")
from main_app import app # noqa: F401
|
|
import asyncio
import os
import copy
import shutil
import tempfile
from unittest import mock
from decorator import decorator
import pytest
from tornado import testing
from tornado.platform.asyncio import AsyncIOMainLoop
from waterbutler.core import metadata
from waterbutler.core import provider
from waterbutler.server.app import make_app
from waterbutler.core.path import WaterButlerPath
class MockCoroutine(mock.Mock):
@asyncio.coroutine
def __call__(self, *args, **kwargs):
return super().__call__(*args, **kwargs)
@decorator
def async(func, *args, **kwargs):
future = func(*args, **kwargs)
asyncio.get_event_loop().run_until_complete(future)
class MockFileMetadata(metadata.BaseFileMetadata):
provider = 'mock'
name = 'Foo.name'
size = 1337
etag = 'etag'
path = '/Foo.name'
modified = 'never'
content_type = 'application/octet-stream'
def __init__(self):
super().__init__({})
class MockFolderMetadata(metadata.BaseFolderMetadata):
provider = 'mock'
name = 'Bar'
size = 1337
etag = 'etag'
path = '/Bar/'
modified = 'never'
content_type = 'application/octet-stream'
def __init__(self):
super().__init__({})
class MockProvider(provider.BaseProvider):
NAME = 'MockProvider'
copy = None
move = None
delete = None
upload = None
download = None
metadata = None
validate_path = None
revalidate_path = None
def __init__(self, auth=None, settings=None, creds=None):
super().__init__(auth or {}, settings or {}, creds or {})
self.copy = MockCoroutine()
self.move = MockCoroutine()
self.delete = MockCoroutine()
self.upload = MockCoroutine()
self.download = MockCoroutine()
self.metadata = MockCoroutine()
self.validate_path = MockCoroutine()
self.revalidate_path = MockCoroutine()
class MockProvider1(provider.BaseProvider):
NAME = 'MockProvider1'
@asyncio.coroutine
def validate_path(self, path, **kwargs):
return WaterButlerPath(path)
@asyncio.coroutine
def upload(self, stream, path, **kwargs):
return MockFileMetadata(), True
@asyncio.coroutine
def delete(self, path, **kwargs):
pass
@asyncio.coroutine
def metadata(self, path, throw=None, **kwargs):
if throw:
raise throw
return MockFolderMetadata()
@asyncio.coroutine
def download(self, path, **kwargs):
return b''
class MockProvider2(MockProvider1):
NAME = 'MockProvider2'
def can_intra_move(self, other, path=None):
return self.__class__ == other.__class__
def can_intra_copy(self, other, path=None):
return self.__class__ == other.__class__
class HandlerTestCase(testing.AsyncHTTPTestCase):
def setUp(self):
super().setUp()
def get_identity(*args, **kwargs):
return copy.deepcopy({
'auth': {},
'credentials': {},
'settings': {},
'callback_url': 'example.com'
})
self.mock_identity = MockCoroutine(side_effect=get_identity)
# self.mock_identity.return_value = identity_future
self.identity_patcher = mock.patch('waterbutler.server.handlers.core.auth_handler.fetch', self.mock_identity)
self.mock_provider = MockProvider1({}, {}, {})
self.mock_make_provider = mock.Mock(return_value=self.mock_provider)
self.make_provider_patcher = mock.patch('waterbutler.core.utils.make_provider', self.mock_make_provider)
self.identity_patcher.start()
self.make_provider_patcher.start()
def tearDown(self):
super().tearDown()
self.identity_patcher.stop()
self.make_provider_patcher.stop()
def get_app(self):
return make_app(debug=False)
def get_new_ioloop(self):
return AsyncIOMainLoop()
class MultiProviderHandlerTestCase(HandlerTestCase):
def setUp(self):
super().setUp()
self.source_provider = MockProvider2({}, {}, {})
self.destination_provider = MockProvider2({}, {}, {})
self.mock_send_hook = mock.Mock()
self.send_hook_patcher = mock.patch(self.HOOK_PATH, self.mock_send_hook)
self.send_hook_patcher.start()
self.mock_make_provider.return_value = None
self.mock_make_provider.side_effect = [
self.source_provider,
self.destination_provider
]
def tearDown(self):
super().tearDown()
self.send_hook_patcher.stop()
def payload(self):
return copy.deepcopy({
'source': {
'nid': 'foo',
'provider': 'source',
'path': '/source/path',
'callback_url': 'example.com'
},
'destination': {
'nid': 'bar',
'provider': 'destination',
'path': '/destination/path',
'callback_url': 'example.com'
}
})
class TempFilesContext:
def __init__(self):
self._dir = tempfile.mkdtemp()
self.files = []
def add_file(self, filename=None):
_, path = tempfile.mkstemp(dir=self._dir)
if filename:
os.rename(path, os.path.join(self._dir, filename))
return path
def tear_down(self):
shutil.rmtree(self._dir)
@pytest.yield_fixture
def temp_files():
context = TempFilesContext()
yield context
context.tear_down()
|
|
# Copyright (c) 2010 Cloud.com, Inc
# Copyright 2012 Cloudbase Solutions Srl / Pedro Navarro Perez
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility class for VM related operations on Hyper-V.
"""
import sys
import time
import uuid
if sys.platform == 'win32':
import wmi
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import uuidutils
import six
from six.moves import range
from nova import exception
from nova.i18n import _, _LW
from nova.virt.hyperv import constants
from nova.virt.hyperv import hostutils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
# TODO(alexpilotti): Move the exceptions to a separate module
# TODO(alexpilotti): Add more domain exceptions
class HyperVException(exception.NovaException):
def __init__(self, message=None):
super(HyperVException, self).__init__(message)
# TODO(alexpilotti): Add a storage exception base class
class VHDResizeException(HyperVException):
def __init__(self, message=None):
        super(VHDResizeException, self).__init__(message)
class HyperVAuthorizationException(HyperVException):
def __init__(self, message=None):
        super(HyperVAuthorizationException, self).__init__(message)
class UnsupportedConfigDriveFormatException(HyperVException):
def __init__(self, message=None):
        super(UnsupportedConfigDriveFormatException, self).__init__(message)
class VMUtils(object):
# These constants can be overridden by inherited classes
_PHYS_DISK_RES_SUB_TYPE = 'Microsoft Physical Disk Drive'
_DISK_DRIVE_RES_SUB_TYPE = 'Microsoft Synthetic Disk Drive'
_DVD_DRIVE_RES_SUB_TYPE = 'Microsoft Synthetic DVD Drive'
_HARD_DISK_RES_SUB_TYPE = 'Microsoft Virtual Hard Disk'
_DVD_DISK_RES_SUB_TYPE = 'Microsoft Virtual CD/DVD Disk'
_IDE_CTRL_RES_SUB_TYPE = 'Microsoft Emulated IDE Controller'
_SCSI_CTRL_RES_SUB_TYPE = 'Microsoft Synthetic SCSI Controller'
_SERIAL_PORT_RES_SUB_TYPE = 'Microsoft Serial Port'
_SETTINGS_DEFINE_STATE_CLASS = 'Msvm_SettingsDefineState'
_VIRTUAL_SYSTEM_SETTING_DATA_CLASS = 'Msvm_VirtualSystemSettingData'
_RESOURCE_ALLOC_SETTING_DATA_CLASS = 'Msvm_ResourceAllocationSettingData'
_PROCESSOR_SETTING_DATA_CLASS = 'Msvm_ProcessorSettingData'
_MEMORY_SETTING_DATA_CLASS = 'Msvm_MemorySettingData'
_SERIAL_PORT_SETTING_DATA_CLASS = _RESOURCE_ALLOC_SETTING_DATA_CLASS
_STORAGE_ALLOC_SETTING_DATA_CLASS = _RESOURCE_ALLOC_SETTING_DATA_CLASS
_SYNTHETIC_ETHERNET_PORT_SETTING_DATA_CLASS = \
'Msvm_SyntheticEthernetPortSettingData'
_AFFECTED_JOB_ELEMENT_CLASS = "Msvm_AffectedJobElement"
_COMPUTER_SYSTEM_CLASS = "Msvm_ComputerSystem"
_VM_ENABLED_STATE_PROP = "EnabledState"
_SHUTDOWN_COMPONENT = "Msvm_ShutdownComponent"
_VIRTUAL_SYSTEM_CURRENT_SETTINGS = 3
_AUTOMATIC_STARTUP_ACTION_NONE = 0
_PHYS_DISK_CONNECTION_ATTR = "HostResource"
_VIRT_DISK_CONNECTION_ATTR = "Connection"
_CONCRETE_JOB_CLASS = "Msvm_ConcreteJob"
_KILL_JOB_STATE_CHANGE_REQUEST = 5
_completed_job_states = [constants.JOB_STATE_COMPLETED,
constants.JOB_STATE_TERMINATED,
constants.JOB_STATE_KILLED,
constants.JOB_STATE_COMPLETED_WITH_WARNINGS]
_vm_power_states_map = {constants.HYPERV_VM_STATE_ENABLED: 2,
constants.HYPERV_VM_STATE_DISABLED: 3,
constants.HYPERV_VM_STATE_SHUTTING_DOWN: 4,
constants.HYPERV_VM_STATE_REBOOT: 10,
constants.HYPERV_VM_STATE_PAUSED: 32768,
constants.HYPERV_VM_STATE_SUSPENDED: 32769}
def __init__(self, host='.'):
self._enabled_states_map = {v: k for k, v in
six.iteritems(self._vm_power_states_map)}
if sys.platform == 'win32':
self._init_hyperv_wmi_conn(host)
self._conn_cimv2 = wmi.WMI(moniker='//%s/root/cimv2' % host)
        # On versions of Hyper-V prior to 2012, trying to directly set
        # properties on default setting data WMI objects results in an
        # exception
self._clone_wmi_objs = False
if sys.platform == 'win32':
            hutils = hostutils.HostUtils()
            self._clone_wmi_objs = not hutils.check_min_windows_version(6, 2)
def _init_hyperv_wmi_conn(self, host):
self._conn = wmi.WMI(moniker='//%s/root/virtualization' % host)
def list_instance_notes(self):
instance_notes = []
for vs in self._conn.Msvm_VirtualSystemSettingData(
['ElementName', 'Notes'],
SettingType=self._VIRTUAL_SYSTEM_CURRENT_SETTINGS):
if vs.Notes is not None:
instance_notes.append(
(vs.ElementName, [v for v in vs.Notes.split('\n') if v]))
return instance_notes
def list_instances(self):
"""Return the names of all the instances known to Hyper-V."""
return [v.ElementName for v in
self._conn.Msvm_VirtualSystemSettingData(
['ElementName'],
SettingType=self._VIRTUAL_SYSTEM_CURRENT_SETTINGS)]
def get_vm_summary_info(self, vm_name):
vm = self._lookup_vm_check(vm_name)
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
vmsettings = vm.associators(
wmi_association_class=self._SETTINGS_DEFINE_STATE_CLASS,
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
settings_paths = [v.path_() for v in vmsettings]
# See http://msdn.microsoft.com/en-us/library/cc160706%28VS.85%29.aspx
(ret_val, summary_info) = vs_man_svc.GetSummaryInformation(
[constants.VM_SUMMARY_NUM_PROCS,
constants.VM_SUMMARY_ENABLED_STATE,
constants.VM_SUMMARY_MEMORY_USAGE,
constants.VM_SUMMARY_UPTIME],
settings_paths)
if ret_val:
raise HyperVException(_('Cannot get VM summary data for: %s')
% vm_name)
si = summary_info[0]
memory_usage = None
if si.MemoryUsage is not None:
memory_usage = long(si.MemoryUsage)
up_time = None
if si.UpTime is not None:
up_time = long(si.UpTime)
        # Nova requires a valid state to be returned. Hyper-V has more
        # states than Nova, typically intermediate ones, and since there is
        # no direct mapping for those, ENABLED is the only reasonable
        # option, considering that in all the non-mappable states the
        # instance is running.
enabled_state = self._enabled_states_map.get(si.EnabledState,
constants.HYPERV_VM_STATE_ENABLED)
summary_info_dict = {'NumberOfProcessors': si.NumberOfProcessors,
'EnabledState': enabled_state,
'MemoryUsage': memory_usage,
'UpTime': up_time}
return summary_info_dict
def _lookup_vm_check(self, vm_name):
vm = self._lookup_vm(vm_name)
if not vm:
raise exception.NotFound(_('VM not found: %s') % vm_name)
return vm
def _lookup_vm(self, vm_name):
vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name)
n = len(vms)
if n == 0:
return None
elif n > 1:
raise HyperVException(_('Duplicate VM name found: %s') % vm_name)
else:
return vms[0]
def vm_exists(self, vm_name):
return self._lookup_vm(vm_name) is not None
def get_vm_id(self, vm_name):
vm = self._lookup_vm_check(vm_name)
return vm.Name
def _get_vm_setting_data(self, vm):
vmsettings = vm.associators(
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
# Avoid snapshots
return [s for s in vmsettings if s.SettingType == 3][0]
def _set_vm_memory(self, vm, vmsetting, memory_mb, dynamic_memory_ratio):
mem_settings = vmsetting.associators(
wmi_result_class=self._MEMORY_SETTING_DATA_CLASS)[0]
max_mem = long(memory_mb)
mem_settings.Limit = max_mem
if dynamic_memory_ratio > 1:
mem_settings.DynamicMemoryEnabled = True
# Must be a multiple of 2
reserved_mem = min(
long(max_mem / dynamic_memory_ratio) >> 1 << 1,
max_mem)
else:
mem_settings.DynamicMemoryEnabled = False
reserved_mem = max_mem
mem_settings.Reservation = reserved_mem
# Start with the minimum memory
mem_settings.VirtualQuantity = reserved_mem
self._modify_virt_resource(mem_settings, vm.path_())
def _set_vm_vcpus(self, vm, vmsetting, vcpus_num, limit_cpu_features):
procsetting = vmsetting.associators(
wmi_result_class=self._PROCESSOR_SETTING_DATA_CLASS)[0]
vcpus = long(vcpus_num)
procsetting.VirtualQuantity = vcpus
procsetting.Reservation = vcpus
procsetting.Limit = 100000 # static assignment to 100%
procsetting.LimitProcessorFeatures = limit_cpu_features
self._modify_virt_resource(procsetting, vm.path_())
def update_vm(self, vm_name, memory_mb, vcpus_num, limit_cpu_features,
dynamic_memory_ratio):
vm = self._lookup_vm_check(vm_name)
vmsetting = self._get_vm_setting_data(vm)
self._set_vm_memory(vm, vmsetting, memory_mb, dynamic_memory_ratio)
self._set_vm_vcpus(vm, vmsetting, vcpus_num, limit_cpu_features)
def check_admin_permissions(self):
if not self._conn.Msvm_VirtualSystemManagementService():
msg = _("The Windows account running nova-compute on this Hyper-V"
" host doesn't have the required permissions to create or"
" operate the virtual machine.")
raise HyperVAuthorizationException(msg)
def create_vm(self, vm_name, memory_mb, vcpus_num, limit_cpu_features,
dynamic_memory_ratio, vm_gen, instance_path, notes=None):
"""Creates a VM."""
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
LOG.debug('Creating VM %s', vm_name)
vm = self._create_vm_obj(vs_man_svc, vm_name, vm_gen, notes,
dynamic_memory_ratio, instance_path)
vmsetting = self._get_vm_setting_data(vm)
LOG.debug('Setting memory for vm %s', vm_name)
self._set_vm_memory(vm, vmsetting, memory_mb, dynamic_memory_ratio)
LOG.debug('Set vCPUs for vm %s', vm_name)
self._set_vm_vcpus(vm, vmsetting, vcpus_num, limit_cpu_features)
def _create_vm_obj(self, vs_man_svc, vm_name, vm_gen, notes,
dynamic_memory_ratio, instance_path):
vs_gs_data = self._conn.Msvm_VirtualSystemGlobalSettingData.new()
vs_gs_data.ElementName = vm_name
# Don't start automatically on host boot
vs_gs_data.AutomaticStartupAction = self._AUTOMATIC_STARTUP_ACTION_NONE
vs_gs_data.ExternalDataRoot = instance_path
vs_gs_data.SnapshotDataRoot = instance_path
(vm_path,
job_path,
ret_val) = vs_man_svc.DefineVirtualSystem([], None,
vs_gs_data.GetText_(1))
self.check_ret_val(ret_val, job_path)
vm = self._get_wmi_obj(vm_path)
if notes:
vmsetting = self._get_vm_setting_data(vm)
vmsetting.Notes = '\n'.join(notes)
self._modify_virtual_system(vs_man_svc, vm_path, vmsetting)
return self._get_wmi_obj(vm_path)
def _modify_virtual_system(self, vs_man_svc, vm_path, vmsetting):
(job_path, ret_val) = vs_man_svc.ModifyVirtualSystem(
ComputerSystem=vm_path,
SystemSettingData=vmsetting.GetText_(1))[1:]
self.check_ret_val(ret_val, job_path)
def get_vm_scsi_controller(self, vm_name):
vm = self._lookup_vm_check(vm_name)
return self._get_vm_scsi_controller(vm)
def _get_vm_scsi_controller(self, vm):
vmsettings = vm.associators(
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
rasds = vmsettings[0].associators(
wmi_result_class=self._RESOURCE_ALLOC_SETTING_DATA_CLASS)
res = [r for r in rasds
if r.ResourceSubType == self._SCSI_CTRL_RES_SUB_TYPE][0]
return res.path_()
def _get_vm_ide_controller(self, vm, ctrller_addr):
vmsettings = vm.associators(
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
rasds = vmsettings[0].associators(
wmi_result_class=self._RESOURCE_ALLOC_SETTING_DATA_CLASS)
ide_ctrls = [r for r in rasds
if r.ResourceSubType == self._IDE_CTRL_RES_SUB_TYPE
and r.Address == str(ctrller_addr)]
return ide_ctrls[0].path_() if ide_ctrls else None
def get_vm_ide_controller(self, vm_name, ctrller_addr):
vm = self._lookup_vm_check(vm_name)
return self._get_vm_ide_controller(vm, ctrller_addr)
def get_attached_disks(self, scsi_controller_path):
volumes = self._conn.query(
self._get_attached_disks_query_string(scsi_controller_path))
return volumes
def _get_attached_disks_query_string(self, scsi_controller_path):
return ("SELECT * FROM %(class_name)s WHERE ("
"ResourceSubType='%(res_sub_type)s' OR "
"ResourceSubType='%(res_sub_type_virt)s') AND "
"Parent='%(parent)s'" % {
'class_name': self._RESOURCE_ALLOC_SETTING_DATA_CLASS,
'res_sub_type': self._PHYS_DISK_RES_SUB_TYPE,
'res_sub_type_virt': self._DISK_DRIVE_RES_SUB_TYPE,
'parent': scsi_controller_path.replace("'", "''")})
def _get_new_setting_data(self, class_name):
obj = self._conn.query("SELECT * FROM %s WHERE InstanceID "
"LIKE '%%\\Default'" % class_name)[0]
return self._check_clone_wmi_obj(class_name, obj)
def _get_new_resource_setting_data(self, resource_sub_type,
class_name=None):
if class_name is None:
class_name = self._RESOURCE_ALLOC_SETTING_DATA_CLASS
obj = self._conn.query("SELECT * FROM %(class_name)s "
"WHERE ResourceSubType = "
"'%(res_sub_type)s' AND "
"InstanceID LIKE '%%\\Default'" %
{"class_name": class_name,
"res_sub_type": resource_sub_type})[0]
return self._check_clone_wmi_obj(class_name, obj)
def _check_clone_wmi_obj(self, class_name, obj):
if self._clone_wmi_objs:
return self._clone_wmi_obj(class_name, obj)
else:
return obj
def _clone_wmi_obj(self, class_name, obj):
wmi_class = getattr(self._conn, class_name)
new_obj = wmi_class.new()
# Copy the properties from the original.
for prop in obj._properties:
value = obj.Properties_.Item(prop).Value
new_obj.Properties_.Item(prop).Value = value
return new_obj
def attach_scsi_drive(self, vm_name, path, drive_type=constants.DISK):
vm = self._lookup_vm_check(vm_name)
ctrller_path = self._get_vm_scsi_controller(vm)
drive_addr = self.get_free_controller_slot(ctrller_path)
self.attach_drive(vm_name, path, ctrller_path, drive_addr, drive_type)
def attach_ide_drive(self, vm_name, path, ctrller_addr, drive_addr,
drive_type=constants.DISK):
vm = self._lookup_vm_check(vm_name)
ctrller_path = self._get_vm_ide_controller(vm, ctrller_addr)
self.attach_drive(vm_name, path, ctrller_path, drive_addr, drive_type)
def attach_drive(self, vm_name, path, ctrller_path, drive_addr,
drive_type=constants.DISK):
"""Create a drive and attach it to the vm."""
vm = self._lookup_vm_check(vm_name)
if drive_type == constants.DISK:
res_sub_type = self._DISK_DRIVE_RES_SUB_TYPE
elif drive_type == constants.DVD:
res_sub_type = self._DVD_DRIVE_RES_SUB_TYPE
drive = self._get_new_resource_setting_data(res_sub_type)
# Set the ctrller as parent.
drive.Parent = ctrller_path
drive.Address = drive_addr
# Add the cloned disk drive object to the vm.
new_resources = self._add_virt_resource(drive, vm.path_())
drive_path = new_resources[0]
if drive_type == constants.DISK:
res_sub_type = self._HARD_DISK_RES_SUB_TYPE
elif drive_type == constants.DVD:
res_sub_type = self._DVD_DISK_RES_SUB_TYPE
res = self._get_new_resource_setting_data(res_sub_type)
# Set the new drive as the parent.
res.Parent = drive_path
res.Connection = [path]
# Add the new vhd object as a virtual hard disk to the vm.
self._add_virt_resource(res, vm.path_())
def create_scsi_controller(self, vm_name):
"""Create an iscsi controller ready to mount volumes."""
vm = self._lookup_vm_check(vm_name)
scsicontrl = self._get_new_resource_setting_data(
self._SCSI_CTRL_RES_SUB_TYPE)
scsicontrl.VirtualSystemIdentifiers = ['{' + str(uuid.uuid4()) + '}']
self._add_virt_resource(scsicontrl, vm.path_())
def attach_volume_to_controller(self, vm_name, controller_path, address,
mounted_disk_path):
"""Attach a volume to a controller."""
vm = self._lookup_vm_check(vm_name)
diskdrive = self._get_new_resource_setting_data(
self._PHYS_DISK_RES_SUB_TYPE)
diskdrive.Address = address
diskdrive.Parent = controller_path
diskdrive.HostResource = [mounted_disk_path]
self._add_virt_resource(diskdrive, vm.path_())
def _get_disk_resource_address(self, disk_resource):
return disk_resource.Address
def set_disk_host_resource(self, vm_name, controller_path, address,
mounted_disk_path):
disk_found = False
vm = self._lookup_vm_check(vm_name)
(disk_resources, volume_resources) = self._get_vm_disks(vm)
for disk_resource in disk_resources + volume_resources:
if (disk_resource.Parent == controller_path and
self._get_disk_resource_address(disk_resource) ==
str(address)):
if (disk_resource.HostResource and
disk_resource.HostResource[0] != mounted_disk_path):
LOG.debug('Updating disk host resource "%(old)s" to '
'"%(new)s"' %
{'old': disk_resource.HostResource[0],
'new': mounted_disk_path})
disk_resource.HostResource = [mounted_disk_path]
self._modify_virt_resource(disk_resource, vm.path_())
disk_found = True
break
if not disk_found:
LOG.warning(_LW('Disk not found on controller '
'"%(controller_path)s" with '
'address "%(address)s"'),
{'controller_path': controller_path,
'address': address})
def set_nic_connection(self, vm_name, nic_name, vswitch_conn_data):
nic_data = self._get_nic_data_by_name(nic_name)
nic_data.Connection = [vswitch_conn_data]
vm = self._lookup_vm_check(vm_name)
self._modify_virt_resource(nic_data, vm.path_())
def _get_nic_data_by_name(self, name):
return self._conn.Msvm_SyntheticEthernetPortSettingData(
ElementName=name)[0]
def create_nic(self, vm_name, nic_name, mac_address):
"""Create a (synthetic) nic and attach it to the vm."""
# Create a new nic
new_nic_data = self._get_new_setting_data(
self._SYNTHETIC_ETHERNET_PORT_SETTING_DATA_CLASS)
# Configure the nic
new_nic_data.ElementName = nic_name
new_nic_data.Address = mac_address.replace(':', '')
new_nic_data.StaticMacAddress = 'True'
new_nic_data.VirtualSystemIdentifiers = ['{' + str(uuid.uuid4()) + '}']
# Add the new nic to the vm
vm = self._lookup_vm_check(vm_name)
self._add_virt_resource(new_nic_data, vm.path_())
def soft_shutdown_vm(self, vm_name):
vm = self._lookup_vm_check(vm_name)
shutdown_component = vm.associators(
wmi_result_class=self._SHUTDOWN_COMPONENT)
if not shutdown_component:
# If no shutdown_component is found, it means the VM is already
# in a shutdown state.
return
reason = 'Soft shutdown requested by OpenStack Nova.'
(ret_val, ) = shutdown_component[0].InitiateShutdown(Force=False,
Reason=reason)
self.check_ret_val(ret_val, None)
def set_vm_state(self, vm_name, req_state):
"""Set the desired state of the VM."""
vm = self._lookup_vm_check(vm_name)
(job_path,
ret_val) = vm.RequestStateChange(self._vm_power_states_map[req_state])
# Invalid state for current operation (32775) typically means that
# the VM is already in the state requested
self.check_ret_val(ret_val, job_path, [0, 32775])
LOG.debug("Successfully changed vm state of %(vm_name)s "
"to %(req_state)s",
{'vm_name': vm_name, 'req_state': req_state})
def _get_disk_resource_disk_path(self, disk_resource):
return disk_resource.Connection
def get_vm_storage_paths(self, vm_name):
vm = self._lookup_vm_check(vm_name)
(disk_resources, volume_resources) = self._get_vm_disks(vm)
volume_drives = []
for volume_resource in volume_resources:
drive_path = volume_resource.HostResource[0]
volume_drives.append(drive_path)
disk_files = []
for disk_resource in disk_resources:
disk_files.extend(
[c for c in self._get_disk_resource_disk_path(disk_resource)])
return (disk_files, volume_drives)
def _get_vm_disks(self, vm):
vmsettings = vm.associators(
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
rasds = vmsettings[0].associators(
wmi_result_class=self._STORAGE_ALLOC_SETTING_DATA_CLASS)
disk_resources = [r for r in rasds if
r.ResourceSubType in
[self._HARD_DISK_RES_SUB_TYPE,
self._DVD_DISK_RES_SUB_TYPE]]
if (self._RESOURCE_ALLOC_SETTING_DATA_CLASS !=
self._STORAGE_ALLOC_SETTING_DATA_CLASS):
rasds = vmsettings[0].associators(
wmi_result_class=self._RESOURCE_ALLOC_SETTING_DATA_CLASS)
volume_resources = [r for r in rasds if
r.ResourceSubType == self._PHYS_DISK_RES_SUB_TYPE]
return (disk_resources, volume_resources)
def destroy_vm(self, vm_name):
vm = self._lookup_vm_check(vm_name)
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
# Remove the VM. Does not destroy disks.
(job_path, ret_val) = vs_man_svc.DestroyVirtualSystem(vm.path_())
self.check_ret_val(ret_val, job_path)
def check_ret_val(self, ret_val, job_path, success_values=[0]):
if ret_val == constants.WMI_JOB_STATUS_STARTED:
return self._wait_for_job(job_path)
elif ret_val not in success_values:
raise HyperVException(_('Operation failed with return value: %s')
% ret_val)
def _wait_for_job(self, job_path):
"""Poll WMI job state and wait for completion."""
job = self._get_wmi_obj(job_path)
while job.JobState == constants.WMI_JOB_STATE_RUNNING:
time.sleep(0.1)
job = self._get_wmi_obj(job_path)
if job.JobState == constants.JOB_STATE_KILLED:
LOG.debug("WMI job killed with status %s.", job.JobState)
return job
if job.JobState != constants.WMI_JOB_STATE_COMPLETED:
job_state = job.JobState
if job.path().Class == "Msvm_ConcreteJob":
err_sum_desc = job.ErrorSummaryDescription
err_desc = job.ErrorDescription
err_code = job.ErrorCode
raise HyperVException(_("WMI job failed with status "
"%(job_state)d. Error details: "
"%(err_sum_desc)s - %(err_desc)s - "
"Error code: %(err_code)d") %
{'job_state': job_state,
'err_sum_desc': err_sum_desc,
'err_desc': err_desc,
'err_code': err_code})
else:
(error, ret_val) = job.GetError()
if not ret_val and error:
raise HyperVException(_("WMI job failed with status "
"%(job_state)d. Error details: "
"%(error)s") %
{'job_state': job_state,
'error': error})
else:
raise HyperVException(_("WMI job failed with status "
"%d. No error "
"description available") %
job_state)
desc = job.Description
elap = job.ElapsedTime
LOG.debug("WMI job succeeded: %(desc)s, Elapsed=%(elap)s",
{'desc': desc, 'elap': elap})
return job
def _get_wmi_obj(self, path):
return wmi.WMI(moniker=path.replace('\\', '/'))
def _add_virt_resource(self, res_setting_data, vm_path):
"""Adds a new resource to the VM."""
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
res_xml = [res_setting_data.GetText_(1)]
(job_path,
new_resources,
ret_val) = vs_man_svc.AddVirtualSystemResources(res_xml, vm_path)
self.check_ret_val(ret_val, job_path)
return new_resources
def _modify_virt_resource(self, res_setting_data, vm_path):
"""Updates a VM resource."""
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
(job_path, ret_val) = vs_man_svc.ModifyVirtualSystemResources(
ResourceSettingData=[res_setting_data.GetText_(1)],
ComputerSystem=vm_path)
self.check_ret_val(ret_val, job_path)
def _remove_virt_resource(self, res_setting_data, vm_path):
"""Removes a VM resource."""
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
res_path = [res_setting_data.path_()]
(job_path, ret_val) = vs_man_svc.RemoveVirtualSystemResources(res_path,
vm_path)
self.check_ret_val(ret_val, job_path)
def take_vm_snapshot(self, vm_name):
vm = self._lookup_vm_check(vm_name)
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
(job_path, ret_val,
snp_setting_data) = vs_man_svc.CreateVirtualSystemSnapshot(vm.path_())
self.check_ret_val(ret_val, job_path)
job_wmi_path = job_path.replace('\\', '/')
job = wmi.WMI(moniker=job_wmi_path)
snp_setting_data = job.associators(
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)[0]
return snp_setting_data.path_()
def remove_vm_snapshot(self, snapshot_path):
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
(job_path, ret_val) = vs_man_svc.RemoveVirtualSystemSnapshot(
snapshot_path)
self.check_ret_val(ret_val, job_path)
def detach_vm_disk(self, vm_name, disk_path, is_physical=True):
vm = self._lookup_vm_check(vm_name)
disk_resource = self._get_mounted_disk_resource_from_path(disk_path,
is_physical)
if disk_resource:
parent = self._conn.query("SELECT * FROM "
"Msvm_ResourceAllocationSettingData "
"WHERE __PATH = '%s'" %
disk_resource.Parent)[0]
self._remove_virt_resource(disk_resource, vm.path_())
if not is_physical:
self._remove_virt_resource(parent, vm.path_())
def _get_mounted_disk_resource_from_path(self, disk_path, is_physical):
if is_physical:
class_name = self._RESOURCE_ALLOC_SETTING_DATA_CLASS
res_sub_type = self._PHYS_DISK_RES_SUB_TYPE
conn_attr = self._PHYS_DISK_CONNECTION_ATTR
else:
class_name = self._STORAGE_ALLOC_SETTING_DATA_CLASS
res_sub_type = self._HARD_DISK_RES_SUB_TYPE
conn_attr = self._VIRT_DISK_CONNECTION_ATTR
disk_resources = self._conn.query("SELECT * FROM %(class_name)s "
"WHERE ResourceSubType = "
"'%(res_sub_type)s'" %
{"class_name": class_name,
"res_sub_type": res_sub_type})
for disk_resource in disk_resources:
conn = getattr(disk_resource, conn_attr, None)
if conn and conn[0].lower() == disk_path.lower():
return disk_resource
def get_mounted_disk_by_drive_number(self, device_number):
mounted_disks = self._conn.query("SELECT * FROM Msvm_DiskDrive "
"WHERE DriveNumber=" +
str(device_number))
if len(mounted_disks):
return mounted_disks[0].path_()
def get_controller_volume_paths(self, controller_path):
disks = self._conn.query("SELECT * FROM %(class_name)s "
"WHERE ResourceSubType = '%(res_sub_type)s' "
"AND Parent='%(parent)s'" %
{"class_name":
self._RESOURCE_ALLOC_SETTING_DATA_CLASS,
"res_sub_type":
self._PHYS_DISK_RES_SUB_TYPE,
"parent":
controller_path})
disk_data = {}
for disk in disks:
if disk.HostResource:
disk_data[disk.path().RelPath] = disk.HostResource[0]
return disk_data
def get_free_controller_slot(self, scsi_controller_path):
attached_disks = self.get_attached_disks(scsi_controller_path)
used_slots = [int(self._get_disk_resource_address(disk))
for disk in attached_disks]
for slot in range(constants.SCSI_CONTROLLER_SLOTS_NUMBER):
if slot not in used_slots:
return slot
raise HyperVException(_("Exceeded the maximum number of slots"))
def enable_vm_metrics_collection(self, vm_name):
raise NotImplementedError(_("Metrics collection is not supported on "
"this version of Hyper-V"))
def get_vm_serial_port_connection(self, vm_name, update_connection=None):
vm = self._lookup_vm_check(vm_name)
vmsettings = vm.associators(
wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
rasds = vmsettings[0].associators(
wmi_result_class=self._SERIAL_PORT_SETTING_DATA_CLASS)
serial_port = (
[r for r in rasds if
r.ResourceSubType == self._SERIAL_PORT_RES_SUB_TYPE][0])
if update_connection:
serial_port.Connection = [update_connection]
self._modify_virt_resource(serial_port, vm.path_())
if len(serial_port.Connection) > 0:
return serial_port.Connection[0]
def get_active_instances(self):
"""Return the names of all the active instances known to Hyper-V."""
vm_names = self.list_instances()
vms = [self._lookup_vm(vm_name) for vm_name in vm_names]
active_vm_names = [v.ElementName for v in vms
if v.EnabledState == constants.HYPERV_VM_STATE_ENABLED]
return active_vm_names
def get_vm_power_state_change_listener(self, timeframe, filtered_states):
field = self._VM_ENABLED_STATE_PROP
query = self._get_event_wql_query(cls=self._COMPUTER_SYSTEM_CLASS,
field=field,
timeframe=timeframe,
filtered_states=filtered_states)
return self._conn.Msvm_ComputerSystem.watch_for(raw_wql=query,
fields=[field])
def _get_event_wql_query(self, cls, field,
timeframe, filtered_states=None):
"""Return a WQL query used for polling WMI events.
:param cls: the WMI class polled for events
:param field: the field checked
:param timeframe: check for events that occurred in
the specified timeframe
:param filtered_states: only catch events triggered when a WMI
object transitioned into one of those
states.
"""
query = ("SELECT %(field)s, TargetInstance "
"FROM __InstanceModificationEvent "
"WITHIN %(timeframe)s "
"WHERE TargetInstance ISA '%(class)s' "
"AND TargetInstance.%(field)s != "
"PreviousInstance.%(field)s" %
{'class': cls,
'field': field,
'timeframe': timeframe})
if filtered_states:
checks = ["TargetInstance.%s = '%s'" % (field, state)
for state in filtered_states]
query += " AND (%s)" % " OR ".join(checks)
return query
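    # Illustrative example (not executed): a call such as
    #   self._get_event_wql_query(cls=self._COMPUTER_SYSTEM_CLASS,
    #                             field=self._VM_ENABLED_STATE_PROP,
    #                             timeframe=2, filtered_states=[2, 3])
    # builds roughly:
    #   SELECT EnabledState, TargetInstance FROM __InstanceModificationEvent
    #   WITHIN 2 WHERE TargetInstance ISA 'Msvm_ComputerSystem'
    #   AND TargetInstance.EnabledState != PreviousInstance.EnabledState
    #   AND (TargetInstance.EnabledState = '2'
    #        OR TargetInstance.EnabledState = '3')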
def _get_instance_notes(self, vm_name):
vm = self._lookup_vm_check(vm_name)
vmsettings = self._get_vm_setting_data(vm)
return [note for note in vmsettings.Notes.split('\n') if note]
def get_instance_uuid(self, vm_name):
instance_notes = self._get_instance_notes(vm_name)
if instance_notes and uuidutils.is_uuid_like(instance_notes[0]):
return instance_notes[0]
def get_vm_power_state(self, vm_enabled_state):
return self._enabled_states_map.get(vm_enabled_state,
constants.HYPERV_VM_STATE_OTHER)
def stop_vm_jobs(self, vm_name):
vm = self._lookup_vm_check(vm_name)
vm_jobs = vm.associators(wmi_result_class=self._CONCRETE_JOB_CLASS)
for job in vm_jobs:
if job and job.Cancellable and not self._is_job_completed(job):
job.RequestStateChange(self._KILL_JOB_STATE_CHANGE_REQUEST)
return vm_jobs
def _is_job_completed(self, job):
return job.JobState in self._completed_job_states
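# Illustrative only: a minimal sketch (not part of the driver) of how the
# VMUtils API above is typically exercised. It assumes a Hyper-V host and a
# local VHD; the VM name, the paths and the VM_GEN_1 constant are
# assumptions, and the function is intentionally never called from this
# module.
def _example_create_and_boot_vm():
    vmutils = VMUtils()
    vmutils.check_admin_permissions()
    vmutils.create_vm('demo-vm', memory_mb=1024, vcpus_num=2,
                      limit_cpu_features=False, dynamic_memory_ratio=1.0,
                      vm_gen=constants.VM_GEN_1,
                      instance_path='C:\\demo-instance')
    vmutils.attach_ide_drive('demo-vm', 'C:\\demo-instance\\root.vhd',
                             ctrller_addr=0, drive_addr=0)
    vmutils.set_vm_state('demo-vm', constants.HYPERV_VM_STATE_ENABLED)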
|
|
#!/usr/bin/python
import boto.ec2
import collections
import git
import itertools
import sys
from hotcidr import fetch
from hotcidr import util
class Action(object):
def __call__(self, conn):
try:
self.run(conn)
except:
print("Unexpected exception raised. Aborting.")
raise
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __hash__(self):
return tuple(self.__dict__.items()).__hash__()
class CreateSecurityGroup(Action):
def __init__(self, name, desc):
self.name = name
self.desc = desc
def run(self, conn):
conn.create_security_group(self.name, self.desc)
def __repr__(self):
return "Create new security group %s (%s)" % (self.name, self.desc)
class ModifyInstanceAttribute(Action):
def __init__(self, inst_id, attr, value):
self.inst_id = inst_id
self.attr = attr
self.value = value
def run(self, conn):
if self.attr == 'groupSet':
self.value = map(lambda g: util.get_id_for_group(conn, g), self.value)
conn.modify_instance_attribute(self.inst_id, self.attr, self.value)
def __repr__(self):
return "Set %s of %s to %s" % (self.attr, self.inst_id, self.value)
class ModifyRule(Action):
def __init__(self, group, rule):
self.group = group
self.rule = rule
def run(self, conn, f):
loc = self.rule.location
if loc == 'all':
loc = '0.0.0.0/0'
proto = self.rule.protocol
if proto == 'all':
proto = '-1'
if self.rule.ports:
fromport = self.rule.ports.fromport
toport = self.rule.ports.toport
else:
fromport = -1
toport = -1
k = {
'group_id': util.get_id_for_group(conn, self.group),
'ip_protocol': proto,
'from_port': fromport,
'to_port': toport
}
if util.is_cidr(loc):
f(cidr_ip=loc, **k)
else:
#Boto uses src_group_id or src_security_group_group_id to mean the
#same thing depending on which function f is used here.
loc = util.get_id_for_group(conn, loc)
try:
f(src_group_id=loc, **k)
except TypeError:
f(src_security_group_group_id=loc, **k)
class RemoveRule(ModifyRule):
def run(self, conn):
if self.rule.direction == 'inbound':
f = conn.revoke_security_group
elif self.rule.direction == 'outbound':
f = conn.revoke_security_group_egress
else:
raise Exception("Invalid direction %s" % self.rule.direction)
super(RemoveRule, self).run(conn, f)
def __repr__(self):
return "Del rule (%s, %s, %s) from %s" % (
self.rule.protocol, self.rule.ports,
self.rule.location, self.group)
class AddRule(ModifyRule):
def run(self, conn):
if self.rule.direction == 'inbound':
f = conn.authorize_security_group
elif self.rule.direction == 'outbound':
f = conn.authorize_security_group_egress
else:
raise Exception("Invalid direction %s" % self.rule.direction)
super(AddRule, self).run(conn, f)
def __repr__(self):
return "Add rule (%s, %s, %s) to %s" % (
self.rule.protocol, self.rule.ports,
self.rule.location, self.group)
rule_attr = ('direction', 'location', 'protocol', 'ports')
Rule = collections.namedtuple('Rule', rule_attr)
def rules(group):
if 'rules' in group:
for rule in group['rules']:
r = {k: rule[k] for k in rule_attr if k in rule}
for attr in rule_attr:
r.setdefault(attr, None)
yield Rule(**r)
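# Illustrative only: given a group loaded from the repo such as
#   {'description': 'web servers',
#    'rules': [{'direction': 'inbound', 'protocol': 'tcp',
#               'location': '10.0.0.0/8', 'ports': <ports object>}]}
# rules() yields Rule(direction='inbound', location='10.0.0.0/8',
# protocol='tcp', ports=<ports object>), filling any attribute missing from
# the rule dict with None.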
def get_actions(old_dir, new_dir):
old_instances = util.load_boxes(old_dir)
old_groups = util.load_groups(old_dir)
new_instances = util.load_boxes(new_dir)
new_groups = util.load_groups(new_dir)
# Add missing groups to AWS
for g in new_groups:
if g not in old_groups:
if 'description' in new_groups[g]:
desc = new_groups[g]['description']
else:
desc = "Automatically created by HotCIDR"
yield CreateSecurityGroup(g, desc)
# Update associated security groups for instances
for old_id, old_inst in old_instances.items():
if old_id in new_instances and 'groups' in new_instances[old_id]:
groups = new_instances[old_id]['groups']
if set(groups) != set(old_inst['groups']):
yield ModifyInstanceAttribute(old_id, 'groupSet', groups)
else:
print("Skipping instance %s (Does not exist in AWS)" % old_id)
#TODO: Delete security groups that are unused
# Update rules for each security group
for g, new_group in new_groups.items():
new_rules = set(rules(new_group))
if g in old_groups:
old_rules = set(rules(old_groups[g]))
else:
old_rules = set()
if new_rules != old_rules:
for rule in old_rules - new_rules:
yield RemoveRule(g, rule)
for rule in new_rules - old_rules:
yield AddRule(g, rule)
def changes(actions, unauthorized=0):
objs = dict(zip([
(CreateSecurityGroup, "%d group(s) created"),
(ModifyInstanceAttribute, "%d instance(s) updated"),
(AddRule, "%d rule(s) added"),
(RemoveRule, "%d rule(s) removed"),
], itertools.repeat(0)))
r = []
for action in actions:
for k in objs.iterkeys():
if isinstance(action, k[0]):
objs[k] += 1
for k, v in objs.iteritems():
out = k[1] % v
x, _, y = out.partition('(s)')
if v > 0:
if v == 1:
r.append(x + y)
else:
r.append(x + "s" + y)
if unauthorized:
if unauthorized == 1:
r.append("1 unauthorized change found")
else:
r.append("%d unauthorized changes found" % unauthorized)
if not r:
return "No changes"
return ", ".join(r)
def main(git_repo, region_code, vpc_id, aws_key, aws_secret, dry_run, expected_repo=None):
with fetch.vpc(region_code, vpc_id, aws_key, aws_secret) as aws_dir,\
util.repo(git_repo) as git_dir:
unauthorized_actions = []
if expected_repo:
try:
with util.repo(git_repo, sha1=expected_repo) as exp_dir:
unauthorized_actions = list(get_actions(exp_dir, aws_dir))
for action in unauthorized_actions:
print("Unauthorized action: %s" % action)
except git.exc.GitCommandError:
print("Could not check for unauthorized changes.")
print("Have you recently changed git repositories?")
actions = list(get_actions(aws_dir, git_dir))
conn = util.get_connection(vpc_id, region_code,
aws_access_key_id=aws_key, aws_secret_access_key=aws_secret)
if not conn:
print("Could not establish conection wtih AWS")
sys.exit(1)
count = len(actions)
for num, action in enumerate(actions, 1):
print("Action %d/%d: %s" % (num, count, action))
sys.stdout.flush()
if not dry_run:
action(conn)
print("hc-apply complete against %s" % util.get_hexsha(git_dir))
print(changes(actions, unauthorized=len(unauthorized_actions)))
|
|
###############################################################################
##
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: [email protected]
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from PyQt4 import QtGui, QtCore
from vistrails.core import get_vistrails_application
from vistrails.core.packagemanager import get_package_manager
from vistrails.core.modules.module_registry import get_module_registry
from vistrails.core.modules.package import Package
from vistrails.core.requirements import MissingRequirement
from vistrails.core.system import get_vistrails_basic_pkg_id
from vistrails.core.utils import InvalidPipeline
from vistrails.core.utils.uxml import (named_elements,
elements_filter, enter_named_element)
from vistrails.gui.configuration import QConfigurationWidget, \
QConfigurationPane
from vistrails.gui.module_palette import QModulePalette
from vistrails.gui.modules.output_configuration import OutputModeConfigurationWidget
from vistrails.gui.pipeline_view import QPipelineView
from vistrails.core.configuration import get_vistrails_persistent_configuration, \
get_vistrails_configuration, base_config
from vistrails.core import debug
import os.path
##############################################################################
class QPackageConfigurationDialog(QtGui.QDialog):
def __init__(self, parent, package):
QtGui.QDialog.__init__(self, parent)
self.setSizePolicy(QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Expanding)
self.setWindowTitle('Configuration for package "%s"' % package.name)
self._package = package
c = package.configuration
self._configuration_object = c
assert c is not None
layout = QtGui.QVBoxLayout(self)
self.setLayout(layout)
self._configuration_widget = QConfigurationWidget(self, c, c)
layout.addWidget(self._configuration_widget)
btns = (QtGui.QDialogButtonBox.Close |
QtGui.QDialogButtonBox.RestoreDefaults)
self._button_box = QtGui.QDialogButtonBox(btns,
QtCore.Qt.Horizontal,
self)
self.connect(self._button_box,
QtCore.SIGNAL('clicked(QAbstractButton *)'),
self.button_clicked)
self.connect(self._configuration_widget._tree.treeWidget,
QtCore.SIGNAL('configuration_changed'),
self.configuration_changed)
layout.addWidget(self._button_box)
def button_clicked(self, button):
role = self._button_box.buttonRole(button)
if role == QtGui.QDialogButtonBox.ResetRole:
txt = ("This will reset all configuration values of " +
"this package to their default values. Do you " +
"want to proceed?")
msg_box = QtGui.QMessageBox(QtGui.QMessageBox.Question,
"Really reset?", txt,
(QtGui.QMessageBox.Yes |
QtGui.QMessageBox.No))
if msg_box.exec_() == QtGui.QMessageBox.Yes:
self.reset_configuration()
else:
assert role == QtGui.QDialogButtonBox.RejectRole
self.close_dialog()
def reset_configuration(self):
self._package.reset_configuration()
conf = self._package.configuration
self._configuration_widget.configuration_changed(conf)
def close_dialog(self):
self.done(0)
def configuration_changed(self, item, new_value):
self._package.persist_configuration()
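# Illustrative only: elsewhere in this file the dialog above is opened from
# QPackagesWidget.configure_current_package() as
#     dlg = QPackageConfigurationDialog(self, self._current_package)
#     dlg.exec_()
# i.e. it expects a package whose .configuration is not None (asserted in
# __init__ above).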
##############################################################################
class QPackagesWidget(QtGui.QWidget):
# Signals that a package should be selected after the event loop updates (to remove old references)
select_package_after_update_signal = QtCore.SIGNAL("select_package_after_update_signal")
##########################################################################
# Initialization
def __init__(self, parent):
QtGui.QWidget.__init__(self, parent)
base_layout = QtGui.QHBoxLayout(self)
left = QtGui.QFrame(self)
right = QtGui.QFrame(self)
base_layout.addWidget(left)
base_layout.addWidget(right, 1)
######################################################################
left_layout = QtGui.QVBoxLayout(left)
left_layout.addWidget(QtGui.QLabel("Disabled packages:", left))
self._available_packages_list = QtGui.QListWidget(left)
left_layout.addWidget(self._available_packages_list)
left_layout.addWidget(QtGui.QLabel("Enabled packages:", left))
self._enabled_packages_list = QtGui.QListWidget(left)
left_layout.addWidget(self._enabled_packages_list)
self.update_button = QtGui.QPushButton("Refresh Lists", left)
left_layout.addWidget(self.update_button, 0, QtCore.Qt.AlignLeft)
self.update_button.clicked.connect(self.populate_lists)
self.connect(self._available_packages_list,
QtCore.SIGNAL('itemSelectionChanged()'),
self.selected_available_list,
QtCore.Qt.QueuedConnection)
self.connect(self._enabled_packages_list,
QtCore.SIGNAL('itemSelectionChanged()'),
self.selected_enabled_list,
QtCore.Qt.QueuedConnection)
sm = QtGui.QAbstractItemView.SingleSelection
self._available_packages_list.setSelectionMode(sm)
self._enabled_packages_list.setSelectionMode(sm)
######################################################################
right_layout = QtGui.QVBoxLayout(right)
info_frame = QtGui.QFrame(right)
info_layout = QtGui.QVBoxLayout(info_frame)
grid_frame = QtGui.QFrame(info_frame)
grid_frame.setSizePolicy(QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Expanding)
info_layout.addWidget(grid_frame)
grid_layout = QtGui.QGridLayout(grid_frame)
l1 = QtGui.QLabel("Package Name:", grid_frame)
grid_layout.addWidget(l1, 0, 0)
l2 = QtGui.QLabel("Identifier:", grid_frame)
grid_layout.addWidget(l2, 1, 0)
l3 = QtGui.QLabel("Version:", grid_frame)
grid_layout.addWidget(l3, 2, 0)
l4 = QtGui.QLabel("Dependencies:", grid_frame)
grid_layout.addWidget(l4, 3, 0)
l5 = QtGui.QLabel("Reverse Dependencies:", grid_frame)
grid_layout.addWidget(l5, 4, 0)
l6 = QtGui.QLabel("Description:", grid_frame)
grid_layout.addWidget(l6, 5, 0)
self._name_label = QtGui.QLabel("", grid_frame)
grid_layout.addWidget(self._name_label, 0, 1)
self._identifier_label = QtGui.QLabel("", grid_frame)
grid_layout.addWidget(self._identifier_label, 1, 1)
self._version_label = QtGui.QLabel("", grid_frame)
grid_layout.addWidget(self._version_label, 2, 1)
self._dependencies_label = QtGui.QLabel("", grid_frame)
grid_layout.addWidget(self._dependencies_label, 3, 1)
self._reverse_dependencies_label = QtGui.QLabel("", grid_frame)
grid_layout.addWidget(self._reverse_dependencies_label, 4, 1)
self._description_label = QtGui.QLabel("", grid_frame)
grid_layout.addWidget(self._description_label, 5, 1)
for lbl in [l1, l2, l3, l4, l5, l6,
self._name_label,
self._version_label,
self._dependencies_label,
self._identifier_label,
self._reverse_dependencies_label,
self._description_label]:
lbl.setAlignment(QtCore.Qt.AlignTop | QtCore.Qt.AlignLeft)
lbl.setWordWrap(True)
grid_layout.setRowStretch(4, 1)
grid_layout.setColumnStretch(1, 1)
right_layout.addWidget(info_frame)
self._enable_button = QtGui.QPushButton("&Enable")
self._enable_button.setEnabled(False)
self.connect(self._enable_button,
QtCore.SIGNAL("clicked()"),
self.enable_current_package)
self._disable_button = QtGui.QPushButton("&Disable")
self._disable_button.setEnabled(False)
self.connect(self._disable_button,
QtCore.SIGNAL("clicked()"),
self.disable_current_package)
self._configure_button = QtGui.QPushButton("&Configure...")
self._configure_button.setEnabled(False)
self.connect(self._configure_button,
QtCore.SIGNAL("clicked()"),
self.configure_current_package)
self._reload_button = QtGui.QPushButton("&Reload")
self._reload_button.setEnabled(False)
self.connect(self._reload_button,
QtCore.SIGNAL("clicked()"),
self.reload_current_package)
button_box = QtGui.QDialogButtonBox()
button_box.addButton(self._enable_button, QtGui.QDialogButtonBox.ActionRole)
button_box.addButton(self._disable_button, QtGui.QDialogButtonBox.ActionRole)
button_box.addButton(self._configure_button, QtGui.QDialogButtonBox.ActionRole)
button_box.addButton(self._reload_button, QtGui.QDialogButtonBox.ActionRole)
right_layout.addWidget(button_box)
self.connect(self,
self.select_package_after_update_signal,
self.select_package_after_update_slot,
QtCore.Qt.QueuedConnection)
# pm = get_package_manager()
# self.connect(pm,
# pm.reloading_package_signal,
# self.reload_current_package_finisher,
# QtCore.Qt.QueuedConnection)
app = get_vistrails_application()
app.register_notification("pm_reloading_package",
self.reload_current_package_finisher)
app.register_notification("package_added", self.package_added)
app.register_notification("package_removed", self.package_removed)
self.populate_lists()
self._current_package = None
def populate_lists(self):
pkg_manager = get_package_manager()
enabled_pkgs = sorted(pkg_manager.enabled_package_list())
enabled_pkg_dict = dict([(pkg.codepath, pkg) for
pkg in enabled_pkgs])
self._enabled_packages_list.clear()
for pkg in enabled_pkgs:
self._enabled_packages_list.addItem(pkg.codepath)
self._enabled_packages_list.sortItems()
available_pkg_names = [pkg for pkg in
sorted(pkg_manager.available_package_names_list())
if pkg not in enabled_pkg_dict]
self._available_packages_list.clear()
for pkg in available_pkg_names:
self._available_packages_list.addItem(pkg)
self._available_packages_list.sortItems()
##########################################################################
def enable_current_package(self):
av = self._available_packages_list
item = av.currentItem()
codepath = str(item.text())
pm = get_package_manager()
try:
new_deps = self._current_package.dependencies()
except Exception, e:
debug.critical("Failed getting dependencies of package %s, "
"so it will not be enabled" %
self._current_package.name,
e)
return
from vistrails.core.modules.basic_modules import identifier as basic_modules_identifier
if self._current_package.identifier != basic_modules_identifier:
new_deps.append(basic_modules_identifier)
try:
pm.check_dependencies(self._current_package, new_deps)
except Package.MissingDependency, e:
debug.critical("Missing dependencies", e)
else:
# Deselects available list to prevent another package from getting
# selected once the current item leaves the list
self._available_packages_list.setCurrentItem(None)
palette = QModulePalette.instance()
palette.setUpdatesEnabled(False)
try:
pm.late_enable_package(codepath)
except (Package.InitializationFailed, MissingRequirement), e:
debug.critical("Initialization of package '%s' failed" %
codepath,
e)
# Loading failed: reselect the item
self._available_packages_list.setCurrentItem(item)
raise
finally:
palette.setUpdatesEnabled(True)
# the old code that used to be here to update the lists
# has been moved to package_added
self.invalidate_current_pipeline()
def disable_current_package(self):
inst = self._enabled_packages_list
item = inst.currentItem()
codepath = str(item.text())
pm = get_package_manager()
dependency_graph = pm.dependency_graph()
identifier = pm.get_package_by_codepath(codepath).identifier
if dependency_graph.in_degree(identifier) > 0:
rev_deps = dependency_graph.inverse_adjacency_list[identifier]
debug.critical("Missing dependency",
("There are other packages that depend on this:\n %s" +
"Please disable those first.") % rev_deps)
else:
pm.late_disable_package(codepath)
self.invalidate_current_pipeline()
# the old code that used to be here to update the lists
# has been moved to package_removed
def configure_current_package(self):
dlg = QPackageConfigurationDialog(self, self._current_package)
dlg.exec_()
def reload_current_package(self):
if self._enabled_packages_list.currentItem() is not None:
# Disables the selected package (which was enabled) and all its
# reverse dependencies, then enables it all again
item = self._enabled_packages_list.currentItem()
pm = get_package_manager()
codepath = str(item.text())
palette = QModulePalette.instance()
palette.setUpdatesEnabled(False)
pm.reload_package_disable(codepath)
elif self._available_packages_list.currentItem() is not None:
# Reloads the selected package's (which was not enabled) __init__
# module
item = self._available_packages_list.currentItem()
pm = get_package_manager()
codepath = str(item.text())
pm._available_packages.pop(codepath).unload()
self.selected_available_list()
def reload_current_package_finisher(self, codepath, reverse_deps, prefix_dictionary):
# REENABLES the current package and all reverse dependencies
pm = get_package_manager()
try:
pm.reload_package_enable(reverse_deps, prefix_dictionary)
except Package.InitializationFailed, e:
debug.critical("Re-initialization of package '%s' failed" %
codepath,
e)
raise
finally:
self.populate_lists()
palette = QModulePalette.instance()
palette.setUpdatesEnabled(True)
self.select_package_after_update(codepath)
self.invalidate_current_pipeline()
def package_added(self, codepath):
# package was added, we need to update list
av = self._available_packages_list
inst = self._enabled_packages_list
items = av.findItems(codepath, QtCore.Qt.MatchExactly)
if len(items) < 1:
# this is required for basic_modules and abstraction since
# they are not in available_package_names_list initially
self.populate_lists()
items = av.findItems(codepath, QtCore.Qt.MatchExactly)
for item in items:
pos = av.indexFromItem(item).row()
av.takeItem(pos)
inst.addItem(item)
inst.sortItems()
self.select_package_after_update(codepath)
def package_removed(self, codepath):
# package was removed, we need to update list
# if we run a late-enable with a prefix (console_mode_test),
# we don't actually have the package later
self.populate_lists()
self.select_package_after_update(codepath)
def select_package_after_update(self, codepath):
        # Selecting the package causes self._current_package to be set,
        # which holds a reference that prevents the package from being
        # freed, so we queue the selection to happen after the event loop
        # completes.
self.emit(self.select_package_after_update_signal, codepath)
def select_package_after_update_slot(self, codepath):
inst = self._enabled_packages_list
av = self._available_packages_list
for item in av.findItems(codepath, QtCore.Qt.MatchExactly):
av.setCurrentItem(item)
for item in inst.findItems(codepath, QtCore.Qt.MatchExactly):
inst.setCurrentItem(item)
def set_buttons_to_enabled_package(self):
self._enable_button.setEnabled(False)
assert self._current_package
pm = get_package_manager()
from vistrails.core.modules.basic_modules import identifier as basic_modules_identifier
from vistrails.core.modules.abstraction import identifier as abstraction_identifier
is_not_basic_modules = (self._current_package.identifier != basic_modules_identifier)
is_not_abstraction = (self._current_package.identifier != abstraction_identifier)
can_disable = (pm.can_be_disabled(self._current_package.identifier) and
is_not_basic_modules and
is_not_abstraction)
self._disable_button.setEnabled(can_disable)
if not can_disable and is_not_basic_modules and is_not_abstraction:
msg = ("Module has reverse dependencies that must\n"+
"be first disabled.")
self._disable_button.setToolTip(msg)
else:
self._disable_button.setToolTip("")
conf = self._current_package.configuration is not None
self._configure_button.setEnabled(conf)
self._reload_button.setEnabled(is_not_basic_modules)
def set_buttons_to_available_package(self):
self._configure_button.setEnabled(False)
self._disable_button.setEnabled(False)
self._enable_button.setEnabled(True)
self._reload_button.setEnabled(True)
def set_package_information(self):
"""Looks at current package and sets all labels (name,
dependencies, etc.) appropriately.
"""
assert self._current_package
p = self._current_package
try:
p.load()
except Exception, e:
msg = 'ERROR: Could not load package.'
self._name_label.setText(msg)
self._version_label.setText(msg)
self._identifier_label.setText(msg)
self._dependencies_label.setText(msg)
self._description_label.setText(msg)
self._reverse_dependencies_label.setText(msg)
debug.critical('Cannot load package', e)
else:
self._name_label.setText(p.name)
try:
deps = ', '.join(str(d) for d in p.dependencies()) or \
'No package dependencies.'
except Exception, e:
debug.critical("Failed getting dependencies of package %s" %
p.name,
e)
deps = "ERROR: Failed getting dependencies"
try:
pm = get_package_manager()
reverse_deps = \
(', '.join(pm.reverse_dependencies(p.identifier)) or
'No reverse dependencies.')
except KeyError:
reverse_deps = ("Reverse dependencies only " +
"available for enabled packages.")
self._identifier_label.setText(p.identifier)
self._version_label.setText(p.version)
self._dependencies_label.setText(deps)
self._description_label.setText(p.description)
self._reverse_dependencies_label.setText(reverse_deps)
##########################################################################
# Signal handling
def selected_enabled_list(self):
item = self._enabled_packages_list.currentItem()
if item is None:
return # prevent back and forth looping when clearing selection
self._available_packages_list.setCurrentItem(None)
codepath = str(item.text())
pm = get_package_manager()
self._current_package = pm.get_package_by_codepath(codepath)
self.set_buttons_to_enabled_package()
        # A delayed signal can arrive after the package has already been
        # removed
if not pm.has_package(self._current_package.identifier):
return
self.set_package_information()
self._enabled_packages_list.setFocus()
def selected_available_list(self):
item = self._available_packages_list.currentItem()
if item is None:
return # prevent back and forth looping when clearing selection
self._enabled_packages_list.setCurrentItem(None)
codepath = str(item.text())
pm = get_package_manager()
self._current_package = pm.look_at_available_package(codepath)
self.set_buttons_to_available_package()
self.set_package_information()
self._available_packages_list.setFocus()
def invalidate_current_pipeline(self):
from vistrails.gui.vistrails_window import _app
_app.invalidate_pipelines()
class QOutputConfigurationPane(QtGui.QWidget):
def __init__(self, parent, persistent_config, temp_config):
QtGui.QWidget.__init__(self, parent)
self.persistent_config = persistent_config
self.temp_config = temp_config
scroll_area = QtGui.QScrollArea()
inner_widget = QtGui.QWidget()
self.inner_layout = QtGui.QVBoxLayout()
inner_widget.setLayout(self.inner_layout)
scroll_area.setWidget(inner_widget)
scroll_area.setWidgetResizable(True)
self.setLayout(QtGui.QVBoxLayout())
self.layout().addWidget(scroll_area, 1)
self.layout().setContentsMargins(0,0,0,0)
app = get_vistrails_application()
app.register_notification("package_added", self.update_output_modules)
app.register_notification("package_removed", self.update_output_modules)
self.mode_widgets = {}
def update_output_modules(self, *args, **kwargs):
        # need to find all currently loaded output modes (need to
        # check after modules are loaded and spin through the registry)
        # and display them here
reg = get_module_registry()
output_d = reg.get_descriptor_by_name(get_vistrails_basic_pkg_id(),
"OutputModule")
sublist = reg.get_descriptor_subclasses(output_d)
modes = {}
for d in sublist:
if hasattr(d.module, '_output_modes'):
for mode in d.module._output_modes:
modes[mode.mode_type] = mode
found_modes = set()
for mode_type, mode in modes.iteritems():
found_modes.add(mode_type)
if mode_type not in self.mode_widgets:
mode_config = None
output_settings = self.persistent_config.outputDefaultSettings
if output_settings.has(mode_type):
mode_config = getattr(output_settings, mode_type)
widget = OutputModeConfigurationWidget(mode, mode_config)
widget.fieldChanged.connect(self.field_was_changed)
self.inner_layout.addWidget(widget)
self.mode_widgets[mode_type] = widget
for mode_type, widget in self.mode_widgets.items():
if mode_type not in found_modes:
self.inner_layout.removeWidget(self.mode_widgets[mode_type])
del self.mode_widgets[mode_type]
def field_was_changed(self, mode_widget):
# FIXME need to use temp_config to show command-line overrides
for k1, v_dict in mode_widget._changed_config.iteritems():
for k2, v in v_dict.iteritems():
k = "%s.%s" % (k1, k2)
self.persistent_config.outputDefaultSettings.set_deep_value(
k, v, True)
self.temp_config.outputDefaultSettings.set_deep_value(
k, v, True)
class QPreferencesDialog(QtGui.QDialog):
def __init__(self, parent):
QtGui.QDialog.__init__(self, parent)
self.setWindowTitle('VisTrails Preferences')
layout = QtGui.QHBoxLayout(self)
layout.setMargin(0)
layout.setSpacing(0)
self.setLayout(layout)
f = QtGui.QFrame()
layout.addWidget(f)
l = QtGui.QVBoxLayout(f)
f.setLayout(l)
self._tab_widget = QtGui.QTabWidget(f)
l.addWidget(self._tab_widget)
self._tab_widget.setSizePolicy(QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Expanding)
tabs = [("General", ["General", "Packages"]),
("Interface", ["Interface", "Startup"]),
("Paths && URLs", ["Paths", "Web Sharing"]),
("Advanced", ["Upgrades", "Thumbnails", "Advanced"]),
]
for (tab_name, categories) in tabs:
tab = QConfigurationPane(self,
get_vistrails_persistent_configuration(),
get_vistrails_configuration(),
[(c, base_config[c]) for c in categories])
self._tab_widget.addTab(tab, tab_name)
output_tab = QOutputConfigurationPane(self,
get_vistrails_persistent_configuration(),
get_vistrails_configuration())
self._tab_widget.addTab(output_tab, "Output")
self._packages_tab = self.create_packages_tab()
self._tab_widget.addTab(self._packages_tab, 'Packages')
self._configuration_tab = self.create_configuration_tab()
self._tab_widget.addTab(self._configuration_tab, 'Expert')
self.connect(self._tab_widget,
QtCore.SIGNAL('currentChanged(int)'),
self.tab_changed)
self.connect(self._configuration_tab._tree.treeWidget,
QtCore.SIGNAL('configuration_changed'),
self.configuration_changed)
def close_dialog(self):
self.done(0)
def create_configuration_tab(self):
return QConfigurationWidget(self,
get_vistrails_persistent_configuration(),
get_vistrails_configuration())
def create_packages_tab(self):
return QPackagesWidget(self)
def sizeHint(self):
return QtCore.QSize(800, 600)
def tab_changed(self, index):
""" tab_changed(index: int) -> None
Keep general and advanced configurations in sync
"""
# FIXME Need to fix this
self._configuration_tab.configuration_changed(
get_vistrails_persistent_configuration(),
get_vistrails_configuration())
def configuration_changed(self, item, new_value):
""" configuration_changed(item: QTreeWidgetItem *,
new_value: QString) -> None
Write the current session configuration to startup.xml.
Note: This is already happening on close to capture configuration
items that are not set in preferences. We are doing this here too, so
we guarantee the changes were saved before VisTrails crashes.
"""
from vistrails.gui.application import get_vistrails_application
get_vistrails_application().save_configuration()
#############################################################################
import unittest
class TestPreferencesDialog(unittest.TestCase):
def test_remove_package(self):
""" Tests if the package really gets deleted, and that it gets
selected again in the available packages list.
"""
pkg = "dialogs"
_app = get_vistrails_application()
builder = _app.builderWindow
builder.showPreferences()
prefs = builder.preferencesDialog
packages = prefs._packages_tab
prefs._tab_widget.setCurrentWidget(packages)
QtGui.QApplication.processEvents()
# check if package is loaded
av = packages._available_packages_list
item, = av.findItems(pkg, QtCore.Qt.MatchExactly)
av.setCurrentItem(item)
QtGui.QApplication.processEvents()
QtGui.QApplication.processEvents()
packages.enable_current_package()
QtGui.QApplication.processEvents()
QtGui.QApplication.processEvents()
inst = packages._enabled_packages_list
item, = inst.findItems(pkg, QtCore.Qt.MatchExactly)
inst.setCurrentItem(item)
QtGui.QApplication.processEvents()
QtGui.QApplication.processEvents()
packages.disable_current_package()
QtGui.QApplication.processEvents()
QtGui.QApplication.processEvents()
# force delayed calls
packages.populate_lists()
packages.select_package_after_update_slot(pkg)
QtGui.QApplication.processEvents()
QtGui.QApplication.processEvents()
# This does not work because the selection is delayed
av = packages._available_packages_list
items = av.selectedItems()
self.assertEqual(len(items), 1, "No available items selected!")
self.assertEqual(items[0].text(), unicode(pkg),
"Wrong available item selected: %s" % items[0].text())
# check if configuration has been written correctly
startup = _app.startup
self.assertIn(pkg, startup.disabled_packages)
self.assertNotIn(pkg, startup.enabled_packages)
|
|
import sys
from rpython.rlib.debug import check_nonneg
from rpython.rlib.unroll import unrolling_iterable
from rpython.rlib.rsre import rsre_char
from rpython.tool.sourcetools import func_with_new_name
from rpython.rlib.objectmodel import we_are_translated, not_rpython
from rpython.rlib import jit
from rpython.rlib.rsre.rsre_jit import install_jitdriver, install_jitdriver_spec
OPCODE_FAILURE = 0
OPCODE_SUCCESS = 1
OPCODE_ANY = 2
OPCODE_ANY_ALL = 3
OPCODE_ASSERT = 4
OPCODE_ASSERT_NOT = 5
OPCODE_AT = 6
OPCODE_BRANCH = 7
#OPCODE_CALL = 8
OPCODE_CATEGORY = 9
OPCODE_CHARSET = 10
OPCODE_BIGCHARSET = 11
OPCODE_GROUPREF = 12
OPCODE_GROUPREF_EXISTS = 13
OPCODE_GROUPREF_IGNORE = 14
OPCODE_IN = 15
OPCODE_IN_IGNORE = 16
OPCODE_INFO = 17
OPCODE_JUMP = 18
OPCODE_LITERAL = 19
OPCODE_LITERAL_IGNORE = 20
OPCODE_MARK = 21
OPCODE_MAX_UNTIL = 22
OPCODE_MIN_UNTIL = 23
OPCODE_NOT_LITERAL = 24
OPCODE_NOT_LITERAL_IGNORE = 25
OPCODE_NEGATE = 26
OPCODE_RANGE = 27
OPCODE_REPEAT = 28
OPCODE_REPEAT_ONE = 29
#OPCODE_SUBPATTERN = 30
OPCODE_MIN_REPEAT_ONE = 31
OPCODE_RANGE_IGNORE = 32
# not used by Python itself
OPCODE_UNICODE_GENERAL_CATEGORY = 70
# ____________________________________________________________
_seen_specname = {}
def specializectx(func):
"""A decorator that specializes 'func(ctx,...)' for each concrete subclass
of AbstractMatchContext. During annotation, if 'ctx' is known to be a
specific subclass, calling 'func' is a direct call; if 'ctx' is only known
to be of class AbstractMatchContext, calling 'func' is an indirect call.
"""
assert func.func_code.co_varnames[0] == 'ctx'
specname = '_spec_' + func.func_name
while specname in _seen_specname:
specname += '_'
_seen_specname[specname] = True
# Install a copy of the function under the name '_spec_funcname' in each
# concrete subclass
specialized_methods = []
for prefix, concreteclass in [('buf', BufMatchContext),
('str', StrMatchContext),
('uni', UnicodeMatchContext)]:
newfunc = func_with_new_name(func, prefix + specname)
assert not hasattr(concreteclass, specname)
setattr(concreteclass, specname, newfunc)
specialized_methods.append(newfunc)
# Return a dispatcher function, specialized on the exact type of 'ctx'
def dispatch(ctx, *args):
return getattr(ctx, specname)(*args)
dispatch._annspecialcase_ = 'specialize:argtype(0)'
dispatch._specialized_methods_ = specialized_methods
return func_with_new_name(dispatch, specname)
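# Usage sketch (illustrative only): a helper that reads one character can be
# specialized per concrete context class. 'first_char' and 'pattern_code_list'
# below are placeholders; specializectx and StrMatchContext (defined below in
# this module) are real.
#
#     @specializectx
#     def first_char(ctx, ptr):
#         return ctx.str(ptr)
#
#     ctx = StrMatchContext(pattern_code_list, "abc", 0, 3, 0)
#     first_char(ctx, 0)     # dispatches to the str-specialized copy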
# ____________________________________________________________
class Error(Exception):
def __init__(self, msg):
self.msg = msg
class AbstractMatchContext(object):
"""Abstract base class"""
_immutable_fields_ = ['pattern[*]', 'flags', 'end']
match_start = 0
match_end = 0
match_marks = None
match_marks_flat = None
fullmatch_only = False
def __init__(self, pattern, match_start, end, flags):
# 'match_start' and 'end' must be known to be non-negative
# and they must not be more than len(string).
check_nonneg(match_start)
check_nonneg(end)
self.pattern = pattern
self.match_start = match_start
self.end = end
self.flags = flags
# check we don't get the old value of MAXREPEAT
# during the untranslated tests
if not we_are_translated():
assert 65535 not in pattern
def reset(self, start):
self.match_start = start
self.match_marks = None
self.match_marks_flat = None
def pat(self, index):
check_nonneg(index)
result = self.pattern[index]
# Check that we only return non-negative integers from this helper.
# It is possible that self.pattern contains negative integers
# (see set_charset() and set_bigcharset() in rsre_char.py)
# but they should not be fetched via this helper here.
assert result >= 0
return result
@not_rpython
def str(self, index):
"""Must be overridden in a concrete subclass.
The tag ^^^ here is used to generate a translation-time crash
if there is a call to str() that is indirect. All calls must
be direct for performance reasons; you need to specialize the
caller with @specializectx."""
raise NotImplementedError
@not_rpython
def lowstr(self, index):
"""Similar to str()."""
raise NotImplementedError
def get_mark(self, gid):
return find_mark(self.match_marks, gid)
def flatten_marks(self):
# for testing
if self.match_marks_flat is None:
self._compute_flattened_marks()
return self.match_marks_flat
def _compute_flattened_marks(self):
self.match_marks_flat = [self.match_start, self.match_end]
mark = self.match_marks
if mark is not None:
self.match_lastindex = mark.gid
else:
self.match_lastindex = -1
while mark is not None:
index = mark.gid + 2
while index >= len(self.match_marks_flat):
self.match_marks_flat.append(-1)
if self.match_marks_flat[index] == -1:
self.match_marks_flat[index] = mark.position
mark = mark.prev
self.match_marks = None # clear
def span(self, groupnum=0):
# compatibility
fmarks = self.flatten_marks()
groupnum *= 2
if groupnum >= len(fmarks):
return (-1, -1)
return (fmarks[groupnum], fmarks[groupnum+1])
def group(self, groupnum=0):
frm, to = self.span(groupnum)
if 0 <= frm <= to:
return self._string[frm:to]
else:
return None
def fresh_copy(self, start):
raise NotImplementedError
class BufMatchContext(AbstractMatchContext):
"""Concrete subclass for matching in a buffer."""
_immutable_fields_ = ["_buffer"]
def __init__(self, pattern, buf, match_start, end, flags):
AbstractMatchContext.__init__(self, pattern, match_start, end, flags)
self._buffer = buf
def str(self, index):
check_nonneg(index)
return ord(self._buffer.getitem(index))
def lowstr(self, index):
c = self.str(index)
return rsre_char.getlower(c, self.flags)
def fresh_copy(self, start):
return BufMatchContext(self.pattern, self._buffer, start,
self.end, self.flags)
class StrMatchContext(AbstractMatchContext):
"""Concrete subclass for matching in a plain string."""
_immutable_fields_ = ["_string"]
def __init__(self, pattern, string, match_start, end, flags):
AbstractMatchContext.__init__(self, pattern, match_start, end, flags)
self._string = string
if not we_are_translated() and isinstance(string, unicode):
self.flags |= rsre_char.SRE_FLAG_UNICODE # for rsre_re.py
def str(self, index):
check_nonneg(index)
return ord(self._string[index])
def lowstr(self, index):
c = self.str(index)
return rsre_char.getlower(c, self.flags)
def fresh_copy(self, start):
return StrMatchContext(self.pattern, self._string, start,
self.end, self.flags)
class UnicodeMatchContext(AbstractMatchContext):
"""Concrete subclass for matching in a unicode string."""
_immutable_fields_ = ["_unicodestr"]
def __init__(self, pattern, unicodestr, match_start, end, flags):
AbstractMatchContext.__init__(self, pattern, match_start, end, flags)
self._unicodestr = unicodestr
def str(self, index):
check_nonneg(index)
return ord(self._unicodestr[index])
def lowstr(self, index):
c = self.str(index)
return rsre_char.getlower(c, self.flags)
def fresh_copy(self, start):
return UnicodeMatchContext(self.pattern, self._unicodestr, start,
self.end, self.flags)
# ____________________________________________________________
class Mark(object):
_immutable_ = True
def __init__(self, gid, position, prev):
self.gid = gid
self.position = position
self.prev = prev # chained list
def find_mark(mark, gid):
while mark is not None:
if mark.gid == gid:
return mark.position
mark = mark.prev
return -1
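# Illustrative sketch of the chained list above (not executed here): marks are
# prepended as the match progresses, and find_mark() returns the most recently
# recorded position for a given gid, or -1 if that gid was never recorded.
#
#     marks = Mark(0, 2, None)         # gid 0 recorded at position 2
#     marks = Mark(1, 5, marks)        # gid 1 recorded at position 5
#     find_mark(marks, 0)              # -> 2
#     find_mark(marks, 3)              # -> -1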
# ____________________________________________________________
class MatchResult(object):
subresult = None
def move_to_next_result(self, ctx):
# returns either 'self' or None
result = self.subresult
if result is None:
return
if result.move_to_next_result(ctx):
return self
return self.find_next_result(ctx)
def find_next_result(self, ctx):
raise NotImplementedError
MATCHED_OK = MatchResult()
class BranchMatchResult(MatchResult):
def __init__(self, ppos, ptr, marks):
self.ppos = ppos
self.start_ptr = ptr
self.start_marks = marks
@jit.unroll_safe
def find_first_result(self, ctx):
ppos = jit.hint(self.ppos, promote=True)
while ctx.pat(ppos):
result = sre_match(ctx, ppos + 1, self.start_ptr, self.start_marks)
ppos += ctx.pat(ppos)
if result is not None:
self.subresult = result
self.ppos = ppos
return self
find_next_result = find_first_result
class RepeatOneMatchResult(MatchResult):
install_jitdriver('RepeatOne',
greens=['nextppos', 'ctx.pattern'],
reds=['ptr', 'self', 'ctx'],
debugprint=(1, 0)) # indices in 'greens'
def __init__(self, nextppos, minptr, ptr, marks):
self.nextppos = nextppos
self.minptr = minptr
self.start_ptr = ptr
self.start_marks = marks
def find_first_result(self, ctx):
ptr = self.start_ptr
nextppos = self.nextppos
while ptr >= self.minptr:
ctx.jitdriver_RepeatOne.jit_merge_point(
self=self, ptr=ptr, ctx=ctx, nextppos=nextppos)
result = sre_match(ctx, nextppos, ptr, self.start_marks)
ptr -= 1
if result is not None:
self.subresult = result
self.start_ptr = ptr
return self
find_next_result = find_first_result
class MinRepeatOneMatchResult(MatchResult):
install_jitdriver('MinRepeatOne',
greens=['nextppos', 'ppos3', 'ctx.pattern'],
reds=['ptr', 'self', 'ctx'],
debugprint=(2, 0)) # indices in 'greens'
def __init__(self, nextppos, ppos3, maxptr, ptr, marks):
self.nextppos = nextppos
self.ppos3 = ppos3
self.maxptr = maxptr
self.start_ptr = ptr
self.start_marks = marks
def find_first_result(self, ctx):
ptr = self.start_ptr
nextppos = self.nextppos
ppos3 = self.ppos3
while ptr <= self.maxptr:
ctx.jitdriver_MinRepeatOne.jit_merge_point(
self=self, ptr=ptr, ctx=ctx, nextppos=nextppos, ppos3=ppos3)
result = sre_match(ctx, nextppos, ptr, self.start_marks)
if result is not None:
self.subresult = result
self.start_ptr = ptr
return self
if not self.next_char_ok(ctx, ptr, ppos3):
break
ptr += 1
def find_next_result(self, ctx):
ptr = self.start_ptr
if not self.next_char_ok(ctx, ptr, self.ppos3):
return
self.start_ptr = ptr + 1
return self.find_first_result(ctx)
def next_char_ok(self, ctx, ptr, ppos):
if ptr == ctx.end:
return False
op = ctx.pat(ppos)
for op1, checkerfn in unroll_char_checker:
if op1 == op:
return checkerfn(ctx, ptr, ppos)
# obscure case: it should be a single char pattern, but isn't
# one of the opcodes in unroll_char_checker (see test_ext_opcode)
return sre_match(ctx, ppos, ptr, self.start_marks) is not None
class AbstractUntilMatchResult(MatchResult):
def __init__(self, ppos, tailppos, ptr, marks):
self.ppos = ppos
self.tailppos = tailppos
self.cur_ptr = ptr
self.cur_marks = marks
self.pending = None
self.num_pending = 0
class Pending(object):
def __init__(self, ptr, marks, enum, next):
self.ptr = ptr
self.marks = marks
self.enum = enum
self.next = next # chained list
class MaxUntilMatchResult(AbstractUntilMatchResult):
install_jitdriver('MaxUntil',
greens=['ppos', 'tailppos', 'match_more', 'ctx.pattern'],
reds=['ptr', 'marks', 'self', 'ctx'],
debugprint=(3, 0, 2))
def find_first_result(self, ctx):
return self.search_next(ctx, match_more=True)
def find_next_result(self, ctx):
return self.search_next(ctx, match_more=False)
def search_next(self, ctx, match_more):
ppos = self.ppos
tailppos = self.tailppos
ptr = self.cur_ptr
marks = self.cur_marks
while True:
ctx.jitdriver_MaxUntil.jit_merge_point(
ppos=ppos, tailppos=tailppos, match_more=match_more,
ptr=ptr, marks=marks, self=self, ctx=ctx)
if match_more:
max = ctx.pat(ppos+2)
if max == rsre_char.MAXREPEAT or self.num_pending < max:
# try to match one more 'item'
enum = sre_match(ctx, ppos + 3, ptr, marks)
else:
enum = None # 'max' reached, no more matches
else:
p = self.pending
if p is None:
return
self.pending = p.next
self.num_pending -= 1
ptr = p.ptr
marks = p.marks
enum = p.enum.move_to_next_result(ctx)
#
min = ctx.pat(ppos+1)
if enum is not None:
# matched one more 'item'. record it and continue.
last_match_length = ctx.match_end - ptr
self.pending = Pending(ptr, marks, enum, self.pending)
self.num_pending += 1
ptr = ctx.match_end
marks = ctx.match_marks
if last_match_length == 0 and self.num_pending >= min:
# zero-width protection: after an empty match, if there
# are enough matches, don't try to match more. Instead,
# fall through to trying to match 'tail'.
pass
else:
match_more = True
continue
# 'item' no longer matches.
if self.num_pending >= min:
# try to match 'tail' if we have enough 'item'
result = sre_match(ctx, tailppos, ptr, marks)
if result is not None:
self.subresult = result
self.cur_ptr = ptr
self.cur_marks = marks
return self
match_more = False
class MinUntilMatchResult(AbstractUntilMatchResult):
def find_first_result(self, ctx):
return self.search_next(ctx, resume=False)
def find_next_result(self, ctx):
return self.search_next(ctx, resume=True)
def search_next(self, ctx, resume):
# XXX missing jit support here
ppos = self.ppos
min = ctx.pat(ppos+1)
max = ctx.pat(ppos+2)
ptr = self.cur_ptr
marks = self.cur_marks
while True:
# try to match 'tail' if we have enough 'item'
if not resume and self.num_pending >= min:
result = sre_match(ctx, self.tailppos, ptr, marks)
if result is not None:
self.subresult = result
self.cur_ptr = ptr
self.cur_marks = marks
return self
resume = False
if max == rsre_char.MAXREPEAT or self.num_pending < max:
# try to match one more 'item'
enum = sre_match(ctx, ppos + 3, ptr, marks)
#
# zero-width match protection
if self.num_pending >= min:
while enum is not None and ptr == ctx.match_end:
enum = enum.move_to_next_result(ctx)
else:
enum = None # 'max' reached, no more matches
while enum is None:
# 'item' does not match; try to get further results from
# the 'pending' list.
p = self.pending
if p is None:
return
self.pending = p.next
self.num_pending -= 1
ptr = p.ptr
marks = p.marks
enum = p.enum.move_to_next_result(ctx)
# matched one more 'item'. record it and continue
self.pending = Pending(ptr, marks, enum, self.pending)
self.num_pending += 1
ptr = ctx.match_end
marks = ctx.match_marks
# ____________________________________________________________
@specializectx
@jit.unroll_safe
def sre_match(ctx, ppos, ptr, marks):
"""Returns either None or a MatchResult object. Usually we only need
the first result, but there is the case of REPEAT...UNTIL where we
need all results; in that case we use the method move_to_next_result()
of the MatchResult."""
while True:
op = ctx.pat(ppos)
ppos += 1
#jit.jit_debug("sre_match", op, ppos, ptr)
#
# When using the JIT, calls to sre_match() must always have a constant
# (green) argument for 'ppos'. If not, the following assert fails.
jit.assert_green(op)
if op == OPCODE_FAILURE:
return
elif op == OPCODE_SUCCESS:
if ctx.fullmatch_only:
if ptr != ctx.end:
return # not a full match
ctx.match_end = ptr
ctx.match_marks = marks
return MATCHED_OK
elif (op == OPCODE_MAX_UNTIL or
op == OPCODE_MIN_UNTIL):
ctx.match_end = ptr
ctx.match_marks = marks
return MATCHED_OK
elif op == OPCODE_ANY:
# match anything (except a newline)
# <ANY>
if ptr >= ctx.end or rsre_char.is_linebreak(ctx.str(ptr)):
return
ptr += 1
elif op == OPCODE_ANY_ALL:
# match anything
# <ANY_ALL>
if ptr >= ctx.end:
return
ptr += 1
elif op == OPCODE_ASSERT:
# assert subpattern
# <ASSERT> <0=skip> <1=back> <pattern>
ptr1 = ptr - ctx.pat(ppos+1)
saved = ctx.fullmatch_only
ctx.fullmatch_only = False
stop = ptr1 < 0 or sre_match(ctx, ppos + 2, ptr1, marks) is None
ctx.fullmatch_only = saved
if stop:
return
marks = ctx.match_marks
ppos += ctx.pat(ppos)
elif op == OPCODE_ASSERT_NOT:
# assert not subpattern
# <ASSERT_NOT> <0=skip> <1=back> <pattern>
ptr1 = ptr - ctx.pat(ppos+1)
saved = ctx.fullmatch_only
ctx.fullmatch_only = False
stop = (ptr1 >= 0 and sre_match(ctx, ppos + 2, ptr1, marks)
is not None)
ctx.fullmatch_only = saved
if stop:
return
ppos += ctx.pat(ppos)
elif op == OPCODE_AT:
# match at given position (e.g. at beginning, at boundary, etc.)
# <AT> <code>
if not sre_at(ctx, ctx.pat(ppos), ptr):
return
ppos += 1
elif op == OPCODE_BRANCH:
# alternation
# <BRANCH> <0=skip> code <JUMP> ... <NULL>
result = BranchMatchResult(ppos, ptr, marks)
return result.find_first_result(ctx)
elif op == OPCODE_CATEGORY:
# seems to be never produced, but used by some tests from
# pypy/module/_sre/test
# <CATEGORY> <category>
if (ptr == ctx.end or
not rsre_char.category_dispatch(ctx.pat(ppos), ctx.str(ptr))):
return
ptr += 1
ppos += 1
elif op == OPCODE_GROUPREF:
# match backreference
# <GROUPREF> <groupnum>
startptr, length = get_group_ref(marks, ctx.pat(ppos))
if length < 0:
return # group was not previously defined
if not match_repeated(ctx, ptr, startptr, length):
return # no match
ptr += length
ppos += 1
elif op == OPCODE_GROUPREF_IGNORE:
# match backreference
# <GROUPREF> <groupnum>
startptr, length = get_group_ref(marks, ctx.pat(ppos))
if length < 0:
return # group was not previously defined
if not match_repeated_ignore(ctx, ptr, startptr, length):
return # no match
ptr += length
ppos += 1
elif op == OPCODE_GROUPREF_EXISTS:
# conditional match depending on the existence of a group
# <GROUPREF_EXISTS> <group> <skip> codeyes <JUMP> codeno ...
_, length = get_group_ref(marks, ctx.pat(ppos))
if length >= 0:
ppos += 2 # jump to 'codeyes'
else:
ppos += ctx.pat(ppos+1) # jump to 'codeno'
elif op == OPCODE_IN:
# match set member (or non_member)
# <IN> <skip> <set>
if ptr >= ctx.end or not rsre_char.check_charset(ctx, ppos+1,
ctx.str(ptr)):
return
ppos += ctx.pat(ppos)
ptr += 1
elif op == OPCODE_IN_IGNORE:
# match set member (or non_member), ignoring case
# <IN> <skip> <set>
if ptr >= ctx.end or not rsre_char.check_charset(ctx, ppos+1,
ctx.lowstr(ptr)):
return
ppos += ctx.pat(ppos)
ptr += 1
elif op == OPCODE_INFO:
# optimization info block
# <INFO> <0=skip> <1=flags> <2=min> ...
if (ctx.end - ptr) < ctx.pat(ppos+2):
return
ppos += ctx.pat(ppos)
elif op == OPCODE_JUMP:
ppos += ctx.pat(ppos)
elif op == OPCODE_LITERAL:
# match literal string
# <LITERAL> <code>
if ptr >= ctx.end or ctx.str(ptr) != ctx.pat(ppos):
return
ppos += 1
ptr += 1
elif op == OPCODE_LITERAL_IGNORE:
# match literal string, ignoring case
# <LITERAL_IGNORE> <code>
if ptr >= ctx.end or ctx.lowstr(ptr) != ctx.pat(ppos):
return
ppos += 1
ptr += 1
elif op == OPCODE_MARK:
# set mark
# <MARK> <gid>
gid = ctx.pat(ppos)
marks = Mark(gid, ptr, marks)
ppos += 1
elif op == OPCODE_NOT_LITERAL:
# match if it's not a literal string
# <NOT_LITERAL> <code>
if ptr >= ctx.end or ctx.str(ptr) == ctx.pat(ppos):
return
ppos += 1
ptr += 1
elif op == OPCODE_NOT_LITERAL_IGNORE:
# match if it's not a literal string, ignoring case
# <NOT_LITERAL> <code>
if ptr >= ctx.end or ctx.lowstr(ptr) == ctx.pat(ppos):
return
ppos += 1
ptr += 1
elif op == OPCODE_REPEAT:
# general repeat. in this version of the re module, all the work
# is done here, and not on the later UNTIL operator.
# <REPEAT> <skip> <1=min> <2=max> item <UNTIL> tail
# FIXME: we probably need to deal with zero-width matches in here..
# decode the later UNTIL operator to see if it is actually
# a MAX_UNTIL or MIN_UNTIL
untilppos = ppos + ctx.pat(ppos)
tailppos = untilppos + 1
op = ctx.pat(untilppos)
if op == OPCODE_MAX_UNTIL:
# the hard case: we have to match as many repetitions as
# possible, followed by the 'tail'. we do this by
# remembering each state for each possible number of
# 'item' matching.
result = MaxUntilMatchResult(ppos, tailppos, ptr, marks)
return result.find_first_result(ctx)
elif op == OPCODE_MIN_UNTIL:
# first try to match the 'tail', and if it fails, try
# to match one more 'item' and try again
result = MinUntilMatchResult(ppos, tailppos, ptr, marks)
return result.find_first_result(ctx)
else:
raise Error("missing UNTIL after REPEAT")
elif op == OPCODE_REPEAT_ONE:
# match repeated sequence (maximizing regexp).
# this operator only works if the repeated item is
# exactly one character wide, and we're not already
# collecting backtracking points. for other cases,
# use the MAX_REPEAT operator.
# <REPEAT_ONE> <skip> <1=min> <2=max> item <SUCCESS> tail
start = ptr
minptr = start + ctx.pat(ppos+1)
if minptr > ctx.end:
return # cannot match
ptr = find_repetition_end(ctx, ppos+3, start, ctx.pat(ppos+2),
marks)
# when we arrive here, ptr points to the tail of the target
# string. check if the rest of the pattern matches,
# and backtrack if not.
nextppos = ppos + ctx.pat(ppos)
result = RepeatOneMatchResult(nextppos, minptr, ptr, marks)
return result.find_first_result(ctx)
elif op == OPCODE_MIN_REPEAT_ONE:
# match repeated sequence (minimizing regexp).
# this operator only works if the repeated item is
# exactly one character wide, and we're not already
# collecting backtracking points. for other cases,
# use the MIN_REPEAT operator.
# <MIN_REPEAT_ONE> <skip> <1=min> <2=max> item <SUCCESS> tail
start = ptr
min = ctx.pat(ppos+1)
if min > 0:
minptr = ptr + min
if minptr > ctx.end:
return # cannot match
# count using pattern min as the maximum
ptr = find_repetition_end(ctx, ppos+3, ptr, min, marks)
if ptr < minptr:
return # did not match minimum number of times
maxptr = ctx.end
max = ctx.pat(ppos+2)
if max != rsre_char.MAXREPEAT:
maxptr1 = start + max
if maxptr1 <= maxptr:
maxptr = maxptr1
nextppos = ppos + ctx.pat(ppos)
result = MinRepeatOneMatchResult(nextppos, ppos+3, maxptr,
ptr, marks)
return result.find_first_result(ctx)
else:
raise Error("bad pattern code %d" % op)
def get_group_ref(marks, groupnum):
gid = groupnum * 2
startptr = find_mark(marks, gid)
if startptr < 0:
return 0, -1
endptr = find_mark(marks, gid + 1)
length = endptr - startptr # < 0 if endptr < startptr (or if endptr=-1)
return startptr, length
@specializectx
def match_repeated(ctx, ptr, oldptr, length):
if ptr + length > ctx.end:
return False
for i in range(length):
if ctx.str(ptr + i) != ctx.str(oldptr + i):
return False
return True
@specializectx
def match_repeated_ignore(ctx, ptr, oldptr, length):
if ptr + length > ctx.end:
return False
for i in range(length):
if ctx.lowstr(ptr + i) != ctx.lowstr(oldptr + i):
return False
return True
@specializectx
def find_repetition_end(ctx, ppos, ptr, maxcount, marks):
end = ctx.end
ptrp1 = ptr + 1
# First get rid of the cases where we don't have room for any match.
if maxcount <= 0 or ptrp1 > end:
return ptr
# Check the first character directly. If it doesn't match, we are done.
# The idea is to be fast for cases like re.search("b+"), where we expect
# the common case to be a non-match. It's much faster with the JIT to
# have the non-match inlined here rather than detect it in the fre() call.
op = ctx.pat(ppos)
for op1, checkerfn in unroll_char_checker:
if op1 == op:
if checkerfn(ctx, ptr, ppos):
break
return ptr
else:
# obscure case: it should be a single char pattern, but isn't
# one of the opcodes in unroll_char_checker (see test_ext_opcode)
return general_find_repetition_end(ctx, ppos, ptr, maxcount, marks)
# It matches at least once. If maxcount == 1 (relatively common),
# then we are done.
if maxcount == 1:
return ptrp1
# Else we really need to count how many times it matches.
if maxcount != rsre_char.MAXREPEAT:
# adjust end
end1 = ptr + maxcount
if end1 <= end:
end = end1
op = ctx.pat(ppos)
for op1, fre in unroll_fre_checker:
if op1 == op:
return fre(ctx, ptrp1, end, ppos)
raise Error("rsre.find_repetition_end[%d]" % op)
@specializectx
def general_find_repetition_end(ctx, ppos, ptr, maxcount, marks):
# moved into its own JIT-opaque function
end = ctx.end
if maxcount != rsre_char.MAXREPEAT:
# adjust end
end1 = ptr + maxcount
if end1 <= end:
end = end1
while ptr < end and sre_match(ctx, ppos, ptr, marks) is not None:
ptr += 1
return ptr
@specializectx
def match_ANY(ctx, ptr, ppos): # dot wildcard.
return not rsre_char.is_linebreak(ctx.str(ptr))
def match_ANY_ALL(ctx, ptr, ppos):
return True # match anything (including a newline)
@specializectx
def match_IN(ctx, ptr, ppos):
return rsre_char.check_charset(ctx, ppos+2, ctx.str(ptr))
@specializectx
def match_IN_IGNORE(ctx, ptr, ppos):
return rsre_char.check_charset(ctx, ppos+2, ctx.lowstr(ptr))
@specializectx
def match_LITERAL(ctx, ptr, ppos):
return ctx.str(ptr) == ctx.pat(ppos+1)
@specializectx
def match_LITERAL_IGNORE(ctx, ptr, ppos):
return ctx.lowstr(ptr) == ctx.pat(ppos+1)
@specializectx
def match_NOT_LITERAL(ctx, ptr, ppos):
return ctx.str(ptr) != ctx.pat(ppos+1)
@specializectx
def match_NOT_LITERAL_IGNORE(ctx, ptr, ppos):
return ctx.lowstr(ptr) != ctx.pat(ppos+1)
def _make_fre(checkerfn):
if checkerfn == match_ANY_ALL:
def fre(ctx, ptr, end, ppos):
return end
elif checkerfn == match_IN:
install_jitdriver_spec('MatchIn',
greens=['ppos', 'ctx.pattern'],
reds=['ptr', 'end', 'ctx'],
debugprint=(1, 0))
@specializectx
def fre(ctx, ptr, end, ppos):
while True:
ctx.jitdriver_MatchIn.jit_merge_point(ctx=ctx, ptr=ptr,
end=end, ppos=ppos)
if ptr < end and checkerfn(ctx, ptr, ppos):
ptr += 1
else:
return ptr
elif checkerfn == match_IN_IGNORE:
install_jitdriver_spec('MatchInIgnore',
greens=['ppos', 'ctx.pattern'],
reds=['ptr', 'end', 'ctx'],
debugprint=(1, 0))
@specializectx
def fre(ctx, ptr, end, ppos):
while True:
ctx.jitdriver_MatchInIgnore.jit_merge_point(ctx=ctx, ptr=ptr,
end=end, ppos=ppos)
if ptr < end and checkerfn(ctx, ptr, ppos):
ptr += 1
else:
return ptr
else:
# in the other cases, the fre() function is not JITted at all
# and is present as a residual call.
@specializectx
def fre(ctx, ptr, end, ppos):
while ptr < end and checkerfn(ctx, ptr, ppos):
ptr += 1
return ptr
fre = func_with_new_name(fre, 'fre_' + checkerfn.__name__)
return fre
unroll_char_checker = [
(OPCODE_ANY, match_ANY),
(OPCODE_ANY_ALL, match_ANY_ALL),
(OPCODE_IN, match_IN),
(OPCODE_IN_IGNORE, match_IN_IGNORE),
(OPCODE_LITERAL, match_LITERAL),
(OPCODE_LITERAL_IGNORE, match_LITERAL_IGNORE),
(OPCODE_NOT_LITERAL, match_NOT_LITERAL),
(OPCODE_NOT_LITERAL_IGNORE, match_NOT_LITERAL_IGNORE),
]
unroll_fre_checker = [(_op, _make_fre(_fn))
for (_op, _fn) in unroll_char_checker]
unroll_char_checker = unrolling_iterable(unroll_char_checker)
unroll_fre_checker = unrolling_iterable(unroll_fre_checker)
##### At dispatch
AT_BEGINNING = 0
AT_BEGINNING_LINE = 1
AT_BEGINNING_STRING = 2
AT_BOUNDARY = 3
AT_NON_BOUNDARY = 4
AT_END = 5
AT_END_LINE = 6
AT_END_STRING = 7
AT_LOC_BOUNDARY = 8
AT_LOC_NON_BOUNDARY = 9
AT_UNI_BOUNDARY = 10
AT_UNI_NON_BOUNDARY = 11
@specializectx
def sre_at(ctx, atcode, ptr):
if (atcode == AT_BEGINNING or
atcode == AT_BEGINNING_STRING):
return ptr == 0
elif atcode == AT_BEGINNING_LINE:
prevptr = ptr - 1
return prevptr < 0 or rsre_char.is_linebreak(ctx.str(prevptr))
elif atcode == AT_BOUNDARY:
return at_boundary(ctx, ptr)
elif atcode == AT_NON_BOUNDARY:
return at_non_boundary(ctx, ptr)
elif atcode == AT_END:
remaining_chars = ctx.end - ptr
return remaining_chars <= 0 or (
remaining_chars == 1 and rsre_char.is_linebreak(ctx.str(ptr)))
elif atcode == AT_END_LINE:
return ptr == ctx.end or rsre_char.is_linebreak(ctx.str(ptr))
elif atcode == AT_END_STRING:
return ptr == ctx.end
elif atcode == AT_LOC_BOUNDARY:
return at_loc_boundary(ctx, ptr)
elif atcode == AT_LOC_NON_BOUNDARY:
return at_loc_non_boundary(ctx, ptr)
elif atcode == AT_UNI_BOUNDARY:
return at_uni_boundary(ctx, ptr)
elif atcode == AT_UNI_NON_BOUNDARY:
return at_uni_non_boundary(ctx, ptr)
return False
def _make_boundary(word_checker):
@specializectx
def at_boundary(ctx, ptr):
if ctx.end == 0:
return False
prevptr = ptr - 1
that = prevptr >= 0 and word_checker(ctx.str(prevptr))
this = ptr < ctx.end and word_checker(ctx.str(ptr))
return this != that
@specializectx
def at_non_boundary(ctx, ptr):
if ctx.end == 0:
return False
prevptr = ptr - 1
that = prevptr >= 0 and word_checker(ctx.str(prevptr))
this = ptr < ctx.end and word_checker(ctx.str(ptr))
return this == that
return at_boundary, at_non_boundary
at_boundary, at_non_boundary = _make_boundary(rsre_char.is_word)
at_loc_boundary, at_loc_non_boundary = _make_boundary(rsre_char.is_loc_word)
at_uni_boundary, at_uni_non_boundary = _make_boundary(rsre_char.is_uni_word)
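# Illustrative example: with the plain is_word checker and the subject string
# "ab cd", at_boundary(ctx, 2) is True ('b' is a word character, ' ' is not),
# while at_non_boundary(ctx, 1) is True (both 'a' and 'b' are word characters).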
# ____________________________________________________________
def _adjust(start, end, length):
if start < 0: start = 0
elif start > length: start = length
if end < 0: end = 0
elif end > length: end = length
return start, end
def match(pattern, string, start=0, end=sys.maxint, flags=0, fullmatch=False):
start, end = _adjust(start, end, len(string))
ctx = StrMatchContext(pattern, string, start, end, flags)
ctx.fullmatch_only = fullmatch
if match_context(ctx):
return ctx
else:
return None
def fullmatch(pattern, string, start=0, end=sys.maxint, flags=0):
return match(pattern, string, start, end, flags, fullmatch=True)
def search(pattern, string, start=0, end=sys.maxint, flags=0):
start, end = _adjust(start, end, len(string))
ctx = StrMatchContext(pattern, string, start, end, flags)
if search_context(ctx):
return ctx
else:
return None
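# Usage sketch for the module-level API above (illustrative only): 'pattern'
# must be the flat opcode list produced by the sre compiler; obtaining that
# list is outside the scope of this module, so 'pattern_code_list' below is
# a placeholder.
#
#     ctx = search(pattern_code_list, "some subject string")
#     if ctx is not None:
#         print ctx.match_start, ctx.match_end   # span of the match
#         print ctx.group(0)                     # matched substring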
install_jitdriver('Match',
greens=['ctx.pattern'], reds=['ctx'],
debugprint=(0,))
def match_context(ctx):
ctx.original_pos = ctx.match_start
if ctx.end < ctx.match_start:
return False
ctx.jitdriver_Match.jit_merge_point(ctx=ctx)
return sre_match(ctx, 0, ctx.match_start, None) is not None
def search_context(ctx):
ctx.original_pos = ctx.match_start
if ctx.end < ctx.match_start:
return False
base = 0
charset = False
if ctx.pat(base) == OPCODE_INFO:
flags = ctx.pat(2)
if flags & rsre_char.SRE_INFO_PREFIX:
if ctx.pat(5) > 1:
return fast_search(ctx)
else:
charset = (flags & rsre_char.SRE_INFO_CHARSET)
base += 1 + ctx.pat(1)
if ctx.pat(base) == OPCODE_LITERAL:
return literal_search(ctx, base)
if charset:
return charset_search(ctx, base)
return regular_search(ctx, base)
install_jitdriver('RegularSearch',
greens=['base', 'ctx.pattern'],
reds=['start', 'ctx'],
debugprint=(1, 0))
def regular_search(ctx, base):
start = ctx.match_start
while start <= ctx.end:
ctx.jitdriver_RegularSearch.jit_merge_point(ctx=ctx, start=start,
base=base)
if sre_match(ctx, base, start, None) is not None:
ctx.match_start = start
return True
start += 1
return False
install_jitdriver_spec("LiteralSearch",
greens=['base', 'character', 'ctx.pattern'],
reds=['start', 'ctx'],
debugprint=(2, 0, 1))
@specializectx
def literal_search(ctx, base):
# pattern starts with a literal character. this is used
# for short prefixes, and if fast search is disabled
character = ctx.pat(base + 1)
base += 2
start = ctx.match_start
while start < ctx.end:
ctx.jitdriver_LiteralSearch.jit_merge_point(ctx=ctx, start=start,
base=base, character=character)
if ctx.str(start) == character:
if sre_match(ctx, base, start + 1, None) is not None:
ctx.match_start = start
return True
start += 1
return False
install_jitdriver_spec("CharsetSearch",
greens=['base', 'ctx.pattern'],
reds=['start', 'ctx'],
debugprint=(1, 0))
@specializectx
def charset_search(ctx, base):
# pattern starts with a character from a known set
start = ctx.match_start
while start < ctx.end:
ctx.jitdriver_CharsetSearch.jit_merge_point(ctx=ctx, start=start,
base=base)
if rsre_char.check_charset(ctx, 5, ctx.str(start)):
if sre_match(ctx, base, start, None) is not None:
ctx.match_start = start
return True
start += 1
return False
install_jitdriver_spec('FastSearch',
greens=['i', 'prefix_len', 'ctx.pattern'],
reds=['string_position', 'ctx'],
debugprint=(2, 0))
@specializectx
def fast_search(ctx):
# skips forward in a string as fast as possible using information from
# an optimization info block
# <INFO> <1=skip> <2=flags> <3=min> <4=...>
# <5=length> <6=skip> <7=prefix data> <overlap data>
string_position = ctx.match_start
if string_position >= ctx.end:
return False
prefix_len = ctx.pat(5)
assert prefix_len >= 0
i = 0
while True:
ctx.jitdriver_FastSearch.jit_merge_point(ctx=ctx,
string_position=string_position, i=i, prefix_len=prefix_len)
char_ord = ctx.str(string_position)
if char_ord != ctx.pat(7 + i):
if i > 0:
overlap_offset = prefix_len + (7 - 1)
i = ctx.pat(overlap_offset + i)
continue
else:
i += 1
if i == prefix_len:
# found a potential match
start = string_position + 1 - prefix_len
assert start >= 0
prefix_skip = ctx.pat(6)
ptr = start + prefix_skip
#flags = ctx.pat(2)
#if flags & rsre_char.SRE_INFO_LITERAL:
# # matched all of pure literal pattern
# ctx.match_start = start
# ctx.match_end = ptr
# ctx.match_marks = None
# return True
pattern_offset = ctx.pat(1) + 1
ppos_start = pattern_offset + 2 * prefix_skip
if sre_match(ctx, ppos_start, ptr, None) is not None:
ctx.match_start = start
return True
overlap_offset = prefix_len + (7 - 1)
i = ctx.pat(overlap_offset + i)
string_position += 1
if string_position >= ctx.end:
return False
|
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
from page_sets import key_mobile_sites_pages
def _IssueMarkerAndScroll(action_runner):
interaction = action_runner.BeginGestureInteraction(
'ScrollAction', is_smooth=True)
action_runner.ScrollPage()
interaction.End()
def _CreatePageClassWithSmoothInteractions(page_cls):
class DerivedSmoothPage(page_cls): # pylint: disable=W0232
def RunPageInteractions(self, action_runner):
_IssueMarkerAndScroll(action_runner)
return DerivedSmoothPage
class KeyMobileSitesSmoothPage(page_module.Page):
def __init__(self, url, page_set, name='', labels=None,
action_on_load_complete=False):
super(KeyMobileSitesSmoothPage, self).__init__(
url=url, page_set=page_set, name=name,
credentials_path='data/credentials.json', labels=labels)
self.user_agent_type = 'mobile'
self.archive_data_file = 'data/key_mobile_sites.json'
self.action_on_load_complete = action_on_load_complete
def RunPageInteractions(self, action_runner):
if self.action_on_load_complete:
action_runner.WaitForJavaScriptCondition(
'document.readyState == "complete"', 30)
_IssueMarkerAndScroll(action_runner)
class LinkedInSmoothPage(key_mobile_sites_pages.LinkedInPage):
def __init__(self, page_set):
super(LinkedInSmoothPage, self).__init__(page_set=page_set)
# LinkedIn has expensive shader compilation, so it benefits from the shader
# cache when the page is reloaded.
def RunNavigateSteps(self, action_runner):
super(LinkedInSmoothPage, self).RunNavigateSteps(action_runner)
action_runner.ScrollPage()
action_runner.ReloadPage()
super(LinkedInSmoothPage, self).RunNavigateSteps(action_runner)
class WowwikiSmoothPage(KeyMobileSitesSmoothPage):
"""Why: Mobile wiki."""
def __init__(self, page_set):
super(WowwikiSmoothPage, self).__init__(
url='http://www.wowwiki.com/World_of_Warcraft:_Mists_of_Pandaria',
page_set=page_set)
# Wowwiki has expensive shader compilation, so it benefits from the shader
# cache when the page is reloaded.
def RunNavigateSteps(self, action_runner):
super(WowwikiSmoothPage, self).RunNavigateSteps(action_runner)
action_runner.ScrollPage()
action_runner.ReloadPage()
super(WowwikiSmoothPage, self).RunNavigateSteps(action_runner)
class GmailSmoothPage(key_mobile_sites_pages.GmailPage):
def RunPageInteractions(self, action_runner):
interaction = action_runner.BeginGestureInteraction(
'ScrollAction', is_smooth=True)
action_runner.ScrollElement(element_function=(
'document.getElementById("views").childNodes[1].firstChild'))
interaction.End()
interaction = action_runner.BeginGestureInteraction(
'ScrollAction', is_smooth=True)
action_runner.ScrollElement(element_function=(
'document.getElementById("views").childNodes[1].firstChild'))
interaction.End()
class GroupClonedSmoothPage(key_mobile_sites_pages.GroupClonedPage):
def RunPageInteractions(self, action_runner):
interaction = action_runner.BeginGestureInteraction(
'ScrollAction', is_smooth=True)
action_runner.ScrollPage(
distance_expr='''
Math.max(0, 1250 + document.getElementById("element-19")
.contentDocument
.getElementById("element-22")
.getBoundingClientRect().top);''',
use_touch=True)
interaction.End()
class GroupClonedListImagesPage(
key_mobile_sites_pages.GroupClonedListImagesPage):
def RunPageInteractions(self, action_runner):
interaction = action_runner.BeginGestureInteraction(
'ScrollAction', is_smooth=True)
action_runner.ScrollPage(
distance_expr='''
Math.max(0, 1250 +
document.getElementById("element-5")
.getBoundingClientRect().top);''',
use_touch=True)
interaction.End()
class GoogleNewsMobile2SmoothPage(
key_mobile_sites_pages.GoogleNewsMobile2Page):
def RunPageInteractions(self, action_runner):
interaction = action_runner.BeginGestureInteraction(
'ScrollAction', is_smooth=True)
action_runner.ScrollElement(
element_function='document.getElementById(":5")',
distance_expr='''
Math.max(0, 2500 +
document.getElementById(':h').getBoundingClientRect().top)''',
use_touch=True)
interaction.End()
class AmazonNicolasCageSmoothPage(
key_mobile_sites_pages.AmazonNicolasCagePage):
def RunPageInteractions(self, action_runner):
interaction = action_runner.BeginGestureInteraction(
'ScrollAction', is_smooth=True)
action_runner.ScrollElement(
selector='#search',
distance_expr='document.body.scrollHeight - window.innerHeight')
interaction.End()
class KeyMobileSitesSmoothPageSet(page_set_module.PageSet):
""" Key mobile sites with smooth interactions. """
def __init__(self):
super(KeyMobileSitesSmoothPageSet, self).__init__(
user_agent_type='mobile',
archive_data_file='data/key_mobile_sites_smooth.json',
bucket=page_set_module.PARTNER_BUCKET)
# Add pages with predefined classes that contain custom navigation logic.
predefined_page_classes = [
key_mobile_sites_pages.CapitolVolkswagenPage,
key_mobile_sites_pages.TheVergeArticlePage,
key_mobile_sites_pages.CnnArticlePage,
key_mobile_sites_pages.FacebookPage,
key_mobile_sites_pages.YoutubeMobilePage,
key_mobile_sites_pages.YahooAnswersPage,
key_mobile_sites_pages.GoogleNewsMobilePage,
]
for page_class in predefined_page_classes:
self.AddUserStory(
_CreatePageClassWithSmoothInteractions(page_class)(self))
self.AddUserStory(
_CreatePageClassWithSmoothInteractions(LinkedInSmoothPage)(self))
self.AddUserStory(WowwikiSmoothPage(self))
# Add pages with custom page interaction logic.
# Page behaves non-deterministically, replaced with test version for now.
# self.AddUserStory(GroupClonedSmoothPage(self))
# mean_input_event_latency cannot be tracked correctly for
# GroupClonedListImagesPage.
# See crbug.com/409086.
# self.AddUserStory(GroupClonedListImagesSmoothPage(self))
self.AddUserStory(GoogleNewsMobile2SmoothPage(self))
self.AddUserStory(AmazonNicolasCageSmoothPage(self))
# Add pages with custom labels.
# Why: Top news site.
self.AddUserStory(KeyMobileSitesSmoothPage(
url='http://nytimes.com/', page_set=self, labels=['fastpath']))
# Why: Image-heavy site.
self.AddUserStory(KeyMobileSitesSmoothPage(
url='http://cuteoverload.com', page_set=self, labels=['fastpath']))
# Why: #11 (Alexa global), Google property; some Blogger layouts have
# infinite scroll, but this one is more interesting.
self.AddUserStory(KeyMobileSitesSmoothPage(
url='http://googlewebmastercentral.blogspot.com/',
page_set=self, name='Blogger'))
# Why: #18 (Alexa global), picked an interesting post
self.AddUserStory(KeyMobileSitesSmoothPage(
# pylint: disable=line-too-long
url='http://en.blog.wordpress.com/2012/09/04/freshly-pressed-editors-picks-for-august-2012/',
page_set=self,
name='Wordpress'))
# Why: #6 (Alexa) most visited worldwide, picked an interesting page
self.AddUserStory(KeyMobileSitesSmoothPage(
url='http://en.wikipedia.org/wiki/Wikipedia',
page_set=self,
name='Wikipedia (1 tab)'))
# Why: Wikipedia page with a delayed scroll start
self.AddUserStory(KeyMobileSitesSmoothPage(
url='http://en.wikipedia.org/wiki/Wikipedia',
page_set=self,
name='Wikipedia (1 tab) - delayed scroll start',
action_on_load_complete=True))
# Why: #8 (Alexa global), picked an interesting page
# Forbidden (Rate Limit Exceeded)
# self.AddUserStory(KeyMobileSitesSmoothPage(
# url='http://twitter.com/katyperry', page_set=self, name='Twitter'))
# Why: #37 (Alexa global)
self.AddUserStory(KeyMobileSitesSmoothPage(
url='http://pinterest.com',
page_set=self,
name='Pinterest'))
# Why: #1 sports.
# Fails often; crbug.com/249722
# self.AddUserStory(KeyMobileSitesSmoothPage(
# url='http://espn.go.com', page_set=self, name='ESPN'))
# Why: crbug.com/231413
# Doesn't scroll; crbug.com/249736
# self.AddUserStory(KeyMobileSitesSmoothPage(
# url='http://forecast.io', page_set=self))
# Why: crbug.com/169827
self.AddUserStory(KeyMobileSitesSmoothPage(
url='http://slashdot.org/', page_set=self, labels=['fastpath']))
# Why: #5 Alexa news
self.AddUserStory(KeyMobileSitesSmoothPage(
url='http://www.reddit.com/r/programming/comments/1g96ve',
page_set=self, labels=['fastpath']))
# Why: Problematic use of fixed position elements
self.AddUserStory(KeyMobileSitesSmoothPage(
url='http://www.boingboing.net', page_set=self, labels=['fastpath']))
# Add simple pages with no custom navigation logic or labels.
urls_list = [
# Why: Social; top Google property; Public profile; infinite scrolls.
# pylint: disable=line-too-long
'https://plus.google.com/app/basic/110031535020051778989/posts?source=apppromo',
# Why: crbug.com/242544
('http://www.androidpolice.com/2012/10/03/rumor-evidence-mounts-that-an-'
'lg-optimus-g-nexus-is-coming-along-with-a-nexus-phone-certification-'
'program/'),
# Why: crbug.com/149958
'http://gsp.ro',
# Why: Top tech blog
'http://theverge.com',
# Why: Top tech site
'http://digg.com',
# Why: Top Google property; a Google tab is often open
'https://www.google.com/#hl=en&q=barack+obama',
# Why: #1 news worldwide (Alexa global)
'http://news.yahoo.com',
# Why: #2 news worldwide
'http://www.cnn.com',
# Why: #1 commerce website by time spent by users in US
'http://shop.mobileweb.ebay.com/searchresults?kw=viking+helmet',
# Why: #1 Alexa recreation
# pylint: disable=line-too-long
'http://www.booking.com/searchresults.html?src=searchresults&latitude=65.0500&longitude=25.4667',
# Why: #1 Alexa sports
'http://sports.yahoo.com/',
# Why: Top tech blog
'http://techcrunch.com',
# Why: #6 Alexa sports
'http://mlb.com/',
# Why: #14 Alexa California
'http://www.sfgate.com/',
# Why: Non-latin character set
'http://worldjournal.com/',
# Why: #15 Alexa news
'http://online.wsj.com/home-page',
# Why: Image-heavy mobile site
'http://www.deviantart.com/',
# Why: Top search engine
('http://www.baidu.com/s?wd=barack+obama&rsv_bp=0&rsv_spt=3&rsv_sug3=9&'
'rsv_sug=0&rsv_sug4=3824&rsv_sug1=3&inputT=4920'),
# Why: Top search engine
'http://www.bing.com/search?q=sloths',
# Why: Good example of poor initial scrolling
'http://ftw.usatoday.com/2014/05/spelling-bee-rules-shenanigans'
]
for url in urls_list:
self.AddUserStory(KeyMobileSitesSmoothPage(url, self))
|
|
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
'''
import sys
import logging
import copy
import aliyunOpenApiData
LOG = logging.getLogger(__name__)
class Completer(object):
def __init__(self):
self.openApiDataHandler = aliyunOpenApiData.aliyunOpenApiDataHandler()
self.driver = None
self.main_hc = None
self.main_options = ['output', 'AccessKeyId', 'AccessKeySecret', 'RegionId', 'profile', 'version']
self.cmdline = None
self.point = None
self.command_hc = None
self.subcommand_hc = None
self.command_name = None
self.operation = None
self.current_word = None
self.previous_word = None
self.non_options = None
self.version = None
self.aliyuncli = 'aliyuncli'
def _complete_option(self, option_name):
# if option_name == '--endpoint-url':
# return []
if option_name == '--output':
cli_data = ['text', 'table', 'json']
return cli_data
# if option_name == '--profile':
# return self.driver.session.available_profiles
return []
def _complete_provider(self):
retval = []
if self.current_word.startswith('-'):
cw = self.current_word.lstrip('-')
l = ['--' + n for n in self.main_options
if n.startswith(cw)]
retval = l
elif self.current_word == './testcli' or self.current_word == self.aliyuncli:
retval = self._documented(self.openApiDataHandler.getApiCmdsLower())
else:
# Otherwise, see if they have entered a partial command name
retval = self._documented(self.openApiDataHandler.getApiCmdsLower(),
startswith=self.current_word)
return retval
def _complete_command(self):
retval = []
if self.current_word == self.command_name: # only the command was given; the operation is still None
_operations = set()
apiOperations = self.openApiDataHandler.getApiOperations(self.command_name, self.version)
import commandConfigure
_configure = commandConfigure.commandConfigure()
extensionOperations = _configure.getExtensionOperations(self.command_name)
for item in apiOperations:
_operations.add(item)
if extensionOperations is not None:
for item in extensionOperations:
_operations.add(item)
if self.openApiDataHandler.getApiOperations(self.command_name, self.version):
retval = self._documented(_operations)
# retval = self._documented(self.openApiDataHandler.getApiOperations(self.command_name, self.version))
elif self.current_word.startswith('-'): # complete option keys and values
retval = self._find_possible_options()
else: # a command was given; complete the operation name
# See if they have entered a partial operation name
_operations = set()
apiOperations = self.openApiDataHandler.getApiOperations(self.command_name, self.version)
import commandConfigure
_configure = commandConfigure.commandConfigure()
extensionOperations = _configure.getExtensionOperations(self.command_name)
for item in apiOperations:
_operations.add(item)
if extensionOperations is not None:
for item in extensionOperations:
_operations.add(item)
if self.openApiDataHandler.getApiOperations(self.command_name, self.version):
retval = self._documented(_operations, startswith=self.current_word)
# retval = self._documented(self.openApiDataHandler.getApiOperations(self.command_name, self.version),
# startswith=self.current_word)
return retval
def _documented(self, table, startswith=None):
names = []
for key in table:
# if getattr(command, '_UNDOCUMENTED', False):
# Don't tab complete undocumented commands/params
# continue
if startswith is not None and not key.startswith(startswith):
continue
# if getattr(command, 'positional_arg', False):
# continue
names.append(key)
return names
def _complete_subcommand(self):
retval = []
if self.current_word == self.operation:
retval = []
elif self.current_word.startswith('-'):
retval = self._find_possible_options()
return retval
def _find_possible_options(self):
all_options = copy.copy(self.main_options)
# build the full list of candidate attributes/options
# by the time we reach this point, self.version must already have been decided
# self.subcommand_name = self.operation
# cmdInstance = self.openApiDataHandler.getInstanceByCmd(self.command_name, self.operation, self.version)
cmdInstance, mclassname = self.openApiDataHandler.getInstanceByCmdOperation(self.command_name, self.operation, self.version)
# old_arg_list = self.openApiDataHandler.getAttrList(cmdInstance)
old_arg_list = list()
if cmdInstance is None:
import commandConfigure
_configure = commandConfigure.commandConfigure()
old_arg_list = _configure.getExtensionOptions(self.command_name, self.operation)
else:
old_arg_list = self.openApiDataHandler.getAttrList(mclassname)
new_arg_list = set()
if old_arg_list is not None:
for item in old_arg_list:
if not item.startswith('_'):
new_arg_list.add(item)
all_options = all_options + self._documented(new_arg_list)
for opt in self.options:
# Look thru list of options on cmdline. If there are
# options that have already been specified and they are
# not the current word, remove them from list of possibles.
if opt != self.current_word:
stripped_opt = opt.lstrip('-')
if stripped_opt in all_options:
all_options.remove(stripped_opt)
cw = self.current_word.lstrip('-')
possibles = ['--' + n for n in all_options if n.startswith(cw)]
if len(possibles) == 1 and possibles[0] == self.current_word:
return self._complete_option(possibles[0])
return possibles
def _help_to_show_instance_attribute(self, cmdInstance):
all_options = copy.copy(self.main_options)
# build the full list of candidate attributes/options
# by the time we reach this point, self.version must already have been decided
# self.subcommand_name = self.operation
old_arg_list = self.openApiDataHandler.getAttrList(cmdInstance)
new_arg_list = set()
if old_arg_list is not None:
for item in old_arg_list:
if not item.startswith('_'):
new_arg_list.add(item)
all_options = all_options + self._documented(new_arg_list)
# for opt in self.options:
# # Look thru list of options on cmdline. If there are
# # options that have already been specified and they are
# # not the current word, remove them from list of possibles.
# if opt != self.current_word:
# stripped_opt = opt.lstrip('-')
# if stripped_opt in all_options:
# all_options.remove(stripped_opt)
#cw = self.current_word.lstrip('-')
possibles = ['--' + n for n in all_options]
# if len(possibles) == 1 and possibles[0] == self.current_word:
# return self._complete_option(possibles[0])
return possibles
def _process_command_line(self):
# Process the command line and try to find:
# - command_name
# - subcommand_name
# - words
# - current_word
# - previous_word
# - non_options
# - options
self.command_name = None
self.operation = None
self.words = self.cmdline[0:self.point].split()
self.current_word = self.words[-1]
if len(self.words) >= 2:
self.previous_word = self.words[-2]
else:
self.previous_word = None
self.non_options = [w for w in self.words if not w.startswith('-')]
self.options = [w for w in self.words if w.startswith('-')]
# Look for a command name in the non_options
for w in self.non_options:
if w in self.openApiDataHandler.getApiCmdsLower() or w in self.openApiDataHandler.getApiCmds(): # cmd check
self.command_name = w # record the command name
self.version = self.openApiDataHandler.getSdkVersion(self.command_name, None)
cmd_obj = self.openApiDataHandler.getApiOperations(self.command_name, self.version)
# self.command_hc = cmd_obj.create_help_command()
if cmd_obj is not None:
# Look for subcommand name
for w in self.non_options:
if w in cmd_obj:
self.operation = w
# cmd_obj = self.command_hc.command_table[self.subcommand_name]
# self.subcommand_hc = cmd_obj.create_help_command()
break
cmd_extension_obj = self.openApiDataHandler.getExtensionOperationsFromCmd(self.command_name)
if cmd_extension_obj is not None:
for w in self.non_options:
if w in cmd_extension_obj:
self.operation = w
# cmd_obj = self.command_hc.command_table[self.subcommand_name]
# self.subcommand_hc = cmd_obj.create_help_command()
break
break
def complete(self, cmdline, point):
self.cmdline = cmdline
self.command_name = None
if point is None:
point = len(cmdline)
self.point = point
self._process_command_line()
if not self.command_name: # such as 'ec'
# If we didn't find any command names in the cmdline
# lets try to complete provider options
return self._complete_provider()
if self.command_name and not self.operation: # such as 'ecs create-'
return self._complete_command()
return self._complete_subcommand()
def complete(cmdline, point):
choices = Completer().complete(cmdline, point)
print(' \n'.join(choices))
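# Wiring sketch (assumption, not part of this module): bash programmable
# completion can drive this entry point via COMP_LINE/COMP_POINT, e.g. a
# completer script registered with `complete -C <script> aliyuncli` could call:
#
#     complete(os.environ['COMP_LINE'], int(os.environ['COMP_POINT']))
#
# The exact hook name and installation path depend on how the CLI package
# installs its completion support.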
if __name__ == '__main__':
# if len(sys.argv) == 3:
# cmdline = sys.argv[1]
# point = int(sys.argv[2])
# elif len(sys.argv) == 2:
# cmdline = sys.argv[1]
# else:
# print('usage: %s <cmdline> <point>' % sys.argv[0])
# sys.exit(1)
cmdline = './testcli E'
point = len(cmdline)
print(complete(cmdline, point))
|
|
''' IMPORTS '''
from CommonServerPython import *
import urllib3
from pyotrs import Article, Attachment, Client, DynamicField, Ticket
from urllib.parse import unquote
# disable insecure warnings
urllib3.disable_warnings()
''' GLOBAL VARS '''
SERVER = demisto.params()['server'][:-1] if demisto.params()['server'].endswith('/') else demisto.params()['server']
USERNAME = demisto.params()['credentials']['identifier']
PASSWORD = demisto.params()['credentials']['password']
USE_SSL = not demisto.params().get('unsecure', False)
FETCH_QUEUE = demisto.params().get('fetch_queue', 'Any')
FETCH_PRIORITY = demisto.params().get('fetch_priority')
FETCH_TIME_DEFAULT = '3 days'
FETCH_TIME = demisto.params().get('fetch_time', FETCH_TIME_DEFAULT)
FETCH_TIME = FETCH_TIME if FETCH_TIME and FETCH_TIME.strip() else FETCH_TIME_DEFAULT
otrs_client = None # type: Client
''' HELPER FUNCTIONS '''
def ticket_to_incident(ticket):
attachments_list = []
articles = ticket.get('Article')
if articles:
for article in articles:
attachments = article.get('Attachment')
if attachments:
for attachment in attachments:
file_name = attachment['Filename']
attachment_file = fileResult(file_name, base64.b64decode(attachment['Content']))
attachments_list.append({
'path': attachment_file['FileID'],
'name': file_name
})
incident = {
'attachment': attachments_list,
'rawJSON': unquote(json.dumps(ticket)),
'name': 'OTRS ticket {}'.format(ticket['TicketID'])
}
return incident
def translate_state(state):
state_dict = {
'ClosedSuccessful': 'closed successful',
'ClosedUnsuccessful': 'closed unsuccessful',
'Open': 'open',
'PendingReminder': 'pending reminder',
'New': 'new'
}
return state_dict[state]
def translate_priority(priority):
priority_dict = {
'1VeryLow': '1 very low',
'2Low': '2 low',
'3Normal': '3 normal',
'4High': '4 high',
'5VeryHigh': '5 very high'
}
return priority_dict[priority]
def calculate_age(seconds):
"""
Convert seconds to time period string
e.g. 6000 -> 1 h 40 m
"""
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
return '%d h %02d m' % (h, m)
def demisto_entry_to_otrs_attachment(entry_list):
"""
Convert Demisto file entry to OTRS attachment object
"""
attachments = []
for file in entry_list:
file_path = demisto.getFilePath(file)
with open(file_path['path'], 'rb') as file_content:
encoded_file = base64.b64encode(file_content.read()).decode('utf-8') # Encoding file content in base64,
# as required by OTRS and then decode it as mentioned in https://gitlab.com/rhab/PyOTRS/-/issues/18
# Getting file type from context
context_files = demisto.get(demisto.context(), 'File')
if isinstance(context_files, dict): # If there's only one file in context, we will get a dict and we convert it to list
context_files = [context_files]
content_type = None
for context_file in context_files: # Looking for file entry in context
if context_file['EntryID'] == file:
content_type = context_file['Info']
break
if content_type is None:
return_error('Could not find file in context')
otrs_attachment = Attachment.create_basic( # Creating OTRS attachment object
Filename=file_path['name'],
Content=encoded_file,
ContentType=content_type
)
attachments.append(otrs_attachment)
return attachments
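# Usage sketch (illustrative): commands that accept an 'attachment' argument
# holding one or more War Room entry IDs can convert them before attaching
# them to a ticket:
#
#     entry_ids = argToList(demisto.args().get('attachment'))
#     otrs_attachments = demisto_entry_to_otrs_attachment(entry_ids)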
''' FUNCTIONS '''
def get_ticket_command():
ticket_id = demisto.args().get('ticket_id')
ticket_number = demisto.args().get('ticket_number')
if (ticket_id and ticket_number is None):
ticket = get_ticket(ticket_id)
elif (ticket_id is None and ticket_number):
ticket = get_ticket_by_number(ticket_number)
else:
return_error('Exactly one ticket identifier is required in order to retrieve a ticket, ticket_id or ticket_number!')
output = {
'ID': str(ticket['TicketID']),
'Number': ticket['TicketNumber'],
'Created': ticket['Created'],
'CustomerID': ticket['CustomerUserID'],
'Owner': ticket['Owner'],
'Priority': ticket['Priority'],
'Queue': ticket['Queue'],
'State': ticket['State'],
'Title': ticket['Title'],
'Type': ticket['Type'],
'Lock': ticket['Lock'],
'Age': calculate_age(ticket['Age'])
}
df = ticket.get('DynamicField')
if df:
output['DynamicField'] = {}
for field in df:
value = field['Value']
if value:
name = field['Name']
output['DynamicField'][name] = value
title = 'OTRS Ticket ' + str(ticket['TicketID'])
headers = ['ID', 'Number', 'Age', 'Title', 'State', 'Lock', 'Queue',
'Owner', 'CustomerID', 'Priority', 'Type', 'Created', 'DynamicField']
human_readable = tableToMarkdown(title, output, headers=headers, removeNull=True)
attachments_list = []
articles = ticket.get('Article')
if articles:
articles_list = []
human_readable_articles = []
for article in articles:
# Get article details
current_article = {
'ID': str(article['ArticleID']),
'Subject': article.get('Subject'),
'Body': article.get('Body'),
'CreateTime': article.get('CreateTime'),
'From': article.get('From'),
'ContentType': article.get('ContentType')
}
current_human_readable_article = dict(current_article)
# Get attachments
attachments = article.get('Attachment')
if attachments:
attachments_output = []
attachments_str = ''
for attachment in attachments:
file_name = attachment['Filename']
file_size = attachment['FilesizeRaw']
content_type = attachment['ContentType']
current_attachment = {
'Name': file_name,
'Size': file_size,
'ContentType': content_type
}
attachments_str += 'Name: {0}, Size: {1}, ContentType: {2}'.format(file_name, file_size, content_type)
attachments_str += '\n\n'
attachments_list.append(fileResult(file_name, base64.b64decode(attachment['Content'])))
attachments_output.append(current_attachment)
current_human_readable_article['Attachment'] = attachments_str
current_article['Attachment'] = attachments_output
human_readable_articles.append(current_human_readable_article)
articles_list.append(current_article)
human_readable += tableToMarkdown('Articles', human_readable_articles,
headers=['ID', 'From', 'Subject', 'Body', 'CreateTime',
'ContentType', 'Attachment'], removeNull=True)
output['Article'] = articles_list
ec = {
'OTRS.Ticket(val.ID===obj.ID)': output
}
demisto.results({
'Type': entryTypes['note'],
'Contents': ticket,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': human_readable,
'EntryContext': ec
})
demisto.results(attachments_list)
def get_ticket(ticket_id):
args = {'ticket_id': ticket_id, 'articles': True, 'attachments': True, 'dynamic_fields': True}
response = execute_otrs_method(otrs_client.ticket_get_by_id, args)
raw_ticket = response.to_dct()['Ticket']
return raw_ticket
def get_ticket_by_number(ticket_number):
args = {'ticket_number': ticket_number, 'articles': True, 'attachments': True, 'dynamic_fields': True}
response = execute_otrs_method(otrs_client.ticket_get_by_number, args)
raw_ticket = response.to_dct().get('Ticket')
return raw_ticket
def search_ticket_command():
states = demisto.args().get('state')
if states:
states = argToList(states)
created_before = demisto.args().get('created_before')
if created_before:
created_before, _ = parse_date_range(created_before)
created_after = demisto.args().get('created_after')
if created_after:
created_after, _ = parse_date_range(created_after)
title = demisto.args().get('title')
queue = demisto.args().get('queue')
if queue:
queue = argToList(queue)
priority = demisto.args().get('priority')
if priority:
priority_list = argToList(priority)
priority = [translate_priority(p) for p in priority_list]
ticket_type = demisto.args().get('type')
tickets = search_ticket(states, created_before, created_after, title, queue, priority, ticket_type)
if tickets:
output = []
raw_output = []
for ticket_id in tickets:
raw_ticket = get_ticket(ticket_id)
ticket = {
'ID': str(raw_ticket['TicketID']),
'Number': raw_ticket['TicketNumber'],
'Created': raw_ticket['Created'],
'Owner': raw_ticket['Owner'],
'Priority': raw_ticket['Priority'],
'Queue': raw_ticket['Queue'],
'State': raw_ticket['State'],
'Title': raw_ticket['Title'],
'Type': raw_ticket['Type']
}
output.append(ticket)
raw_output.append(raw_ticket)
ec = {
'OTRS.Ticket(val.ID===obj.ID)': output
}
title = 'OTRS Search Results'
headers = ['ID', 'Number', 'Title', 'Type', 'State', 'Priority', 'Queue', 'Created', 'Owner']
demisto.results({
'Type': entryTypes['note'],
'Contents': raw_output,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown(title, output, headers),
'EntryContext': ec
})
else:
demisto.results('No results found')
def search_ticket(states=None, created_before=None, created_after=None, title=None, queue=None, priority=None, ticket_type=None):
args = {'States': states,
'TicketCreateTimeOlderDate': created_before,
'TicketCreateTimeNewerDate': created_after,
'Title': title,
'Queues': queue,
'Priorities': priority,
'Types': ticket_type}
return execute_otrs_method(otrs_client.ticket_search, args)
def create_ticket_command():
title = demisto.args().get('title')
queue = demisto.args().get('queue')
state = translate_state(demisto.args().get('state'))
priority = translate_priority(demisto.args().get('priority'))
customer_user = demisto.args().get('customer_user')
article_subject = demisto.args().get('article_subject')
article_body = demisto.args().get('article_body')
ticket_type = demisto.args().get('type')
dynamic_fields = demisto.args().get('dynamic_fields')
attachment = demisto.args().get('attachment')
df = []
df_output = []
if dynamic_fields:
dynamic_fields_list = argToList(dynamic_fields)
for field in dynamic_fields_list:
splitted_field = field.split('=')
current_field, current_value = splitted_field[0], splitted_field[1]
df.append(DynamicField(current_field, current_value))
df_output.append({current_field: current_value})
attachments = []
if attachment:
attachments_list = argToList(attachment)
attachments = demisto_entry_to_otrs_attachment(attachments_list)
new_ticket = Ticket.create_basic(
Title=title,
Queue=queue,
State=state,
Priority=priority,
CustomerUser=customer_user,
Type=ticket_type
)
article = Article({
'Subject': article_subject,
'Body': article_body
})
ticket = create_ticket(new_ticket, article, df, attachments)
context = {
'ID': str(ticket['TicketID']),
'Number': ticket['TicketNumber'],
'CustomerUser': customer_user,
'Priority': priority,
'Queue': queue,
'State': state,
'Title': title,
'Article': {
'Subject': article_subject,
'Body': article_body
},
'Type': ticket_type,
'DynamicField': df_output
}
ec = {
'OTRS.Ticket(val.ID===obj.ID)': context
}
output = 'Created ticket {} successfully'.format(ticket['TicketID'])
demisto.results({
'Type': entryTypes['note'],
'Contents': context,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': output,
'EntryContext': ec
})
def create_ticket(new_ticket, article, df, attachments):
args = {'ticket': new_ticket, 'article': article, 'dynamic_fields': df, 'attachments': attachments}
return execute_otrs_method(otrs_client.ticket_create, args)
def update_ticket_command():
ticket_id = demisto.args().get('ticket_id')
title = demisto.args().get('title')
queue = demisto.args().get('queue')
state = demisto.args().get('state')
priority = demisto.args().get('priority')
article_subject = demisto.args().get('article_subject')
article_body = demisto.args().get('article_body')
ticket_type = demisto.args().get('type')
dynamic_fields = demisto.args().get('dynamic_fields')
attachment = demisto.args().get('attachment')
if all(v is None for v in [title, queue, state, priority, article_subject,
article_body, ticket_type, dynamic_fields, attachment]):
return_error('No fields to update were given')
if (article_subject and article_body is None) or (article_subject is None and article_body):
        return_error('Both article subject and body are required in order to add an article')
elif article_subject and article_body:
article_obj = {
'Subject': article_subject,
'Body': article_body
}
article = Article(article_obj)
else:
article = None
if state:
state = translate_state(state)
if priority:
priority = translate_priority(priority)
df = []
if dynamic_fields:
dynamic_fields_list = argToList(dynamic_fields)
for field in dynamic_fields_list:
splitted_field = field.split('=')
current_field, current_value = splitted_field[0], splitted_field[1]
df.append(DynamicField(current_field, current_value))
attachments = []
if attachment:
attachments_list = argToList(attachment)
attachments = demisto_entry_to_otrs_attachment(attachments_list)
ticket = update_ticket(ticket_id, title, queue, state, priority, article, ticket_type, df, attachments)
context = {
'ID': ticket['TicketID'],
}
if priority:
context['Priority'] = priority
if queue:
context['Queue'] = queue
if state:
context['State'] = state
if title:
context['Title'] = title
if article:
context['Article'] = article.to_dct()
if ticket_type:
context['Type'] = ticket_type
ec = {
'OTRS.Ticket(val.ID===obj.ID)': context
}
output = 'Updated ticket {} successfully'.format(ticket['TicketID'])
demisto.results({
'Type': entryTypes['note'],
'Contents': context,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': output,
'EntryContext': ec
})
def close_ticket_command():
ticket_id = demisto.args().get('ticket_id')
article_subject = demisto.args().get('article_subject')
article_body = demisto.args().get('article_body')
article_object = {
'Subject': article_subject,
'Body': article_body
}
article = Article(article_object)
ticket = update_ticket(ticket_id, article=article, state='closed successful')
context = {
'ID': ticket['TicketID'],
'State': 'closed successful',
'Article': article_object
}
ec = {
'OTRS.Ticket(val.ID===obj.ID)': context
}
output = 'Closed ticket {} successfully'.format(ticket['TicketID'])
demisto.results({
'Type': entryTypes['note'],
'Contents': context,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': output,
'EntryContext': ec
})
def update_ticket(ticket_id, title=None, queue=None, state=None, priority=None,
article=None, ticket_type=None, df=None, attachments=None):
kwargs = {'Type': ticket_type}
args = {'ticket_id': ticket_id,
'Title': title,
'Queue': queue,
'State': state,
'Priority': priority,
'article': article,
'dynamic_fields': df,
'attachments': attachments,
'kwargs': kwargs}
return execute_otrs_method(otrs_client.ticket_update, args)
def fetch_incidents():
last_run = demisto.getLastRun() and demisto.getLastRun()['time']
if last_run:
last_run = datetime.strptime(last_run, '%Y-%m-%d %H:%M:%S')
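        # Nudge the timestamp forward by one second so the ticket fetched last
        # time is not returned again by the created_after search below.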
last_run += timedelta(seconds=1)
else:
last_run, _ = parse_date_range(FETCH_TIME)
queue_list = argToList(FETCH_QUEUE)
if 'Any' in queue_list:
queue = None
else:
queue = queue_list
priority = None
if FETCH_PRIORITY:
priority = [translate_priority(p) for p in FETCH_PRIORITY]
tickets = search_ticket(created_after=last_run, queue=queue, priority=priority)
incidents = []
first_ticket = True
last_created = ''
for ticket_id in tickets:
ticket = get_ticket(ticket_id)
incident = ticket_to_incident(ticket)
incidents.append(incident)
if first_ticket:
            # The first ticket fetched is the most recently created, so use its creation time as the new last-run time
last_created = ticket['Created']
first_ticket = False
demisto.incidents(incidents)
if not last_created:
last_created = datetime.strftime(last_run, '%Y-%m-%d %H:%M:%S')
demisto.setLastRun({'time': last_created})
def update_session():
otrs_client.session_create()
sessionID = otrs_client.session_id_store.value
demisto.setIntegrationContext({'SessionID': sessionID})
otrs_client.session_id_store.write(sessionID)
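# execute_otrs_method wraps every PyOTRS call: if the call raises (for example
# because the cached session has expired), the session is refreshed once via
# update_session() and the call is retried a single time.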
def execute_otrs_method(method, args):
try:
response = method(**args)
except Exception:
update_session()
response = method(**args)
return response
def main():
global otrs_client
handle_proxy(demisto.params().get('proxy'))
cache = demisto.getIntegrationContext()
otrs_client = Client(SERVER, USERNAME, PASSWORD, https_verify=USE_SSL)
    # OTRS creates a new session for each request; to avoid that behavior,
    # save the session ID in the integration context and reuse it across calls
if cache.get('SessionID'):
otrs_client.session_id_store.write(cache['SessionID'])
else:
update_session()
LOG('command is %s' % (demisto.command(), ))
try:
if demisto.command() == 'test-module':
# Testing connectivity and credentials
demisto.results('ok')
elif demisto.command() == 'fetch-incidents':
fetch_incidents()
elif demisto.command() == 'otrs-get-ticket':
get_ticket_command()
elif demisto.command() == 'otrs-search-ticket':
search_ticket_command()
elif demisto.command() == 'otrs-create-ticket':
create_ticket_command()
elif demisto.command() == 'otrs-update-ticket':
update_ticket_command()
elif demisto.command() == 'otrs-close-ticket':
close_ticket_command()
except Exception as e:
LOG(str(e))
LOG.print_log()
return_error(str(e))
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
|
|
from typing import Dict, Any, Optional # noqa: F401
from vint.ast.plugin.scope_plugin.scope import (
ScopeVisibility,
ExplicityOfScopeVisibility,
Scope,
)
from vint.ast.node_type import NodeType
from vint.ast.dictionary.builtins import (
BuiltinVariablesCanHaveImplicitScope,
BuiltinFunctions,
)
from vint.ast.plugin.scope_plugin.identifier_attribute import (
is_dynamic_identifier,
is_declarative_identifier,
is_function_identifier,
is_member_identifier,
is_on_lambda_string_context,
is_lambda_argument_identifier,
is_function_argument,
)
class ScopeVisibilityHint:
def __init__(self, scope_visibility, explicity):
# type: (ScopeVisibility, ExplicityOfScopeVisibility) -> None
self.scope_visibility = scope_visibility
self.explicity = explicity
FunctionDeclarationIdentifierScopePrefixToScopeVisibility = {
'g:': ScopeVisibility.GLOBAL_LIKE,
'b:': ScopeVisibility.INVALID,
'w:': ScopeVisibility.INVALID,
't:': ScopeVisibility.INVALID,
's:': ScopeVisibility.SCRIPT_LOCAL,
'l:': ScopeVisibility.INVALID,
'a:': ScopeVisibility.INVALID,
'v:': ScopeVisibility.INVALID,
}
VariableIdentifierScopePrefixToScopeVisibility = {
'g:': ScopeVisibility.GLOBAL_LIKE,
'b:': ScopeVisibility.GLOBAL_LIKE,
'w:': ScopeVisibility.GLOBAL_LIKE,
't:': ScopeVisibility.GLOBAL_LIKE,
's:': ScopeVisibility.SCRIPT_LOCAL,
'l:': ScopeVisibility.FUNCTION_LOCAL,
'a:': ScopeVisibility.FUNCTION_LOCAL,
'v:': ScopeVisibility.BUILTIN,
}
GlobalLikeScopeVisibilityNodeTypes = {
NodeType.ENV: True,
NodeType.OPTION: True,
NodeType.REG: True,
}
IdentifierLikeNodeTypes = {
NodeType.IDENTIFIER: True,
NodeType.ENV: True,
NodeType.OPTION: True,
NodeType.REG: True,
}
def is_builtin_variable(id_node): # type: (Dict[str, Any]) -> bool
""" Whether the specified node is a builtin identifier. """
# Builtin variables are always IDENTIFIER.
if NodeType(id_node['type']) is not NodeType.IDENTIFIER:
return False
id_value = id_node['value']
if id_value.startswith('v:'):
# It is an explicit builtin variable such as: "v:count", "v:char"
# TODO: Add unknown builtin flag
return True
if is_builtin_function(id_node):
return True
if id_value in ['key', 'val']:
        # These builtin variable names are available only inside map() or filter().
return is_on_lambda_string_context(id_node)
# It is an implicit builtin variable such as: "count", "char"
return id_value in BuiltinVariablesCanHaveImplicitScope
def is_builtin_function(id_node): # type: (Dict[str, Any]) -> bool
""" Whether the specified node is a builtin function name identifier.
The given identifier should be a child node of NodeType.CALL.
"""
# Builtin functions are always IDENTIFIER.
if NodeType(id_node['type']) is not NodeType.IDENTIFIER:
return False
id_value = id_node['value']
if not is_function_identifier(id_node):
return False
    # There is a difference between a function identifier and a variable
    # identifier:
#
# let localtime = 0
# echo localtime " => 0
# echo localtime() " => 1420011455
return id_value in BuiltinFunctions
def is_analyzable_identifier(node): # type: (Dict[str, Any]) -> bool
""" Whether the specified node is an analyzable identifier.
    A declarative-identifier-like node is analyzable if it is neither dynamic
    nor a member variable, because then we can do static scope analysis.
Analyzable cases:
- let s:var = 0
- function! Func()
- echo s:var
Unanalyzable cases:
- let s:my_{var} = 0
- function! dict.Func()
- echo s:my_{var}
"""
return not (is_dynamic_identifier(node) or is_member_identifier(node))
def is_analyzable_declarative_identifier(node): # type: (Dict[str, Any]) -> bool
""" Whether the specified node is an analyzable declarative identifier.
    A declarative-identifier-like node is analyzable if it is neither dynamic
    nor a member variable, because then we can do static scope analysis.
Analyzable cases:
- let s:var = 0
- function! Func()
Unanalyzable cases:
- let s:my_{var} = 0
- function! dict.Func()
"""
return is_declarative_identifier(node) and is_analyzable_identifier(node)
def detect_possible_scope_visibility(node, context_scope): # type: (Dict[str, Any], Scope) -> ScopeVisibilityHint
""" Returns a *possible* variable visibility by the specified node.
The "possible" means that we can not determine a scope visibility of lambda arguments until reachability check.
"""
node_type = NodeType(node['type'])
if not is_analyzable_identifier(node):
return ScopeVisibilityHint(
ScopeVisibility.UNANALYZABLE,
ExplicityOfScopeVisibility.UNANALYZABLE
)
if node_type is NodeType.IDENTIFIER:
return _detect_possible_identifier_scope_visibility(node, context_scope)
if node_type in GlobalLikeScopeVisibilityNodeTypes:
return ScopeVisibilityHint(
ScopeVisibility.GLOBAL_LIKE,
ExplicityOfScopeVisibility.IMPLICIT_BUT_CONSTRAINED
)
return ScopeVisibilityHint(
ScopeVisibility.UNANALYZABLE,
ExplicityOfScopeVisibility.UNANALYZABLE
)
def _detect_possible_identifier_scope_visibility(id_node, context_scope):
# type: (Dict[str, Any], Scope) -> ScopeVisibilityHint
explicit_scope_visibility = _get_explicit_scope_visibility(id_node)
if explicit_scope_visibility is not None:
        # Vim allows `g:` as a function name prefix, but it is not recommended.
# SEE: https://github.com/Kuniwak/vint/pull/136
is_unrecommended_explicit = is_function_identifier(id_node) and _is_just_global(id_node)
if is_unrecommended_explicit:
return ScopeVisibilityHint(
explicit_scope_visibility,
ExplicityOfScopeVisibility.UNRECOMMENDED_EXPLICIT
)
return ScopeVisibilityHint(
explicit_scope_visibility,
ExplicityOfScopeVisibility.EXPLICIT
)
if is_function_argument(id_node):
# Function arguments can not have any explicit scope prefix.
return ScopeVisibilityHint(
ScopeVisibility.FUNCTION_LOCAL,
ExplicityOfScopeVisibility.IMPLICIT_BUT_CONSTRAINED
)
if is_builtin_function(id_node):
# Builtin functions can not have any scope prefix.
return ScopeVisibilityHint(
ScopeVisibility.BUILTIN,
ExplicityOfScopeVisibility.IMPLICIT_BUT_CONSTRAINED
)
if is_builtin_variable(id_node):
        # An implicit scope variable is resolved as a builtin variable if it
        # has the same name as a Vim builtin variable.
return ScopeVisibilityHint(
ScopeVisibility.BUILTIN,
ExplicityOfScopeVisibility.IMPLICIT
)
if is_function_identifier(id_node):
        # Functions can only have explicit global, implicit global, or explicit
        # script-local scope visibility. So a function with implicit scope
        # visibility is always a global function.
        #
        # And the explicity should be implicit. Vim allows `g:` but it is not recommended.
# SEE: https://github.com/Kuniwak/vint/pull/136
return ScopeVisibilityHint(
ScopeVisibility.GLOBAL_LIKE,
ExplicityOfScopeVisibility.IMPLICIT_BUT_CONSTRAINED
)
if not context_scope:
# We can not detect implicit scope visibility if context scope is not
# specified.
return ScopeVisibilityHint(
ScopeVisibility.UNANALYZABLE,
ExplicityOfScopeVisibility.UNANALYZABLE
)
current_scope_visibility = context_scope.scope_visibility
    # A lambda argument declaration or its references cannot have any explicit scope prefix.
if current_scope_visibility is ScopeVisibility.LAMBDA:
if is_lambda_argument_identifier(id_node):
# It can not have any explicit scope prefix.
explicity = ExplicityOfScopeVisibility.IMPLICIT_BUT_CONSTRAINED
else:
            # We cannot detect the scope of an implicit variable until we know
            # whether it can reach a lambda argument or not. If it can reach a
            # lambda argument it is IMPLICIT_BUT_CONSTRAINED, otherwise IMPLICIT.
explicity = ExplicityOfScopeVisibility.IMPLICIT_OR_LAMBDA
else:
explicity = ExplicityOfScopeVisibility.IMPLICIT
if current_scope_visibility is ScopeVisibility.SCRIPT_LOCAL:
        # An implicit scope variable is resolved as a global variable when the
        # current scope is script local.
return ScopeVisibilityHint(
ScopeVisibility.GLOBAL_LIKE,
explicity
)
    # Otherwise it is a function local variable.
return ScopeVisibilityHint(
ScopeVisibility.FUNCTION_LOCAL,
explicity
)
def _get_explicit_scope_visibility(id_node): # type: (Dict[str, Any]) -> Optional[ScopeVisibility]
# See :help internal-variables
scope_prefix = id_node['value'][0:2]
if is_function_identifier(id_node) and is_declarative_identifier(id_node):
return FunctionDeclarationIdentifierScopePrefixToScopeVisibility.get(scope_prefix)
else:
return VariableIdentifierScopePrefixToScopeVisibility.get(scope_prefix)
def _is_just_global(id_node): # type: (Dict[str, Any]) -> bool
# See :help internal-variables
return id_node['value'][0:2] == 'g:'
|
|
import re
import sys
import types
import appvalidator.unicodehelper as unicodehelper
from . import csstester
from appvalidator.contextgenerator import ContextGenerator
from appvalidator.constants import *
from appvalidator.csp import warn as message_csp
from appvalidator.python.HTMLParser import HTMLParser, HTMLParseError
DEBUG = False
UNSAFE_TAGS = ("script", "object", "embed", "base", )
SELF_CLOSING_TAGS = ("area", "base", "basefont", "br", "col", "frame", "hr",
"img", "input", "li", "link", "meta", "p", "param", )
TAG_NOT_OPENED = "Tag (%s) being closed before it is opened."
REMOTE_URL_PATTERN = re.compile("((ht|f)tps?:)?//")
DOM_MUTATION_HANDLERS = set([
"ondomattrmodified", "ondomattributenamechanged",
"ondomcharacterdatamodified", "ondomelementnamechanged",
"ondomnodeinserted", "ondomnodeinsertedintodocument", "ondomnoderemoved",
"ondomnoderemovedfromdocument", "ondomsubtreemodified", ])
class MarkupParser(HTMLParser):
"""Parse and analyze the versious components of markup files."""
def __init__(self, err, strict=True, debug=False):
HTMLParser.__init__(self)
self.err = err
self.is_jetpack = "is_jetpack" in err.metadata # Cache this value.
self.line = 0
self.strict = strict
self.debug = debug
self.context = None
self.xml_state = []
self.xml_line_stack = []
self.xml_buffer = []
self.reported = set()
def process(self, filename, data, extension="html"):
"""Processes data by splitting it into individual lines, then
incrementally feeding each line into the parser, increasing the
value of the line number with each line."""
self.line = 0
self.filename = filename
self.extension = extension.lower()
self.reported = set()
self.context = ContextGenerator(data)
lines = data.split("\n")
buffering = False
pline = 0
for line in lines:
self.line += 1
search_line = line
while True:
# If a CDATA element is found, push it and its contents to the
# buffer. Push everything previous to it to the parser.
if "<![CDATA[" in search_line and not buffering:
# Find the CDATA element.
cdatapos = search_line.find("<![CDATA[")
# If the element isn't at the start of the line, pass
# everything before it to the parser.
if cdatapos:
self._feed_parser(search_line[:cdatapos])
# Collect the rest of the line to send it to the buffer.
search_line = search_line[cdatapos:]
buffering = True
continue
elif "]]>" in search_line and buffering:
# If we find the end element on the line being scanned,
# buffer everything up to the end of it, and let the rest
# of the line pass through for further processing.
end_cdatapos = search_line.find("]]>") + 3
self._save_to_buffer(search_line[:end_cdatapos])
search_line = search_line[end_cdatapos:]
buffering = False
break
if buffering:
self._save_to_buffer(search_line + "\n")
else:
self._feed_parser(search_line)
def _feed_parser(self, line):
"""Feed incoming data into the underlying HTMLParser."""
line = unicodehelper.decode(line)
try:
self.feed(line + u"\n")
        except UnicodeDecodeError:
# There's no recovering from a unicode error here. We've got the
# unicodehelper; if that doesn't help us, nothing will.
return
except HTMLParseError as inst:
if DEBUG: # pragma: no cover
print self.xml_state, inst
if "markup" in self.reported:
return
if ("script" in self.xml_state or
self.debug and "testscript" in self.xml_state):
if "script_comments" in self.reported or not self.strict:
return
self.err.notice(
err_id=("testcases_markup_markuptester", "_feed",
"missing_script_comments"),
notice="Missing comments in <script> tag",
description="Markup parsing errors occurred while trying "
"to parse the file. This would likely be "
"mitigated by wrapping <script> tag contents "
"in HTML comment tags (<!-- -->)",
filename=self.filename,
line=self.line,
context=self.context,
tier=2)
self.reported.add("script_comments")
return
if self.strict:
self.err.warning(
err_id=("testcases_markup_markuptester", "_feed",
"parse_error"),
warning="Markup parsing error",
description=["There was an error parsing a markup file.",
str(inst)],
filename=self.filename,
line=self.line,
context=self.context)
self.reported.add("markup")
def handle_startendtag(self, tag, attrs):
self.handle_starttag(tag, attrs, True)
self.handle_endtag(tag)
def handle_starttag(self, tag, attrs, self_closing=False):
# Normalize!
tag = tag.lower()
# Be extra sure it's not a self-closing tag.
if not self_closing:
self_closing = tag in SELF_CLOSING_TAGS
if DEBUG: # pragma: no cover
print "S: ", self.xml_state, tag, self_closing
attr_dict = dict([(a[0].lower(), a[1]) for a in attrs if a[1]])
if "style" in attr_dict:
csstester.test_css_snippet(
self.err, self.filename, attr_dict["style"], self.line)
event_attribute = lambda k: k.startswith("on") and "-" not in k
script_attributes = dict(
(k, v) for k, v in attr_dict.iteritems() if event_attribute(k))
if script_attributes:
if any(k in DOM_MUTATION_HANDLERS for k in script_attributes):
self.err.error(
err_id=("testcases_markup_markuptester",
"handle_starttag", "dom_manipulation_handler"),
error="DOM Mutation Events Prohibited",
description="DOM mutation events are flagged because of "
"their deprecated status, as well as thier "
"extreme inefficiency. Consider using a "
"different event.",
filename=self.filename,
line=self.line,
context=self.context)
message_csp(err=self.err, filename=self.filename,
line=self.line, column=None, context=self.context,
violation_type="script_attribute", severity="error")
# When the dev forgets their <!-- --> on a script tag, bad
# things happen.
if "script" in self.xml_state and tag != "script":
self._save_to_buffer("<" + tag + self._format_args(attrs) + ">")
return
elif (tag == "script" and
("type" not in attr_dict or
any(a[0] == "type" and "javascript" in a[1].lower() for
a in attrs))):
# Inspect scripts which either have no type or have a type which
# is JS.
if "src" not in attr_dict:
# CSP warnings for inline scripts
message_csp(err=self.err, filename=self.filename,
line=self.line, column=None,
context=self.context,
violation_type="inline_script",
severity="error")
elif not self._is_url_local(attr_dict.get("src", "")):
# If there's a remote SRC, then that's a CSP violation.
message_csp(err=self.err, filename=self.filename,
line=self.line, column=None, context=self.context,
violation_type="remote_script", severity="error")
self.xml_state.append(tag)
self.xml_line_stack.append(self.line)
self.xml_buffer.append(u"")
def handle_endtag(self, tag):
tag = tag.lower()
if DEBUG: # pragma: no cover
print "E: ", tag, self.xml_state
if not self.xml_state:
if "closing_tags" in self.reported or not self.strict:
if DEBUG:
print "Unstrict; extra closing tags ------"
return
self.err.warning(
err_id=("markup", "handle_endtag", "extra_closing_tags"),
warning="Markup parsing error",
description="The markup file has more closing tags than it "
"has opening tags.",
filename=self.filename,
line=self.line,
context=self.context,
tier=2)
self.reported.add("closing_tags")
if DEBUG: # pragma: no cover
print "Too many closing tags ------"
return
elif "script" in self.xml_state[:-1]:
# If we're in a script tag, nothing else matters. Just rush
# everything possible into the xml buffer.
self._save_to_buffer("</%s>" % tag)
if DEBUG:
print "Markup as text in script ------"
return
elif tag not in self.xml_state:
# If the tag we're processing isn't on the stack, then
# something is wrong.
self.err.warning(
err_id=("markup", "handle_endtag", "extra_closing_tags"),
warning="Parse error: tag closed before opened",
description=["Markup tags cannot be closed before they are "
"opened. Perhaps you were just a little "
"overzealous with forward-slashes?",
'Tag `%s` closed before it was opened' % tag],
filename=self.filename,
line=self.line,
context=self.context,
tier=2)
if DEBUG: # pragma: no cover
print "Tag closed before opened ------"
return
data_buffer = self.xml_buffer.pop()
old_state = self.xml_state.pop()
old_line = self.xml_line_stack.pop()
# If the tag on the stack isn't what's being closed and it also
# classifies as a self-closing tag, we just recursively close
        # down to the level of the tag we're actually closing.
if old_state != tag and old_state in SELF_CLOSING_TAGS:
if DEBUG:
print "Self closing tag cascading down ------"
return self.handle_endtag(tag)
# If this is an XML-derived language, everything must nest
# properly. No overlapping tags.
if (old_state != tag and self.extension[0] == 'x' and not self.strict):
self.err.warning(
err_id=("testcases_markup_markuptester", "handle_endtag",
"invalid_nesting"),
warning="Markup invalidly nested",
description="It has been determined that the document "
"invalidly nests its tags. This is not permitted "
"in the specified document type.",
filename=self.filename,
line=self.line,
context=self.context,
tier=2)
if DEBUG: # pragma: no cover
print "Invalid markup nesting ------"
data_buffer = data_buffer.strip()
# Perform analysis on collected data.
if data_buffer and tag == "style":
csstester.test_css_file(self.err, self.filename, data_buffer,
old_line)
def handle_data(self, data):
self._save_to_buffer(data)
def handle_comment(self, data):
self._save_to_buffer(data)
def parse_marked_section(self, i, report=0):
rawdata = self.rawdata
_markedsectionclose = re.compile(r']\s*]\s*>')
assert rawdata[i:i + 3] == '<![', \
"unexpected call to parse_marked_section()"
sectName, j = self._scan_name(i + 3, i)
if j < 0: # pragma: no cover
return j
if sectName in ("temp", "cdata", "ignore", "include", "rcdata"):
# look for standard ]]> ending
match = _markedsectionclose.search(rawdata, i + 3)
else: # pragma: no cover
self.error('unknown status keyword %r in marked section' %
rawdata[i + 3:j])
if not match: # pragma: no cover
return -1
if report: # pragma: no cover
j = match.start(0)
self.unknown_decl(rawdata[i + 3: j])
return match.end(0)
def _save_to_buffer(self, data):
"""Save data to the XML buffer for the current tag."""
# We're not interested in data that isn't in a tag.
if not self.xml_buffer:
return
self.xml_buffer[-1] += unicodehelper.decode(data)
def _format_args(self, args):
"""Formats a dict of HTML attributes to be in HTML attribute
format."""
if not args:
return ""
return " " + " ".join('%s="%s"' % a for a in args)
def _is_url_local(self, url):
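        # REMOTE_URL_PATTERN treats scheme-relative URLs ("//cdn.example.com/x.js",
        # a hypothetical example) and absolute http(s)/ftp(s) URLs as remote;
        # path-only URLs such as "/js/x.js" do not match and are considered local.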
return not REMOTE_URL_PATTERN.match(url)
|
|
# Copyright 2016 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime, timedelta
import json
import logging
import unittest
import StringIO
import zipfile
from c7n.mu import (
custodian_archive, LambdaManager, PolicyLambda,
CloudWatchLogSubscription)
from c7n.policy import Policy
from c7n.ufuncs import logsub
from .common import BaseTest, Config
class PolicyLambdaProvision(BaseTest):
role = "arn:aws:iam::619193117841:role/lambda_basic_execution"
def assert_items(self, result, expected):
for k, v in expected.items():
self.assertEqual(v, result[k])
def test_cwl_subscriber(self):
self.patch(CloudWatchLogSubscription, 'iam_delay', 0.01)
session_factory = self.replay_flight_data('test_cwl_subscriber')
session = session_factory()
client = session.client('logs')
lname = "custodian-test-log-sub"
self.addCleanup(client.delete_log_group, logGroupName=lname)
client.create_log_group(logGroupName=lname)
linfo = client.describe_log_groups(
logGroupNamePrefix=lname)['logGroups'][0]
params = dict(
session_factory=session_factory,
name="c7n-log-sub",
role=self.role,
sns_topic="arn:",
log_groups=[linfo])
func = logsub.get_function(**params)
manager = LambdaManager(session_factory)
finfo = manager.publish(func)
self.addCleanup(manager.remove, func)
results = client.describe_subscription_filters(logGroupName=lname)
self.assertEqual(len(results['subscriptionFilters']), 1)
self.assertEqual(results['subscriptionFilters'][0]['destinationArn'],
finfo['FunctionArn'])
# try and update
#params['sns_topic'] = "arn:123"
#manager.publish(func)
def test_cwe_update_config_and_code(self):
        # Originally this tested the no-update case, but that is tricky to
        # record: any change to the code causes checksum mismatches, which
        # force a function code update and invalidate both the recorded data
        # and the focus of the test.
session_factory = self.replay_flight_data(
'test_cwe_update', zdata=True)
p = Policy({
'resource': 's3',
'name': 's3-bucket-policy',
'mode': {
'type': 'cloudtrail',
'events': ["CreateBucket"],
},
'filters': [
{'type': 'missing-policy-statement',
'statement_ids': ['RequireEncryptedPutObject']}],
'actions': ['no-op']
}, Config.empty())
pl = PolicyLambda(p)
mgr = LambdaManager(session_factory)
result = mgr.publish(pl, 'Dev', role=self.role)
self.addCleanup(mgr.remove, pl)
p = Policy({
'resource': 's3',
'name': 's3-bucket-policy',
'mode': {
'type': 'cloudtrail',
'memory': 256,
'events': [
"CreateBucket",
{'event': 'PutBucketPolicy',
'ids': 'requestParameters.bucketName',
'source': 's3.amazonaws.com'}]
},
'filters': [
{'type': 'missing-policy-statement',
'statement_ids': ['RequireEncryptedPutObject']}],
'actions': ['no-op']
}, Config.empty())
output = self.capture_logging('custodian.lambda', level=logging.DEBUG)
result2 = mgr.publish(PolicyLambda(p), 'Dev', role=self.role)
lines = output.getvalue().strip().split('\n')
self.assertTrue(
'Updating function custodian-s3-bucket-policy code' in lines)
self.assertTrue(
'Updating function: custodian-s3-bucket-policy config' in lines)
self.assertEqual(result['FunctionName'], result2['FunctionName'])
# drive by coverage
functions = [i for i in mgr.list_functions()
if i['FunctionName'] == 'custodian-s3-bucket-policy']
        self.assertEqual(len(functions), 1)
self.assertEqual(list(mgr.logs(pl)), [])
def test_cwe_trail(self):
session_factory = self.replay_flight_data('test_cwe_trail', zdata=True)
p = Policy({
'resource': 's3',
'name': 's3-bucket-policy',
'mode': {
'type': 'cloudtrail',
'events': ["CreateBucket"],
},
'filters': [
{'type': 'missing-policy-statement',
'statement_ids': ['RequireEncryptedPutObject']}],
'actions': ['no-op']
}, Config.empty())
pl = PolicyLambda(p)
mgr = LambdaManager(session_factory)
result = mgr.publish(pl, 'Dev', role=self.role)
events = pl.get_events(session_factory)
self.assertEqual(len(events), 1)
event = events.pop()
self.assertEqual(
json.loads(event.render_event_pattern()),
{u'detail': {u'eventName': [u'CreateBucket'],
u'eventSource': [u's3.amazonaws.com']},
u'detail-type': ['AWS API Call via CloudTrail']})
self.assert_items(
result,
{'Description': 'cloud-custodian lambda policy',
'FunctionName': 'custodian-s3-bucket-policy',
'Handler': 'custodian_policy.run',
'MemorySize': 512,
'Runtime': 'python2.7',
'Timeout': 60})
mgr.remove(pl)
def test_mu_metrics(self):
session_factory = self.replay_flight_data('test_mu_metrics')
p = Policy({
'resources': 's3',
'name': 's3-bucket-policy',
'resource': 's3',
'mode': {
'type': 'cloudtrail',
'events': ['CreateBucket'],
},
'actions': ['no-op']}, Config.empty())
pl = PolicyLambda(p)
mgr = LambdaManager(session_factory)
end = datetime.utcnow()
start = end - timedelta(1)
results = mgr.metrics([pl], start, end, 3600)
self.assertEqual(
results, [{'Durations': [], 'Errors': [],
'Throttles': [], 'Invocations': []}])
def test_cwe_instance(self):
session_factory = self.replay_flight_data(
'test_cwe_instance', zdata=True)
p = Policy({
'resource': 's3',
'name': 'ec2-encrypted-vol',
'mode': {
'type': 'ec2-instance-state',
'events': ['pending']}
}, Config.empty())
pl = PolicyLambda(p)
mgr = LambdaManager(session_factory)
result = mgr.publish(pl, 'Dev', role=self.role)
self.assert_items(
result,
{'Description': 'cloud-maid lambda policy',
'FunctionName': 'maid-ec2-encrypted-vol',
'Handler': 'maid_policy.run',
'MemorySize': 512,
'Runtime': 'python2.7',
'Timeout': 60})
events = session_factory().client('events')
result = events.list_rules(NamePrefix="maid-ec2-encrypted-vol")
self.assert_items(
result['Rules'][0],
{"State": "ENABLED",
"Name": "maid-ec2-encrypted-vol"})
self.assertEqual(
json.loads(result['Rules'][0]['EventPattern']),
{"source": ["aws.ec2"],
"detail": {
"state": ["pending"]},
"detail-type": ["EC2 Instance State-change Notification"]})
mgr.remove(pl)
def test_cwe_asg_instance(self):
session_factory = self.replay_flight_data('test_cwe_asg', zdata=True)
p = Policy({
'resource': 'asg',
'name': 'asg-spin-detector',
'mode': {
'type': 'asg-instance-state',
'events': ['launch-failure']}
}, Config.empty())
pl = PolicyLambda(p)
mgr = LambdaManager(session_factory)
result = mgr.publish(pl, 'Dev', role=self.role)
self.assert_items(
result,
{'FunctionName': 'maid-asg-spin-detector',
'Handler': 'maid_policy.run',
'MemorySize': 512,
'Runtime': 'python2.7',
'Timeout': 60})
events = session_factory().client('events')
result = events.list_rules(NamePrefix="maid-asg-spin-detector")
self.assert_items(
result['Rules'][0],
{"State": "ENABLED",
"Name": "maid-asg-spin-detector"})
self.assertEqual(
json.loads(result['Rules'][0]['EventPattern']),
{"source": ["aws.autoscaling"],
"detail-type": ["EC2 Instance Launch Unsuccessful"]})
mgr.remove(pl)
def test_cwe_schedule(self):
session_factory = self.replay_flight_data(
'test_cwe_schedule', zdata=True)
p = Policy({
'resource': 'ec2',
'name': 'periodic-ec2-checker',
'mode': {
'type': 'periodic',
'schedule': 'rate(1 day)'
}
}, Config.empty())
pl = PolicyLambda(p)
mgr = LambdaManager(session_factory)
result = mgr.publish(pl, 'Dev', role=self.role)
self.assert_items(
result,
{'FunctionName': 'maid-periodic-ec2-checker',
'Handler': 'maid_policy.run',
'MemorySize': 512,
'Runtime': 'python2.7',
'Timeout': 60})
events = session_factory().client('events')
result = events.list_rules(NamePrefix="maid-periodic-ec2-checker")
self.assert_items(
result['Rules'][0],
{
"State": "ENABLED",
"ScheduleExpression": "rate(1 day)",
"Name": "maid-periodic-ec2-checker"})
mgr.remove(pl)
class PythonArchiveTest(unittest.TestCase):
def test_archive_bytes(self):
self.archive = custodian_archive()
self.archive.create()
self.addCleanup(self.archive.remove)
self.archive.close()
io = StringIO.StringIO(self.archive.get_bytes())
reader = zipfile.ZipFile(io, mode='r')
fileset = [n.filename for n in reader.filelist]
self.assertTrue('c7n/__init__.py' in fileset)
def test_archive_skip(self):
self.archive = custodian_archive("*.pyc")
self.archive.create()
self.addCleanup(self.archive.remove)
self.archive.close()
with open(self.archive.path) as fh:
reader = zipfile.ZipFile(fh, mode='r')
fileset = [n.filename for n in reader.filelist]
for i in ['c7n/__init__.pyc',
'c7n/resources/s3.pyc',
'boto3/__init__.py']:
self.assertFalse(i in fileset)
|
|
#!/bin/env python3
# Backtracking-based solver for Crazy Machines 3 laser puzzles (e.g. campaign 67, "Laser beams and pyramid")
# Solves 67 in 2.5 minutes on an i7-2640m / 1.2 min on an i7-6700
from math import sqrt, pow
class Thingy2D():
def __init__(self, x=0, y=0):
self.x = int(x)
self.y = int(y)
def clone(self):
return self.__class__(self.x, self.y)
def div(self, scalar):
return self.__class__(self.x / scalar, self.y / scalar)
def __add__(self, other):
return self.__class__(self.x + other.x, self.y + other.y)
    def __iadd__(self, other):
        self.x += other.x
        self.y += other.y
        return self
def __sub__(self, other):
return self.__class__(self.x - other.x, self.y - other.y)
    def __isub__(self, other):
        self.x -= other.x
        self.y -= other.y
        return self
def __str__(self):
return "[{0},{1}]".format(self.x, self.y)
class Pos2D(Thingy2D):
pass
class Vec2D(Thingy2D):
def normalize(self):
return self.div(self.len())
def len(self):
return sqrt(pow(self.x, 2) + pow(self.y, 2))
class CMObject():
ROT_0 = 0
ROT_90 = 1
ROT_180 = 2
ROT_270 = 3
    @staticmethod
    def dirToVec(dir):
if dir == CMObject.ROT_0:
return Vec2D(1, 0)
if dir == CMObject.ROT_90:
return Vec2D(0, 1)
if dir == CMObject.ROT_180:
return Vec2D(-1, 0)
if dir == CMObject.ROT_270:
return Vec2D(0, -1)
raise Exception("Invalid direction")
def __init__(self, pos, rot):
self.pos = pos
self.rot = rot
self.pf = None
def onAdd(self, playfield):
self.pf = playfield
def onRemove(self):
self.playfield = None
def getPlayfield(self):
return self.pf
def getPos(self):
return self.pos
def setPos(self, pos):
self.pos = pos
def setPosXY(self, x, y):
self.pos = Pos2D(x, y)
def setRotation(self, rot):
self.rot = rot
class LaserPart(CMObject):
def __init__(self, pos=None, rot=0):
super().__init__(pos, rot)
self.transparent = False
self.beamsIn = []
self.beamsOut = []
def onAdd(self, playfield):
super().onAdd(playfield)
def onRemove(self):
super().onRemove()
for beam in self.beamsOut:
self.pf.removeBeam(beam)
self.beamsOut.clear()
for beam in self.beamsIn:
beam.unblock(self)
self.beamsIn.clear()
def isTransparent(self):
return self.transparent
# laser beam hit object
def hit(self, beam):
if beam in self.beamsIn:
raise Exception("Same beam hit multiple times")
return
self.beamsIn.append(beam)
# Laser beam stops hitting object
def unhit(self, beam):
if beam in self.beamsIn:
self.beamsIn.remove(beam)
class Target(LaserPart):
def __init__(self, pos=None, rot=0):
super().__init__(pos, rot)
self.active = False
def hit(self, beam):
        super().hit(beam)
if not self.doesActivate(beam):
return
self.active = True
def unhit(self, beam):
        super().unhit(beam)
if not self.doesActivate(beam):
return
self.active = False
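    # A target counts as lit only when the beam arrives against its facing
    # direction: with rot=1 (as used for the targets in this puzzle) that is a
    # beam with getDir() == (2 - 1) % 4 == 1, i.e. travelling straight up.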
def doesActivate(self, beam):
return (2 - self.rot) % 4 == beam.getDir()
def isActive(self):
return self.active
def __str__(self):
return "U" if self.isActive() else "X"
class Frame(LaserPart):
def __init__(self, pos=None):
super().__init__(pos, 0)
self.transparent = True
def __str__(self):
return 'O'
class Laser(LaserPart):
def onAdd(self, playfield):
super().onAdd(playfield)
beam = LaserBeam(self, CMObject.dirToVec(self.rot), self.rot)
self.beamsOut.append(beam)
self.pf.addBeam(beam)
def __str__(self):
return '>'
class Mirror(LaserPart):
def __init__(self, pos=None, rot=0):
super().__init__(pos, rot)
self.excitationMap = {}
def hit(self, beam):
super().hit(beam)
dirout = self.doesExcite(beam)
if dirout == -1:
return
beamout = LaserBeam(self, CMObject.dirToVec(dirout), dirout)
self.beamsOut.append(beamout)
self.excitationMap[beam] = beamout
self.pf.addBeam(beamout)
def unhit(self, beam):
        super().unhit(beam)
if self.doesExcite(beam) == -1:
return
if not beam in self.excitationMap:
return
beamout = self.excitationMap.pop(beam)
if not beamout in self.beamsOut:
return
self.beamsOut.remove(beamout)
self.pf.removeBeam(beamout)
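    # Direction convention (from dirToVec): 0 = +x, 1 = +y, 2 = -x, 3 = -y.
    # With rot=0 (drawn as '\') a beam travelling in direction 3 is reflected
    # into direction 0, and a beam travelling in direction 2 into direction 1;
    # doesExcite encodes exactly that with the modular arithmetic below.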
def doesExcite(self, beam):
if (3 - self.rot) % 4 == beam.getDir():
return (beam.getDir() + 1) % 4
if (2 - self.rot) % 4 == beam.getDir():
return (beam.getDir() - 1) % 4
return -1
def __str__(self):
return '/' if self.rot % 2 else '\\'
class Splitter(LaserPart):
def __init__(self, pos=None, rot=0):
super().__init__(pos, rot)
self.exciter = None
def hit(self, beam):
super().hit(beam)
dirsout = self.doesExcite(beam)
if not dirsout:
return
self.exciter = beam
for dirout in dirsout:
beamout = LaserBeam(self, CMObject.dirToVec(dirout), dirout)
self.beamsOut.append(beamout)
self.pf.addBeam(beamout)
def unhit(self, beam):
super().unhit(beam)
        # Splitter.doesExcite returns None (not -1) when the beam is not exciting.
        if self.doesExcite(beam) is None:
return
if beam != self.exciter:
return
for beamout in self.beamsOut:
self.pf.removeBeam(beamout)
self.beamsOut.clear()
def doesExcite(self, beam):
if (2 - self.rot) % 4 == beam.getDir():
return [(self.rot - 1) % 4, (self.rot + 1) % 4]
return None
def __str__(self):
return str(self.rot) #'T'
class LaserBeam():
# src = Source laser part
def __init__(self, src, vec, dir):
self.src = src
self.vec = vec.normalize()
self.dir = dir
self.dest = None
self.beamparts = []
def onAdd(self):
self.updateDest()
def raytrace(self, pos=None):
if not pos:
pos = self.src.getPos() + self.vec
pf = self.src.getPlayfield()
while pf.isInside(pos):
obj = pf.getObjAt(pos)
if obj and not obj.isTransparent():
return obj
self.beamparts.append(pos)
pf.addBeamPart(self, pos)
pos = pos + self.vec
return False
def updateDest(self, pos=None):
if self.dest:
self.dest.unhit(self)
self.dest = self.raytrace(pos)
if self.dest:
self.dest.hit(self)
# Something has been placed in the beam
def block(self, part):
for pos in self.beamparts:
self.src.getPlayfield().removeBeamPart(self, pos)
        self.beamparts.clear()
self.updateDest()
def unblock(self, part):
self.updateDest(part.getPos())
def getDir(self):
return self.dir
def onRemove(self):
self.destroy()
def destroy(self):
for pos in self.beamparts:
self.src.getPlayfield().removeBeamPart(self, pos)
        self.beamparts.clear()
if self.dest:
self.dest.unhit(self)
def __str__(self):
return '|' if self.dir % 2 else '-'
class Playfield():
def __init__(self, width, height):
self.width = width
self.height = height
self.beams = [[] for i in range(0, width * height)]
self.objects = [False for i in range(0, width * height)]
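        # Both grids are flat, row-major lists: cell (x, y) lives at index
        # y * width + x, which every accessor below relies on.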
def placePart(self, part):
part.onAdd(self)
pos = part.getPos()
i = pos.y * self.width + pos.x
self.objects[i] = part
if not part.isTransparent():
for beam in self.beams[i]:
beam.block(part)
def removePart(self, part):
pos = part.getPos()
i = pos.y * self.width + pos.x
if self.objects[i] != part:
raise Exception("Can't remove nonexistent part")
self.objects[i] = None
part.onRemove()
def getPartAtXY(self, x, y):
return self.objects[y * self.width + x]
def getPartAt(self, pos):
return self.getPartAtXY(pos.x, pos.y)
def getBeamsAtXY(self, x, y):
return self.beams[y * self.width + x]
def getBeamsAt(self, pos):
return self.getBeamsAtXY(pos.x, pos.y)
def addBeamPart(self, beam, pos):
self.beams[pos.y * self.width + pos.x].append(beam)
def removeBeamPart(self, beam, pos):
self.beams[pos.y * self.width + pos.x].remove(beam)
def addBeam(self, beam):
beam.onAdd()
def removeBeam(self, beam):
beam.onRemove()
def getObjAtXY(self, x, y):
return self.objects[y * self.width + x]
def getObjAt(self, pos):
return self.getObjAtXY(pos.x, pos.y)
def isInside(self, pos):
return pos.x >= 0 and pos.y >= 0 and pos.x < self.width and pos.y < self.height
def __str__(self):
s = "=" * self.width + "\n"
for y in reversed(range(0, self.height)):
for x in range(0, self.width):
chr = ' '
chrs = list(map(lambda lzr: str(lzr), self.getBeamsAtXY(x, y)))
if len(chrs):
if '|' in chrs and '-' in chrs:
chr = '+'
else:
chr = chrs[0]
obj = self.getObjAtXY(x, y)
if obj:
chr = str(obj)
s += chr
s += "\n"
s += "=" * self.width + "\n"
return s
pf = Playfield(23, 6)
laser = Laser(Pos2D(0, 1), 0)
pf.placePart(laser)
mirror = Mirror(Pos2D(4, 0), 0)
pf.placePart(mirror)
mirror = Mirror(Pos2D(8, 0), 3)
pf.placePart(mirror)
mirror = Mirror(Pos2D(12, 0), 0)
pf.placePart(mirror)
frame = Frame(Pos2D(16, 0))
pf.placePart(frame)
mirror = Mirror(Pos2D(17, 0), 3)
pf.placePart(mirror)
frame = Frame(Pos2D(21, 0))
pf.placePart(frame)
targets = []
target = Target(Pos2D(4, 5), 1)
pf.placePart(target)
targets.append(target)
target = Target(Pos2D(8, 5), 1)
pf.placePart(target)
targets.append(target)
target = Target(Pos2D(12, 5), 1)
pf.placePart(target)
targets.append(target)
target = Target(Pos2D(17, 5), 1)
pf.placePart(target)
targets.append(target)
target = Target(Pos2D(21, 5), 1)
pf.placePart(target)
targets.append(target)
# Calculate all valid locations
validlocs = []
for y in range(0, pf.height):
for x in range(0, pf.width):
if pf.getPartAtXY(x, y):
continue
if y != 0 and not pf.getPartAtXY(x, y - 1):
continue
validlocs.append(Pos2D(x, y))
placeables = [Frame(), Mirror(), Mirror(rot=3), Mirror(rot=3), Splitter(), Splitter(rot=2), Splitter(rot=1), Splitter(rot=1)]
def backtrack(validlocs, placeables):
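    # Depth-first search: try each remaining part at each valid, beam-lit
    # location, recurse with the reduced lists, and undo the placement if the
    # recursion fails. Success means every target is active with no parts left.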
    if len(placeables) == 0:
success = True
for target in targets:
if not target.isActive():
success = False
# Uncomment to see intermediate steps
# else:
# print(str(pf))
return success
for pos in validlocs:
# Place parts only in laser beams
if not len(pf.getBeamsAt(pos)):
continue
for part in placeables:
part.setPos(pos)
pf.placePart(part)
newlocs = list(validlocs)
newlocs.remove(pos)
# Calculate new valid placement location above $part
newloc = pos + Vec2D(0, 1)
if pf.isInside(newloc) and not pf.getPartAt(newloc):
newlocs.append(newloc)
newparts = list(placeables)
newparts.remove(part)
if backtrack(newlocs, newparts):
return True
pf.removePart(part)
return False
print("Solving...")
if backtrack(validlocs, placeables):
print("Solution:")
print(str(pf))
else:
print("No solution")
|
|
import config
import random
import csv
import radar
import math
from datetime import timedelta
from models import Location, Individual, Admission
def random_date(start, stop):
return radar.random_date(
start=start,
stop=stop
)
class Movement:
def __init__(self):
self.individual_list = []
# Generate individuals
self.generate_individuals()
self.create_output_file()
#def __del__(self):
def add_individual(self, individual):
self.individual_list.append(individual)
def generate_individuals(self):
for individual_id in config.IN_PATIENT_LIST:
individual = Individual(individual_id)
self.add_individual(individual)
def create_output_file(self):
# Open file for writing
try:
            # Open the output CSV for writing and stream rows into it
with open(config.OUTPUT_MOVEMENT_FILENAME, 'w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=config.OUTPUT_MOVEMENT_HEADINGS)
writer.writeheader()
self.generate_movement(writer)
except IOError as err:
print("Error in file writing", err)
exit(1)
def generate_movement(self, writer):
"""
for entry in xrange(0, INDIVIDUAL_COUNT):
# FIXME: Exclude individuals already chosen!!
# Choose a random individual
random_individual_list = random.choice(INDIVIDUAL_LIST)
"""
# Shuffle the list to mix the id's around a little
# random.shuffle(INDIVIDUAL_LIST)
output = []
for individual in self.individual_list:
individual_id = individual.id
###############
# Admissions
###############
# Number of admissions
admission = self.generate_random_admission(config.MOVEMENT_DATE_START, config.MOVEMENT_DATE_END, config.ADMISSION_AVG_DURATION)
admission_start_date = admission['start']
admission_end_date = admission['end']
###############
# LOCATIONS
###############
# Calculate number of movements
location_count = self.generate_location_count(admission_start_date, admission_end_date, config.LOCATION_DURATION_PER_COUNT, config.LOCATION_AVG_COUNT)
# Generate a random date between the admission start and end date
# Organise the randomly selected dates into the admission
location_dates = self.generate_movement_list_dates(admission['start'], admission['end'], location_count)
            for i in range(0, location_count):
location_start_date = location_dates[i]['start']
location_end_date = location_dates[i]['end']
# Randomly select a location
location_selected = random.choice(config.LOCATION_LIST)
# Write the entry to the output file
writer.writerow({
config.OUTPUT_MOVEMENT_HEADING_MAPPING['location_start_date']: location_start_date.strftime(config.DATE_FORMAT),
config.OUTPUT_MOVEMENT_HEADING_MAPPING['location_end_date']: location_end_date.strftime(config.DATE_FORMAT),
config.OUTPUT_MOVEMENT_HEADING_MAPPING['admission_end_date']: admission_end_date.strftime(config.DATE_FORMAT),
config.OUTPUT_MOVEMENT_HEADING_MAPPING['admission_start_date']: admission_start_date.strftime(config.DATE_FORMAT),
config.OUTPUT_MOVEMENT_HEADING_MAPPING['patient_id']: individual_id,
config.OUTPUT_MOVEMENT_HEADING_MAPPING['location']: location_selected,
config.OUTPUT_MOVEMENT_HEADING_MAPPING['site_building']: "Site 1"
})
generated_location = Location(location_selected, location_start_date, location_end_date)
individual.add_location(generated_location)
generated_admission = Admission(admission_start_date, admission_end_date)
individual.add_admission(generated_admission)
# #http://www.caijournal.com/viewimage.asp?img=CommunityAcquirInfect_2015_2_1_13_153857_b2.jpg
def generate_random_admission(self, master_start_date, master_end_date, master_duration):
"""
:param start: Date
:param end: Date
:param duration: Int
:return: Dict: { start: Date, end: Date }
"""
duration = random.choice(master_duration)
# Generate a random date between the specified time period
start_date = random_date(
start=master_start_date,
stop=master_end_date
)
end_date = start_date + timedelta(minutes=duration)
return {
'start': start_date,
'end': end_date
}
def generate_location_count(self, master_start_date, master_end_date, location_duration_per_count, location_average_count):
# Dictate how many locations the admission should have
location_count = random.choice(location_average_count)
        # How many days the admission lasts
duration = master_end_date - master_start_date
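        # One "block" per LOCATION_DURATION_PER_COUNT days of admission; note
        # that if this runs under Python 2, duration.days / location_duration_per_count
        # is integer division and rounds down before math.ceil is applied.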
blocks = int(math.ceil(duration.days / location_duration_per_count))
if blocks > 1:
location_count *= blocks
return location_count
def generate_movement_list_dates(self, master_start_date, master_end_date, location_count):
"""
:return: array [{
start: Date,
end: Date
}]
"""
if location_count == 1:
return [{
'start': master_start_date,
'end': master_end_date
}]
dates = []
for location in range(location_count - 1):
random_date = radar.random_date(
start=master_start_date,
stop=master_end_date
)
dates.append(random_date)
# Sort the dates into ascending order
dates.sort()
# FIXME: location count is 1???
        # The first segment runs from the admission start to the first random date
dates_out = [{
'start': master_start_date,
'end': dates[0]
}]
        # Each following segment starts at the current date and ends at the next one
for i, date in enumerate(dates):
            # The final segment ends at the admission end date
if len(dates) - 1 > i:
end = dates[i + 1]
else:
end = master_end_date
dates_out.append({
'start': date,
'end': end
})
return dates_out
|
|
#
# Module providing manager classes for dealing
# with shared objects
#
# multiprocessing/managers.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
__all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token',
'SharedMemoryManager' ]
#
# Imports
#
import sys
import threading
import signal
import array
import queue
import time
import os
from os import getpid
from traceback import format_exc
from . import connection
from .context import reduction, get_spawning_popen, ProcessError
from . import pool
from . import process
from . import util
from . import get_context
try:
from . import shared_memory
HAS_SHMEM = True
except ImportError:
HAS_SHMEM = False
#
# Register some things for pickling
#
def reduce_array(a):
return array.array, (a.typecode, a.tobytes())
reduction.register(array.array, reduce_array)
view_types = [type(getattr({}, name)()) for name in ('items','keys','values')]
if view_types[0] is not list: # only needed in Py3.0
def rebuild_as_list(obj):
return list, (list(obj),)
for view_type in view_types:
reduction.register(view_type, rebuild_as_list)
#
# Type for identifying shared objects
#
class Token(object):
'''
    Type to uniquely identify a shared object
'''
__slots__ = ('typeid', 'address', 'id')
def __init__(self, typeid, address, id):
(self.typeid, self.address, self.id) = (typeid, address, id)
def __getstate__(self):
return (self.typeid, self.address, self.id)
def __setstate__(self, state):
(self.typeid, self.address, self.id) = state
def __repr__(self):
return '%s(typeid=%r, address=%r, id=%r)' % \
(self.__class__.__name__, self.typeid, self.address, self.id)
#
# Function for communication with a manager's server process
#
def dispatch(c, id, methodname, args=(), kwds={}):
'''
Send a message to manager using connection `c` and return response
'''
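    # Wire format: the request is the tuple (id, methodname, args, kwds); the
    # reply is a (kind, result) pair where kind '#RETURN' carries the value and
    # any other kind is turned into an exception by convert_to_error().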
c.send((id, methodname, args, kwds))
kind, result = c.recv()
if kind == '#RETURN':
return result
raise convert_to_error(kind, result)
def convert_to_error(kind, result):
if kind == '#ERROR':
return result
elif kind in ('#TRACEBACK', '#UNSERIALIZABLE'):
if not isinstance(result, str):
raise TypeError(
"Result {0!r} (kind '{1}') type is {2}, not str".format(
result, kind, type(result)))
if kind == '#UNSERIALIZABLE':
return RemoteError('Unserializable message: %s\n' % result)
else:
return RemoteError(result)
else:
return ValueError('Unrecognized message type {!r}'.format(kind))
class RemoteError(Exception):
def __str__(self):
return ('\n' + '-'*75 + '\n' + str(self.args[0]) + '-'*75)
#
# Functions for finding the method names of an object
#
def all_methods(obj):
'''
Return a list of names of methods of `obj`
'''
temp = []
for name in dir(obj):
func = getattr(obj, name)
if callable(func):
temp.append(name)
return temp
def public_methods(obj):
'''
Return a list of names of methods of `obj` which do not start with '_'
'''
return [name for name in all_methods(obj) if name[0] != '_']
#
# Server which is run in a process controlled by a manager
#
class Server(object):
'''
Server class which runs in a process controlled by a manager object
'''
public = ['shutdown', 'create', 'accept_connection', 'get_methods',
'debug_info', 'number_of_objects', 'dummy', 'incref', 'decref']
def __init__(self, registry, address, authkey, serializer):
if not isinstance(authkey, bytes):
raise TypeError(
"Authkey {0!r} is type {1!s}, not bytes".format(
authkey, type(authkey)))
self.registry = registry
self.authkey = process.AuthenticationString(authkey)
Listener, Client = listener_client[serializer]
# do authentication later
self.listener = Listener(address=address, backlog=16)
self.address = self.listener.address
self.id_to_obj = {'0': (None, ())}
self.id_to_refcount = {}
self.id_to_local_proxy_obj = {}
self.mutex = threading.Lock()
def serve_forever(self):
'''
Run the server forever
'''
self.stop_event = threading.Event()
process.current_process()._manager_server = self
try:
accepter = threading.Thread(target=self.accepter)
accepter.daemon = True
accepter.start()
try:
while not self.stop_event.is_set():
self.stop_event.wait(1)
except (KeyboardInterrupt, SystemExit):
pass
finally:
if sys.stdout != sys.__stdout__: # what about stderr?
util.debug('resetting stdout, stderr')
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
sys.exit(0)
def accepter(self):
while True:
try:
c = self.listener.accept()
except OSError:
continue
t = threading.Thread(target=self.handle_request, args=(c,))
t.daemon = True
t.start()
def handle_request(self, c):
'''
Handle a new connection
'''
funcname = result = request = None
try:
connection.deliver_challenge(c, self.authkey)
connection.answer_challenge(c, self.authkey)
request = c.recv()
ignore, funcname, args, kwds = request
assert funcname in self.public, '%r unrecognized' % funcname
func = getattr(self, funcname)
except Exception:
msg = ('#TRACEBACK', format_exc())
else:
try:
result = func(c, *args, **kwds)
except Exception:
msg = ('#TRACEBACK', format_exc())
else:
msg = ('#RETURN', result)
try:
c.send(msg)
except Exception as e:
try:
c.send(('#TRACEBACK', format_exc()))
except Exception:
pass
util.info('Failure to send message: %r', msg)
util.info(' ... request was %r', request)
util.info(' ... exception was %r', e)
c.close()
def serve_client(self, conn):
'''
Handle requests from the proxies in a particular process/thread
'''
util.debug('starting server thread to service %r',
threading.current_thread().name)
recv = conn.recv
send = conn.send
id_to_obj = self.id_to_obj
while not self.stop_event.is_set():
try:
methodname = obj = None
request = recv()
ident, methodname, args, kwds = request
try:
obj, exposed, gettypeid = id_to_obj[ident]
except KeyError as ke:
try:
obj, exposed, gettypeid = \
self.id_to_local_proxy_obj[ident]
except KeyError as second_ke:
raise ke
if methodname not in exposed:
raise AttributeError(
'method %r of %r object is not in exposed=%r' %
(methodname, type(obj), exposed)
)
function = getattr(obj, methodname)
try:
res = function(*args, **kwds)
except Exception as e:
msg = ('#ERROR', e)
else:
typeid = gettypeid and gettypeid.get(methodname, None)
if typeid:
rident, rexposed = self.create(conn, typeid, res)
token = Token(typeid, self.address, rident)
msg = ('#PROXY', (rexposed, token))
else:
msg = ('#RETURN', res)
except AttributeError:
if methodname is None:
msg = ('#TRACEBACK', format_exc())
else:
try:
fallback_func = self.fallback_mapping[methodname]
result = fallback_func(
self, conn, ident, obj, *args, **kwds
)
msg = ('#RETURN', result)
except Exception:
msg = ('#TRACEBACK', format_exc())
except EOFError:
util.debug('got EOF -- exiting thread serving %r',
threading.current_thread().name)
sys.exit(0)
except Exception:
msg = ('#TRACEBACK', format_exc())
try:
try:
send(msg)
except Exception as e:
send(('#UNSERIALIZABLE', format_exc()))
except Exception as e:
util.info('exception in thread serving %r',
threading.current_thread().name)
util.info(' ... message was %r', msg)
util.info(' ... exception was %r', e)
conn.close()
sys.exit(1)
def fallback_getvalue(self, conn, ident, obj):
return obj
def fallback_str(self, conn, ident, obj):
return str(obj)
def fallback_repr(self, conn, ident, obj):
return repr(obj)
fallback_mapping = {
'__str__':fallback_str,
'__repr__':fallback_repr,
'#GETVALUE':fallback_getvalue
}
def dummy(self, c):
pass
def debug_info(self, c):
'''
Return some info --- useful to spot problems with refcounting
'''
# Perhaps include debug info about 'c'?
with self.mutex:
result = []
keys = list(self.id_to_refcount.keys())
keys.sort()
for ident in keys:
if ident != '0':
result.append(' %s: refcount=%s\n %s' %
(ident, self.id_to_refcount[ident],
str(self.id_to_obj[ident][0])[:75]))
return '\n'.join(result)
def number_of_objects(self, c):
'''
Number of shared objects
'''
# Doesn't use (len(self.id_to_obj) - 1) as we shouldn't count ident='0'
return len(self.id_to_refcount)
def shutdown(self, c):
'''
Shutdown this process
'''
try:
util.debug('manager received shutdown message')
c.send(('#RETURN', None))
except:
import traceback
traceback.print_exc()
finally:
self.stop_event.set()
def create(*args, **kwds):
'''
Create a new shared object and return its id
'''
if len(args) >= 3:
self, c, typeid, *args = args
elif not args:
raise TypeError("descriptor 'create' of 'Server' object "
"needs an argument")
else:
if 'typeid' not in kwds:
raise TypeError('create expected at least 2 positional '
'arguments, got %d' % (len(args)-1))
typeid = kwds.pop('typeid')
if len(args) >= 2:
self, c, *args = args
import warnings
warnings.warn("Passing 'typeid' as keyword argument is deprecated",
DeprecationWarning, stacklevel=2)
else:
if 'c' not in kwds:
raise TypeError('create expected at least 2 positional '
'arguments, got %d' % (len(args)-1))
c = kwds.pop('c')
self, *args = args
import warnings
warnings.warn("Passing 'c' as keyword argument is deprecated",
DeprecationWarning, stacklevel=2)
args = tuple(args)
with self.mutex:
callable, exposed, method_to_typeid, proxytype = \
self.registry[typeid]
if callable is None:
if kwds or (len(args) != 1):
raise ValueError(
"Without callable, must have one non-keyword argument")
obj = args[0]
else:
obj = callable(*args, **kwds)
if exposed is None:
exposed = public_methods(obj)
if method_to_typeid is not None:
if not isinstance(method_to_typeid, dict):
raise TypeError(
"Method_to_typeid {0!r}: type {1!s}, not dict".format(
method_to_typeid, type(method_to_typeid)))
exposed = list(exposed) + list(method_to_typeid)
ident = '%x' % id(obj) # convert to string because xmlrpclib
# only has 32 bit signed integers
util.debug('%r callable returned object with id %r', typeid, ident)
self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid)
if ident not in self.id_to_refcount:
self.id_to_refcount[ident] = 0
self.incref(c, ident)
return ident, tuple(exposed)
create.__text_signature__ = '($self, c, typeid, /, *args, **kwds)'
def get_methods(self, c, token):
'''
Return the methods of the shared object indicated by token
'''
return tuple(self.id_to_obj[token.id][1])
def accept_connection(self, c, name):
'''
Spawn a new thread to serve this connection
'''
threading.current_thread().name = name
c.send(('#RETURN', None))
self.serve_client(c)
def incref(self, c, ident):
with self.mutex:
try:
self.id_to_refcount[ident] += 1
except KeyError as ke:
# If no external references exist but an internal (to the
# manager) still does and a new external reference is created
# from it, restore the manager's tracking of it from the
# previously stashed internal ref.
if ident in self.id_to_local_proxy_obj:
self.id_to_refcount[ident] = 1
self.id_to_obj[ident] = \
self.id_to_local_proxy_obj[ident]
obj, exposed, gettypeid = self.id_to_obj[ident]
util.debug('Server re-enabled tracking & INCREF %r', ident)
else:
raise ke
def decref(self, c, ident):
if ident not in self.id_to_refcount and \
ident in self.id_to_local_proxy_obj:
util.debug('Server DECREF skipping %r', ident)
return
with self.mutex:
if self.id_to_refcount[ident] <= 0:
raise AssertionError(
"Id {0!s} ({1!r}) has refcount {2:n}, not 1+".format(
ident, self.id_to_obj[ident],
self.id_to_refcount[ident]))
self.id_to_refcount[ident] -= 1
if self.id_to_refcount[ident] == 0:
del self.id_to_refcount[ident]
if ident not in self.id_to_refcount:
# Two-step process in case the object turns out to contain other
# proxy objects (e.g. a managed list of managed lists).
# Otherwise, deleting self.id_to_obj[ident] would trigger the
# deleting of the stored value (another managed object) which would
# in turn attempt to acquire the mutex that is already held here.
self.id_to_obj[ident] = (None, (), None) # thread-safe
util.debug('disposing of obj with id %r', ident)
with self.mutex:
del self.id_to_obj[ident]
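# Summary of the bookkeeping used by Server above: id_to_refcount counts how
# many external proxies reference each shared object, while
# id_to_local_proxy_obj stashes objects that are only referenced by proxies
# living inside the manager process itself.  decref() disposes of an object
# in two steps -- first replacing the id_to_obj entry, then deleting it under
# a fresh mutex acquisition -- so that dropping a managed object which itself
# holds other proxies cannot re-enter the mutex that is already held.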
#
# Class to represent state of a manager
#
class State(object):
__slots__ = ['value']
INITIAL = 0
STARTED = 1
SHUTDOWN = 2
#
# Mapping from serializer name to Listener and Client types
#
listener_client = {
'pickle' : (connection.Listener, connection.Client),
'xmlrpclib' : (connection.XmlListener, connection.XmlClient)
}
#
# Definition of BaseManager
#
class BaseManager(object):
'''
Base class for managers
'''
_registry = {}
_Server = Server
def __init__(self, address=None, authkey=None, serializer='pickle',
ctx=None):
if authkey is None:
authkey = process.current_process().authkey
self._address = address # XXX not final address if eg ('', 0)
self._authkey = process.AuthenticationString(authkey)
self._state = State()
self._state.value = State.INITIAL
self._serializer = serializer
self._Listener, self._Client = listener_client[serializer]
self._ctx = ctx or get_context()
def get_server(self):
'''
Return server object with serve_forever() method and address attribute
'''
if self._state.value != State.INITIAL:
if self._state.value == State.STARTED:
raise ProcessError("Already started server")
elif self._state.value == State.SHUTDOWN:
raise ProcessError("Manager has shut down")
else:
raise ProcessError(
"Unknown state {!r}".format(self._state.value))
return Server(self._registry, self._address,
self._authkey, self._serializer)
def connect(self):
'''
Connect manager object to the server process
'''
Listener, Client = listener_client[self._serializer]
conn = Client(self._address, authkey=self._authkey)
dispatch(conn, None, 'dummy')
self._state.value = State.STARTED
def start(self, initializer=None, initargs=()):
'''
Spawn a server process for this manager object
'''
if self._state.value != State.INITIAL:
if self._state.value == State.STARTED:
raise ProcessError("Already started server")
elif self._state.value == State.SHUTDOWN:
raise ProcessError("Manager has shut down")
else:
raise ProcessError(
"Unknown state {!r}".format(self._state.value))
if initializer is not None and not callable(initializer):
raise TypeError('initializer must be a callable')
# pipe over which we will retrieve address of server
reader, writer = connection.Pipe(duplex=False)
# spawn process which runs a server
self._process = self._ctx.Process(
target=type(self)._run_server,
args=(self._registry, self._address, self._authkey,
self._serializer, writer, initializer, initargs),
)
ident = ':'.join(str(i) for i in self._process._identity)
self._process.name = type(self).__name__ + '-' + ident
self._process.start()
# get address of server
writer.close()
self._address = reader.recv()
reader.close()
# register a finalizer
self._state.value = State.STARTED
self.shutdown = util.Finalize(
self, type(self)._finalize_manager,
args=(self._process, self._address, self._authkey,
self._state, self._Client),
exitpriority=0
)
@classmethod
def _run_server(cls, registry, address, authkey, serializer, writer,
initializer=None, initargs=()):
'''
Create a server, report its address and run it
'''
# bpo-36368: protect server process from KeyboardInterrupt signals
signal.signal(signal.SIGINT, signal.SIG_IGN)
if initializer is not None:
initializer(*initargs)
# create server
server = cls._Server(registry, address, authkey, serializer)
# inform parent process of the server's address
writer.send(server.address)
writer.close()
# run the manager
util.info('manager serving at %r', server.address)
server.serve_forever()
def _create(self, typeid, /, *args, **kwds):
'''
Create a new shared object; return the token and exposed tuple
'''
assert self._state.value == State.STARTED, 'server not yet started'
conn = self._Client(self._address, authkey=self._authkey)
try:
id, exposed = dispatch(conn, None, 'create', (typeid,)+args, kwds)
finally:
conn.close()
return Token(typeid, self._address, id), exposed
def join(self, timeout=None):
'''
Join the manager process (if it has been spawned)
'''
if self._process is not None:
self._process.join(timeout)
if not self._process.is_alive():
self._process = None
def _debug_info(self):
'''
        Return some info about the server's shared objects and connections
'''
conn = self._Client(self._address, authkey=self._authkey)
try:
return dispatch(conn, None, 'debug_info')
finally:
conn.close()
def _number_of_objects(self):
'''
Return the number of shared objects
'''
conn = self._Client(self._address, authkey=self._authkey)
try:
return dispatch(conn, None, 'number_of_objects')
finally:
conn.close()
def __enter__(self):
if self._state.value == State.INITIAL:
self.start()
if self._state.value != State.STARTED:
if self._state.value == State.INITIAL:
raise ProcessError("Unable to start server")
elif self._state.value == State.SHUTDOWN:
raise ProcessError("Manager has shut down")
else:
raise ProcessError(
"Unknown state {!r}".format(self._state.value))
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.shutdown()
@staticmethod
def _finalize_manager(process, address, authkey, state, _Client):
'''
Shutdown the manager process; will be registered as a finalizer
'''
if process.is_alive():
util.info('sending shutdown message to manager')
try:
conn = _Client(address, authkey=authkey)
try:
dispatch(conn, None, 'shutdown')
finally:
conn.close()
except Exception:
pass
process.join(timeout=1.0)
if process.is_alive():
util.info('manager still alive')
if hasattr(process, 'terminate'):
util.info('trying to `terminate()` manager process')
process.terminate()
process.join(timeout=0.1)
if process.is_alive():
util.info('manager still alive after terminate')
state.value = State.SHUTDOWN
try:
del BaseProxy._address_to_local[address]
except KeyError:
pass
@property
def address(self):
return self._address
@classmethod
def register(cls, typeid, callable=None, proxytype=None, exposed=None,
method_to_typeid=None, create_method=True):
'''
Register a typeid with the manager type
'''
if '_registry' not in cls.__dict__:
cls._registry = cls._registry.copy()
if proxytype is None:
proxytype = AutoProxy
exposed = exposed or getattr(proxytype, '_exposed_', None)
method_to_typeid = method_to_typeid or \
getattr(proxytype, '_method_to_typeid_', None)
if method_to_typeid:
for key, value in list(method_to_typeid.items()): # isinstance?
assert type(key) is str, '%r is not a string' % key
assert type(value) is str, '%r is not a string' % value
cls._registry[typeid] = (
callable, exposed, method_to_typeid, proxytype
)
if create_method:
def temp(self, /, *args, **kwds):
util.debug('requesting creation of a shared %r object', typeid)
token, exp = self._create(typeid, *args, **kwds)
proxy = proxytype(
token, self._serializer, manager=self,
authkey=self._authkey, exposed=exp
)
conn = self._Client(token.address, authkey=self._authkey)
dispatch(conn, None, 'decref', (token.id,))
return proxy
temp.__name__ = typeid
setattr(cls, typeid, temp)
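# Hypothetical usage sketch for BaseManager -- MathsClass and MyManager are
# example names, not defined in this module:
#
#     class MathsClass:
#         def add(self, x, y):
#             return x + y
#
#     class MyManager(BaseManager):
#         pass
#
#     MyManager.register('Maths', MathsClass)
#
#     if __name__ == '__main__':
#         with MyManager() as manager:       # __enter__() calls start()
#             maths = manager.Maths()        # method generated by register()
#             print(maths.add(4, 3))         # forwarded through the proxy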
#
# Subclass of set which get cleared after a fork
#
class ProcessLocalSet(set):
def __init__(self):
util.register_after_fork(self, lambda obj: obj.clear())
def __reduce__(self):
return type(self), ()
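# ProcessLocalSet is used below as the per-manager-address `_idset` of
# BaseProxy: it is emptied in a forked child (via register_after_fork), so
# the child starts with no recorded proxy ids and each proxy re-registers
# itself through BaseProxy._after_fork().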
#
# Definition of BaseProxy
#
class BaseProxy(object):
'''
A base for proxies of shared objects
'''
_address_to_local = {}
_mutex = util.ForkAwareThreadLock()
def __init__(self, token, serializer, manager=None,
authkey=None, exposed=None, incref=True, manager_owned=False):
with BaseProxy._mutex:
tls_idset = BaseProxy._address_to_local.get(token.address, None)
if tls_idset is None:
tls_idset = util.ForkAwareLocal(), ProcessLocalSet()
BaseProxy._address_to_local[token.address] = tls_idset
# self._tls is used to record the connection used by this
# thread to communicate with the manager at token.address
self._tls = tls_idset[0]
# self._idset is used to record the identities of all shared
# objects for which the current process owns references and
# which are in the manager at token.address
self._idset = tls_idset[1]
self._token = token
self._id = self._token.id
self._manager = manager
self._serializer = serializer
self._Client = listener_client[serializer][1]
# Should be set to True only when a proxy object is being created
# on the manager server; primary use case: nested proxy objects.
# RebuildProxy detects when a proxy is being created on the manager
# and sets this value appropriately.
self._owned_by_manager = manager_owned
if authkey is not None:
self._authkey = process.AuthenticationString(authkey)
elif self._manager is not None:
self._authkey = self._manager._authkey
else:
self._authkey = process.current_process().authkey
if incref:
self._incref()
util.register_after_fork(self, BaseProxy._after_fork)
def _connect(self):
util.debug('making connection to manager')
name = process.current_process().name
if threading.current_thread().name != 'MainThread':
name += '|' + threading.current_thread().name
conn = self._Client(self._token.address, authkey=self._authkey)
dispatch(conn, None, 'accept_connection', (name,))
self._tls.connection = conn
def _callmethod(self, methodname, args=(), kwds={}):
'''
        Try to call a method of the referent and return a copy of the result
'''
try:
conn = self._tls.connection
except AttributeError:
util.debug('thread %r does not own a connection',
threading.current_thread().name)
self._connect()
conn = self._tls.connection
conn.send((self._id, methodname, args, kwds))
kind, result = conn.recv()
if kind == '#RETURN':
return result
elif kind == '#PROXY':
exposed, token = result
proxytype = self._manager._registry[token.typeid][-1]
token.address = self._token.address
proxy = proxytype(
token, self._serializer, manager=self._manager,
authkey=self._authkey, exposed=exposed
)
conn = self._Client(token.address, authkey=self._authkey)
dispatch(conn, None, 'decref', (token.id,))
return proxy
raise convert_to_error(kind, result)
def _getvalue(self):
'''
Get a copy of the value of the referent
'''
return self._callmethod('#GETVALUE')
def _incref(self):
if self._owned_by_manager:
util.debug('owned_by_manager skipped INCREF of %r', self._token.id)
return
conn = self._Client(self._token.address, authkey=self._authkey)
dispatch(conn, None, 'incref', (self._id,))
util.debug('INCREF %r', self._token.id)
self._idset.add(self._id)
state = self._manager and self._manager._state
self._close = util.Finalize(
self, BaseProxy._decref,
args=(self._token, self._authkey, state,
self._tls, self._idset, self._Client),
exitpriority=10
)
@staticmethod
def _decref(token, authkey, state, tls, idset, _Client):
idset.discard(token.id)
# check whether manager is still alive
if state is None or state.value == State.STARTED:
# tell manager this process no longer cares about referent
try:
util.debug('DECREF %r', token.id)
conn = _Client(token.address, authkey=authkey)
dispatch(conn, None, 'decref', (token.id,))
except Exception as e:
util.debug('... decref failed %s', e)
else:
util.debug('DECREF %r -- manager already shutdown', token.id)
# check whether we can close this thread's connection because
# the process owns no more references to objects for this manager
if not idset and hasattr(tls, 'connection'):
util.debug('thread %r has no more proxies so closing conn',
threading.current_thread().name)
tls.connection.close()
del tls.connection
def _after_fork(self):
self._manager = None
try:
self._incref()
except Exception as e:
# the proxy may just be for a manager which has shutdown
util.info('incref failed: %s' % e)
def __reduce__(self):
kwds = {}
if get_spawning_popen() is not None:
kwds['authkey'] = self._authkey
if getattr(self, '_isauto', False):
kwds['exposed'] = self._exposed_
return (RebuildProxy,
(AutoProxy, self._token, self._serializer, kwds))
else:
return (RebuildProxy,
(type(self), self._token, self._serializer, kwds))
def __deepcopy__(self, memo):
return self._getvalue()
def __repr__(self):
return '<%s object, typeid %r at %#x>' % \
(type(self).__name__, self._token.typeid, id(self))
def __str__(self):
'''
Return representation of the referent (or a fall-back if that fails)
'''
try:
return self._callmethod('__repr__')
except Exception:
return repr(self)[:-1] + "; '__str__()' failed>"
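# Note on pickling: BaseProxy.__reduce__ above delegates reconstruction to
# RebuildProxy() below.  Proxies created by AutoProxy (which sets _isauto)
# are rebuilt through AutoProxy so that their exposed-method list travels
# with the pickle; the authkey is only embedded when pickling for a child
# process that is being spawned (get_spawning_popen() is not None).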
#
# Function used for unpickling
#
def RebuildProxy(func, token, serializer, kwds):
'''
Function used for unpickling proxy objects.
'''
server = getattr(process.current_process(), '_manager_server', None)
if server and server.address == token.address:
util.debug('Rebuild a proxy owned by manager, token=%r', token)
kwds['manager_owned'] = True
if token.id not in server.id_to_local_proxy_obj:
server.id_to_local_proxy_obj[token.id] = \
server.id_to_obj[token.id]
incref = (
kwds.pop('incref', True) and
not getattr(process.current_process(), '_inheriting', False)
)
return func(token, serializer, incref=incref, **kwds)
#
# Functions to create proxies and proxy types
#
def MakeProxyType(name, exposed, _cache={}):
'''
Return a proxy type whose methods are given by `exposed`
'''
exposed = tuple(exposed)
try:
return _cache[(name, exposed)]
except KeyError:
pass
dic = {}
for meth in exposed:
exec('''def %s(self, /, *args, **kwds):
return self._callmethod(%r, args, kwds)''' % (meth, meth), dic)
ProxyType = type(name, (BaseProxy,), dic)
ProxyType._exposed_ = exposed
_cache[(name, exposed)] = ProxyType
return ProxyType
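# For each name in `exposed`, MakeProxyType() generates a forwarding method
# equivalent to this hypothetical example:
#
#     def append(self, /, *args, **kwds):
#         return self._callmethod('append', args, kwds)
#
# so every call on the resulting proxy type is executed against the referent
# held by the manager's server.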
def AutoProxy(token, serializer, manager=None, authkey=None,
exposed=None, incref=True):
'''
Return an auto-proxy for `token`
'''
_Client = listener_client[serializer][1]
if exposed is None:
conn = _Client(token.address, authkey=authkey)
try:
exposed = dispatch(conn, None, 'get_methods', (token,))
finally:
conn.close()
if authkey is None and manager is not None:
authkey = manager._authkey
if authkey is None:
authkey = process.current_process().authkey
ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed)
proxy = ProxyType(token, serializer, manager=manager, authkey=authkey,
incref=incref)
proxy._isauto = True
return proxy
#
# Types/callables which we will register with SyncManager
#
class Namespace(object):
def __init__(self, /, **kwds):
self.__dict__.update(kwds)
def __repr__(self):
items = list(self.__dict__.items())
temp = []
for name, value in items:
if not name.startswith('_'):
temp.append('%s=%r' % (name, value))
temp.sort()
return '%s(%s)' % (self.__class__.__name__, ', '.join(temp))
class Value(object):
def __init__(self, typecode, value, lock=True):
self._typecode = typecode
self._value = value
def get(self):
return self._value
def set(self, value):
self._value = value
def __repr__(self):
return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value)
value = property(get, set)
def Array(typecode, sequence, lock=True):
return array.array(typecode, sequence)
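# Note: the `lock` argument of Value/Array above is never read -- these are
# plain holder objects that live in the manager's server process and are
# reached through the ValueProxy/ArrayProxy types registered with
# SyncManager below, not through shared memory.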
#
# Proxy types used by SyncManager
#
class IteratorProxy(BaseProxy):
_exposed_ = ('__next__', 'send', 'throw', 'close')
def __iter__(self):
return self
def __next__(self, *args):
return self._callmethod('__next__', args)
def send(self, *args):
return self._callmethod('send', args)
def throw(self, *args):
return self._callmethod('throw', args)
def close(self, *args):
return self._callmethod('close', args)
class AcquirerProxy(BaseProxy):
_exposed_ = ('acquire', 'release')
def acquire(self, blocking=True, timeout=None):
args = (blocking,) if timeout is None else (blocking, timeout)
return self._callmethod('acquire', args)
def release(self):
return self._callmethod('release')
def __enter__(self):
return self._callmethod('acquire')
def __exit__(self, exc_type, exc_val, exc_tb):
return self._callmethod('release')
class ConditionProxy(AcquirerProxy):
_exposed_ = ('acquire', 'release', 'wait', 'notify', 'notify_all')
def wait(self, timeout=None):
return self._callmethod('wait', (timeout,))
def notify(self, n=1):
return self._callmethod('notify', (n,))
def notify_all(self):
return self._callmethod('notify_all')
def wait_for(self, predicate, timeout=None):
result = predicate()
if result:
return result
if timeout is not None:
endtime = time.monotonic() + timeout
else:
endtime = None
waittime = None
while not result:
if endtime is not None:
waittime = endtime - time.monotonic()
if waittime <= 0:
break
self.wait(waittime)
result = predicate()
return result
class EventProxy(BaseProxy):
_exposed_ = ('is_set', 'set', 'clear', 'wait')
def is_set(self):
return self._callmethod('is_set')
def set(self):
return self._callmethod('set')
def clear(self):
return self._callmethod('clear')
def wait(self, timeout=None):
return self._callmethod('wait', (timeout,))
class BarrierProxy(BaseProxy):
_exposed_ = ('__getattribute__', 'wait', 'abort', 'reset')
def wait(self, timeout=None):
return self._callmethod('wait', (timeout,))
def abort(self):
return self._callmethod('abort')
def reset(self):
return self._callmethod('reset')
@property
def parties(self):
return self._callmethod('__getattribute__', ('parties',))
@property
def n_waiting(self):
return self._callmethod('__getattribute__', ('n_waiting',))
@property
def broken(self):
return self._callmethod('__getattribute__', ('broken',))
class NamespaceProxy(BaseProxy):
_exposed_ = ('__getattribute__', '__setattr__', '__delattr__')
def __getattr__(self, key):
if key[0] == '_':
return object.__getattribute__(self, key)
callmethod = object.__getattribute__(self, '_callmethod')
return callmethod('__getattribute__', (key,))
def __setattr__(self, key, value):
if key[0] == '_':
return object.__setattr__(self, key, value)
callmethod = object.__getattribute__(self, '_callmethod')
return callmethod('__setattr__', (key, value))
def __delattr__(self, key):
if key[0] == '_':
return object.__delattr__(self, key)
callmethod = object.__getattribute__(self, '_callmethod')
return callmethod('__delattr__', (key,))
class ValueProxy(BaseProxy):
_exposed_ = ('get', 'set')
def get(self):
return self._callmethod('get')
def set(self, value):
return self._callmethod('set', (value,))
value = property(get, set)
BaseListProxy = MakeProxyType('BaseListProxy', (
'__add__', '__contains__', '__delitem__', '__getitem__', '__len__',
'__mul__', '__reversed__', '__rmul__', '__setitem__',
'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove',
'reverse', 'sort', '__imul__'
))
class ListProxy(BaseListProxy):
def __iadd__(self, value):
self._callmethod('extend', (value,))
return self
def __imul__(self, value):
self._callmethod('__imul__', (value,))
return self
DictProxy = MakeProxyType('DictProxy', (
'__contains__', '__delitem__', '__getitem__', '__iter__', '__len__',
'__setitem__', 'clear', 'copy', 'get', 'items',
'keys', 'pop', 'popitem', 'setdefault', 'update', 'values'
))
DictProxy._method_to_typeid_ = {
'__iter__': 'Iterator',
}
ArrayProxy = MakeProxyType('ArrayProxy', (
'__len__', '__getitem__', '__setitem__'
))
BasePoolProxy = MakeProxyType('PoolProxy', (
'apply', 'apply_async', 'close', 'imap', 'imap_unordered', 'join',
'map', 'map_async', 'starmap', 'starmap_async', 'terminate',
))
BasePoolProxy._method_to_typeid_ = {
'apply_async': 'AsyncResult',
'map_async': 'AsyncResult',
'starmap_async': 'AsyncResult',
'imap': 'Iterator',
'imap_unordered': 'Iterator'
}
class PoolProxy(BasePoolProxy):
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.terminate()
#
# Definition of SyncManager
#
class SyncManager(BaseManager):
'''
Subclass of `BaseManager` which supports a number of shared object types.
The types registered are those intended for the synchronization
of threads, plus `dict`, `list` and `Namespace`.
The `multiprocessing.Manager()` function creates started instances of
this class.
'''
SyncManager.register('Queue', queue.Queue)
SyncManager.register('JoinableQueue', queue.Queue)
SyncManager.register('Event', threading.Event, EventProxy)
SyncManager.register('Lock', threading.Lock, AcquirerProxy)
SyncManager.register('RLock', threading.RLock, AcquirerProxy)
SyncManager.register('Semaphore', threading.Semaphore, AcquirerProxy)
SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore,
AcquirerProxy)
SyncManager.register('Condition', threading.Condition, ConditionProxy)
SyncManager.register('Barrier', threading.Barrier, BarrierProxy)
SyncManager.register('Pool', pool.Pool, PoolProxy)
SyncManager.register('list', list, ListProxy)
SyncManager.register('dict', dict, DictProxy)
SyncManager.register('Value', Value, ValueProxy)
SyncManager.register('Array', Array, ArrayProxy)
SyncManager.register('Namespace', Namespace, NamespaceProxy)
# types returned by methods of PoolProxy
SyncManager.register('Iterator', proxytype=IteratorProxy, create_method=False)
SyncManager.register('AsyncResult', create_method=False)
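# Hypothetical usage sketch for the registrations above (values are
# arbitrary examples):
#
#     if __name__ == '__main__':
#         manager = SyncManager()
#         manager.start()
#         d = manager.dict()             # DictProxy
#         l = manager.list(range(5))     # ListProxy
#         ev = manager.Event()           # EventProxy
#         manager.shutdown()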
#
# Definition of SharedMemoryManager and SharedMemoryServer
#
if HAS_SHMEM:
class _SharedMemoryTracker:
"Manages one or more shared memory segments."
def __init__(self, name, segment_names=[]):
self.shared_memory_context_name = name
self.segment_names = segment_names
def register_segment(self, segment_name):
"Adds the supplied shared memory block name to tracker."
util.debug(f"Register segment {segment_name!r} in pid {getpid()}")
self.segment_names.append(segment_name)
def destroy_segment(self, segment_name):
"""Calls unlink() on the shared memory block with the supplied name
and removes it from the list of blocks being tracked."""
util.debug(f"Destroy segment {segment_name!r} in pid {getpid()}")
self.segment_names.remove(segment_name)
segment = shared_memory.SharedMemory(segment_name)
segment.close()
segment.unlink()
def unlink(self):
"Calls destroy_segment() on all tracked shared memory blocks."
for segment_name in self.segment_names[:]:
self.destroy_segment(segment_name)
def __del__(self):
util.debug(f"Call {self.__class__.__name__}.__del__ in {getpid()}")
self.unlink()
def __getstate__(self):
return (self.shared_memory_context_name, self.segment_names)
def __setstate__(self, state):
self.__init__(*state)
class SharedMemoryServer(Server):
public = Server.public + \
['track_segment', 'release_segment', 'list_segments']
def __init__(self, *args, **kwargs):
Server.__init__(self, *args, **kwargs)
self.shared_memory_context = \
_SharedMemoryTracker(f"shmm_{self.address}_{getpid()}")
util.debug(f"SharedMemoryServer started by pid {getpid()}")
def create(*args, **kwargs):
"""Create a new distributed-shared object (not backed by a shared
memory block) and return its id to be used in a Proxy Object."""
# Unless set up as a shared proxy, don't make shared_memory_context
# a standard part of kwargs. This makes things easier for supplying
# simple functions.
            if len(args) >= 3:
                # normal path: bound-method call, args == (self, c, typeid, ...)
                self, typeid = args[0], args[2]
            elif not args:
                raise TypeError("descriptor 'create' of 'SharedMemoryServer' "
                                "object needs an argument")
            elif 'typeid' in kwargs:
                self = args[0]
                typeid = kwargs['typeid']
            else:
                raise TypeError('create expected at least 2 positional '
                                'arguments, got %d' % (len(args)-1))
if hasattr(self.registry[typeid][-1], "_shared_memory_proxy"):
kwargs['shared_memory_context'] = self.shared_memory_context
return Server.create(*args, **kwargs)
create.__text_signature__ = '($self, c, typeid, /, *args, **kwargs)'
def shutdown(self, c):
"Call unlink() on all tracked shared memory, terminate the Server."
self.shared_memory_context.unlink()
return Server.shutdown(self, c)
def track_segment(self, c, segment_name):
"Adds the supplied shared memory block name to Server's tracker."
self.shared_memory_context.register_segment(segment_name)
def release_segment(self, c, segment_name):
"""Calls unlink() on the shared memory block with the supplied name
and removes it from the tracker instance inside the Server."""
self.shared_memory_context.destroy_segment(segment_name)
def list_segments(self, c):
"""Returns a list of names of shared memory blocks that the Server
is currently tracking."""
return self.shared_memory_context.segment_names
class SharedMemoryManager(BaseManager):
"""Like SyncManager but uses SharedMemoryServer instead of Server.
It provides methods for creating and returning SharedMemory instances
and for creating a list-like object (ShareableList) backed by shared
memory. It also provides methods that create and return Proxy Objects
that support synchronization across processes (i.e. multi-process-safe
locks and semaphores).
"""
_Server = SharedMemoryServer
def __init__(self, *args, **kwargs):
if os.name == "posix":
# bpo-36867: Ensure the resource_tracker is running before
# launching the manager process, so that concurrent
# shared_memory manipulation both in the manager and in the
# current process does not create two resource_tracker
# processes.
from . import resource_tracker
resource_tracker.ensure_running()
BaseManager.__init__(self, *args, **kwargs)
util.debug(f"{self.__class__.__name__} created by pid {getpid()}")
def __del__(self):
util.debug(f"{self.__class__.__name__}.__del__ by pid {getpid()}")
pass
def get_server(self):
'Better than monkeypatching for now; merge into Server ultimately'
if self._state.value != State.INITIAL:
if self._state.value == State.STARTED:
raise ProcessError("Already started SharedMemoryServer")
elif self._state.value == State.SHUTDOWN:
raise ProcessError("SharedMemoryManager has shut down")
else:
raise ProcessError(
"Unknown state {!r}".format(self._state.value))
return self._Server(self._registry, self._address,
self._authkey, self._serializer)
def SharedMemory(self, size):
"""Returns a new SharedMemory instance with the specified size in
bytes, to be tracked by the manager."""
with self._Client(self._address, authkey=self._authkey) as conn:
sms = shared_memory.SharedMemory(None, create=True, size=size)
try:
dispatch(conn, None, 'track_segment', (sms.name,))
except BaseException as e:
sms.unlink()
raise e
return sms
def ShareableList(self, sequence):
"""Returns a new ShareableList instance populated with the values
from the input sequence, to be tracked by the manager."""
with self._Client(self._address, authkey=self._authkey) as conn:
sl = shared_memory.ShareableList(sequence)
try:
dispatch(conn, None, 'track_segment', (sl.shm.name,))
except BaseException as e:
sl.shm.unlink()
raise e
return sl
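# Hypothetical usage sketch for SharedMemoryManager (sizes/values are
# arbitrary examples):
#
#     if __name__ == '__main__':
#         with SharedMemoryManager() as smm:
#             shm = smm.SharedMemory(size=128)
#             sl = smm.ShareableList([1, 2, 3])
#         # shutdown() makes the server unlink() every tracked segment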
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2013 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
from __future__ import with_statement
from datetime import datetime, timedelta
from StringIO import StringIO
import tempfile
import shutil
import unittest
from trac import core
from trac.attachment import Attachment
from trac.core import TracError, implements
from trac.resource import ResourceNotFound
from trac.ticket.model import (
Ticket, Component, Milestone, Priority, Type, Version
)
from trac.ticket.api import (
IMilestoneChangeListener, ITicketChangeListener, TicketSystem
)
from trac.test import EnvironmentStub
from trac.util.datefmt import from_utimestamp, to_utimestamp, utc
class TestTicketChangeListener(core.Component):
implements(ITicketChangeListener)
def ticket_created(self, ticket):
self.action = 'created'
self.ticket = ticket
self.resource = ticket.resource
def ticket_changed(self, ticket, comment, author, old_values):
self.action = 'changed'
self.ticket = ticket
self.comment = comment
self.author = author
self.old_values = old_values
def ticket_deleted(self, ticket):
self.action = 'deleted'
self.ticket = ticket
class TicketTestCase(unittest.TestCase):
def setUp(self):
self.env = EnvironmentStub(default_data=True)
self.env.config.set('ticket-custom', 'foo', 'text')
self.env.config.set('ticket-custom', 'cbon', 'checkbox')
self.env.config.set('ticket-custom', 'cboff', 'checkbox')
def tearDown(self):
self.env.reset_db()
def _insert_ticket(self, summary, **kw):
"""Helper for inserting a ticket into the database"""
ticket = Ticket(self.env)
for k, v in kw.items():
ticket[k] = v
return ticket.insert()
def _create_a_ticket(self):
# 1. Creating ticket
ticket = Ticket(self.env)
ticket['reporter'] = 'santa'
ticket['summary'] = 'Foo'
ticket['foo'] = 'This is a custom field'
return ticket
def test_invalid_ticket_id(self):
self.assertEqual(Ticket.id_is_valid(-1), False)
self.assertEqual(Ticket.id_is_valid(0), False)
self.assertEqual(Ticket.id_is_valid(1), True)
self.assertEqual(Ticket.id_is_valid(1L << 31), True)
self.assertEqual(Ticket.id_is_valid(1L << 32), False)
self.assertRaises(ResourceNotFound, Ticket, self.env, -1)
self.assertRaises(ResourceNotFound, Ticket, self.env, 1L << 32)
def test_create_ticket_1(self):
ticket = self._create_a_ticket()
self.assertEqual('santa', ticket['reporter'])
self.assertEqual('Foo', ticket['summary'])
self.assertEqual('This is a custom field', ticket['foo'])
ticket.insert()
def test_create_ticket_2(self):
ticket = self._create_a_ticket()
ticket.insert()
self.assertEqual(1, ticket.id)
# Retrieving ticket
ticket2 = Ticket(self.env, 1)
self.assertEqual(1, ticket2.id)
self.assertEqual('santa', ticket2['reporter'])
self.assertEqual('Foo', ticket2['summary'])
self.assertEqual('This is a custom field', ticket2['foo'])
def _modify_a_ticket(self):
ticket2 = self._create_a_ticket()
ticket2.insert()
ticket2['summary'] = 'Bar'
ticket2['foo'] = 'New value'
ticket2.save_changes('santa', 'this is my comment')
return ticket2
def test_create_ticket_3(self):
self._modify_a_ticket()
# Retrieving ticket
ticket3 = Ticket(self.env, 1)
self.assertEqual(1, ticket3.id)
self.assertEqual(ticket3['reporter'], 'santa')
self.assertEqual(ticket3['summary'], 'Bar')
self.assertEqual(ticket3['foo'], 'New value')
def test_create_ticket_4(self):
ticket3 = self._modify_a_ticket()
# Testing get_changelog()
log = ticket3.get_changelog()
self.assertEqual(len(log), 3)
ok_vals = ['foo', 'summary', 'comment']
self.failUnless(log[0][2] in ok_vals)
self.failUnless(log[1][2] in ok_vals)
self.failUnless(log[2][2] in ok_vals)
def test_create_ticket_5(self):
ticket3 = self._modify_a_ticket()
# Testing delete()
ticket3.delete()
log = ticket3.get_changelog()
self.assertEqual(len(log), 0)
self.assertRaises(TracError, Ticket, self.env, 1)
def test_ticket_id_is_always_int(self):
ticket_id = self._insert_ticket('Foo')
self.assertEqual(ticket_id, int(ticket_id))
ticket = Ticket(self.env, str(ticket_id))
self.assertEqual(ticket_id, ticket.id)
self.assertEqual(ticket.resource.id, ticket_id)
def test_can_save_ticket_without_explicit_comment(self):
ticket = Ticket(self.env)
ticket.insert()
ticket['summary'] = 'another summary'
ticket.save_changes('foo')
changes = ticket.get_changelog()
comment_change = [c for c in changes if c[2] == 'comment'][0]
self.assertEqual('1', comment_change[3])
self.assertEqual('', comment_change[4])
def test_can_save_ticket_without_explicit_username(self):
ticket = Ticket(self.env)
ticket.insert()
ticket['summary'] = 'another summary'
ticket.save_changes()
for change in ticket.get_changelog():
self.assertEqual(None, change[1])
def test_comment_with_whitespace_only_is_not_saved(self):
ticket = Ticket(self.env)
ticket.insert()
ticket.save_changes(comment='\n \n ')
self.assertEqual(0, len(ticket.get_changelog()))
def test_prop_whitespace_change_is_not_saved(self):
ticket = Ticket(self.env)
ticket.populate({'summary': 'ticket summary'})
ticket.insert()
ticket['summary'] = ' ticket summary '
ticket.save_changes()
self.assertEqual(0, len(ticket.get_changelog()))
def test_ticket_default_values(self):
"""
Verify that a ticket uses default values specified in the configuration
when created.
"""
# Set defaults for some standard fields
self.env.config.set('ticket', 'default_type', 'defect')
self.env.config.set('ticket', 'default_component', 'component1')
# Add a custom field of type 'text' with a default value
self.env.config.set('ticket-custom', 'foo', 'text')
self.env.config.set('ticket-custom', 'foo.value', 'Something')
# Add a custom field of type 'select' with a default value specified as
# the value itself
self.env.config.set('ticket-custom', 'bar', 'select')
self.env.config.set('ticket-custom', 'bar.options', 'one|two|three')
self.env.config.set('ticket-custom', 'bar.value', 'two')
# Add a custom field of type 'select' with a default value specified as
# index into the options list
self.env.config.set('ticket-custom', 'baz', 'select')
self.env.config.set('ticket-custom', 'baz.options', 'one|two|three')
self.env.config.set('ticket-custom', 'baz.value', '2')
ticket = Ticket(self.env)
self.assertEqual('defect', ticket['type'])
self.assertEqual('component1', ticket['component'])
self.assertEqual('Something', ticket['foo'])
self.assertEqual('two', ticket['bar'])
self.assertEqual('three', ticket['baz'])
def test_set_field_stripped(self):
"""
Verify that whitespace around ticket fields is stripped, except for
textarea fields.
"""
ticket = Ticket(self.env)
ticket['component'] = ' foo '
ticket['description'] = ' bar '
self.assertEqual('foo', ticket['component'])
self.assertEqual(' bar ', ticket['description'])
def test_set_field_multi(self):
"""
Ticket fields can't yet be multi-valued
"""
ticket = Ticket(self.env)
def set_multi_valued():
ticket['component'] = [' foo ', ' bar ']
self.assertRaises(TracError, set_multi_valued)
def test_owner_from_component(self):
"""
Verify that the owner of a new ticket is set to the owner of the
component.
"""
component = Component(self.env)
component.name = 'test'
component.owner = 'joe'
component.insert()
ticket = Ticket(self.env)
ticket['reporter'] = 'santa'
ticket['summary'] = 'Foo'
ticket['component'] = 'test'
ticket.insert()
self.assertEqual('joe', ticket['owner'])
def test_owner_from_changed_component(self):
"""
Verify that the owner of a new ticket is updated when the component is
changed.
"""
component1 = Component(self.env)
component1.name = 'test1'
component1.owner = 'joe'
component1.insert()
component2 = Component(self.env)
component2.name = 'test2'
component2.owner = 'kate'
component2.insert()
ticket = Ticket(self.env)
ticket['reporter'] = 'santa'
ticket['summary'] = 'Foo'
ticket['component'] = 'test1'
ticket['status'] = 'new'
tktid = ticket.insert()
ticket = Ticket(self.env, tktid)
ticket['component'] = 'test2'
ticket.save_changes('jane', 'Testing')
self.assertEqual('kate', ticket['owner'])
def test_no_disown_from_changed_component(self):
"""
Verify that a ticket is not disowned when the component is changed to
a non-assigned component.
"""
component1 = Component(self.env)
component1.name = 'test1'
component1.owner = 'joe'
component1.insert()
component2 = Component(self.env)
component2.name = 'test2'
component2.owner = ''
component2.insert()
ticket = Ticket(self.env)
ticket['reporter'] = 'santa'
ticket['summary'] = 'Foo'
ticket['component'] = 'test1'
ticket['status'] = 'new'
tktid = ticket.insert()
ticket = Ticket(self.env, tktid)
ticket['component'] = 'test2'
ticket.save_changes('jane', 'Testing')
self.assertEqual('joe', ticket['owner'])
def test_populate_ticket(self):
data = {'summary': 'Hello world', 'reporter': 'john',
'foo': 'bar', 'checkbox_cbon': '', 'cbon': 'on',
'checkbox_cboff': ''}
ticket = Ticket(self.env)
ticket.populate(data)
# Standard fields
self.assertEqual('Hello world', ticket['summary'])
self.assertEqual('john', ticket['reporter'])
# An unknown field
assert ticket['bar'] is None
# Custom field
self.assertEqual('bar', ticket['foo'])
# Custom field of type 'checkbox'
self.assertEqual('on', ticket['cbon'])
self.assertEqual('0', ticket['cboff'])
def test_custom_time(self):
# Add a custom field of type 'time'
self.env.config.set('ticket-custom', 'due', 'time')
ticket = Ticket(self.env)
self.assertFalse('due' in ticket.std_fields)
self.assertTrue('due' in ticket.time_fields)
ticket['reporter'] = 'john'
ticket['summary'] = 'Task1'
tktid = ticket.insert()
ticket = Ticket(self.env, tktid)
# Empty string is default value, but not a time stamp
self.assertEqual(None, ticket['due'])
ts = datetime(2011, 11, 11, 0, 0, 0, 0, utc)
ticket['due'] = ts
t1 = datetime(2001, 1, 1, 1, 1, 1, 0, utc)
ticket.save_changes('joe', when=t1)
self.assertEqual(ts, ticket['due'])
ticket['due'] = ''
t2 = datetime(2001, 1, 1, 1, 1, 2, 0, utc)
ticket.save_changes('joe', when=t2)
self.assertEqual('', ticket['due'])
def test_changelog(self):
tkt_id = self._insert_ticket('Test', reporter='joe', component='foo',
milestone='bar')
ticket = Ticket(self.env, tkt_id)
ticket['component'] = 'bar'
ticket['milestone'] = 'foo'
now = datetime(2001, 1, 1, 1, 1, 1, 0, utc)
ticket.save_changes('jane', 'Testing', now)
changelog = sorted(ticket.get_changelog())
self.assertEqual([(now, 'jane', 'comment', '1', 'Testing', True),
(now, 'jane', 'component', 'foo', 'bar', True),
(now, 'jane', 'milestone', 'bar', 'foo', True)],
changelog)
def test_changelog_with_attachment(self):
"""Verify ordering of attachments and comments in the changelog."""
tkt_id = self._insert_ticket('Test', reporter='joe', component='foo')
ticket = Ticket(self.env, tkt_id)
t1 = datetime(2001, 1, 1, 1, 1, 1, 0, utc)
ticket.save_changes('jane', 'Testing', t1)
t2 = datetime(2001, 1, 1, 1, 1, 2, 0, utc)
self.env.db_transaction("""
INSERT INTO attachment (type, id, filename, size, time,
description, author, ipnr)
VALUES ('ticket',%s,'file.txt',1234,%s, 'My file','mark','')
""", (str(tkt_id), to_utimestamp(t2)))
t3 = datetime(2001, 1, 1, 1, 1, 3, 0, utc)
ticket.save_changes('jim', 'Other', t3)
log = ticket.get_changelog()
self.assertEqual(4, len(log))
self.assertEqual((t1, 'jane', 'comment', '1', 'Testing', True), log[0])
self.assertEqual([(t2, 'mark', 'attachment', '', 'file.txt', False),
(t2, 'mark', 'comment', '', 'My file', False)],
sorted(log[1:3]))
self.assertEqual((t3, 'jim', 'comment', '2', 'Other', True), log[3])
def test_subsecond_change(self):
"""Perform two ticket changes within a second."""
tkt_id = self._insert_ticket('Test', reporter='joe', component='foo')
ticket = Ticket(self.env, tkt_id)
t1 = datetime(2001, 1, 1, 1, 1, 1, 123456, utc)
ticket.save_changes('jane', 'Testing', t1)
t2 = datetime(2001, 1, 1, 1, 1, 1, 123789, utc)
ticket.save_changes('jim', 'Other', t2)
log = ticket.get_changelog()
self.assertEqual(2, len(log))
self.assertEqual((t1, 'jane', 'comment', '1', 'Testing', True), log[0])
self.assertEqual((t2, 'jim', 'comment', '2', 'Other', True), log[1])
def test_changelog_with_reverted_change(self):
tkt_id = self._insert_ticket('Test', reporter='joe', component='foo')
ticket = Ticket(self.env, tkt_id)
ticket['component'] = 'bar'
ticket['component'] = 'foo'
now = datetime(2001, 1, 1, 1, 1, 1, 0, utc)
ticket.save_changes('jane', 'Testing', now)
self.assertEqual([(now, 'jane', 'comment', '1', 'Testing', True)],
list(ticket.get_changelog()))
def test_change_listener_created(self):
listener = TestTicketChangeListener(self.env)
ticket = self._create_a_ticket()
ticket.insert()
self.assertEqual('created', listener.action)
self.assertEqual(ticket, listener.ticket)
self.assertEqual(ticket.id, ticket.resource.id)
def test_change_listener_changed(self):
listener = TestTicketChangeListener(self.env)
data = {'component': 'foo', 'milestone': 'bar'}
tkt_id = self._insert_ticket('Hello World', reporter='john', **data)
ticket = Ticket(self.env, tkt_id)
ticket['component'] = 'new component'
ticket['milestone'] = 'new milestone'
comment = 'changing ticket'
ticket.save_changes('author', comment)
self.assertEqual('changed', listener.action)
self.assertEqual(comment, listener.comment)
self.assertEqual('author', listener.author)
for key, value in data.iteritems():
self.assertEqual(value, listener.old_values[key])
def test_change_listener_deleted(self):
listener = TestTicketChangeListener(self.env)
ticket = self._create_a_ticket()
ticket.insert()
ticket.delete()
self.assertEqual('deleted', listener.action)
self.assertEqual(ticket, listener.ticket)
class TicketCommentTestCase(unittest.TestCase):
def _insert_ticket(self, summary, when, **kwargs):
ticket = Ticket(self.env)
for k, v in kwargs.iteritems():
ticket[k] = v
self.id = ticket.insert(when)
def _modify_ticket(self, author, comment, when, cnum, **kwargs):
ticket = Ticket(self.env, self.id)
for k, v in kwargs.iteritems():
ticket[k] = v
ticket.save_changes(author, comment, when, cnum=cnum)
def _find_change(self, ticket, cnum):
(ts, author, comment) = ticket._find_change(cnum)
return from_utimestamp(ts)
def assertChange(self, ticket, cnum, date, author, **fields):
change = ticket.get_change(cnum=cnum)
self.assertEqual(dict(date=date, author=author, fields=fields), change)
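# Note on the comment-editing tests below: Trac records each edit of comment
# N as an extra ticket_change row named `_comment0`, `_comment1`, ... whose
# old value is the previous comment text and whose new value is the edit's
# timestamp -- this is what the `_comment0=dict(...)` assertions check.  The
# helpers above are used throughout, e.g. (arbitrary example values):
#
#     self._insert_ticket('Test ticket', created, owner='john')
#     self._modify_ticket('jack', 'Comment 1', t1, '1')          # cnum '1'
#     ticket.modify_comment(self._find_change(ticket, 1), 'joe',
#                           'New comment 1', t)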
class TicketCommentEditTestCase(TicketCommentTestCase):
def setUp(self):
self.env = EnvironmentStub(default_data=True)
self.created = datetime(2001, 1, 1, 1, 0, 0, 0, utc)
self._insert_ticket('Test ticket', self.created,
owner='john', keywords='a, b, c')
self.t1 = self.created + timedelta(seconds=1)
self._modify_ticket('jack', 'Comment 1', self.t1, '1')
self.t2 = self.created + timedelta(seconds=2)
self._modify_ticket('john', 'Comment 2', self.t2, '1.2',
owner='jack')
self.t3 = self.created + timedelta(seconds=3)
self._modify_ticket('jim', 'Comment 3', self.t3, '3',
keywords='a, b')
def tearDown(self):
self.env.reset_db()
def test_modify_comment(self):
"""Check modification of a "standalone" comment"""
ticket = Ticket(self.env, self.id)
self.assertChange(ticket, 1, self.t1, 'jack',
comment=dict(author='jack', old='1', new='Comment 1'))
self.assertChange(ticket, 2, self.t2, 'john',
owner=dict(author='john', old='john', new='jack'),
comment=dict(author='john', old='1.2', new='Comment 2'))
self.assertChange(ticket, 3, self.t3, 'jim',
keywords=dict(author='jim', old='a, b, c', new='a, b'),
comment=dict(author='jim', old='3', new='Comment 3'))
t = self.created + timedelta(seconds=10)
ticket.modify_comment(self._find_change(ticket, 1),
'joe', 'New comment 1', t)
self.assertChange(ticket, 1, self.t1, 'jack',
comment=dict(author='jack', old='1', new='New comment 1'),
_comment0=dict(author='joe', old='Comment 1',
new=str(to_utimestamp(t))))
self.assertEqual(t, Ticket(self.env, self.id)['changetime'])
def test_threading(self):
"""Check modification of a "threaded" comment"""
ticket = Ticket(self.env, self.id)
t = self.created + timedelta(seconds=20)
ticket.modify_comment(self._find_change(ticket, 2),
'joe', 'New comment 2', t)
self.assertChange(ticket, 2, self.t2, 'john',
owner=dict(author='john', old='john', new='jack'),
comment=dict(author='john', old='1.2', new='New comment 2'),
_comment0=dict(author='joe', old='Comment 2',
new=str(to_utimestamp(t))))
def test_modify_missing_cnum(self):
"""Editing a comment with no cnum in oldvalue"""
self.env.db_transaction(
"UPDATE ticket_change SET oldvalue='' WHERE oldvalue='3'")
ticket = Ticket(self.env, self.id)
t = self.created + timedelta(seconds=30)
ticket.modify_comment(self._find_change(ticket, 3),
'joe', 'New comment 3', t)
self.assertChange(ticket, 3, self.t3, 'jim',
keywords=dict(author='jim', old='a, b, c', new='a, b'),
comment=dict(author='jim', old='', new='New comment 3'),
_comment0=dict(author='joe', old='Comment 3',
new=str(to_utimestamp(t))))
def test_modify_missing_comment(self):
"""Editing a comment where the comment field is missing"""
self.env.db_transaction("""
DELETE FROM ticket_change WHERE field='comment' AND oldvalue='1.2'
""")
ticket = Ticket(self.env, self.id)
t = self.created + timedelta(seconds=40)
ticket.modify_comment(self._find_change(ticket, 2),
'joe', 'New comment 2', t)
self.assertChange(ticket, 2, self.t2, 'john',
owner=dict(author='john', old='john', new='jack'),
comment=dict(author='john', old='', new='New comment 2'),
_comment0=dict(author='joe', old='',
new=str(to_utimestamp(t))))
def test_modify_missing_cnums_and_comment(self):
"""Editing a comment when all cnums are missing and one comment
field is missing
"""
with self.env.db_transaction as db:
db("UPDATE ticket_change SET oldvalue='' WHERE oldvalue='1'")
db("""DELETE FROM ticket_change
WHERE field='comment' AND oldvalue='1.2'""")
db("UPDATE ticket_change SET oldvalue='' WHERE oldvalue='3'")
# Modify after missing comment
ticket = Ticket(self.env, self.id)
t = self.created + timedelta(seconds=50)
ticket.modify_comment(self._find_change(ticket, 3),
'joe', 'New comment 3', t)
self.assertChange(ticket, 3, self.t3, 'jim',
keywords=dict(author='jim', old='a, b, c', new='a, b'),
comment=dict(author='jim', old='', new='New comment 3'),
_comment0=dict(author='joe', old='Comment 3',
new=str(to_utimestamp(t))))
# Modify missing comment
t = self.created + timedelta(seconds=60)
ticket.modify_comment(self._find_change(ticket, 2),
'joe', 'New comment 2', t)
self.assertChange(ticket, 2, self.t2, 'john',
owner=dict(author='john', old='john', new='jack'),
comment=dict(author='john', old='', new='New comment 2'),
_comment0=dict(author='joe', old='',
new=str(to_utimestamp(t))))
def test_missing_comment_edit(self):
"""Modify a comment where one edit is missing"""
ticket = Ticket(self.env, self.id)
t1 = self.created + timedelta(seconds=70)
ticket.modify_comment(self._find_change(ticket, 1),
'joe', 'New comment 1', t1)
t2 = self.created + timedelta(seconds=80)
ticket.modify_comment(self._find_change(ticket, 1),
'joe', 'Other comment 1', t2)
self.assertChange(ticket, 1, self.t1, 'jack',
comment=dict(author='jack', old='1', new='Other comment 1'),
_comment0=dict(author='joe', old='Comment 1',
new=str(to_utimestamp(t1))),
_comment1=dict(author='joe', old='New comment 1',
new=str(to_utimestamp(t2))))
self.env.db_transaction(
"DELETE FROM ticket_change WHERE field='_comment0'")
t3 = self.created + timedelta(seconds=90)
ticket.modify_comment(self._find_change(ticket, 1),
'joe', 'Newest comment 1', t3)
self.assertChange(ticket, 1, self.t1, 'jack',
comment=dict(author='jack', old='1', new='Newest comment 1'),
_comment1=dict(author='joe', old='New comment 1',
new=str(to_utimestamp(t2))),
_comment2=dict(author='joe', old='Other comment 1',
new=str(to_utimestamp(t3))))
def test_comment_history(self):
"""Check the generation of the comment history"""
ticket = Ticket(self.env, self.id)
t = [self.t1]
for i in range(1, 32):
t.append(self.created + timedelta(minutes=i))
ticket.modify_comment(self._find_change(ticket, 1),
'joe (%d)' % i,
'Comment 1 (%d)' % i, t[-1])
history = ticket.get_comment_history(cnum=1)
self.assertEqual((0, t[0], 'jack', 'Comment 1'), history[0])
for i in range(1, len(history)):
self.assertEqual((i, t[i], 'joe (%d)' % i,
'Comment 1 (%d)' % i), history[i])
history = ticket.get_comment_history(cdate=self.t1)
self.assertEqual((0, t[0], 'jack', 'Comment 1'), history[0])
for i in range(1, len(history)):
self.assertEqual((i, t[i], 'joe (%d)' % i,
'Comment 1 (%d)' % i), history[i])
class TicketCommentDeleteTestCase(TicketCommentTestCase):
def setUp(self):
self.env = EnvironmentStub(default_data=True)
self.env.config.set('ticket-custom', 'foo', 'text')
self.created = datetime(2001, 1, 1, 1, 0, 0, 0, utc)
self._insert_ticket('Test ticket', self.created,
owner='john', keywords='a, b, c', foo='initial')
self.t1 = self.created + timedelta(seconds=1)
self._modify_ticket('jack', 'Comment 1', self.t1, '1',
foo='change 1')
self.t2 = self.created + timedelta(seconds=2)
self._modify_ticket('john', 'Comment 2', self.t2, '1.2',
owner='jack', foo='change2')
self.t3 = self.created + timedelta(seconds=3)
self._modify_ticket('jim', 'Comment 3', self.t3, '3',
keywords='a, b', foo='change3')
self.t4 = self.created + timedelta(seconds=4)
self._modify_ticket('joe', 'Comment 4', self.t4, '4',
keywords='a', foo='change4')
def tearDown(self):
self.env.reset_db()
def test_delete_last_comment(self):
ticket = Ticket(self.env, self.id)
self.assertEqual('a', ticket['keywords'])
self.assertEqual('change4', ticket['foo'])
t = datetime.now(utc)
ticket.delete_change(cnum=4, when=t)
self.assertEqual('a, b', ticket['keywords'])
self.assertEqual('change3', ticket['foo'])
self.assertEqual(None, ticket.get_change(cnum=4))
self.assertNotEqual(None, ticket.get_change(cnum=3))
self.assertEqual(t, ticket.time_changed)
def test_delete_last_comment_when_custom_field_gone(self):
"""Regression test for http://trac.edgewall.org/ticket/10858"""
ticket = Ticket(self.env, self.id)
self.assertEqual('a', ticket['keywords'])
self.assertEqual('change4', ticket['foo'])
# we simulate the removal of the definition of the 'foo' custom field
self.env.config.remove('ticket-custom', 'foo')
del TicketSystem(self.env).fields
del TicketSystem(self.env).custom_fields
ticket = Ticket(self.env, self.id)
#
t = datetime.now(utc)
ticket.delete_change(cnum=4, when=t)
self.assertEqual('a, b', ticket['keywords'])
# 'foo' is no longer defined for the ticket
self.assertEqual(None, ticket['foo'])
# however, 'foo=change3' is still in the database
self.assertEqual([('change3',)], self.env.db_query("""
SELECT value FROM ticket_custom WHERE ticket=%s AND name='foo'
""", (self.id,)))
self.assertEqual(None, ticket.get_change(cnum=4))
self.assertNotEqual(None, ticket.get_change(cnum=3))
self.assertEqual(t, ticket.time_changed)
def test_delete_last_comment_by_date(self):
ticket = Ticket(self.env, self.id)
self.assertEqual('a', ticket['keywords'])
self.assertEqual('change4', ticket['foo'])
t = datetime.now(utc)
ticket.delete_change(cdate=self.t4, when=t)
self.assertEqual('a, b', ticket['keywords'])
self.assertEqual('change3', ticket['foo'])
self.assertEqual(None, ticket.get_change(cdate=self.t4))
self.assertNotEqual(None, ticket.get_change(cdate=self.t3))
self.assertEqual(t, ticket.time_changed)
def test_delete_mid_comment(self):
ticket = Ticket(self.env, self.id)
self.assertChange(ticket, 4, self.t4, 'joe',
comment=dict(author='joe', old='4', new='Comment 4'),
keywords=dict(author='joe', old='a, b', new='a'),
foo=dict(author='joe', old='change3', new='change4'))
t = datetime.now(utc)
ticket.delete_change(cnum=3, when=t)
self.assertEqual(None, ticket.get_change(cnum=3))
self.assertEqual('a', ticket['keywords'])
self.assertChange(ticket, 4, self.t4, 'joe',
comment=dict(author='joe', old='4', new='Comment 4'),
keywords=dict(author='joe', old='a, b, c', new='a'),
foo=dict(author='joe', old='change2', new='change4'))
self.assertEqual(t, ticket.time_changed)
def test_delete_mid_comment_by_date(self):
ticket = Ticket(self.env, self.id)
self.assertChange(ticket, 4, self.t4, 'joe',
comment=dict(author='joe', old='4', new='Comment 4'),
keywords=dict(author='joe', old='a, b', new='a'),
foo=dict(author='joe', old='change3', new='change4'))
t = datetime.now(utc)
ticket.delete_change(cdate=self.t3, when=t)
self.assertEqual(None, ticket.get_change(cdate=self.t3))
self.assertEqual('a', ticket['keywords'])
self.assertChange(ticket, 4, self.t4, 'joe',
comment=dict(author='joe', old='4', new='Comment 4'),
keywords=dict(author='joe', old='a, b, c', new='a'),
foo=dict(author='joe', old='change2', new='change4'))
self.assertEqual(t, ticket.time_changed)
def test_delete_mid_comment_inconsistent(self):
# Make oldvalue on keywords for change 4 inconsistent. This should
# result in no change in oldvalue when deleting change 3. The
# oldvalue of foo should change normally.
self.env.db_transaction("""
UPDATE ticket_change SET oldvalue='1, 2'
WHERE field='keywords' AND oldvalue='a, b'
""")
ticket = Ticket(self.env, self.id)
self.assertChange(ticket, 4, self.t4, 'joe',
comment=dict(author='joe', old='4', new='Comment 4'),
keywords=dict(author='joe', old='1, 2', new='a'),
foo=dict(author='joe', old='change3', new='change4'))
ticket.delete_change(3)
self.assertEqual(None, ticket.get_change(3))
self.assertEqual('a', ticket['keywords'])
self.assertChange(ticket, 4, self.t4, 'joe',
comment=dict(author='joe', old='4', new='Comment 4'),
keywords=dict(author='joe', old='1, 2', new='a'),
foo=dict(author='joe', old='change2', new='change4'))
def test_delete_all_comments(self):
ticket = Ticket(self.env, self.id)
ticket.delete_change(4)
ticket.delete_change(3)
ticket.delete_change(2)
t = datetime.now(utc)
ticket.delete_change(1, when=t)
self.assertEqual(t, ticket.time_changed)
class EnumTestCase(unittest.TestCase):
def setUp(self):
self.env = EnvironmentStub(default_data=True)
def tearDown(self):
self.env.reset_db()
def test_priority_fetch(self):
prio = Priority(self.env, 'major')
self.assertEqual(prio.name, 'major')
self.assertEqual(prio.value, '3')
def test_priority_insert(self):
prio = Priority(self.env)
prio.name = 'foo'
prio.insert()
self.assertEqual(True, prio.exists)
def test_priority_insert_with_value(self):
prio = Priority(self.env)
prio.name = 'bar'
prio.value = 100
prio.insert()
self.assertEqual(True, prio.exists)
def test_priority_update(self):
prio = Priority(self.env, 'major')
prio.name = 'foo'
prio.update()
Priority(self.env, 'foo')
self.assertRaises(TracError, Priority, self.env, 'major')
def test_priority_delete(self):
prio = Priority(self.env, 'major')
self.assertEqual('3', prio.value)
prio.delete()
self.assertEqual(False, prio.exists)
self.assertRaises(TracError, Priority, self.env, 'major')
prio = Priority(self.env, 'minor')
self.assertEqual('3', prio.value)
def test_ticket_type_update(self):
tkttype = Type(self.env, 'task')
self.assertEqual(tkttype.name, 'task')
self.assertEqual(tkttype.value, '3')
tkttype.name = 'foo'
tkttype.update()
Type(self.env, 'foo')
class TestMilestoneChangeListener(core.Component):
implements(IMilestoneChangeListener)
def milestone_created(self, milestone):
self.action = 'created'
self.milestone = milestone
def milestone_changed(self, milestone, old_values):
self.action = 'changed'
self.milestone = milestone
self.old_values = old_values
def milestone_deleted(self, milestone):
self.action = 'deleted'
self.milestone = milestone
class MilestoneTestCase(unittest.TestCase):
def setUp(self):
self.env = EnvironmentStub(default_data=True)
self.env.path = tempfile.mkdtemp(prefix='trac-tempenv-')
def tearDown(self):
shutil.rmtree(self.env.path)
self.env.reset_db()
def _create_milestone(self, **values):
milestone = Milestone(self.env)
for k, v in values.iteritems():
setattr(milestone, k, v)
return milestone
def test_new_milestone(self):
milestone = Milestone(self.env)
self.assertEqual(False, milestone.exists)
self.assertEqual(None, milestone.name)
self.assertEqual(None, milestone.due)
self.assertEqual(None, milestone.completed)
self.assertEqual('', milestone.description)
def test_new_milestone_empty_name(self):
"""
Verifies that specifying an empty milestone name results in the
milestone being correctly detected as non-existent.
"""
milestone = Milestone(self.env, '')
self.assertEqual(False, milestone.exists)
self.assertEqual(None, milestone.name)
self.assertEqual(None, milestone.due)
self.assertEqual(None, milestone.completed)
self.assertEqual('', milestone.description)
def test_existing_milestone(self):
self.env.db_transaction("INSERT INTO milestone (name) VALUES ('Test')")
milestone = Milestone(self.env, 'Test')
self.assertEqual(True, milestone.exists)
self.assertEqual('Test', milestone.name)
self.assertEqual(None, milestone.due)
self.assertEqual(None, milestone.completed)
self.assertEqual('', milestone.description)
def test_create_and_update_milestone(self):
milestone = Milestone(self.env)
milestone.name = 'Test'
milestone.insert()
self.assertEqual([('Test', 0, 0, '')], self.env.db_query("""
SELECT name, due, completed, description FROM milestone
WHERE name='Test'
"""))
# Use the same model object to update the milestone
milestone.description = 'Some text'
milestone.update()
self.assertEqual([('Test', 0, 0, 'Some text')], self.env.db_query("""
SELECT name, due, completed, description FROM milestone
WHERE name='Test'
"""))
def test_create_milestone_without_name(self):
milestone = Milestone(self.env)
self.assertRaises(TracError, milestone.insert)
def test_delete_milestone(self):
self.env.db_transaction("INSERT INTO milestone (name) VALUES ('Test')")
milestone = Milestone(self.env, 'Test')
milestone.delete()
self.assertEqual(False, milestone.exists)
self.assertEqual([],
self.env.db_query("SELECT * FROM milestone WHERE name='Test'"))
def test_delete_milestone_retarget_tickets(self):
self.env.db_transaction("INSERT INTO milestone (name) VALUES ('Test')")
tkt1 = Ticket(self.env)
tkt1.populate({'summary': 'Foo', 'milestone': 'Test'})
tkt1.insert()
tkt2 = Ticket(self.env)
tkt2.populate({'summary': 'Bar', 'milestone': 'Test'})
tkt2.insert()
milestone = Milestone(self.env, 'Test')
milestone.delete(retarget_to='Other')
self.assertEqual(False, milestone.exists)
self.assertEqual('Other', Ticket(self.env, tkt1.id)['milestone'])
self.assertEqual('Other', Ticket(self.env, tkt2.id)['milestone'])
def test_update_milestone(self):
self.env.db_transaction("INSERT INTO milestone (name) VALUES ('Test')")
milestone = Milestone(self.env, 'Test')
t1 = datetime(2001, 01, 01, tzinfo=utc)
t2 = datetime(2002, 02, 02, tzinfo=utc)
milestone.due = t1
milestone.completed = t2
milestone.description = 'Foo bar'
milestone.update()
self.assertEqual(
[('Test', to_utimestamp(t1), to_utimestamp(t2), 'Foo bar')],
self.env.db_query("SELECT * FROM milestone WHERE name='Test'"))
def test_update_milestone_without_name(self):
self.env.db_transaction("INSERT INTO milestone (name) VALUES ('Test')")
milestone = Milestone(self.env, 'Test')
milestone.name = None
self.assertRaises(TracError, milestone.update)
def test_update_milestone_update_tickets(self):
self.env.db_transaction("INSERT INTO milestone (name) VALUES ('Test')")
tkt1 = Ticket(self.env)
tkt1.populate({'summary': 'Foo', 'milestone': 'Test'})
tkt1.insert()
tkt2 = Ticket(self.env)
tkt2.populate({'summary': 'Bar', 'milestone': 'Test'})
tkt2.insert()
milestone = Milestone(self.env, 'Test')
milestone.name = 'Testing'
milestone.update()
self.assertEqual('Testing', Ticket(self.env, tkt1.id)['milestone'])
self.assertEqual('Testing', Ticket(self.env, tkt2.id)['milestone'])
def test_rename_milestone(self):
milestone = Milestone(self.env)
milestone.name = 'OldName'
milestone.insert()
attachment = Attachment(self.env, 'milestone', 'OldName')
attachment.insert('foo.txt', StringIO(), 0, 1)
milestone = Milestone(self.env, 'OldName')
milestone.name = 'NewName'
milestone.update()
self.assertRaises(ResourceNotFound, Milestone, self.env, 'OldName')
self.assertEqual('NewName', Milestone(self.env, 'NewName').name)
attachments = Attachment.select(self.env, 'milestone', 'OldName')
self.assertRaises(StopIteration, attachments.next)
attachments = Attachment.select(self.env, 'milestone', 'NewName')
self.assertEqual('foo.txt', attachments.next().filename)
self.assertRaises(StopIteration, attachments.next)
def test_select_milestones(self):
self.env.db_transaction.executemany(
"INSERT INTO milestone (name) VALUES (%s)",
[('1.0',), ('2.0',)])
milestones = list(Milestone.select(self.env))
self.assertEqual('1.0', milestones[0].name)
assert milestones[0].exists
self.assertEqual('2.0', milestones[1].name)
assert milestones[1].exists
def test_change_listener_created(self):
listener = TestMilestoneChangeListener(self.env)
milestone = self._create_milestone(name='Milestone 1')
milestone.insert()
self.assertEqual('created', listener.action)
self.assertEqual(milestone, listener.milestone)
def test_change_listener_changed(self):
listener = TestMilestoneChangeListener(self.env)
milestone = self._create_milestone(
name='Milestone 1',
due=datetime(2001, 01, 01, tzinfo=utc),
description='The milestone description')
milestone.insert()
milestone.name = 'Milestone 2'
milestone.completed = datetime(2001, 02, 03, tzinfo=utc)
milestone.description = 'The changed description'
milestone.update()
self.assertEqual('changed', listener.action)
self.assertEqual(milestone, listener.milestone)
self.assertEqual({'name': 'Milestone 1', 'completed': None,
'description': 'The milestone description'},
listener.old_values)
def test_change_listener_deleted(self):
listener = TestMilestoneChangeListener(self.env)
milestone = self._create_milestone(name='Milestone 1')
milestone.insert()
self.assertEqual(True, milestone.exists)
milestone.delete()
self.assertEqual('Milestone 1', milestone.name)
self.assertEqual(False, milestone.exists)
self.assertEqual('deleted', listener.action)
self.assertEqual(milestone, listener.milestone)
class ComponentTestCase(unittest.TestCase):
def setUp(self):
self.env = EnvironmentStub(default_data=True)
def tearDown(self):
self.env.reset_db()
def test_exists_negative(self):
def get_fake_component():
return Component(self.env, "Shrubbery")
self.assertRaises(TracError, get_fake_component)
def test_exists(self):
"""
http://trac.edgewall.org/ticket/4247
"""
for c in Component.select(self.env):
self.assertEqual(c.exists, True)
def test_create_and_update(self):
component = Component(self.env)
component.name = 'Test'
component.insert()
self.assertEqual([('Test', None, None)], self.env.db_query("""
SELECT name, owner, description FROM component
WHERE name='Test'"""))
# Use the same model object to update the component
component.owner = 'joe'
component.update()
self.assertEqual([('Test', 'joe', None)], self.env.db_query(
"SELECT name, owner, description FROM component WHERE name='Test'"))
class VersionTestCase(unittest.TestCase):
def setUp(self):
self.env = EnvironmentStub(default_data=True)
def tearDown(self):
self.env.reset_db()
def test_exists_negative(self):
def get_fake_version():
return Version(self.env, "-1")
self.assertRaises(TracError, get_fake_version)
def test_exists(self):
"""
http://trac.edgewall.org/ticket/4247
"""
for v in Version.select(self.env):
self.assertEqual(v.exists, True)
def test_create_and_update(self):
version = Version(self.env)
version.name = 'Test'
version.insert()
self.assertEqual([('Test', 0, None)], self.env.db_query(
"SELECT name, time, description FROM version WHERE name='Test'"))
# Use the same model object to update the version
version.description = 'Some text'
version.update()
self.assertEqual([('Test', 0, 'Some text')], self.env.db_query(
"SELECT name, time, description FROM version WHERE name='Test'"))
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TicketTestCase, 'test'))
suite.addTest(unittest.makeSuite(TicketCommentEditTestCase, 'test'))
suite.addTest(unittest.makeSuite(TicketCommentDeleteTestCase, 'test'))
suite.addTest(unittest.makeSuite(EnumTestCase, 'test'))
suite.addTest(unittest.makeSuite(MilestoneTestCase, 'test'))
suite.addTest(unittest.makeSuite(ComponentTestCase, 'test'))
suite.addTest(unittest.makeSuite(VersionTestCase, 'test'))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
|
# -*- coding: utf-8 -*-
# Copyright 2014-2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import re
import binascii
from struct import pack
from struct import unpack
from netaddr import IPAddress
from netaddr.core import AddrFormatError
from os_dhcp_server import globals as dhcp
logger = logging.getLogger(__name__)
def value_to_bytelist(optype, value):
""" Given a field or option type, format the value as a list with a
length that matches the given type.
:param optype: The object type, which is one of: int, int16, int32,
bool, char, char+, string, or an RFC-defined type.
:param value: The value on which to enforce type.
:returns: value after type enforcement"""
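    # Illustrative examples (not part of the original module), assuming the
    # conversions implemented below:
    #   value_to_bytelist('int16', 1500)      -> [5, 220]
    #   value_to_bytelist('ipv4', '10.0.0.1') -> [10, 0, 0, 1]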
if optype == 'int':
if isinstance(value, int):
if (value < 0) or (value > 255):
return False
else:
return [value]
if isinstance(value, list):
if len(value) != 1:
return False
i = value[0]
if (not isinstance(i, int)) or (i < 0) or (i > 255):
return False
else:
                return value
elif isinstance(value, str):
try:
int_value = int(value)
except ValueError:
return False
if (int_value < 0) or (int_value > 255):
return False
else:
return [int_value]
elif optype == 'int16':
if isinstance(value, int):
return [int(value >> 8 & 0xFF), int(value & 0xFF)]
if isinstance(value, list):
if len(value) != 2:
return False
            # validate each octet in the list, not the loop index
            for i in value:
                if (not isinstance(i, int)) or (i < 0) or (i > 255):
                    return False
            return value
elif isinstance(value, str):
if len(value) != 2:
return False
new_value = []
            for i in value:
                # treat the 2-character string as raw bytes: ord() gives each octet
                new_value.append(ord(i))
            return new_value
elif optype == 'int32':
if isinstance(value, int):
return int32_to_octets(value)
if isinstance(value, list):
if len(value) != 4:
return False
            for i in value:
if (not isinstance(i, int)) or (i < 0) or (i > 255):
return False
return value
elif isinstance(value, str):
if len(value) != 4:
return False
new_value = []
for i in value:
                # raw byte string: ord() gives each octet value
                new_value.append(ord(i))
return new_value
elif optype == 'bool':
if (isinstance(value, bool)) or (isinstance(value, int)):
if value:
return [1]
else:
return [0]
elif isinstance(value, str):
if value in dhcp.TRUE_VALUES:
return [1]
elif value in dhcp.FALSE_VALUES:
return [0]
else:
return False
elif optype == 'ipv4':
if isinstance(value, int):
return int32_to_octets(value)
elif isinstance(value, str):
try:
ip_addr = IPAddress(value.strip())
except AddrFormatError:
logger.error("Could not parse IP address: %s" % value)
return False
return int32_to_octets(int(ip_addr))
elif isinstance(value, IPAddress):
return int32_to_octets(int(value))
elif optype == '[ipv4]':
if isinstance(value, list):
new_value = []
for ip in value:
ip_octets = value_to_bytelist('ipv4', ip)
new_value.extend(ip_octets)
return new_value
elif isinstance(value, str):
new_value = []
for ip in value.split(','):
try:
ip_addr = IPAddress(ip.strip())
except AddrFormatError:
return False
for octet in int32_to_octets(int(ip_addr)):
new_value.append(octet)
return new_value
else:
return False
elif optype == 'string':
return list(str(value))
elif optype == 'identifier' or optype == 'hwmacaddr': # see RFC6842
        # Handle MAC addresses, optionally prefixed with the hardware
        # type (01 = Ethernet)
if isinstance(value, str):
macaddr = re.compile(r'([0-9A-F]{2}[:-]){5}([0-9A-F]{2})',
re.IGNORECASE)
macaddr_type = re.compile(r'01:([0-9A-F]{2}[:-]){5}'
r'([0-9A-F]{2})', re.IGNORECASE)
if macaddr.match(value) or macaddr_type.match(value):
                mac_raw = value.replace(':', '').replace('-', '')  # strip separators
# Convert the remaining hex to bytes
return [ord(i) for i in binascii.unhexlify(mac_raw)]
else:
return [ord(i) for i in value]
elif isinstance(value, list):
return value
logger.error("Type not implemented: %s" % optype)
return False
def bytelist_to_value(optype, bytelist):
""" Given a series of bytes, return a human-readable value based on
the option type. Does the reverse of value_to_bytelist.
:param optype: The object type, which is one of: int, int16, int32,
bool, char, char+, string, or an RFC-defined type.
:param bytelist: The list of bytes to convert to a readable format.
:returns: value after type conversion"""
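    # Illustrative examples (not part of the original module), using the
    # branches implemented below:
    #   bytelist_to_value('int16', [5, 220])        -> 1500
    #   bytelist_to_value('ipv4', [192, 168, 0, 1]) -> '192.168.0.1'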
if optype == 'int':
return bytelist[0]
elif optype == 'int16':
if len(bytelist) != 2:
logger.debug("Could not convert %s bytes to int16")
return False
new_value = bytelist[0] * 256
new_value += bytelist[1]
return new_value
elif optype == 'int32':
if len(bytelist) != 4:
logger.error("Could not convert %s bytes to int32" %
len(bytelist))
return False
new_value = bytelist[0] * 256 * 256 * 256
new_value += bytelist[1] * 256 * 256
new_value += bytelist[2] * 256
new_value += bytelist[3]
return new_value
elif optype == 'bool':
        # a boolean option is encoded as a single octet; test its value,
        # not the list object itself
        if bytelist and bytelist[0]:
            return 'True'
        else:
            return 'False'
elif optype == 'ipv4':
if len(bytelist) != 4:
logger.error("Could not convert %s to IPv4 address" %
bytelist)
return False
else:
            return '{}.{}.{}.{}'.format(bytelist[0], bytelist[1],
                                        bytelist[2], bytelist[3])
elif optype == '[ipv4]':
if len(bytelist) < 4:
logger.error("Could not convert %s to a list of IPs" %
bytelist)
return False
else:
__bytelist = bytelist[:]
__bytelist.reverse()
new_value = ''
while len(__bytelist) > 3:
if new_value: # if there is already at least 1 IP,
new_value += ', ' # append a comma between addresses
for i in range(0,3):
new_value += str(__bytelist.pop()) + "."
new_value += str(__bytelist.pop())
return new_value
elif optype == 'string':
return ''.join(chr(byte) for byte in bytelist).strip('\x00')
elif optype == 'identifier':
return ''.join('{}'.format(chr(x)) for x in bytelist).strip('\x00')
elif optype == 'hwmacaddr':
        # an Ethernet MAC address is six octets
        if bytelist[0] == 1:  # MAC address preceded by hardware type
            return ':'.join('{:02x}'.format(x) for x in bytelist[1:7])
        else:
            return ':'.join('{:02x}'.format(x) for x in bytelist[0:6])
else:
return bytelist
def unpack_ipv4_bytes(byte_pattern):
""" Given a list of raw bytes, parse out and return a list of IPs
:param byte_pattern: The raw bytes from the DHCP option containing
a list of IP addresses. The RFC specifies that an IP list will
be a list of octets, with each group of 4 octets representing
one IP address. There are no separators or terminators.
:returns: a list of IP addresses as strings"""
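    # Illustrative example (not part of the original module):
    #   unpack_ipv4_bytes([10, 0, 0, 1, 10, 0, 0, 2]) -> ['10.0.0.1', '10.0.0.2']
    # Note that the input list is reversed in place below.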
ip_list = []
# reverse the bytes so we can pop them off one at a time
byte_pattern.reverse()
while len(byte_pattern) > 3:
# if there are at least 4 octets, add them as an IP to the list
ip_string = ''
for i in range(0, 3):
ip_string += str(byte_pattern.pop()) + "."
ip_string += str(byte_pattern.pop())
ip_list.append(ip_string)
return ip_list
def int32_to_octets(value):
""" Given an int or long, return a 4-byte array of 8-bit ints."""
return [int(value >> 24 & 0xFF), int(value >> 16 & 0xFF),
int(value >> 8 & 0xFF), int(value & 0xFF)]
def get_option_name_id(option):
""" Return name if given ID, or ID if given name"""
if isinstance(option, int):
return dhcp.DHCP_OPTIONS[option]
else:
return dhcp.DHCP_OPTIONS.index(option)
def field_length_valid(name, length):
    field_type = dhcp.DHCP_FIELDS[name]['type']
    if field_type in ['hwmacaddr', 'sname', 'file']:
        # variable-length fields only need to fit within the field
        return length <= dhcp.DHCP_FIELDS[name]['len']
    else:
        # fixed-length fields must match exactly
        return length == dhcp.DHCP_FIELDS[name]['len']
class DhcpPacket(object):
""" Packet handler class for DHCP packets
    :param data: Raw packet data; if omitted, a blank packet is initialized.
"""
def __init__(self, data=None):
self.source_address = False
self.dhcp_options = {}
if data:
if isinstance(data, list):
self.packet_data = data
self.map_options()
if isinstance(data, str):
self.raw_packet_data = data
self.decode_packet()
else:
# Initialize a blank packet
self.packet_data = [0] * 240
self.packet_data[236:240] = dhcp.MAGIC_COOKIE
logger.debug("Initializing blank DHCP packet")
logger.info("DhcpPacket packet created")
def get_option_start(self):
""" Return location after MagicCookie, or None if not found """
# Sometimes it's right where you expect it
if len(self.packet_data) > 238:
if self.packet_data[236:240] == dhcp.MAGIC_COOKIE:
logger.debug("DHCP packet received, contains MagicCookie")
return 240
else:
# search the entire packet, but not past packet end
for i in range(237, len(self.packet_data) - 3):
if self.packet_data[i:i+4] == dhcp.MAGIC_COOKIE:
logger.debug("DHCP packet received, contains MagicCookie")
return i+4
return None # not found
def get_option_number(self, name):
""" Get the DHCP option number from the name. """
return dhcp.DHCP_OPTIONS.index(name)
def get_option(self, opt_name):
""" Get DHCP options (including fields)"""
if opt_name in dhcp.DHCP_FIELDS:
field = dhcp.DHCP_FIELDS[opt_name]
try:
rawvalue = self.packet_data[
field['pos']:field['pos']+field['len']
]
except IndexError:
return None
value = bytelist_to_value(field['type'], rawvalue)
logger.debug("DHCP field retrieved, opt_name: %s, value: %s" %
(opt_name, value))
return value
# Option being retrieved is not one of the main fields
elif opt_name in dhcp.DHCP_OPTIONS:
if opt_name in self.dhcp_options:
opt_num = self.get_option_number(opt_name)
value = self.dhcp_options[opt_name]
logger.debug("DHCP option %s read, opt_name: %s, value: %s" %
(opt_num, opt_name, value))
return value
logger.error("Error: Could not get value for invalid option: %s" %\
opt_name)
return None
def set_option(self, opt_name, value):
""" Set DHCP options (including fields)
:param opt_name: The opt_name of the option or field to be set
:param value: The value to set for the field or option. If the
value is a list, then it will be treated as a series of bytes
and the length must not be shorter than the min or larger than
the max length allowed per option. Depending on the field or
option type, a transformation may occur, e.g. if a field type
is 'int16', then each byte will be converted to an int before
setting the byte value, conversely if the field type is
'string', then a string transformation will be done.
Booleans will be tested against the TRUE_VALUES and
FALSE_VALUES defined in os_dhcp_server.globals.
:returns: True or False to indicate success, failure will be logged
"""
if opt_name in dhcp.DHCP_FIELDS:
            field_type = dhcp.DHCP_FIELDS[opt_name]['type']
begin = dhcp.DHCP_FIELDS[opt_name]['pos']
if isinstance(value, int):
value = [value]
if isinstance(value, IPAddress):
# Treat IP addresses like strings below
value = str(value)
if isinstance(value, list):
# boundary validation
if not field_length_valid(opt_name, len(value)):
logger.error("DhcpPacket field %s value wrong length: %s" %
(opt_name, value))
return False
self.packet_data[begin:(begin + len(value))] = value
logger.debug("DHCP field set, opt_name: %s, value: %s" %
(opt_name, value))
return True
elif isinstance(value, str):
# Convert string to an array of bytes as unsigned small ints
                bytelist = value_to_bytelist(field_type, value)
if not field_length_valid(opt_name, len(bytelist)):
logger.error("DhcpPacket field %s value wrong length: %s" %
(opt_name, value))
return False
self.packet_data[begin:(begin + len(bytelist))] = bytelist
logger.debug("DHCP field set, opt_name: %s, value: %s" %
(opt_name, value))
return True
else:
return False
elif opt_name in dhcp.DHCP_OPTIONS:
option = dhcp.DHCP_OPTION_TYPES[dhcp.DHCP_OPTIONS.index(opt_name)]
# boundary validation
bytelist = value_to_bytelist(option['type'], value)
if len(bytelist) < option['min']:
logger.error("Cannot set option %s, value too short: %s" %
(opt_name, value))
return False
elif (option['max'] != 0) and (len(bytelist) > option['max']):
logger.error("Cannot set option %s, value too long: %s" %
(opt_name, value))
return False
self.dhcp_options[opt_name] = value
logger.debug("DHCP option set, opt_name: %s, value: %s" %
(opt_name, value))
return True
def sort_options(self):
""" Return a list of dicts of DHCP options sorted by option number """
option_list = []
ord_options = {}
for option in self.dhcp_options:
# Options must be set in order according to RFC
order = dhcp.DHCP_OPTIONS.index(option)
            # DHCP requires the option ID, length, and data concatenated
ord_options[order] = (option, self.dhcp_options[option])
for option in sorted(ord_options.keys()):
option_list.append([option,
ord_options[option][0],
ord_options[option][1]])
return option_list
def pack_packet(self, packet):
""" Packs the packet using struct.pack to prepare to send on wire """
pack_fmt = str(len(packet)) + "c"
packet = map(chr, packet)
return pack(pack_fmt, *packet)
def map_options(self):
location = self.get_option_start()
if not location:
logger.info("Magic Cookie not found, not a valid DHCP packet")
return
while location < len(self.packet_data):
if self.packet_data[location] == 255:
logger.debug("DHCP Option End reached at byte %d" % location)
break
elif self.packet_data[location] == 0: # pad byte
location += 1
else:
option = dhcp.DHCP_OPTIONS[self.packet_data[location]]
# TODO(dsneddon) lookup field type for data validation
length = self.packet_data[location+1]
start = location + 2
end = start + length
self.dhcp_options[option] = self.packet_data[start:end]
location = end
def decode_packet(self):
""" Unpack the packet and lookup the option values. An option has the
format option number (1 byte), length (1 byte), and data. """
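        # Wire-format example (illustrative): the option bytes [53, 1, 2]
        # decode to option 53 (DHCP message type), length 1, value 2 (OFFER)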
if not self.raw_packet_data:
logger.debug("Empty packet received, discarding...")
return
# treat the packet like a list of ints representing chars
unpack_fmt = str(len(self.raw_packet_data)) + "c"
self.packet_data = [ord(i) for i in unpack(unpack_fmt,
self.raw_packet_data)]
# TODO(dsneddon) replace this with a human-readable packet decode
logger.debug("Raw packet decoded: \n%s\n" % self.packet_data)
self.map_options()
def encode_packet(self):
""" Set the options and pack the packet. An option has an option
number, followed by length, followed by data. """
ord_options = self.sort_options()
logger.debug("Options to encode: %s" % ord_options)
option_data = []
logger.debug("DHCP options added to packet: %s" % ord_options)
for option in ord_options:
option_data.append(option[0])
option_data.append(len(option[2]))
option_data.extend(option[2])
self.packet_data[240:] = option_data
self.packet_data.append(255) # add end option
def str(self):
""" Return a human-readable decode of the packet"""
str_rep = """
+--------------------------DHCP Packet--------------------------+
0 1 2 3 |
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| op ({}) | htype ({:3}) | hlen ({:3}) | hops ({:3}) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| xid ( {:<16} ) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| secs ({:3}) | flags ({:3}) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| ciaddr ( {:<16} ) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| yiaddr ( {:<16} ) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| siaddr ( {:<16} ) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| giaddr ( {:<16} ) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| chaddr ( {:>16} ) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| sname ( {:<51} ) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| file ( {:<52} ) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| <magic cookie> indicates options begin at byte: {:>12} |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
""".format(self.get_option('op'), self.get_option('htype'),
self.get_option('hlen'), self.get_option('hops'),
self.get_option('xid'), self.get_option('secs'),
self.get_option('flags'), self.get_option('ciaddr'),
self.get_option('yiaddr'), self.get_option('siaddr'),
self.get_option('giaddr'), self.get_option('chaddr'),
self.get_option('sname'), self.get_option('file'),
self.get_option_start())
str_rep += "|--------------------------DHCP Options----------------"
str_rep += "---------|\n"
for option in self.sort_options():
opt_val = self.get_option(option[1])
str_rep += "| option {:3}: {:<18} {:<30} |\n".format(
option[0], str(option[1])[0:18], str(opt_val)[0:29]
)
x = 30
while x < len(str(opt_val)):
str_rep += "| {:61} |\n".format(
str(opt_val)[x:x+61]
)
x = x + 61
str_rep += "+-----------------------------------------------------"
str_rep += "----------+"
return str_rep
class DhcpOffer(DhcpPacket):
""" Subclass of DHCPPacket specifically for DHCP Offers
:param chaddr: Client HWAddr (MAC Address)
:param ip_dest: Unicast destination IP address
    :param data: Raw packet data; if omitted, a blank packet is initialized
"""
def __init__(self, chaddr=None, source_address=None, ip_dest=None,
data=None):
super(DhcpOffer, self).__init__(data)
self.source_address = source_address
self.dhcp_options = {}
self.ip_dest = ip_dest
self.chaddr = chaddr
if self.chaddr:
self.set_option('chaddr', self.chaddr)
logger.info("DhcpOffer packet created")
self.set_option('op', 2)
self.set_option('htype', 1)
self.set_option('hlen', 6)
        # 2 = DHCPOFFER (RFC 2132); set the message type once
        self.set_option('dhcp_message_type', 2)
|
|
from itertools import product
import numpy as np
from scipy.sparse import (bsr_matrix, coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix)
from sklearn.cross_validation import train_test_split
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.validation import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn import neighbors, datasets
rng = np.random.RandomState(0)
# load and shuffle iris dataset
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# load and shuffle digits
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
SPARSE_TYPES = (bsr_matrix, coo_matrix, csc_matrix, csr_matrix, dok_matrix,
lil_matrix)
SPARSE_OR_DENSE = SPARSE_TYPES + (np.asarray,)
ALGORITHMS = ('ball_tree', 'brute', 'kd_tree', 'auto')
P = (1, 2, 3, 4, np.inf)
# Filter deprecation warnings.
neighbors.kneighbors_graph = ignore_warnings(neighbors.kneighbors_graph)
neighbors.radius_neighbors_graph = ignore_warnings(
neighbors.radius_neighbors_graph)
def _weight_func(dist):
""" Weight function to replace lambda d: d ** -2.
    The lambda function is not valid because if d == 0 then 0 ** -2 is
    undefined (division by zero). """
    # dist may contain zeros; silence the divide-by-zero warning so that
    # 1. / 0. quietly becomes inf, i.e. an effectively infinite weight
with np.errstate(divide='ignore'):
retval = 1. / dist
return retval ** 2
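# Illustrative value (not part of the original tests):
#   _weight_func(np.array([2.0])) -> array([0.25]), the inverse-square weight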
def test_unsupervised_kneighbors(n_samples=20, n_features=5,
n_query_pts=2, n_neighbors=5):
"""Test unsupervised neighbors methods"""
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results_nodist = []
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
p=p)
neigh.fit(X)
results_nodist.append(neigh.kneighbors(test,
return_distance=False))
results.append(neigh.kneighbors(test, return_distance=True))
for i in range(len(results) - 1):
assert_array_almost_equal(results_nodist[i], results[i][1])
assert_array_almost_equal(results[i][0], results[i + 1][0])
assert_array_almost_equal(results[i][1], results[i + 1][1])
def test_unsupervised_inputs():
"""test the types of valid input into NearestNeighbors"""
X = rng.random_sample((10, 3))
nbrs_fid = neighbors.NearestNeighbors(n_neighbors=1)
nbrs_fid.fit(X)
dist1, ind1 = nbrs_fid.kneighbors(X)
nbrs = neighbors.NearestNeighbors(n_neighbors=1)
for input in (nbrs_fid, neighbors.BallTree(X), neighbors.KDTree(X)):
nbrs.fit(input)
dist2, ind2 = nbrs.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
def test_unsupervised_radius_neighbors(n_samples=20, n_features=5,
n_query_pts=2, radius=0.5,
random_state=0):
"""Test unsupervised radius-based query"""
rng = np.random.RandomState(random_state)
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for p in P:
results = []
for algorithm in ALGORITHMS:
neigh = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm,
p=p)
neigh.fit(X)
ind1 = neigh.radius_neighbors(test, return_distance=False)
# sort the results: this is not done automatically for
# radius searches
dist, ind = neigh.radius_neighbors(test, return_distance=True)
for (d, i, i1) in zip(dist, ind, ind1):
j = d.argsort()
d[:] = d[j]
i[:] = i[j]
i1[:] = i1[j]
results.append((dist, ind))
assert_array_almost_equal(np.concatenate(list(ind)),
np.concatenate(list(ind1)))
for i in range(len(results) - 1):
assert_array_almost_equal(np.concatenate(list(results[i][0])),
                                      np.concatenate(list(results[i + 1][0])))
assert_array_almost_equal(np.concatenate(list(results[i][1])),
np.concatenate(list(results[i + 1][1])))
def test_kneighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
"""Test k-neighbors classification"""
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
# Test prediction with y_str
knn.fit(X, y_str)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_kneighbors_classifier_float_labels(n_samples=40, n_features=5,
n_test_pts=10, n_neighbors=5,
random_state=0):
"""Test k-neighbors classification"""
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors)
knn.fit(X, y.astype(np.float))
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
def test_kneighbors_classifier_predict_proba():
"""Test KNeighborsClassifier.predict_proba() method"""
X = np.array([[0, 2, 0],
[0, 2, 1],
[2, 0, 0],
[2, 2, 0],
[0, 0, 2],
[0, 0, 1]])
y = np.array([4, 4, 5, 5, 1, 1])
cls = neighbors.KNeighborsClassifier(n_neighbors=3, p=1) # cityblock dist
cls.fit(X, y)
y_prob = cls.predict_proba(X)
real_prob = np.array([[0, 2. / 3, 1. / 3],
[1. / 3, 2. / 3, 0],
[1. / 3, 0, 2. / 3],
[0, 1. / 3, 2. / 3],
[2. / 3, 1. / 3, 0],
[2. / 3, 1. / 3, 0]])
assert_array_equal(real_prob, y_prob)
# Check that it also works with non integer labels
cls.fit(X, y.astype(str))
y_prob = cls.predict_proba(X)
assert_array_equal(real_prob, y_prob)
# Check that it works with weights='distance'
cls = neighbors.KNeighborsClassifier(
n_neighbors=2, p=1, weights='distance')
cls.fit(X, y)
y_prob = cls.predict_proba(np.array([[0, 2, 0], [2, 2, 2]]))
real_prob = np.array([[0, 1, 0], [0, 0.4, 0.6]])
assert_array_almost_equal(real_prob, y_prob)
def test_radius_neighbors_classifier(n_samples=40,
n_features=5,
n_test_pts=10,
radius=0.5,
random_state=0):
"""Test radius-based classification"""
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
y_str = y.astype(str)
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y[:n_test_pts])
neigh.fit(X, y_str)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_array_equal(y_pred, y_str[:n_test_pts])
def test_radius_neighbors_classifier_when_no_neighbors():
""" Test radius-based classifier when no neighbors found.
    In this case it should raise an informative exception """
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.01, 1.01], [1.4, 1.4]]) # one outlier
weight_func = _weight_func
for outlier_label in [0, -1, None]:
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
rnc = neighbors.RadiusNeighborsClassifier
clf = rnc(radius=radius, weights=weights, algorithm=algorithm,
outlier_label=outlier_label)
clf.fit(X, y)
assert_array_equal(np.array([1, 2]),
clf.predict(z1))
if outlier_label is None:
assert_raises(ValueError, clf.predict, z2)
elif False:
assert_array_equal(np.array([1, outlier_label]),
clf.predict(z2))
def test_radius_neighbors_classifier_outlier_labeling():
""" Test radius-based classifier when no neighbors found and outliers
are labeled. """
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers
z2 = np.array([[1.01, 1.01], [1.4, 1.4]]) # one outlier
correct_labels1 = np.array([1, 2])
correct_labels2 = np.array([1, -1])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm,
outlier_label=-1)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
assert_array_equal(correct_labels2, clf.predict(z2))
def test_radius_neighbors_classifier_zero_distance():
""" Test radius-based classifier, when distance to a sample is zero. """
X = np.array([[1.0, 1.0], [2.0, 2.0]])
y = np.array([1, 2])
radius = 0.1
z1 = np.array([[1.01, 1.01], [2.0, 2.0]])
correct_labels1 = np.array([1, 2])
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
clf = neighbors.RadiusNeighborsClassifier(radius=radius,
weights=weights,
algorithm=algorithm)
clf.fit(X, y)
assert_array_equal(correct_labels1, clf.predict(z1))
def test_neighbors_regressors_zero_distance():
""" Test radius-based regressor, when distance to a sample is zero. """
X = np.array([[1.0, 1.0], [1.0, 1.0], [2.0, 2.0], [2.5, 2.5]])
y = np.array([1.0, 1.5, 2.0, 0.0])
radius = 0.2
z = np.array([[1.1, 1.1], [2.0, 2.0]])
rnn_correct_labels = np.array([1.25, 2.0])
knn_correct_unif = np.array([1.25, 1.0])
knn_correct_dist = np.array([1.25, 2.0])
for algorithm in ALGORITHMS:
# we don't test for weights=_weight_func since user will be expected
# to handle zero distances themselves in the function.
for weights in ['uniform', 'distance']:
rnn = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
rnn.fit(X, y)
assert_array_almost_equal(rnn_correct_labels, rnn.predict(z))
for weights, corr_labels in zip(['uniform', 'distance'],
[knn_correct_unif, knn_correct_dist]):
knn = neighbors.KNeighborsRegressor(n_neighbors=2,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
assert_array_almost_equal(corr_labels, knn.predict(z))
def test_radius_neighbors_boundary_handling():
"""Test whether points lying on boundary are handled consistently
Also ensures that even with only one query point, an object array
is returned rather than a 2d array.
"""
X = np.array([[1.5], [3.0], [3.01]])
radius = 3.0
for algorithm in ALGORITHMS:
nbrs = neighbors.NearestNeighbors(radius=radius,
algorithm=algorithm).fit(X)
results = nbrs.radius_neighbors([0.0], return_distance=False)
assert_equal(results.shape, (1,))
assert_equal(results.dtype, object)
assert_array_equal(results[0], [0, 1])
def test_RadiusNeighborsClassifier_multioutput():
"""Test k-NN classifier on multioutput data"""
rng = check_random_state(0)
n_features = 2
n_samples = 40
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, 'uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
for o in range(n_output):
rnn = neighbors.RadiusNeighborsClassifier(weights=weights,
algorithm=algorithm)
rnn.fit(X_train, y_train[:, o])
y_pred_so.append(rnn.predict(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert_equal(y_pred_so.shape, y_test.shape)
# Multioutput prediction
rnn_mo = neighbors.RadiusNeighborsClassifier(weights=weights,
algorithm=algorithm)
rnn_mo.fit(X_train, y_train)
y_pred_mo = rnn_mo.predict(X_test)
assert_equal(y_pred_mo.shape, y_test.shape)
assert_array_almost_equal(y_pred_mo, y_pred_so)
def test_kneighbors_classifier_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
"""Test k-NN classifier on sparse matrices"""
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
X *= X > .2
y = ((X ** 2).sum(axis=1) < .5).astype(np.int)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
for sparsev in SPARSE_TYPES + (np.asarray,):
X_eps = sparsev(X[:n_test_pts] + epsilon)
y_pred = knn.predict(X_eps)
assert_array_equal(y_pred, y[:n_test_pts])
def test_KNeighborsClassifier_multioutput():
"""Test k-NN classifier on multioutput data"""
rng = check_random_state(0)
n_features = 5
n_samples = 50
n_output = 3
X = rng.rand(n_samples, n_features)
y = rng.randint(0, 3, (n_samples, n_output))
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
weights = [None, 'uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
# Stack single output prediction
y_pred_so = []
y_pred_proba_so = []
for o in range(n_output):
knn = neighbors.KNeighborsClassifier(weights=weights,
algorithm=algorithm)
knn.fit(X_train, y_train[:, o])
y_pred_so.append(knn.predict(X_test))
y_pred_proba_so.append(knn.predict_proba(X_test))
y_pred_so = np.vstack(y_pred_so).T
assert_equal(y_pred_so.shape, y_test.shape)
assert_equal(len(y_pred_proba_so), n_output)
# Multioutput prediction
knn_mo = neighbors.KNeighborsClassifier(weights=weights,
algorithm=algorithm)
knn_mo.fit(X_train, y_train)
y_pred_mo = knn_mo.predict(X_test)
assert_equal(y_pred_mo.shape, y_test.shape)
assert_array_almost_equal(y_pred_mo, y_pred_so)
# Check proba
y_pred_proba_mo = knn_mo.predict_proba(X_test)
assert_equal(len(y_pred_proba_mo), n_output)
for proba_mo, proba_so in zip(y_pred_proba_mo, y_pred_proba_so):
assert_array_almost_equal(proba_mo, proba_so)
def test_kneighbors_regressor(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
"""Test k-neighbors regression"""
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < 0.3))
def test_KNeighborsRegressor_multioutput_uniform_weight():
"""Test k-neighbors in multi-output regression with uniform weight"""
rng = check_random_state(0)
n_features = 5
n_samples = 40
n_output = 4
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_output)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
knn = neighbors.KNeighborsRegressor(weights=weights,
algorithm=algorithm)
knn.fit(X_train, y_train)
neigh_idx = knn.kneighbors(X_test, return_distance=False)
y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
for idx in neigh_idx])
y_pred = knn.predict(X_test)
assert_equal(y_pred.shape, y_test.shape)
assert_equal(y_pred_idx.shape, y_test.shape)
assert_array_almost_equal(y_pred, y_pred_idx)
def test_kneighbors_regressor_multioutput(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
"""Test k-neighbors in multi-output regression"""
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y = np.vstack([y, y]).T
y_target = y[:n_test_pts]
weights = ['uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
knn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = knn.predict(X[:n_test_pts] + epsilon)
assert_equal(y_pred.shape, y_target.shape)
assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_radius_neighbors_regressor(n_samples=40,
n_features=3,
n_test_pts=10,
radius=0.5,
random_state=0):
"""Test radius-based neighbors regression"""
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y_target = y[:n_test_pts]
weight_func = _weight_func
for algorithm in ALGORITHMS:
for weights in ['uniform', 'distance', weight_func]:
neigh = neighbors.RadiusNeighborsRegressor(radius=radius,
weights=weights,
algorithm=algorithm)
neigh.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = neigh.predict(X[:n_test_pts] + epsilon)
assert_true(np.all(abs(y_pred - y_target) < radius / 2))
def test_RadiusNeighborsRegressor_multioutput_with_uniform_weight():
"""Test radius neighbors in multi-output regression (uniform weight)"""
rng = check_random_state(0)
n_features = 5
n_samples = 40
n_output = 4
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_output)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
        rnn = neighbors.RadiusNeighborsRegressor(weights=weights,
                                                 algorithm=algorithm)
rnn.fit(X_train, y_train)
neigh_idx = rnn.radius_neighbors(X_test, return_distance=False)
y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
for idx in neigh_idx])
y_pred_idx = np.array(y_pred_idx)
y_pred = rnn.predict(X_test)
assert_equal(y_pred_idx.shape, y_test.shape)
assert_equal(y_pred.shape, y_test.shape)
assert_array_almost_equal(y_pred, y_pred_idx)
def test_RadiusNeighborsRegressor_multioutput(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=3,
random_state=0):
"""Test k-neighbors in multi-output regression with various weight"""
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = np.sqrt((X ** 2).sum(1))
y /= y.max()
y = np.vstack([y, y]).T
y_target = y[:n_test_pts]
weights = ['uniform', 'distance', _weight_func]
for algorithm, weights in product(ALGORITHMS, weights):
rnn = neighbors.RadiusNeighborsRegressor(n_neighbors=n_neighbors,
weights=weights,
algorithm=algorithm)
rnn.fit(X, y)
epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1)
y_pred = rnn.predict(X[:n_test_pts] + epsilon)
assert_equal(y_pred.shape, y_target.shape)
assert_true(np.all(np.abs(y_pred - y_target) < 0.3))
def test_kneighbors_regressor_sparse(n_samples=40,
n_features=5,
n_test_pts=10,
n_neighbors=5,
random_state=0):
"""Test radius-based regression on sparse matrices"""
# Like the above, but with various types of sparse matrices
rng = np.random.RandomState(random_state)
X = 2 * rng.rand(n_samples, n_features) - 1
y = ((X ** 2).sum(axis=1) < .25).astype(np.int)
for sparsemat in SPARSE_TYPES:
knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors,
algorithm='auto')
knn.fit(sparsemat(X), y)
for sparsev in SPARSE_OR_DENSE:
X2 = sparsev(X)
assert_true(np.mean(knn.predict(X2).round() == y) > 0.95)
def test_neighbors_iris():
"""Sanity checks on the iris dataset
Puts three points of each label in the plane and performs a
nearest neighbor query on points near the decision boundary.
"""
for algorithm in ALGORITHMS:
clf = neighbors.KNeighborsClassifier(n_neighbors=1,
algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_array_equal(clf.predict(iris.data), iris.target)
clf.set_params(n_neighbors=9, algorithm=algorithm)
clf.fit(iris.data, iris.target)
assert_true(np.mean(clf.predict(iris.data) == iris.target) > 0.95)
rgs = neighbors.KNeighborsRegressor(n_neighbors=5, algorithm=algorithm)
rgs.fit(iris.data, iris.target)
assert_true(np.mean(rgs.predict(iris.data).round() == iris.target)
> 0.95)
def test_neighbors_digits():
"""Sanity check on the digits dataset
the 'brute' algorithm has been observed to fail if the input
dtype is uint8 due to overflow in distance calculations.
"""
X = digits.data.astype('uint8')
Y = digits.target
(n_samples, n_features) = X.shape
train_test_boundary = int(n_samples * 0.8)
train = np.arange(0, train_test_boundary)
test = np.arange(train_test_boundary, n_samples)
(X_train, Y_train, X_test, Y_test) = X[train], Y[train], X[test], Y[test]
clf = neighbors.KNeighborsClassifier(n_neighbors=1, algorithm='brute')
score_uint8 = clf.fit(X_train, Y_train).score(X_test, Y_test)
score_float = clf.fit(X_train.astype(float), Y_train).score(
X_test.astype(float), Y_test)
assert_equal(score_uint8, score_float)
def test_kneighbors_graph():
"""Test kneighbors_graph to build the k-Nearest Neighbor graph."""
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
# n_neighbors = 1
A = neighbors.kneighbors_graph(X, 1, mode='connectivity')
assert_array_equal(A.toarray(), np.eye(A.shape[0]))
A = neighbors.kneighbors_graph(X, 1, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0.00, 1.01, 0.],
[1.01, 0., 0.],
[0.00, 1.40716026, 0.]])
# n_neighbors = 2
A = neighbors.kneighbors_graph(X, 2, mode='connectivity')
assert_array_equal(
A.toarray(),
[[1., 1., 0.],
[1., 1., 0.],
[0., 1., 1.]])
A = neighbors.kneighbors_graph(X, 2, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0., 1.01, 2.23606798],
[1.01, 0., 1.40716026],
[2.23606798, 1.40716026, 0.]])
# n_neighbors = 3
A = neighbors.kneighbors_graph(X, 3, mode='connectivity')
assert_array_almost_equal(
A.toarray(),
[[1, 1, 1], [1, 1, 1], [1, 1, 1]])
def test_kneighbors_graph_sparse(seed=36):
"""Test kneighbors_graph to build the k-Nearest Neighbor graph
for sparse input."""
rng = np.random.RandomState(seed)
X = rng.randn(10, 10)
Xcsr = csr_matrix(X)
for n_neighbors in [1, 2, 3]:
for mode in ["connectivity", "distance"]:
assert_array_almost_equal(
neighbors.kneighbors_graph(X,
n_neighbors,
mode=mode).toarray(),
neighbors.kneighbors_graph(Xcsr,
n_neighbors,
mode=mode).toarray())
def test_radius_neighbors_graph():
"""Test radius_neighbors_graph to build the Nearest Neighbor graph."""
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='connectivity')
assert_array_equal(
A.toarray(),
[[1., 1., 0.],
[1., 1., 1.],
[0., 1., 1.]])
A = neighbors.radius_neighbors_graph(X, 1.5, mode='distance')
assert_array_almost_equal(
A.toarray(),
[[0., 1.01, 0.],
[1.01, 0., 1.40716026],
[0., 1.40716026, 0.]])
def test_radius_neighbors_graph_sparse(seed=36):
"""Test radius_neighbors_graph to build the Nearest Neighbor graph
for sparse input."""
rng = np.random.RandomState(seed)
X = rng.randn(10, 10)
Xcsr = csr_matrix(X)
for n_neighbors in [1, 2, 3]:
for mode in ["connectivity", "distance"]:
assert_array_almost_equal(
neighbors.radius_neighbors_graph(X,
n_neighbors,
mode=mode).toarray(),
neighbors.radius_neighbors_graph(Xcsr,
n_neighbors,
mode=mode).toarray())
def test_neighbors_badargs():
"""Test bad argument values: these should all raise ValueErrors"""
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm='blah')
X = rng.random_sample((10, 2))
Xsparse = csr_matrix(X)
y = np.ones(10)
for cls in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
assert_raises(ValueError,
cls,
weights='blah')
assert_raises(ValueError,
cls, p=-1)
assert_raises(ValueError,
cls, algorithm='blah')
nbrs = cls(algorithm='ball_tree', metric='haversine')
assert_raises(ValueError,
nbrs.predict,
X)
assert_raises(ValueError,
ignore_warnings(nbrs.fit),
Xsparse, y)
nbrs = cls()
assert_raises(ValueError,
nbrs.fit,
np.ones((0, 2)), np.ones(0))
assert_raises(ValueError,
nbrs.fit,
X[:, :, None], y)
nbrs.fit(X, y)
assert_raises(ValueError,
nbrs.predict,
[])
nbrs = neighbors.NearestNeighbors().fit(X)
assert_raises(ValueError,
nbrs.kneighbors_graph,
X, mode='blah')
assert_raises(ValueError,
nbrs.radius_neighbors_graph,
X, mode='blah')
def test_neighbors_metrics(n_samples=20, n_features=3,
n_query_pts=2, n_neighbors=5):
"""Test computing the neighbors for various metrics"""
# create a symmetric matrix
V = rng.rand(n_features, n_features)
VI = np.dot(V, V.T)
metrics = [('euclidean', {}),
('manhattan', {}),
('minkowski', dict(p=1)),
('minkowski', dict(p=2)),
('minkowski', dict(p=3)),
('minkowski', dict(p=np.inf)),
('chebyshev', {}),
('seuclidean', dict(V=rng.rand(n_features))),
('wminkowski', dict(p=3, w=rng.rand(n_features))),
('mahalanobis', dict(VI=VI))]
algorithms = ['brute', 'ball_tree', 'kd_tree']
X = rng.rand(n_samples, n_features)
test = rng.rand(n_query_pts, n_features)
for metric, metric_params in metrics:
results = []
p = metric_params.pop('p', 2)
for algorithm in algorithms:
# KD tree doesn't support all metrics
if (algorithm == 'kd_tree' and
metric not in neighbors.KDTree.valid_metrics):
assert_raises(ValueError,
neighbors.NearestNeighbors,
algorithm=algorithm,
metric=metric, metric_params=metric_params)
continue
neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors,
algorithm=algorithm,
metric=metric, p=p,
metric_params=metric_params)
neigh.fit(X)
results.append(neigh.kneighbors(test, return_distance=True))
assert_array_almost_equal(results[0][0], results[1][0])
assert_array_almost_equal(results[0][1], results[1][1])
def test_callable_metric():
metric = lambda x1, x2: np.sqrt(np.sum(x1 ** 2 + x2 ** 2))
X = np.random.RandomState(42).rand(20, 2)
nbrs1 = neighbors.NearestNeighbors(3, algorithm='auto', metric=metric)
nbrs2 = neighbors.NearestNeighbors(3, algorithm='brute', metric=metric)
nbrs1.fit(X)
nbrs2.fit(X)
dist1, ind1 = nbrs1.kneighbors(X)
dist2, ind2 = nbrs2.kneighbors(X)
assert_array_almost_equal(dist1, dist2)
def test_metric_params_interface():
assert_warns(DeprecationWarning, neighbors.KNeighborsClassifier,
metric='wminkowski', w=np.ones(10))
assert_warns(SyntaxWarning, neighbors.KNeighborsClassifier,
metric_params={'p': 3})
def test_predict_sparse_ball_kd_tree():
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
y = rng.randint(0, 2, 5)
nbrs1 = neighbors.KNeighborsClassifier(1, algorithm='kd_tree')
nbrs2 = neighbors.KNeighborsRegressor(1, algorithm='ball_tree')
for model in [nbrs1, nbrs2]:
model.fit(X, y)
assert_raises(ValueError, model.predict, csr_matrix(X))
def test_non_euclidean_kneighbors():
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
# Find a reasonable radius.
dist_array = pairwise_distances(X).flatten()
    dist_array.sort()
radius = dist_array[15]
# Test kneighbors_graph
for metric in ['manhattan', 'chebyshev']:
nbrs_graph = neighbors.kneighbors_graph(
X, 3, metric=metric).toarray()
nbrs1 = neighbors.NearestNeighbors(3, metric=metric).fit(X)
assert_array_equal(nbrs_graph, nbrs1.kneighbors_graph(X).toarray())
# Test radiusneighbors_graph
for metric in ['manhattan', 'chebyshev']:
nbrs_graph = neighbors.radius_neighbors_graph(
X, radius, metric=metric).toarray()
nbrs1 = neighbors.NearestNeighbors(metric=metric, radius=radius).fit(X)
assert_array_equal(nbrs_graph,
nbrs1.radius_neighbors_graph(X).toarray())
    # Raise an error when the graph helper is given a metric that conflicts
    # with the metric of the fitted estimator.
X_nbrs = neighbors.NearestNeighbors(3, metric='manhattan')
X_nbrs.fit(X)
assert_raises(ValueError, neighbors.kneighbors_graph, X_nbrs, 3,
metric='euclidean')
X_nbrs = neighbors.NearestNeighbors(radius=radius, metric='manhattan')
X_nbrs.fit(X)
assert_raises(ValueError, neighbors.radius_neighbors_graph, X_nbrs,
radius, metric='euclidean')
def check_object_arrays(nparray, list_check):
for ind, ele in enumerate(nparray):
assert_array_equal(ele, list_check[ind])
def test_k_and_radius_neighbors_train_is_not_query():
"""Test kneighbors et.al when query is not training data"""
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
test_data = [[2], [1]]
# Test neighbors.
dist, ind = nn.kneighbors(test_data)
assert_array_equal(dist, [[1], [0]])
assert_array_equal(ind, [[1], [1]])
dist, ind = nn.radius_neighbors([[2], [1]], radius=1.5)
check_object_arrays(dist, [[1], [1, 0]])
check_object_arrays(ind, [[1], [0, 1]])
# Test the graph variants.
assert_array_equal(
nn.kneighbors_graph(test_data).A, [[0., 1.], [0., 1.]])
assert_array_equal(
nn.kneighbors_graph([[2], [1]], mode='distance').A,
np.array([[0., 1.], [0., 0.]]))
rng = nn.radius_neighbors_graph([[2], [1]], radius=1.5)
assert_array_equal(rng.A, [[0, 1], [1, 1]])
def test_k_and_radius_neighbors_X_None():
"""Test kneighbors et.al when query is None"""
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
X = [[0], [1]]
nn.fit(X)
dist, ind = nn.kneighbors()
assert_array_equal(dist, [[1], [1]])
assert_array_equal(ind, [[1], [0]])
dist, ind = nn.radius_neighbors(None, radius=1.5)
check_object_arrays(dist, [[1], [1]])
check_object_arrays(ind, [[1], [0]])
# Test the graph variants.
rng = nn.radius_neighbors_graph(None, radius=1.5)
kng = nn.kneighbors_graph(None)
        for graph in [rng, kng]:
            assert_array_equal(graph.A, [[0, 1], [1, 0]])
            assert_array_equal(graph.data, [1, 1])
            assert_array_equal(graph.indices, [1, 0])
X = [[0, 1], [0, 1], [1, 1]]
nn = neighbors.NearestNeighbors(n_neighbors=2, algorithm=algorithm)
nn.fit(X)
assert_array_equal(
nn.kneighbors_graph().A,
np.array([[0., 1., 1.], [1., 0., 1.], [1., 1., 0]]))
def test_k_and_radius_neighbors_duplicates():
"""Test behavior of kneighbors when duplicates are present in query"""
for algorithm in ALGORITHMS:
nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)
nn.fit([[0], [1]])
# Do not do anything special to duplicates.
kng = nn.kneighbors_graph([[0], [1]], mode='distance')
assert_array_equal(
kng.A,
np.array([[0., 0.], [0., 0.]]))
assert_array_equal(kng.data, [0., 0.])
assert_array_equal(kng.indices, [0, 1])
dist, ind = nn.radius_neighbors([[0], [1]], radius=1.5)
check_object_arrays(dist, [[0, 1], [1, 0]])
check_object_arrays(ind, [[0, 1], [0, 1]])
rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5)
assert_array_equal(rng.A, np.ones((2, 2)))
rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5,
mode='distance')
assert_array_equal(rng.A, [[0, 1], [1, 0]])
assert_array_equal(rng.indices, [0, 1, 0, 1])
assert_array_equal(rng.data, [0, 1, 1, 0])
# Mask the first duplicates when n_duplicates > n_neighbors.
X = np.ones((3, 1))
nn = neighbors.NearestNeighbors(n_neighbors=1)
nn.fit(X)
dist, ind = nn.kneighbors()
assert_array_equal(dist, np.zeros((3, 1)))
assert_array_equal(ind, [[1], [0], [1]])
# Test that zeros are explicitly marked in kneighbors_graph.
kng = nn.kneighbors_graph(mode='distance')
assert_array_equal(
kng.A, np.zeros((3, 3)))
assert_array_equal(kng.data, np.zeros(3))
assert_array_equal(kng.indices, [1., 0., 1.])
assert_array_equal(
nn.kneighbors_graph().A,
np.array([[0., 1., 0.], [1., 0., 0.], [0., 1., 0.]]))
def test_include_self_neighbors_graph():
"""Test include_self parameter in neighbors_graph"""
X = [[2, 3], [4, 5]]
kng = neighbors.kneighbors_graph(X, 1, include_self=True).A
kng_not_self = neighbors.kneighbors_graph(X, 1, include_self=False).A
assert_array_equal(kng, [[1., 0.], [0., 1.]])
assert_array_equal(kng_not_self, [[0., 1.], [1., 0.]])
rng = neighbors.radius_neighbors_graph(X, 5.0, include_self=True).A
rng_not_self = neighbors.radius_neighbors_graph(
X, 5.0, include_self=False).A
assert_array_equal(rng, [[1., 1.], [1., 1.]])
assert_array_equal(rng_not_self, [[0., 1.], [1., 0.]])
if __name__ == '__main__':
import nose
nose.runmodule()
|
|
#!/usr/bin/env python
"""shors.py: Shor's algorithm for quantum integer factorization"""
import math
import random
import argparse
__author__ = "Todd Wildey"
__copyright__ = "Copyright 2013"
__credits__ = ["Todd Wildey"]
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Todd Wildey"
__email__ = "[email protected]"
__status__ = "Prototype"
def printNone(text):
    pass
def printVerbose(text):
    print(text)
printInfo = printNone
####################################################################################################
#
# Quantum Components
#
####################################################################################################
class Mapping:
def __init__(self, state, amplitude):
self.state = state
self.amplitude = amplitude
class QuantumState:
def __init__(self, amplitude, register):
self.amplitude = amplitude
self.register = register
self.entangled = {}
def entangle(self, fromState, amplitude):
register = fromState.register
entanglement = Mapping(fromState, amplitude)
try:
self.entangled[register].append(entanglement)
except KeyError:
self.entangled[register] = [entanglement]
def entangles(self, register = None):
entangles = 0
if register is None:
for states in self.entangled.values():
entangles += len(states)
else:
entangles = len(self.entangled[register])
return entangles
class QubitRegister:
def __init__(self, numBits):
self.numBits = numBits
self.numStates = 1 << numBits
self.entangled = []
self.states = [QuantumState(complex(0.0), self) for x in range(self.numStates)]
self.states[0].amplitude = complex(1.0)
def propagate(self, fromRegister = None):
if fromRegister is not None:
for state in self.states:
amplitude = complex(0.0)
try:
entangles = state.entangled[fromRegister]
for entangle in entangles:
amplitude += entangle.state.amplitude * entangle.amplitude
state.amplitude = amplitude
except KeyError:
state.amplitude = amplitude
for register in self.entangled:
if register is fromRegister:
continue
register.propagate(self)
    # map() converts a mapping into a unitary tensor between this register and
    # toRegister, provided every element v returned by the mapping satisfies
    # v * v.conjugate() == 1.
def map(self, toRegister, mapping, propagate = True):
self.entangled.append(toRegister)
toRegister.entangled.append(self)
# Create the covariant/contravariant representations
mapTensorX = {}
mapTensorY = {}
for x in range(self.numStates):
mapTensorX[x] = {}
codomain = mapping(x)
for element in codomain:
y = element.state
mapTensorX[x][y] = element
try:
mapTensorY[y][x] = element
except KeyError:
mapTensorY[y] = { x: element }
# Normalize the mapping:
def normalize(tensor, p = False):
lSqrt = math.sqrt
for vectors in tensor.values():
sumProb = 0.0
for element in vectors.values():
amplitude = element.amplitude
sumProb += (amplitude * amplitude.conjugate()).real
normalized = lSqrt(sumProb)
for element in vectors.values():
element.amplitude = element.amplitude / normalized
normalize(mapTensorX)
normalize(mapTensorY, True)
# Entangle the registers
for x, yStates in mapTensorX.items():
for y, element in yStates.items():
amplitude = element.amplitude
toState = toRegister.states[y]
fromState = self.states[x]
toState.entangle(fromState, amplitude)
fromState.entangle(toState, amplitude.conjugate())
if propagate:
toRegister.propagate(self)
def measure(self):
measure = random.random()
sumProb = 0.0
# Pick a state
finalX = None
finalState = None
for x, state in enumerate(self.states):
amplitude = state.amplitude
sumProb += (amplitude * amplitude.conjugate()).real
if sumProb > measure:
finalState = state
finalX = x
break
# If state was found, update the system
if finalState is not None:
for state in self.states:
state.amplitude = complex(0.0)
finalState.amplitude = complex(1.0)
self.propagate()
return finalX
def entangles(self, register = None):
entangles = 0
for state in self.states:
entangles += state.entangles(None)
return entangles
def amplitudes(self):
amplitudes = []
for state in self.states:
amplitudes.append(state.amplitude)
return amplitudes
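# Minimal usage sketch (illustrative only, not used by the algorithm):
# entangle a 1-qubit register with a second register via the identity mapping
# (each element v satisfies v * v.conjugate() == 1, as map() requires) and
# measure; the source register starts in |0>, so the measurement yields 0.
def _registerExample():
    src = QubitRegister(1)
    dst = QubitRegister(1)
    src.map(dst, lambda x: [Mapping(x, complex(1.0))], True)
    return dst.measure() == 0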
def printEntangles(register):
printInfo("Entagles: " + str(register.entangles()))
def printAmplitudes(register):
amplitudes = register.amplitudes()
for x, amplitude in enumerate(amplitudes):
printInfo('State #' + str(x) + '\'s amplitude: ' + str(amplitude))
def hadamard(x, Q):
codomain = []
for y in range(Q):
amplitude = complex(pow(-1.0, bitCount(x & y) & 1))
codomain.append(Mapping(y, amplitude))
return codomain
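# Illustrative note: the (unnormalized) Hadamard amplitude for basis state y is
# (-1)**bitCount(x & y), and map() later rescales each column to unit norm,
# which yields the familiar 1/sqrt(Q) factors. For x = 1 and Q = 4 the signs
# over y = 0..3 alternate +, -, +, -.
def _hadamardSignExample():
    return [m.amplitude.real for m in hadamard(1, 4)] == [1.0, -1.0, 1.0, -1.0]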
# Quantum Modular Exponentiation
def qModExp(a, exp, mod):
state = modExp(a, exp, mod)
amplitude = complex(1.0)
return [Mapping(state, amplitude)]
# Quantum Fourier Transform
def qft(x, Q):
fQ = float(Q)
k = -2.0 * math.pi
codomain = []
for y in range(Q):
theta = (k * float((x * y) % Q)) / fQ
amplitude = complex(math.cos(theta), math.sin(theta))
codomain.append(Mapping(y, amplitude))
return codomain
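# Illustrative sketch (not used by the algorithm itself): every amplitude
# produced by qft() is a root of unity exp(-2*pi*i*x*y/Q), so each has unit
# magnitude before map() rescales the column by 1/sqrt(Q).
def _qftExample(x=3, Q=8):
    return all(abs(abs(m.amplitude) - 1.0) < 1e-9 for m in qft(x, Q))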
def findPeriod(a, N):
nNumBits = N.bit_length()
inputNumBits = (2 * nNumBits) - 1
inputNumBits += 1 if ((1 << inputNumBits) < (N * N)) else 0
Q = 1 << inputNumBits
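    # Q is a power of two chosen so that Q >= N*N, the register size the
    # standard analysis of Shor's period finding requires for the continued
    # fraction step below to resolve the period.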
printInfo("Finding the period...")
printInfo("Q = " + str(Q) + "\ta = " + str(a))
inputRegister = QubitRegister(inputNumBits)
hmdInputRegister = QubitRegister(inputNumBits)
qftInputRegister = QubitRegister(inputNumBits)
outputRegister = QubitRegister(inputNumBits)
printInfo("Registers generated")
printInfo("Performing Hadamard on input register")
inputRegister.map(hmdInputRegister, lambda x: hadamard(x, Q), False)
# inputRegister.hadamard(False)
printInfo("Hadamard complete")
printInfo("Mapping input register to output register, where f(x) is a^x mod N")
hmdInputRegister.map(outputRegister, lambda x: qModExp(a, x, N), False)
printInfo("Modular exponentiation complete")
printInfo("Performing quantum Fourier transform on output register")
hmdInputRegister.map(qftInputRegister, lambda x: qft(x, Q), False)
inputRegister.propagate()
printInfo("Quantum Fourier transform complete")
printInfo("Performing a measurement on the output register")
y = outputRegister.measure()
printInfo("Output register measured\ty = " + str(y))
# Interesting to watch - simply uncomment
# printAmplitudes(inputRegister)
# printAmplitudes(qftInputRegister)
# printAmplitudes(outputRegister)
# printEntangles(inputRegister)
printInfo("Performing a measurement on the periodicity register")
x = qftInputRegister.measure()
printInfo("QFT register measured\tx = " + str(x))
if x is None:
return None
printInfo("Finding the period via continued fractions")
r = cf(x, Q, N)
printInfo("Candidate period\tr = " + str(r))
return r
####################################################################################################
#
# Classical Components
#
####################################################################################################
BIT_LIMIT = 12
def bitCount(x):
sumBits = 0
while x > 0:
sumBits += x & 1
x >>= 1
return sumBits
# Greatest Common Divisor
def gcd(a, b):
while b != 0:
tA = a % b
a = b
b = tA
return a
# Extended Euclidean (collects the quotients, i.e. the continued fraction
# expansion of a / b, which cf() below consumes)
def extendedGCD(a, b):
fractions = []
while b != 0:
fractions.append(a // b)
tA = a % b
a = b
b = tA
return fractions
# Continued Fractions
def cf(y, Q, N):
fractions = extendedGCD(y, Q)
depth = 2
def partial(fractions, depth):
c = 0
r = 1
for i in reversed(range(depth)):
tR = fractions[i] * r + c
c = r
r = tR
return c
r = 0
for d in range(depth, len(fractions) + 1):
tR = partial(fractions, d)
if tR == r or tR >= N:
return r
r = tR
return r
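# Worked example: with Q = 2048 and N = 15, measuring x = 512 gives
# x / Q = 1/4, and the continued fraction expansion recovers the period r = 4.
def _cfExample():
    return cf(512, 2048, 15) == 4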
# Modular Exponentiation
def modExp(a, exp, mod):
fx = 1
while exp > 0:
if (exp & 1) == 1:
fx = fx * a % mod
a = (a * a) % mod
exp = exp >> 1
return fx
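# Worked example: 7**4 = 2401 = 160*15 + 1, so modExp(7, 4, 15) == 1; this is
# the identity a**r % N == 1 that makes r = 4 the period of 7 modulo 15.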
def pick(N):
a = math.floor((random.random() * (N - 1)) + 0.5)
return a
def checkCandidates(a, r, N, neighborhood):
if r is None:
return None
# Check multiples
for k in range(1, neighborhood + 2):
tR = k * r
if modExp(a, a, N) == modExp(a, a + tR, N):
return tR
# Check lower neighborhood
for tR in range(r - neighborhood, r):
if modExp(a, a, N) == modExp(a, a + tR, N):
return tR
    # Check upper neighborhood
for tR in range(r + 1, r + neighborhood + 1):
if modExp(a, a, N) == modExp(a, a + tR, N):
return tR
return None
def shors(N, attempts = 1, neighborhood = 0.0, numPeriods = 1):
if(N.bit_length() > BIT_LIMIT or N < 3):
return False
periods = []
neighborhood = math.floor(N * neighborhood) + 1
printInfo("N = " + str(N))
printInfo("Neighborhood = " + str(neighborhood))
printInfo("Number of periods = " + str(numPeriods))
for attempt in range(attempts):
printInfo("\nAttempt #" + str(attempt))
a = pick(N)
while a < 2:
a = pick(N)
d = gcd(a, N)
if d > 1:
printInfo("Found factors classically, re-attempt")
continue
r = findPeriod(a, N)
printInfo("Checking candidate period, nearby values, and multiples")
r = checkCandidates(a, r, N, neighborhood)
if r is None:
printInfo("Period was not found, re-attempt")
continue
if (r % 2) > 0:
printInfo("Period was odd, re-attempt")
continue
d = modExp(a, (r // 2), N)
if r == 0 or d == (N - 1):
printInfo("Period was trivial, re-attempt")
continue
printInfo("Period found\tr = " + str(r))
periods.append(r)
if(len(periods) < numPeriods):
continue
printInfo("\nFinding least common multiple of all periods")
r = 1
for period in periods:
d = gcd(period, r)
r = (r * period) // d
b = modExp(a, (r // 2), N)
f1 = gcd(N, b + 1)
f2 = gcd(N, b - 1)
return [f1, f2]
return None
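# Minimal usage sketch (small semiprime, default-style parameters): a
# successful run on N = 15 returns a non-trivial factor pair such as [3, 5];
# runs that never find a usable period return None, and N outside the
# supported range returns False.
def _shorsExample():
    return shors(15, attempts=20, neighborhood=0.01, numPeriods=2)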
####################################################################################################
#
# Command-line functionality
#
####################################################################################################
def parseArgs():
parser = argparse.ArgumentParser(description='Simulate Shor\'s algorithm for N.')
    parser.add_argument('-a', '--attempts', type=int, default=20, help='Number of quantum attempts to perform')
parser.add_argument('-n', '--neighborhood', type=float, default=0.01, help='Neighborhood size for checking candidates (as percentage of N)')
parser.add_argument('-p', '--periods', type=int, default=2, help='Number of periods to get before determining least common multiple')
parser.add_argument('-v', '--verbose', type=bool, default=True, help='Verbose')
parser.add_argument('N', type=int, help='The integer to factor')
return parser.parse_args()
def main():
args = parseArgs()
global printInfo
if args.verbose:
printInfo = printVerbose
else:
printInfo = printNone
factors = shors(args.N, args.attempts, args.neighborhood, args.periods)
if factors is not None:
print("Factors:\t" + str(factors[0]) + ", " + str(factors[1]))
if __name__ == "__main__":
main()
|
|
import logging
from collections import defaultdict
from datetime import datetime
import pkg_resources
from pylons import c, g, request
from paste.deploy.converters import asbool
from tg import expose, redirect, flash, validate, config
from tg.decorators import with_trailing_slash, without_trailing_slash
from webob import exc
from bson import ObjectId
from allura.app import Application, WidgetController, DefaultAdminController, SitemapEntry
from allura.lib import helpers as h
from allura import version
from allura import model as M
from allura.lib.security import has_access, require_access
from allura.lib.widgets import form_fields as ffw
from allura.lib import exceptions as forge_exc
from allura.lib import plugin
from allura.controllers import BaseController
from allura.lib.decorators import require_post
from . import widgets as aw
from allura.lib.widgets.project_list import ProjectScreenshots
log = logging.getLogger(__name__)
class W:
markdown_editor = ffw.MarkdownEdit()
label_edit = ffw.LabelEdit()
mount_delete = ffw.Lightbox(name='mount_delete',trigger='a.mount_delete')
admin_modal = ffw.Lightbox(name='admin_modal',trigger='a.admin_modal')
install_modal = ffw.Lightbox(name='install_modal',trigger='a.install_trig')
explain_export_modal = ffw.Lightbox(name='explain_export',trigger='#why_export')
group_card = aw.GroupCard()
permission_card = aw.PermissionCard()
group_settings = aw.GroupSettings()
new_group_settings = aw.NewGroupSettings()
screenshot_admin = aw.ScreenshotAdmin()
screenshot_list = ProjectScreenshots()
metadata_admin = aw.MetadataAdmin()
audit = aw.AuditLog()
page_list=ffw.PageList()
class AdminApp(Application):
'''This is the admin app. It is pretty much required for
a functioning allura project.
'''
__version__ = version.__version__
installable=False
_installable_tools = None
tool_label = 'admin'
icons={
24:'images/admin_24.png',
32:'images/admin_32.png',
48:'images/admin_48.png'
}
def __init__(self, project, config):
Application.__init__(self, project, config)
self.root = ProjectAdminController()
self.admin = AdminAppAdminController(self)
self.templates = pkg_resources.resource_filename('allura.ext.admin', 'templates')
self.sitemap = [ SitemapEntry('Admin','.')]
def is_visible_to(self, user):
'''Whether the user can view the app.'''
return has_access(c.project, 'create')(user=user)
@staticmethod
def installable_tools_for(project):
cls = AdminApp
if cls._installable_tools is None:
tools = [dict(name=k, app=v) for k,v in g.entry_points['tool'].iteritems()]
tools.sort(key=lambda t:(t['app'].status_int(), t['app'].ordinal))
cls._installable_tools = [ t for t in tools if t['app'].installable ]
return [ t for t in cls._installable_tools
if t['app'].status in project.allowed_tool_status ]
def main_menu(self):
'''Apps should provide their entries to be added to the main nav
:return: a list of :class:`SitemapEntries <allura.app.SitemapEntry>`
'''
return [SitemapEntry('Admin', '.')]
@h.exceptionless([], log)
def sidebar_menu(self):
links = []
admin_url = c.project.url()+'admin/'
if c.project.is_nbhd_project:
links.append(SitemapEntry('Add Project', c.project.url()+'add_project', ui_icon=g.icons['plus']))
nbhd_admin_url = c.project.neighborhood.url()+'_admin/'
links = links + [
SitemapEntry('Neighborhood'),
SitemapEntry('Overview', nbhd_admin_url+'overview'),
SitemapEntry('Awards', nbhd_admin_url+'accolades')]
else:
links += [SitemapEntry('Metadata', admin_url+'overview'),]
if c.project.neighborhood.name != "Users":
links += [
SitemapEntry('Screenshots', admin_url+'screenshots'),
SitemapEntry('Categorization', admin_url+'trove')
]
links.append(SitemapEntry('Tools', admin_url+'tools'))
if c.project.is_root and has_access(c.project, 'admin')():
links.append(SitemapEntry('User Permissions', admin_url+'groups/'))
if not c.project.is_root and has_access(c.project, 'admin')():
links.append(SitemapEntry('Permissions', admin_url+'permissions/'))
if len(c.project.neighborhood_invitations):
links.append(SitemapEntry('Invitation(s)', admin_url+'invitations'))
links.append(SitemapEntry('Audit Trail', admin_url+ 'audit/'))
if c.project.is_nbhd_project:
links.append(SitemapEntry('Statistics', nbhd_admin_url+ 'stats/'))
links.append(None)
links.append(SitemapEntry('Help', nbhd_admin_url+ 'help/'))
return links
def admin_menu(self):
return []
def install(self, project):
pass
class ProjectAdminController(BaseController):
def _check_security(self):
require_access(c.project, 'admin')
def __init__(self):
self.permissions = PermissionsController()
self.groups = GroupsController()
self.audit = AuditController()
@with_trailing_slash
@expose('jinja:allura.ext.admin:templates/project_admin.html')
def index(self, **kw):
return dict()
@without_trailing_slash
@expose('jinja:allura.ext.admin:templates/project_invitations.html')
def invitations(self):
return dict()
@without_trailing_slash
@expose('jinja:allura.ext.admin:templates/project_overview.html')
def overview(self, **kw):
c.markdown_editor = W.markdown_editor
c.metadata_admin = W.metadata_admin
c.explain_export_modal = W.explain_export_modal
show_export_control = asbool(config.get('show_export_control', False))
allow_project_delete = asbool(config.get('allow_project_delete', True))
explain_export_text = '''The purpose of this section is to determine whether your project is subject to the provisions of the
US Export Administration Regulations. You should consult section 734.4 and Supplement 2 to Part 734 for information on such items
and the calculation of U.S. controlled content.
<a href="http://www.bis.doc.gov/encryption/default.htm" target="_blank">http://www.bis.doc.gov/encryption/default.htm</a>'''
if 'us_export_contact' in config:
            explain_export_text += ' If you have additional questions, please contact <a href="mailto:{contact}">{contact}</a>.'.format(
contact=config['us_export_contact']
)
return dict(show_export_control=show_export_control,
allow_project_delete=allow_project_delete,
explain_export_text=explain_export_text)
@without_trailing_slash
@expose('jinja:allura.ext.admin:templates/project_screenshots.html')
def screenshots(self, **kw):
c.screenshot_admin = W.screenshot_admin
c.screenshot_list = W.screenshot_list
return dict()
@without_trailing_slash
@expose('jinja:allura.ext.admin:templates/project_trove.html')
def trove(self):
c.label_edit = W.label_edit
base_troves = M.TroveCategory.query.find(dict(trove_parent_id=0)).sort('fullname').all()
topic_trove = M.TroveCategory.query.get(trove_parent_id=0,shortname='topic')
license_trove = M.TroveCategory.query.get(trove_parent_id=0,shortname='license')
return dict(base_troves=base_troves,license_trove=license_trove,topic_trove=topic_trove)
@without_trailing_slash
@expose('jinja:allura.ext.admin:templates/project_tools.html')
def tools(self, **kw):
c.markdown_editor = W.markdown_editor
c.label_edit = W.label_edit
c.mount_delete = W.mount_delete
c.admin_modal = W.admin_modal
c.install_modal = W.install_modal
mounts = c.project.ordered_mounts()
return dict(
mounts=mounts,
installable_tools=AdminApp.installable_tools_for(c.project),
roles=M.ProjectRole.query.find(dict(project_id=c.project.root_project._id)).sort('_id').all(),
categories=M.ProjectCategory.query.find(dict(parent_id=None)).sort('label').all())
@expose()
@require_post()
def update_labels(self, labels=None, labels_old=None, **kw):
require_access(c.project, 'admin')
c.project.labels = labels.split(',')
M.AuditLog.log('updated labels')
redirect('trove')
@without_trailing_slash
@expose()
def clone(self,
repo_type=None, source_url=None,
mount_point=None, mount_label=None,
**kw):
require_access(c.project, 'admin')
if repo_type is None:
return (
'<form method="get">'
'<input name="repo_type" value="Git">'
'<input name="source_url">'
'<input type="submit">'
'</form>')
for ep in pkg_resources.iter_entry_points('allura', repo_type):
break
if ep is None or source_url is None:
raise exc.HTTPNotFound
h.log_action(log, 'install tool').info(
'clone repo from %s', source_url,
meta=dict(tool_type=repo_type, mount_point=mount_point, mount_label=mount_label))
c.project.install_app(
repo_type,
mount_point=mount_point,
mount_label=mount_label,
init_from_url=source_url)
M.AuditLog.log('Create repo as clone')
redirect('tools')
@without_trailing_slash
@expose('jinja:allura.ext.admin:templates/project_permissions.html')
def groups(self, **kw):
return dict()
@expose()
def _lookup(self, name, *remainder):
app = c.project.app_instance(name)
if app is None:
raise exc.HTTPNotFound, name
return app.admin, remainder
@expose()
@require_post()
@validate(W.metadata_admin, error_handler=overview)
def update(self, name=None,
short_description=None,
summary='',
icon=None,
category=None,
external_homepage='',
support_page='',
support_page_url='',
removal='',
moved_to_url='',
export_controlled=False,
export_control_type=None,
tracking_id='',
**kw):
require_access(c.project, 'update')
if removal != c.project.removal:
M.AuditLog.log('change project removal status to %s', removal)
h.log_action(log, 'change project removal status').info('')
c.project.removal = removal
c.project.removal_changed_date = datetime.utcnow()
if 'delete_icon' in kw:
M.ProjectFile.query.remove(dict(project_id=c.project._id, category='icon'))
M.AuditLog.log('remove project icon')
h.log_action(log, 'remove project icon').info('')
g.post_event('project_updated')
redirect('overview')
elif 'delete' in kw:
allow_project_delete = asbool(config.get('allow_project_delete', True))
if allow_project_delete or not c.project.is_root:
M.AuditLog.log('delete project')
h.log_action(log, 'delete project').info('')
plugin.ProjectRegistrationProvider.get().delete_project(c.project, c.user)
redirect('overview')
elif 'undelete' in kw:
h.log_action(log, 'undelete project').info('')
M.AuditLog.log('undelete project')
plugin.ProjectRegistrationProvider.get().undelete_project(c.project, c.user)
redirect('overview')
if name != c.project.name:
h.log_action(log, 'change project name').info('')
M.AuditLog.log('change project name to %s', name)
c.project.name = name
if short_description != c.project.short_description:
h.log_action(log, 'change project short description').info('')
M.AuditLog.log('change short description to %s', short_description)
c.project.short_description = short_description
if summary != c.project.summary:
h.log_action(log, 'change project summary').info('')
M.AuditLog.log('change summary to %s', summary)
c.project.summary = summary
category = category and ObjectId(category) or None
if category != c.project.category_id:
h.log_action(log, 'change project category').info('')
M.AuditLog.log('change category to %s', category)
c.project.category_id = category
if external_homepage != c.project.external_homepage:
h.log_action(log, 'change external home page').info('')
M.AuditLog.log('change external home page to %s', external_homepage)
c.project.external_homepage = external_homepage
if support_page != c.project.support_page:
h.log_action(log, 'change project support page').info('')
M.AuditLog.log('change project support page to %s', support_page)
c.project.support_page = support_page
if support_page_url != c.project.support_page_url:
h.log_action(log, 'change project support page url').info('')
M.AuditLog.log('change project support page url to %s', support_page_url)
c.project.support_page_url = support_page_url
if moved_to_url != c.project.moved_to_url:
h.log_action(log, 'change project moved to url').info('')
M.AuditLog.log('change project moved to url to %s', moved_to_url)
c.project.moved_to_url = moved_to_url
if export_controlled != c.project.export_controlled:
h.log_action(log, 'change project export controlled status').info('')
M.AuditLog.log('change project export controlled status to %s', export_controlled)
c.project.export_controlled = not not export_controlled
if not export_controlled:
export_control_type = None
if export_control_type != c.project.export_control_type:
h.log_action(log, 'change project export control type').info('')
M.AuditLog.log('change project export control type to %s', export_control_type)
c.project.export_control_type = export_control_type
if tracking_id != c.project.tracking_id:
h.log_action(log, 'change project tracking ID').info('')
M.AuditLog.log('change project tracking ID to %s', tracking_id)
c.project.tracking_id = tracking_id
if icon is not None and icon != '':
if c.project.icon:
M.ProjectFile.remove(dict(project_id=c.project._id, category='icon'))
M.AuditLog.log('update project icon')
M.ProjectFile.save_image(
icon.filename, icon.file, content_type=icon.type,
square=True, thumbnail_size=(48,48),
thumbnail_meta=dict(project_id=c.project._id,category='icon'))
g.post_event('project_updated')
redirect('overview')
def _add_trove(self, type, new_trove):
current_troves = getattr(c.project,'trove_%s'%type)
trove_obj = M.TroveCategory.query.get(trove_cat_id=int(new_trove))
error_msg = None
if type in ['license','audience','developmentstatus','language'] and len(current_troves) >= 6:
error_msg = 'You may not have more than 6 of this category.'
elif type in ['topic'] and len(current_troves) >= 3:
error_msg = 'You may not have more than 3 of this category.'
elif trove_obj is not None:
if trove_obj._id not in current_troves:
current_troves.append(trove_obj._id)
g.post_event('project_updated')
else:
error_msg = 'This category has already been assigned to the project.'
return (trove_obj, error_msg)
@expose('json:')
@require_post()
def add_trove_js(self, type, new_trove, **kw):
require_access(c.project, 'update')
trove_obj, error_msg = self._add_trove(type, new_trove)
        return dict(trove_full_path=trove_obj.fullpath, trove_cat_id=trove_obj.trove_cat_id, error_msg=error_msg)
@expose()
@require_post()
def add_trove(self, type, new_trove, **kw):
require_access(c.project, 'update')
trove_obj, error_msg = self._add_trove(type, new_trove)
M.AuditLog.log('add trove %s: %s', type, trove_obj.fullpath)
if error_msg:
flash(error_msg,'error')
redirect('trove')
@expose()
@require_post()
def delete_trove(self, type, trove, **kw):
require_access(c.project, 'update')
trove_obj = M.TroveCategory.query.get(trove_cat_id=int(trove))
current_troves = getattr(c.project,'trove_%s'%type)
if trove_obj is not None and trove_obj._id in current_troves:
M.AuditLog.log('remove trove %s: %s', type, trove_obj.fullpath)
current_troves.remove(trove_obj._id)
g.post_event('project_updated')
redirect('trove')
@expose()
@require_post()
@validate(W.screenshot_admin)
def add_screenshot(self, screenshot=None, caption=None, **kw):
require_access(c.project, 'update')
if len(c.project.get_screenshots()) >= 6:
flash('You may not have more than 6 screenshots per project.','error')
elif screenshot is not None and screenshot != '':
M.AuditLog.log('add screenshot')
M.ProjectFile.save_image(
screenshot.filename, screenshot.file, content_type=screenshot.type,
save_original=True,
original_meta=dict(project_id=c.project._id,category='screenshot',caption=caption),
square=True, thumbnail_size=(150,150),
thumbnail_meta=dict(project_id=c.project._id,category='screenshot_thumb'))
g.post_event('project_updated')
redirect('screenshots')
@expose()
@require_post()
def delete_screenshot(self, id=None, **kw):
require_access(c.project, 'update')
if id is not None and id != '':
M.AuditLog.log('remove screenshot')
M.ProjectFile.query.remove(dict(project_id=c.project._id, _id=ObjectId(id)))
g.post_event('project_updated')
redirect('screenshots')
@expose()
@require_post()
def edit_screenshot(self, id=None, caption=None, **kw):
require_access(c.project, 'update')
if id is not None and id != '':
M.ProjectFile.query.get(project_id=c.project._id, _id=ObjectId(id)).caption=caption
g.post_event('project_updated')
redirect('screenshots')
@expose()
@require_post()
def join_neighborhood(self, nid):
require_access(c.project, 'admin')
if not nid:
n = M.Neighborhood.query.get(name='Projects')
c.project.neighborhood_id = n._id
flash('Joined %s' % n.name)
redirect(c.project.url() + 'admin/')
nid = ObjectId(str(nid))
if nid not in c.project.neighborhood_invitations:
flash('No invitation to that neighborhood', 'error')
redirect('.')
c.project.neighborhood_id = nid
n = M.Neighborhood.query.get(_id=nid)
flash('Joined %s' % n.name)
redirect('invitations')
@h.vardec
@expose()
@require_post()
def update_mount_order(self, subs=None, tools=None, **kw):
if subs:
for sp in subs:
p = M.Project.query.get(shortname=sp['shortname'],
neighborhood_id=c.project.neighborhood_id)
p.ordinal = int(sp['ordinal'])
if tools:
for p in tools:
c.project.app_config(p['mount_point']).options.ordinal = int(p['ordinal'])
redirect('tools')
@h.vardec
@expose()
@require_post()
def update_mounts(self, subproject=None, tool=None, new=None, **kw):
if subproject is None: subproject = []
if tool is None: tool = []
for sp in subproject:
p = M.Project.query.get(shortname=sp['shortname'],
neighborhood_id=c.project.neighborhood_id)
if sp.get('delete'):
require_access(c.project, 'admin')
M.AuditLog.log('delete subproject %s', sp['shortname'])
h.log_action(log, 'delete subproject').info(
'delete subproject %s', sp['shortname'],
meta=dict(name=sp['shortname']))
p.removal = 'deleted'
plugin.ProjectRegistrationProvider.get().delete_project(p, c.user)
elif not new:
M.AuditLog.log('update subproject %s', sp['shortname'])
p.name = sp['name']
p.ordinal = int(sp['ordinal'])
for p in tool:
if p.get('delete'):
require_access(c.project, 'admin')
M.AuditLog.log('uninstall tool %s', p['mount_point'])
h.log_action(log, 'uninstall tool').info(
'uninstall tool %s', p['mount_point'],
meta=dict(mount_point=p['mount_point']))
c.project.uninstall_app(p['mount_point'])
elif not new:
M.AuditLog.log('update tool %s', p['mount_point'])
options = c.project.app_config(p['mount_point']).options
options.mount_label = p['mount_label']
options.ordinal = int(p['ordinal'])
try:
if new and new.get('install'):
ep_name = new.get('ep_name', None)
if not ep_name:
require_access(c.project, 'create')
mount_point = new['mount_point'].lower() or h.nonce()
M.AuditLog.log('create subproject %s', mount_point)
h.log_action(log, 'create subproject').info(
'create subproject %s', mount_point,
meta=dict(mount_point=mount_point,name=new['mount_label']))
sp = c.project.new_subproject(mount_point)
sp.name = new['mount_label']
sp.ordinal = int(new['ordinal'])
else:
require_access(c.project, 'admin')
mount_point = new['mount_point'].lower() or ep_name.lower()
M.AuditLog.log('install tool %s', mount_point)
h.log_action(log, 'install tool').info(
'install tool %s', mount_point,
meta=dict(tool_type=ep_name, mount_point=mount_point, mount_label=new['mount_label']))
c.project.install_app(ep_name, mount_point, mount_label=new['mount_label'], ordinal=new['ordinal'])
except forge_exc.ForgeError, exc:
flash('%s: %s' % (exc.__class__.__name__, exc.args[0]),
'error')
g.post_event('project_updated')
redirect('tools')
class PermissionsController(BaseController):
def _check_security(self):
require_access(c.project, 'admin')
@with_trailing_slash
@expose('jinja:allura.ext.admin:templates/project_permissions.html')
def index(self, **kw):
c.card = W.permission_card
return dict(permissions=self._index_permissions())
@without_trailing_slash
@expose()
@h.vardec
@require_post()
def update(self, card=None, **kw):
permissions = self._index_permissions()
old_permissions = dict(permissions)
for args in card:
perm = args['id']
new_group_ids = args.get('new', [])
group_ids = args.get('value', [])
if isinstance(new_group_ids, basestring):
new_group_ids = [ new_group_ids ]
if isinstance(group_ids, basestring):
group_ids = [ group_ids ]
# make sure the admin group has the admin permission
if perm == 'admin':
if c.project.is_root:
pid = c.project._id
else:
pid = c.project.parent_id
admin_group_id = str(M.ProjectRole.query.get(project_id=pid, name='Admin')._id)
if admin_group_id not in group_ids + new_group_ids:
flash('You cannot remove the admin group from the admin permission.','warning')
group_ids.append(admin_group_id)
permissions[perm] = []
role_ids = map(ObjectId, group_ids + new_group_ids)
permissions[perm] = role_ids
c.project.acl = []
for perm, role_ids in permissions.iteritems():
role_names = lambda ids: ','.join(sorted(
pr.name for pr in M.ProjectRole.query.find(dict(_id={'$in':ids}))))
old_role_ids = old_permissions.get(perm, [])
if old_role_ids != role_ids:
M.AuditLog.log('updated "%s" permissions: "%s" => "%s"',
perm,role_names(old_role_ids), role_names(role_ids))
c.project.acl += [M.ACE.allow(rid, perm) for rid in role_ids]
g.post_event('project_updated')
redirect('.')
def _index_permissions(self):
permissions = dict(
(p,[]) for p in c.project.permissions)
for ace in c.project.acl:
if ace.access == M.ACE.ALLOW:
permissions[ace.permission].append(ace.role_id)
return permissions
class GroupsController(BaseController):
def _check_security(self):
require_access(c.project, 'admin')
def _index_permissions(self):
permissions = dict(
(p,[]) for p in c.project.permissions)
for ace in c.project.acl:
if ace.access == M.ACE.ALLOW:
permissions[ace.permission].append(ace.role_id)
return permissions
def _map_group_permissions(self):
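        # Returns a dict keyed by str(role._id); each value is a list of dicts
        # of the form {'name': <perm>, 'has': 'yes'|'no'|'inherit',
        # 'text': <description>} telling whether the role holds, lacks, or
        # inherits each project permission.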
roles = c.project.named_roles
permissions=self._index_permissions()
permissions_by_role = dict()
auth_role = M.ProjectRole.authenticated()
anon_role = M.ProjectRole.anonymous()
for role in roles+[auth_role, anon_role]:
permissions_by_role[str(role._id)] = []
for perm in permissions:
perm_info = dict(has="no", text="Does not have permission %s" % perm, name=perm)
role_ids = permissions[perm]
if role._id in role_ids:
perm_info['text'] = "Has permission %s" % perm
perm_info['has'] = "yes"
else:
for r in role.child_roles():
if r._id in role_ids:
perm_info['text'] = "Inherited permission %s from %s" % (perm, r.name)
perm_info['has'] = "inherit"
break
if perm_info['has'] == "no":
if anon_role._id in role_ids:
perm_info['text'] = "Inherited permission %s from Anonymous" % perm
perm_info['has'] = "inherit"
elif auth_role._id in role_ids and role != anon_role:
perm_info['text'] = "Inherited permission %s from Authenticated" % perm
perm_info['has'] = "inherit"
permissions_by_role[str(role._id)].append(perm_info)
return permissions_by_role
@without_trailing_slash
@expose()
@h.vardec
def delete_group(self, group_name, **kw):
role = M.ProjectRole.by_name(group_name)
if not role:
flash('Group "%s" does not exist.' % group_name, 'error')
else:
role.delete()
M.AuditLog.log('delete group %s', group_name)
flash('Group "%s" deleted successfully.' % group_name)
g.post_event('project_updated')
redirect('.')
@with_trailing_slash
@expose('jinja:allura.ext.admin:templates/project_groups.html')
def index(self, **kw):
c.admin_modal = W.admin_modal
c.card = W.group_card
permissions_by_role = self._map_group_permissions()
auth_role = M.ProjectRole.authenticated()
anon_role = M.ProjectRole.anonymous()
roles = c.project.named_roles
roles.append(None)
return dict(roles=roles, permissions_by_role=permissions_by_role,
auth_role=auth_role, anon_role=anon_role)
@without_trailing_slash
@expose('json:')
@require_post()
@h.vardec
def change_perm(self, role_id, permission, allow="true", **kw):
if allow=="true":
M.AuditLog.log('granted permission %s to group with id %s', permission, role_id)
c.project.acl.append(M.ACE.allow(ObjectId(role_id), permission))
else:
admin_group_id = str(M.ProjectRole.by_name('Admin')._id)
if admin_group_id == role_id and permission == 'admin':
return dict(error='You cannot remove the admin permission from the admin group.')
M.AuditLog.log('revoked permission %s from group with id %s', permission, role_id)
c.project.acl.remove(M.ACE.allow(ObjectId(role_id), permission))
g.post_event('project_updated')
return self._map_group_permissions()
@without_trailing_slash
@expose('json:')
@require_post()
@h.vardec
def add_user(self, role_id, username, **kw):
if not username or username=='*anonymous':
return dict(error='You must choose a user to add.')
group = M.ProjectRole.query.get(_id=ObjectId(role_id))
user = M.User.by_username(username.strip())
if not group:
return dict(error='Could not find group with id %s' % role_id)
if not user:
return dict(error='User %s not found' % username)
if group._id in user.project_role().roles:
return dict(error='%s (%s) is already in the group %s.' % (user.display_name, username, group.name))
M.AuditLog.log('add user %s to %s', username, group.name)
user.project_role().roles.append(group._id)
g.post_event('project_updated')
return dict(username=username, displayname=user.display_name)
@without_trailing_slash
@expose('json:')
@require_post()
@h.vardec
def remove_user(self, role_id, username, **kw):
group = M.ProjectRole.query.get(_id=ObjectId(role_id))
user = M.User.by_username(username.strip())
        if not group:
            return dict(error='Could not find group with id %s' % role_id)
        if group.name == 'Admin' and len(group.users_with_role()) == 1:
            return dict(error='You must have at least one user with the Admin role.')
if not user:
return dict(error='User %s not found' % username)
if group._id not in user.project_role().roles:
return dict(error='%s (%s) is not in the group %s.' % (user.display_name, username, group.name))
M.AuditLog.log('remove user %s from %s', username, group.name)
user.project_role().roles.remove(group._id)
g.post_event('project_updated')
return dict()
@without_trailing_slash
@expose()
@require_post()
@h.vardec
def update(self, card=None, **kw):
for pr in card:
group = M.ProjectRole.query.get(_id=ObjectId(pr['id']))
assert group.project == c.project, 'Security violation'
user_ids = pr.get('value', [])
new_users = pr.get('new', [])
if isinstance(user_ids, basestring):
user_ids = [ user_ids ]
if isinstance(new_users, basestring):
new_users = [ new_users ]
# Handle new users in groups
user_added = False
for username in new_users:
user = M.User.by_username(username.strip())
if not user:
flash('User %s not found' % username, 'error')
redirect('.')
if not user._id:
continue # never add anon users to groups
M.AuditLog.log('add user %s to %s', username, group.name)
user.project_role().roles.append(group._id)
user_added = True
# Make sure we aren't removing all users from the Admin group
if group.name == u'Admin' and not (user_ids or user_added):
flash('You must have at least one user with the Admin role.',
'warning')
redirect('.')
# Handle users removed from groups
user_ids = set(
uid and ObjectId(uid)
for uid in user_ids)
for role in M.ProjectRole.query.find(dict(user_id={'$ne':None}, roles=group._id)):
if role.user_id and role.user_id not in user_ids:
role.roles = [ rid for rid in role.roles if rid != group._id ]
M.AuditLog.log('remove user %s from %s', role.user.username, group.name)
g.post_event('project_updated')
redirect('.')
@without_trailing_slash
@expose('jinja:allura.ext.admin:templates/project_group.html')
def new(self):
c.form = W.new_group_settings
return dict(
group=None,
show_settings=True,
action="create")
@expose()
@require_post()
@validate(W.new_group_settings)
@h.vardec
def create(self, name=None, **kw):
if M.ProjectRole.by_name(name):
flash('%s already exists' % name, 'error')
else:
M.ProjectRole(project_id=c.project._id, name=name)
M.AuditLog.log('create group %s', name)
g.post_event('project_updated')
redirect('.')
@expose()
def _lookup(self, name, *remainder):
return GroupController(name), remainder
class GroupController(BaseController):
def __init__(self, name):
self._group = M.ProjectRole.query.get(_id=ObjectId(name))
@with_trailing_slash
@expose('jinja:allura.ext.admin:templates/project_group.html')
def index(self):
if self._group.name in ('Admin', 'Developer', 'Member'):
show_settings = False
action = None
else:
show_settings = True
action = self._group.settings_href + 'update'
c.form = W.group_settings
return dict(
group=self._group,
show_settings=show_settings,
action=action)
@expose()
@h.vardec
@require_post()
@validate(W.group_settings)
def update(self, _id=None, delete=None, name=None, **kw):
pr = M.ProjectRole.by_name(name)
if pr and pr._id != _id._id:
flash('%s already exists' % name, 'error')
redirect('..')
if delete:
_id.delete()
M.AuditLog.log('delete group %s', _id.name)
flash('%s deleted' % name)
redirect('..')
M.AuditLog.log('update group name %s=>%s', _id.name, name)
_id.name = name
flash('%s updated' % name)
redirect('..')
class AuditController(BaseController):
@with_trailing_slash
@expose('jinja:allura.ext.admin:templates/audit.html')
def index(self, limit=10, page=0, **kwargs):
limit = int(limit)
page = int(page)
count = M.AuditLog.query.find(dict(project_id=c.project._id)).count()
q = M.AuditLog.query.find(dict(project_id=c.project._id))
q = q.sort('timestamp', -1)
q = q.skip(page * limit)
if count > limit:
q = q.limit(limit)
else:
limit=count
c.widget = W.audit
return dict(
entries=q.all(),
limit=limit,
page=page,
count=count)
class AdminAppAdminController(DefaultAdminController):
'''Administer the admin app'''
pass
|
|
# -*- coding: utf-8 -*-
"""
sphinx.application
~~~~~~~~~~~~~~~~~~
Sphinx application object.
Gracefully adapted from the TextPress system by Armin.
:copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import sys
import types
import posixpath
from os import path
from cStringIO import StringIO
from docutils import nodes
from docutils.parsers.rst import convert_directive_function, \
directives, roles
import sphinx
from sphinx import package_dir, locale
from sphinx.roles import XRefRole
from sphinx.config import Config
from sphinx.errors import SphinxError, SphinxWarning, ExtensionError, \
VersionRequirementError, ConfigError
from sphinx.domains import ObjType, BUILTIN_DOMAINS
from sphinx.domains.std import GenericObject, Target, StandardDomain
from sphinx.builders import BUILTIN_BUILDERS
from sphinx.environment import BuildEnvironment, SphinxStandaloneReader
from sphinx.util import pycompat # imported for side-effects
from sphinx.util.tags import Tags
from sphinx.util.osutil import ENOENT
from sphinx.util.console import bold, lightgray, darkgray
# List of all known core events. Maps name to arguments description.
events = {
'builder-inited': '',
'env-get-outdated': 'env, added, changed, removed',
'env-purge-doc': 'env, docname',
'source-read': 'docname, source text',
'doctree-read': 'the doctree before being pickled',
'missing-reference': 'env, node, contnode',
'doctree-resolved': 'doctree, docname',
'env-updated': 'env',
'html-collect-pages': 'builder',
'html-page-context': 'pagename, context, doctree or None',
'build-finished': 'exception',
}
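# Illustrative sketch only (not part of Sphinx itself): an extension's setup()
# function receives the application object and typically registers config
# values and callbacks for the core events listed above.
def _example_extension_setup(app):
    def on_build_finished(app, exception):
        app.info('example extension: build finished')
    app.add_config_value('example_flag', False, 'env')
    app.connect('build-finished', on_build_finished)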
CONFIG_FILENAME = 'conf.py'
ENV_PICKLE_FILENAME = 'environment.pickle'
class Sphinx(object):
def __init__(self, srcdir, confdir, outdir, doctreedir, buildername,
confoverrides=None, status=sys.stdout, warning=sys.stderr,
freshenv=False, warningiserror=False, tags=None, verbosity=0,
parallel=0):
self.verbosity = verbosity
self.next_listener_id = 0
self._extensions = {}
self._listeners = {}
self.domains = BUILTIN_DOMAINS.copy()
self.builderclasses = BUILTIN_BUILDERS.copy()
self.builder = None
self.env = None
self.srcdir = srcdir
self.confdir = confdir
self.outdir = outdir
self.doctreedir = doctreedir
self.parallel = parallel
if status is None:
self._status = StringIO()
self.quiet = True
else:
self._status = status
self.quiet = False
if warning is None:
self._warning = StringIO()
else:
self._warning = warning
self._warncount = 0
self.warningiserror = warningiserror
self._events = events.copy()
# say hello to the world
self.info(bold('Running Sphinx v%s' % sphinx.__version__))
# status code for command-line application
self.statuscode = 0
# read config
self.tags = Tags(tags)
self.config = Config(confdir, CONFIG_FILENAME,
confoverrides or {}, self.tags)
self.config.check_unicode(self.warn)
# set confdir to srcdir if -C given (!= no confdir); a few pieces
# of code expect a confdir to be set
if self.confdir is None:
self.confdir = self.srcdir
# backwards compatibility: activate old C markup
self.setup_extension('sphinx.ext.oldcmarkup')
# load all user-given extension modules
for extension in self.config.extensions:
self.setup_extension(extension)
# the config file itself can be an extension
if self.config.setup:
            # py31 doesn't have the 'callable' builtin, hence the check below
if hasattr(self.config.setup, '__call__'):
self.config.setup(self)
else:
                raise ConfigError(
                    "'setup' as currently defined in conf.py isn't a "
                    "Python callable. Please modify its definition to "
                    "make it a callable function. This is needed for "
                    "conf.py to behave as a Sphinx extension itself.")
# now that we know all config values, collect them from conf.py
self.config.init_values()
# check the Sphinx version if requested
if self.config.needs_sphinx and \
self.config.needs_sphinx > sphinx.__version__[:3]:
raise VersionRequirementError(
'This project needs at least Sphinx v%s and therefore cannot '
'be built with this version.' % self.config.needs_sphinx)
# set up translation infrastructure
self._init_i18n()
# set up the build environment
self._init_env(freshenv)
# set up the builder
self._init_builder(buildername)
def _init_i18n(self):
"""Load translated strings from the configured localedirs if enabled in
the configuration.
"""
if self.config.language is not None:
self.info(bold('loading translations [%s]... ' %
self.config.language), nonl=True)
locale_dirs = [None, path.join(package_dir, 'locale')] + \
[path.join(self.srcdir, x) for x in self.config.locale_dirs]
else:
locale_dirs = []
self.translator, has_translation = locale.init(locale_dirs,
self.config.language)
if self.config.language is not None:
if has_translation or self.config.language == 'en':
# "en" never needs to be translated
self.info('done')
else:
self.info('not available for built-in messages')
def _init_env(self, freshenv):
if freshenv:
self.env = BuildEnvironment(self.srcdir, self.doctreedir,
self.config)
self.env.find_files(self.config)
for domain in self.domains.keys():
self.env.domains[domain] = self.domains[domain](self.env)
else:
try:
self.info(bold('loading pickled environment... '), nonl=True)
self.env = BuildEnvironment.frompickle(self.config,
path.join(self.doctreedir, ENV_PICKLE_FILENAME))
self.env.domains = {}
for domain in self.domains.keys():
# this can raise if the data version doesn't fit
self.env.domains[domain] = self.domains[domain](self.env)
self.info('done')
except Exception, err:
if type(err) is IOError and err.errno == ENOENT:
self.info('not yet created')
else:
self.info('failed: %s' % err)
return self._init_env(freshenv=True)
self.env.set_warnfunc(self.warn)
def _init_builder(self, buildername):
if buildername is None:
print >>self._status, 'No builder selected, using default: html'
buildername = 'html'
if buildername not in self.builderclasses:
raise SphinxError('Builder name %s not registered' % buildername)
builderclass = self.builderclasses[buildername]
if isinstance(builderclass, tuple):
# builtin builder
mod, cls = builderclass
builderclass = getattr(
__import__('sphinx.builders.' + mod, None, None, [cls]), cls)
self.builder = builderclass(self)
self.emit('builder-inited')
# ---- main "build" method -------------------------------------------------
def build(self, force_all=False, filenames=None):
try:
if force_all:
self.builder.build_all()
elif filenames:
self.builder.build_specific(filenames)
else:
self.builder.build_update()
except Exception, err:
# delete the saved env to force a fresh build next time
envfile = path.join(self.doctreedir, ENV_PICKLE_FILENAME)
if path.isfile(envfile):
os.unlink(envfile)
self.emit('build-finished', err)
raise
else:
self.emit('build-finished', None)
self.builder.cleanup()
# ---- logging handling ----------------------------------------------------
def _log(self, message, wfile, nonl=False):
try:
wfile.write(message)
except UnicodeEncodeError:
encoding = getattr(wfile, 'encoding', 'ascii') or 'ascii'
wfile.write(message.encode(encoding, 'replace'))
if not nonl:
wfile.write('\n')
if hasattr(wfile, 'flush'):
wfile.flush()
def warn(self, message, location=None, prefix='WARNING: '):
"""Emit a warning.
If *location* is given, it should either be a tuple of (docname, lineno)
or a string describing the location of the warning as well as possible.
*prefix* usually should not be changed.
.. note::
For warnings emitted during parsing, you should use
:meth:`.BuildEnvironment.warn` since that will collect all
warnings during parsing for later output.
"""
if isinstance(location, tuple):
docname, lineno = location
if docname:
location = '%s:%s' % (self.env.doc2path(docname), lineno or '')
else:
location = None
warntext = location and '%s: %s%s\n' % (location, prefix, message) or \
'%s%s\n' % (prefix, message)
if self.warningiserror:
raise SphinxWarning(warntext)
self._warncount += 1
self._log(warntext, self._warning, True)
def info(self, message='', nonl=False):
"""Emit an informational message.
If *nonl* is true, don't emit a newline at the end (which implies that
more info output will follow soon.)
"""
self._log(message, self._status, nonl)
def verbose(self, message, *args, **kwargs):
"""Emit a verbose informational message.
The message will only be emitted for verbosity levels >= 1 (i.e. at
least one ``-v`` option was given).
        The message can contain %-style interpolation placeholders, which are
        formatted with either the ``*args`` or ``**kwargs`` when output.
"""
if self.verbosity < 1:
return
if args or kwargs:
message = message % (args or kwargs)
self._log(message, self._status)
def debug(self, message, *args, **kwargs):
"""Emit a debug-level informational message.
The message will only be emitted for verbosity levels >= 2 (i.e. at
least two ``-v`` options were given).
        The message can contain %-style interpolation placeholders, which are
        formatted with either the ``*args`` or ``**kwargs`` when output.
"""
if self.verbosity < 2:
return
if args or kwargs:
message = message % (args or kwargs)
self._log(darkgray(message), self._status)
def debug2(self, message, *args, **kwargs):
"""Emit a lowlevel debug-level informational message.
The message will only be emitted for verbosity level 3 (i.e. three
``-v`` options were given).
        The message can contain %-style interpolation placeholders, which are
        formatted with either the ``*args`` or ``**kwargs`` when output.
"""
if self.verbosity < 3:
return
if args or kwargs:
message = message % (args or kwargs)
self._log(lightgray(message), self._status)
# ---- general extensibility interface -------------------------------------
def setup_extension(self, extension):
"""Import and setup a Sphinx extension module. No-op if called twice."""
self.debug('[app] setting up extension: %r', extension)
if extension in self._extensions:
return
try:
mod = __import__(extension, None, None, ['setup'])
except ImportError, err:
raise ExtensionError('Could not import extension %s' % extension,
err)
if not hasattr(mod, 'setup'):
self.warn('extension %r has no setup() function; is it really '
'a Sphinx extension module?' % extension)
else:
try:
mod.setup(self)
except VersionRequirementError, err:
# add the extension name to the version required
raise VersionRequirementError(
'The %s extension used by this project needs at least '
'Sphinx v%s; it therefore cannot be built with this '
'version.' % (extension, err))
self._extensions[extension] = mod
def require_sphinx(self, version):
# check the Sphinx version if requested
if version > sphinx.__version__[:3]:
raise VersionRequirementError(version)
def import_object(self, objname, source=None):
"""Import an object from a 'module.name' string."""
try:
module, name = objname.rsplit('.', 1)
except ValueError, err:
raise ExtensionError('Invalid full object name %s' % objname +
(source and ' (needed for %s)' % source or ''),
err)
try:
return getattr(__import__(module, None, None, [name]), name)
except ImportError, err:
raise ExtensionError('Could not import %s' % module +
(source and ' (needed for %s)' % source or ''),
err)
except AttributeError, err:
raise ExtensionError('Could not find %s' % objname +
(source and ' (needed for %s)' % source or ''),
err)
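        # e.g. import_object('os.path.join') returns the os.path.join callable;
        # a bad module or attribute name raises ExtensionError, mentioning the
        # requesting *source* when one is given.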
# event interface
def _validate_event(self, event):
event = intern(event)
if event not in self._events:
raise ExtensionError('Unknown event name: %s' % event)
def connect(self, event, callback):
self._validate_event(event)
listener_id = self.next_listener_id
if event not in self._listeners:
self._listeners[event] = {listener_id: callback}
else:
self._listeners[event][listener_id] = callback
self.next_listener_id += 1
self.debug('[app] connecting event %r: %r [id=%s]',
event, callback, listener_id)
return listener_id
def disconnect(self, listener_id):
self.debug('[app] disconnecting event: [id=%s]', listener_id)
for event in self._listeners.itervalues():
event.pop(listener_id, None)
def emit(self, event, *args):
try:
self.debug2('[app] emitting event: %r%s', event, repr(args)[:100])
except Exception: # not every object likes to be repr()'d (think
# random stuff coming via autodoc)
pass
results = []
if event in self._listeners:
for _, callback in self._listeners[event].iteritems():
results.append(callback(self, *args))
return results
def emit_firstresult(self, event, *args):
for result in self.emit(event, *args):
if result is not None:
return result
return None
# registering addon parts
def add_builder(self, builder):
self.debug('[app] adding builder: %r', builder)
if not hasattr(builder, 'name'):
raise ExtensionError('Builder class %s has no "name" attribute'
% builder)
if builder.name in self.builderclasses:
if isinstance(self.builderclasses[builder.name], tuple):
raise ExtensionError('Builder %r is a builtin builder' %
builder.name)
else:
raise ExtensionError(
'Builder %r already exists (in module %s)' % (
builder.name, self.builderclasses[builder.name].__module__))
self.builderclasses[builder.name] = builder
def add_config_value(self, name, default, rebuild):
self.debug('[app] adding config value: %r', (name, default, rebuild))
if name in self.config.values:
raise ExtensionError('Config value %r already present' % name)
if rebuild in (False, True):
rebuild = rebuild and 'env' or ''
self.config.values[name] = (default, rebuild)
def add_event(self, name):
self.debug('[app] adding event: %r', name)
if name in self._events:
raise ExtensionError('Event %r already present' % name)
self._events[name] = ''
def add_node(self, node, **kwds):
self.debug('[app] adding node: %r', (node, kwds))
nodes._add_node_class_names([node.__name__])
for key, val in kwds.iteritems():
try:
visit, depart = val
except ValueError:
raise ExtensionError('Value for key %r must be a '
'(visit, depart) function tuple' % key)
if key == 'html':
from sphinx.writers.html import HTMLTranslator as translator
elif key == 'latex':
from sphinx.writers.latex import LaTeXTranslator as translator
elif key == 'text':
from sphinx.writers.text import TextTranslator as translator
elif key == 'man':
from sphinx.writers.manpage import ManualPageTranslator \
as translator
elif key == 'texinfo':
from sphinx.writers.texinfo import TexinfoTranslator \
as translator
else:
# ignore invalid keys for compatibility
continue
setattr(translator, 'visit_'+node.__name__, visit)
if depart:
setattr(translator, 'depart_'+node.__name__, depart)
def _directive_helper(self, obj, content=None, arguments=None, **options):
if isinstance(obj, (types.FunctionType, types.MethodType)):
obj.content = content
obj.arguments = arguments or (0, 0, False)
obj.options = options
return convert_directive_function(obj)
else:
if content or arguments or options:
raise ExtensionError('when adding directive classes, no '
'additional arguments may be given')
return obj
def add_directive(self, name, obj, content=None, arguments=None, **options):
self.debug('[app] adding directive: %r',
(name, obj, content, arguments, options))
directives.register_directive(
name, self._directive_helper(obj, content, arguments, **options))
def add_role(self, name, role):
self.debug('[app] adding role: %r', (name, role))
roles.register_local_role(name, role)
def add_generic_role(self, name, nodeclass):
# don't use roles.register_generic_role because it uses
# register_canonical_role
self.debug('[app] adding generic role: %r', (name, nodeclass))
role = roles.GenericRole(name, nodeclass)
roles.register_local_role(name, role)
def add_domain(self, domain):
self.debug('[app] adding domain: %r', domain)
if domain.name in self.domains:
raise ExtensionError('domain %s already registered' % domain.name)
self.domains[domain.name] = domain
def override_domain(self, domain):
self.debug('[app] overriding domain: %r', domain)
if domain.name not in self.domains:
raise ExtensionError('domain %s not yet registered' % domain.name)
if not issubclass(domain, self.domains[domain.name]):
raise ExtensionError('new domain not a subclass of registered %s '
'domain' % domain.name)
self.domains[domain.name] = domain
def add_directive_to_domain(self, domain, name, obj,
content=None, arguments=None, **options):
self.debug('[app] adding directive to domain: %r',
(domain, name, obj, content, arguments, options))
if domain not in self.domains:
raise ExtensionError('domain %s not yet registered' % domain)
self.domains[domain].directives[name] = \
self._directive_helper(obj, content, arguments, **options)
def add_role_to_domain(self, domain, name, role):
self.debug('[app] adding role to domain: %r', (domain, name, role))
if domain not in self.domains:
raise ExtensionError('domain %s not yet registered' % domain)
self.domains[domain].roles[name] = role
def add_index_to_domain(self, domain, index):
self.debug('[app] adding index to domain: %r', (domain, index))
if domain not in self.domains:
raise ExtensionError('domain %s not yet registered' % domain)
self.domains[domain].indices.append(index)
def add_object_type(self, directivename, rolename, indextemplate='',
parse_node=None, ref_nodeclass=None, objname='',
doc_field_types=[]):
self.debug('[app] adding object type: %r',
(directivename, rolename, indextemplate, parse_node,
ref_nodeclass, objname, doc_field_types))
StandardDomain.object_types[directivename] = \
ObjType(objname or directivename, rolename)
# create a subclass of GenericObject as the new directive
new_directive = type(directivename, (GenericObject, object),
{'indextemplate': indextemplate,
'parse_node': staticmethod(parse_node),
'doc_field_types': doc_field_types})
StandardDomain.directives[directivename] = new_directive
# XXX support more options?
StandardDomain.roles[rolename] = XRefRole(innernodeclass=ref_nodeclass)
# backwards compatible alias
add_description_unit = add_object_type
def add_crossref_type(self, directivename, rolename, indextemplate='',
ref_nodeclass=None, objname=''):
self.debug('[app] adding crossref type: %r',
(directivename, rolename, indextemplate, ref_nodeclass,
objname))
StandardDomain.object_types[directivename] = \
ObjType(objname or directivename, rolename)
# create a subclass of Target as the new directive
new_directive = type(directivename, (Target, object),
{'indextemplate': indextemplate})
StandardDomain.directives[directivename] = new_directive
# XXX support more options?
StandardDomain.roles[rolename] = XRefRole(innernodeclass=ref_nodeclass)
def add_transform(self, transform):
self.debug('[app] adding transform: %r', transform)
SphinxStandaloneReader.transforms.append(transform)
def add_javascript(self, filename):
self.debug('[app] adding javascript: %r', filename)
from sphinx.builders.html import StandaloneHTMLBuilder
if '://' in filename:
StandaloneHTMLBuilder.script_files.append(filename)
else:
StandaloneHTMLBuilder.script_files.append(
posixpath.join('_static', filename))
def add_stylesheet(self, filename):
self.debug('[app] adding stylesheet: %r', filename)
from sphinx.builders.html import StandaloneHTMLBuilder
if '://' in filename:
StandaloneHTMLBuilder.css_files.append(filename)
else:
StandaloneHTMLBuilder.css_files.append(
posixpath.join('_static', filename))
def add_lexer(self, alias, lexer):
self.debug('[app] adding lexer: %r', (alias, lexer))
from sphinx.highlighting import lexers
if lexers is None:
return
lexers[alias] = lexer
def add_autodocumenter(self, cls):
self.debug('[app] adding autodocumenter: %r', cls)
from sphinx.ext import autodoc
autodoc.add_documenter(cls)
self.add_directive('auto' + cls.objtype, autodoc.AutoDirective)
def add_autodoc_attrgetter(self, type, getter):
self.debug('[app] adding autodoc attrgetter: %r', (type, getter))
from sphinx.ext import autodoc
autodoc.AutoDirective._special_attrgetters[type] = getter
def add_search_language(self, cls):
self.debug('[app] adding search language: %r', cls)
from sphinx.search import languages, SearchLanguage
        assert issubclass(cls, SearchLanguage)
languages[cls.lang] = cls
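# A minimal sketch of how an extension module would use the registration and
# event API above. The 'myext_*' names are illustrative assumptions, not part
# of Sphinx; in a real extension this function would be named ``setup`` so the
# loader above can find it via ``hasattr(mod, 'setup')``.
def _example_extension_setup(app):
    # Extensions can state the Sphinx version they need.
    app.require_sphinx('1.0')
    # Register a config value; 'env' forces an environment rebuild on change.
    app.add_config_value('myext_enabled', True, 'env')
    # Declare a custom event and re-emit it when the builder is initialized.
    app.add_event('myext-ready')
    app.connect('builder-inited', lambda app: app.emit('myext-ready'))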
class TemplateBridge(object):
"""
This class defines the interface for a "template bridge", that is, a class
that renders templates given a template name and a context.
"""
def init(self, builder, theme=None, dirs=None):
"""Called by the builder to initialize the template system.
*builder* is the builder object; you'll probably want to look at the
value of ``builder.config.templates_path``.
*theme* is a :class:`sphinx.theming.Theme` object or None; in the latter
case, *dirs* can be list of fixed directories to look for templates.
"""
raise NotImplementedError('must be implemented in subclasses')
def newest_template_mtime(self):
"""Called by the builder to determine if output files are outdated
because of template changes. Return the mtime of the newest template
file that was changed. The default implementation returns ``0``.
"""
return 0
def render(self, template, context):
"""Called by the builder to render a template given as a filename with
a specified context (a Python dictionary).
"""
raise NotImplementedError('must be implemented in subclasses')
def render_string(self, template, context):
"""Called by the builder to render a template given as a string with a
specified context (a Python dictionary).
"""
raise NotImplementedError('must be implemented in subclasses')
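# A minimal sketch of a TemplateBridge implementation, assuming Jinja2 is
# available; the class name and environment setup are illustrative. A project
# would point its ``template_bridge`` config value at such a class.
class _ExampleJinjaBridge(TemplateBridge):
    def init(self, builder, theme=None, dirs=None):
        import jinja2
        search_path = dirs or builder.config.templates_path
        self._env = jinja2.Environment(
            loader=jinja2.FileSystemLoader(search_path))
    def render(self, template, context):
        return self._env.get_template(template).render(context)
    def render_string(self, template, context):
        return self._env.from_string(template).render(context)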
|
|
from .. import msg
import numpy as np
from .results import DictResult, ListResult
import xml.etree.ElementTree as ET
import re
class TemplateContents(object):
"""Represents the contents of a file template for a single version.
:attr comments: the character that designates the rest of the line
as a comment in the file being templated.
:attr stop: possible values are EOF|finite. Specifies how the body
template will be iterated over.
:attr comparisons: a dictionary with mode types as keys and compare
rules as values (type templates.FieldComparisons).
:attr key: as the body is iterated, values are added to a list in
the order that they appear. If a key is specified, the value of
the field key will be used to determine which body blocks represent
the same data.
"""
def __init__(self):
self.comments = ""
self.stop = "EOF"
self.comparisons = {}
self.outcomes = {}
self.key = None
#The actual specification in the template for how many times to
#repeat the body block read. Can be a number or a variable name.
self._count = None
#The parsed count value. If it was a number, this will be a valid
#python number type; otherwise a variable name.
self._pcount = None
#The order in which the line identifiers will be encountered in the
#actual output file for each body block. List of line identifiers.
self._bodyorder = []
#The dictionary of template lines (type FileLine) that define the
#lines that make up a single body block.
self._body = {}
#As for bodyorder, but for the lines in the template preamble
self._preambleorder = []
#As for body, but defines a single preamble block whose stored variable
#values are available to all the body blocks.
self._preamble = {}
def add_body(self, fline):
"""Adds the specified file line to the body of the template.
:arg fline: a FileLine object representing the next line that
will be encountered when the file body is parsed.
"""
self._bodyorder.append(fline.identifier)
self._body[fline.identifier] = fline
def add_preamble(self, fline):
"""Adds the specified file line to the preamble of the template.
:arg fline: a FileLine object representing the next line that
will be encountered when the file body is parsed.
"""
self._preambleorder.append(fline.identifier)
self._preamble[fline.identifier] = fline
@property
def body(self):
"""Returns the body FileLines in order of appearance."""
return [ self._body[n] for n in self._bodyorder ]
@property
def preamble(self):
"""Returns the preamble FileLines in order of appearance."""
return [ self._preamble[n] for n in self._preambleorder ]
@property
def count(self):
"""Returns the number of lines that this template should be used for."""
if self._pcount is None:
if self._count is not None:
if self._count.isdigit():
self._pcount = int(self._count)
else:
self._pcount = self._count
else:
self._pcount = 1
return self._pcount
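# A small illustration of the 'count' property above. The values are
# illustrative; _count is normally filled in from the template's 'count'
# attribute by FileTemplate._xml_v_body.
def _example_template_count():
    numeric = TemplateContents()
    numeric._count = "3"
    symbolic = TemplateContents()
    symbolic._count = "nspecies"
    # A digit string is parsed to an int; anything else stays a variable name,
    # so this should return (3, "nspecies").
    return numeric.count, symbolic.count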
class LineValues(object):
"""Represents the results of parsing a line using a FileLine.parse().
:attr named: a dictionary of values extracted from a line that were
named according to the list in the 'names' attribute. Key is variable
name, value is the value.
:attr stored: a dictionary of values that were specified for storing so
that other lines in the same block/file can access their values.
:attr values: the raw values extracted from the line and cast to the
data types specified by the 'type' attribute.
"""
def __init__(self):
self.named = {}
self.stored = {}
self.values = []
class LineGroup(object):
"""Represents a logical grouping of <line> entries in an output template
that should be repeated a *variable* number of times.
"""
def __init__(self, xml):
self.identifier = "default" if "name" not in xml.attrib else xml.attrib["name"]
if "repeat" not in xml.attrib:
raise ValueError("'repeat' is a required attribute of <group> in an output template.")
else:
#We have to do some fancy things here with stored/named values etc.
self.count = xml.attrib["repeat"]
self.comment = None if "comment" not in xml.attrib else xml.attrib["comment"]
self.lines = []
for child in xml:
if (child.tag == "line" or child.tag == "lines") and "id" in child.attrib:
                fline = FileLine(child, None)
self.lines.append(fline)
self.line_counts = None
"""List of integer line counts, one for each FileLine in self.lines.
"""
self._line_cum = None
"""Cumulative number of times to use each FileLine. Makes it easier to
pick which one we should be using.
"""
    def update_counts(self, lcounts, gcount):
        """Updates the list of counts describing how often each FileLine should be repeated.
        :arg lcounts: a list of integer values, one for each FileLine in self.lines.
        """
        self.line_counts = lcounts
        self._line_cum = [sum(lcounts[0:i]) for i in range(1, len(self.lines))]
def parse(self, line, i):
"""Parses the specified line using the relevant FileLine object, based on the global line
counter 'i'.
"""
#i is zero-based. However, once we reach sum(self.line_counts), we need to repeat
#the line templates again. igroup is the index within the group (instead of global).
igroup = i % sum(self.line_counts)
iline = [0 if c < i else 1 for c in self._line_cum].index(1)
return self.lines[iline].parse(line)
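# An independent sketch of the cumulative-count bookkeeping that LineGroup
# aims at: given per-line repeat counts, decide which template line handles
# the n-th line of a repeated block. This only illustrates the idea; it is not
# a claim about the exact indexing performed by LineGroup.parse above.
def _example_pick_template_line(line_counts, n):
    block = n % sum(line_counts)  # position within one repetition of the block
    cumulative = 0
    for index, count in enumerate(line_counts):
        cumulative += count
        if block < cumulative:
            return index
# For example, with counts [2, 3] lines 0-1 map to template line 0 and
# lines 2-4 map to template line 1.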
class FileLine(object):
"""Represents a template line specification in a file.
:arg element: the XML 'line' tag element.
:attr compatibility: a dictionary that maps variable names in one version
to different names in another version of the template.
"""
def __init__(self, element, parent):
self.xml = element
self.compatibility = {}
self.defaults = {}
self.parent = parent
"""The FileTemplate that this FileLine belongs to."""
#Overwrite makes the default values get used *even if* a value was
#specified in the dictionary for write mode.
self.overwrite = False
#The names that should be used for the parsed values of
#a particular line in the file being compared.
self._names = None
self._raw_names = None
#The names that should be used for 'global' variables whose values
#will be available to the entire block/file.
self._stores = None
#The actual specification in the template for how many times to
#repeat the line read. Can be a number or a variable name.
self._count = None
#The parsed count value. If it was a number, this will be a valid
#python number type; otherwise a variable name.
self._pcount = None
self._load_xml()
@property
def count(self):
"""Returns the number of lines that this template should be used for."""
if self._pcount is None:
if self._count is not None:
if self._count.isdigit():
self._pcount = int(self._count)
else:
self._pcount = self._count
else:
self._pcount = 1
return self._pcount
@property
def unique_names(self):
"""Returns a list of all the named variables where each variable only
appears once, even if it is multi-valued.
"""
return [n.split("*")[0] for n in self._raw_names]
def write(self, valuedict, version, stored):
"""Creates a string representation for this line template using the
specified values as part of output file conversion.
:arg valuedict: the dictionary of values from the version being
converted.
:arg version: the version number of the values from the version
being converted.
"""
result = []
count = self.count
if type(count) == type("") and count in stored:
try:
count = int(stored[count])
except ValueError:
msg.warn("Can't understand how to use {} for count".format(count))
return
else:
count = 1
if self.identifier in valuedict and not self.overwrite:
values = valuedict[self.identifier]
for i in range(count):
outvals = []
if self._raw_names is None:
#There aren't any named variables, so we just write the
#values directly to the line.
outvals.append(self._write_values(values[i].values))
else:
outvals.extend(self._write_values_generic(values[i].named, version))
result.append(" ".join(outvals))
elif self.identifier in self.defaults:
#We are going to use defaults. If there need to be multiple entries
#use the same default value for all of them
if type(self.defaults[self.identifier]) == type({}):
value = " ".join(self._write_values_generic(self.defaults[self.identifier],
version))
else:
value = self.defaults[self.identifier]
for i in range(count):
result.append(value)
return "\n".join(result)
def _write_values_generic(self, values, version):
"""Creates a list of elements to write for this line using a generic
dict of values to work from."""
result = []
for name in self._raw_names:
sname = name.split("*")[0]
value = self._write_find_values(sname, values)
if value is None and version in self.compatibility:
value = self._write_compat_values(sname, version, values)
if value is not None:
result.append(value)
return result
def _write_compat_values(self, name, version, values):
"""Returns a string representing the values obtained when compatibility
is taken into account between versions.
"""
usename = None
for oldname in self.compatibility[version]:
if self.compatibility[version][oldname] == name:
usename = oldname
break
if usename is not None:
return self._write_find_values(usename, values)
def _write_find_values(self, name, values):
"""Searches for the value to use for the specified variable; first looks
in 'values', then in defaults for this line.
"""
if name in values:
if hasattr(values[name], "values"):
return self._write_values(values[name].values)
else:
return self._write_values(values[name])
elif name in self.defaults:
return self._write_values(self.defaults[name])
elif (self.identifier in self.defaults and
name in self.defaults[self.identifier]):
return self._write_values(self.defaults[self.identifier][name])
else:
return None
def _write_values(self, values):
"""Returns a string representing the specified values."""
if type(values) == type([]):
return " ".join([str(v) for v in values])
else:
return str(values)
def _load_xml(self):
"""Examines XML element to extract file line info."""
#We can handle multiple lines with the same class and template info.
self.multiple = self.xml.tag == "lines"
if self.multiple and "count" in self.xml.attrib:
self._count = self.xml.attrib["count"]
#Get the mandatory attributes first.
self.identifier = self.xml.attrib["id"]
self.dtypes = re.split(",\s*", self.xml.attrib["type"])
self.values = re.split(",\s*", self.xml.attrib["values"])
#Handle default value specifiers for the output conversion capability
if "default" in self.xml.attrib:
defaults = re.split(",\s*", self.xml.attrib["default"])
innerdict = {}
for d in defaults:
if "=" in d:
name, value = d.split("=")
innerdict[name] = value
if len(list(innerdict.keys())) == 0:
self.defaults[self.identifier] = d
else:
self.defaults[self.identifier] = innerdict
#See which of the optional attribs are in the element
if "overwrite" in self.xml.attrib:
self.overwrite = self.xml.attrib["overwrite"] == "true"
if "store" in self.xml.attrib:
self._stores = re.split(";\s*", self.xml.attrib["store"])
if "names" in self.xml.attrib:
#The template allows them to repeat names using a *[int] notation
#If the same name appears multiple times, the values are grouped
            #into a single list under that name when values are extracted.
self._names = []
self._raw_names = re.split(",\s*", self.xml.attrib["names"])
for n in self._raw_names:
if "*" in n:
name, times = n.split("*")
for t in range(int(times)):
self._names.append(name)
else:
self._names.append(n)
#The line(s) element may have some children for compatibility
kids = list(self.xml)
if len(kids) > 0:
for kid in kids:
if kid.tag == "compatibility":
self._load_compat_xml(kid)
def _load_compat_xml(self, element):
"""Extracts XML data from a compatibility tag in the line element."""
for vtag in element:
#Each child of compatibility is a version element that describes
#mappings between version names of values.
versions = xml_get_versions(vtag)
for v in versions:
if not v in self.compatibility:
self.compatibility[v] = {}
#Add all the mappings from this version tag to the list.
mappings = {}
for amap in re.split(",\s*", vtag.attrib["mappings"]):
source, target = amap.split("=")
mappings[source.strip()] = target.strip()
self.compatibility[v][vtag.attrib["id"]] = mappings
def parse(self, line):
"""Parses a line from an actual file using the rules in
this line's template definition."""
#Initialize the result of this parsing operation.
result = LineValues()
#First, we split on whitespace to get all the elements in the line
raw = line.strip().split()
#Loop over the total number of known values and cast them to the right type
k = 0
for i in range(len(self.values)):
#If there are a variable number of entries for this value
#just use up all the rest
if self.values[i] == "*":
loop = list(range(k, len(raw)))
namek = k
else:
loop = list(range(int(self.values[i])))
#If there are a variable number of entries, they would be stored together
#as a list under a certain name. Use this as a clearing house. After we have
#stored the values, we can extend the results list.
current = []
for j in loop:
if k >= len(raw):
if self.parent is not None:
emsg = "Specified known value index '{}/{}' exceeds line value count ({}). Using template '{}'."
msg.err(emsg.format(k, len(loop)-1, raw, self.parent.filepath))
else:
msg.err("Specified known value index '{}/{}' exceeds line value count ({}).".format(k, len(loop)-1, raw))
val = raw[k]
dtype = self.dtypes[i]
try:
if dtype == "int":
current.append(int(val))
elif dtype == "float":
current.append(float(val))
else:
current.append(val)
except ValueError:
msg.err("[{}] could not parse value '{}' of type '{}'.\n".format(
self.identifier, val, dtype))
#If names were specified for the values, we need to populate the dict
#now
if self._names is not None:
if self.values[i] != "*":
if self._names[k] not in result.named:
result.named[self._names[k]] = current[j]
else:
if type(result.named[self._names[k]]) == type([]):
result.named[self._names[k]].append(current[j])
else:
result.named[self._names[k]] = [ result.named[self._names[k]],
current[j] ]
k += 1
            #Now that the loop is over, if we were naming variables, we want
            #to save the rest of the current values list under the name.
result.values = current
if self.values[i] == "*":
if self._names is not None:
result.named[self._names[namek]] = current
#We used up all the values, save the user from themselves
break
#Now that we have determined all the values, we can store the ones
#that need to be used later. We import operator so that the eval()
#can work properly.
import operator
if self._stores is not None:
for s in self._stores:
name, target = s.split("=")
if "$" in target:
store = eval(target.replace("$", "result.values"))
elif re.match("\d+", target) is not None:
store = eval(target)
result.stored[name] = store
return result
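# A small sketch of FileLine in action. The <line> attributes are illustrative;
# 'ET' is the xml.etree.ElementTree module imported at the top of this file.
def _example_fileline_parse():
    element = ET.fromstring(
        '<line id="header" type="int,float" values="1,2" names="natoms,cell*2"/>')
    fline = FileLine(element, None)
    parsed = fline.parse("3 0.5 1.5")
    # Expected (roughly): parsed.named == {"natoms": 3, "cell": [0.5, 1.5]}
    return parsed.named, parsed.values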
class LineComparer(object):
"""Compares values for specific names between dictionaries
using specified operators and tolerances.
:attr name: the name of the value in the line that will be compared.
:attr element: the xml element that defined the comparison.
"""
def __init__(self, name, element):
self.name = name
self.numeric = False
if "operator" in element.attrib:
self.operator = element.attrib["operator"]
else:
self.operator = "equals"
if "tolerance" in element.attrib:
self.tolerance = element.attrib["tolerance"]
if self.tolerance[0].isdigit():
#We are working with a number, just eval() it and use it in
#a finite difference comparison.
try:
self.tolerance = eval(self.tolerance)
self.numeric = True
except ValueError:
msg.warn("tolerance for comparison {} ".format(element.attrib["id"]) +
"should be a number but can't be evaluated.")
self.tolerance = None
else:
self.tolerance = None
self._comparer = {
"equals": self._compare_equals,
"finite": self._compare_finite
}
@property
def isdict(self):
"""Determines whether this comparer compares only dictionaries."""
return self.name is not None
def compare(self, value1, value2):
"""Compares a value in the two dicts/values according to the settings
in this line comparer. Returns True if they match within the
specified tolerance."""
#If anything doesn't match up, we just say they don't match.
result = False
if self.isdict:
if self.name in value1 and self.name in value2:
result = self._comparer[self.operator](value1, value2, self.isdict)
#We can't process regular values with finite differences unless
#we have numeric tolerances.
elif self.operator == "equals" or self.numeric:
result = self._comparer[self.operator](value1, value2, self.isdict)
return result
def _compare_equals(self, value1, value2, isdict = True):
"""Determines if the two values are equal."""
if isdict:
return value1[self.name] == value2[self.name]
else:
return value1 == value2
def _compare_finite(self, value1, value2, isdict = True):
"""Determines if the two values are equal within the tolerance."""
#First we need to check if the tolerance is a number or a reference
#to a variable in the dictionary.
if self.tolerance is not None:
if self.numeric:
if isdict:
adiff = np.abs(np.array(value1[self.name]) - np.array(value2[self.name]))
return np.all(adiff <= self.tolerance)
else:
return value1 - value2 <= self.tolerance
else:
#Look for the values in the dictionaries and build a dynamic
#tolerance value. This won't be reached unless isdict==True
try:
s1 = eval(self.tolerance.replace("$", "value1"))
return value1[self.name] - value2[self.name] <= s1
except ValueError:
msg.warn("could not generate dynamic tolerance for comparison" +
"{} and tolerance {}""".format(self.name, self.tolerance))
return False
#We can't perform a finite difference calculation unless a tolerance
#was specified.
else:
return self._compare_equals(value1, value2, isdict)
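# A small sketch of a LineComparer doing a finite-tolerance comparison; the
# <compare> attributes mirror what a template's <comparisons> block provides.
def _example_linecomparer():
    element = ET.fromstring(
        '<compare id="totals.energy" operator="finite" tolerance="1e-8"/>')
    comparer = LineComparer("energy", element)
    close = comparer.compare({"energy": 1.0}, {"energy": 1.0 + 1e-9})
    far = comparer.compare({"energy": 1.0}, {"energy": 1.1})
    # 'close' should be truthy and 'far' falsy for the 1e-8 tolerance.
    return close, far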
class FieldComparisons(object):
"""Represents instructions on how to compare fields that have
random variance."""
def __init__(self, element = None):
self.compares = {}
if element is not None:
self._load_xml(element)
def _load_xml(self, element):
"""Extracts all of the child field comparisons from a
comparisons tag in the template."""
for child in element:
self._load_compare_xml(child)
def _load_compare_xml(self, element):
"""Extracts comparison information for a single compare entry
in a comparison."""
if "id" in element.attrib:
identifier = element.attrib["id"]
if "." in identifier:
line, name = identifier.split(".")
if not line in self.compares:
self.compares[line] = {}
self.compares[line][name] = LineComparer(name, element)
else:
self.compares[identifier] = LineComparer(None, element)
def compare_d(self, dict1, dict2, key, outcomes):
"""Compares all values in the two dictionaries using any
comparison rules for the specific field specified in the
template.
:arg key: the identifier of the line that these lists are
associated with.
        :arg outcomes: a TemplateOutcomes with information on how to
        interpret comparison results.
"""
#Initialize a list result. The basic initialization is common to
#both logic branches.
result = DictResult(dict1, dict2, key, outcomes)
if key in self.compares:
self._compare_dict(dict1, dict2, self.compares[key], result)
else:
self._compare_dict(dict1, dict2, None, result)
return result
def _compare_dict(self, dict1, dict2, line_comparer, result):
"""Compares the values of two dictionaries by key."""
#First compare the keys in the first with the second.
for key in dict1:
if key not in dict2:
result.only1[key] = dict1[key]
else:
#Use a flag because of the complicated logic tree
compared = False
if line_comparer is not None:
if type(line_comparer) == type({}) and key in line_comparer:
ctrue = line_comparer[key].compare(dict1, dict2)
compared = True
elif isinstance(line_comparer, LineComparer):
ctrue = line_comparer.compare(dict1[key], dict2[key])
compared = True
if not compared:
ctrue = dict1[key] == dict2[key]
if ctrue:
result.add_common(key)
else:
result.different.append((key, dict1[key], dict2[key]))
#Now, see if the second has anything not in the first
for key in dict2:
if key not in dict1:
result.only2[key] = dict2[key]
def compare_l(self, list1, list2, key, outcomes):
"""Compares the values at corresponding indices in the lists
using any comparison rules defined.
:arg key: the identifier of the line that these lists are
associated with.
"""
#Initialize a list result. The basic initialization is common to
#both logic branches.
result = ListResult(list1, list2, key, outcomes)
elcount = min([len(list1), len(list2)])
if key in self.compares:
#The key cannot have any variable names, otherwise it would be
#a dictionary
self._compare_list(list1, list2, self.compares[key], result)
else:
#We only do equality comparison on each element.
self._compare_list(list1, list2, None, result)
return result
def _compare_list(self, list1, list2, line_comparer, result):
"""Performs the element-wise list comparison using the specified
comparer and appending the outcomes to the result."""
elcount = min([len(list1), len(list2)])
for i in range(elcount):
if line_comparer is not None:
ctrue = line_comparer.compare(list1[i], list2[i])
else:
if isinstance(list1[i], float):
                    #Override the default behavior of finite precision comparisons for
#float values to be the precision default in fortpy.
ctrue = (list1[i]-list2[i] < 1e-13)
else:
ctrue = list1[i] == list2[i]
if not ctrue:
result.different.append((i, list1[i], list2[i]))
else:
result.common += 1
class TemplateOutcomes(object):
"""Represents a set of rules to use in determining outcomes of
comparisons between files.
:arg element: the xml 'outcomes' element that this class handles.
:attr ignore: a dictionary of data elements that should be ignored
in comparison interpretation. Keys are line identifiers in the
preamble or body block. Values are lists of variable names in
the line that should be ignored.
"""
def __init__(self, element = None):
self.ignore = {}
#The original list of line.name ignore directives.
self._ignore = []
if element is not None:
self._load_xml(element)
def can_ignore(self, line, name = None):
"""Determines whether the line and/or name should be ignored."""
if name is None:
return line in self.ignore and self.ignore[line] is None
else:
if line in self.ignore:
return self.ignore[line] is None or name in self.ignore[line]
else:
return False
def _load_xml(self, element):
"""Extracts all relevant tags from the parent 'outcome' element."""
for child in element:
if child.tag == "ignore" and "id" in child.attrib:
self._ignore.append(child.attrib["id"])
self._parse_ignores()
def _parse_ignores(self):
"""Extracts line id and line names as lists in dictionaries
to make ignore checking faster."""
for i in self._ignore:
if "." in i:
line, name = i.split(".")
if line in self.ignore:
if self.ignore[line] is None:
self.ignore[line] = [ name ]
else:
self.ignore[line].append(name)
else:
self.ignore[line] = [ name ]
else:
self.ignore[i] = None
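# A small sketch of TemplateOutcomes ignore handling; the ids are illustrative.
def _example_outcomes():
    element = ET.fromstring(
        '<outcomes><ignore id="header.natoms"/><ignore id="timing"/></outcomes>')
    outcomes = TemplateOutcomes(element)
    # Per-name ignore on the 'header' line, whole-line ignore for 'timing',
    # so this should return (True, False, True).
    return (outcomes.can_ignore("header", "natoms"),
            outcomes.can_ignore("header"),
            outcomes.can_ignore("timing"))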
class FileTemplate(object):
"""Represents an XML template defining multiple versions
of the same file for comparison.
:arg filepath: the path to the XML template file to load.
:attr contents: a dictionary with key=version# and value
a TemplateContents() for that specific version.
"""
def __init__(self, filepath):
self.filepath = filepath
self.contents = {}
self._xml_load()
def _xml_load(self):
"""Loads the XML file and splits the template entries
based on version numbers."""
with open(self.filepath) as f:
lines = f.read()
from fortpy.utility import XML_fromstring
root = XML_fromstring(lines, self.filepath)
#The first element must be a fortpy with attribute template
#otherwise give a message about it being an invalid template file
if "mode" in root.attrib and root.attrib["mode"] == "template":
#See if we have multiple versions to work with or if it is a
#straight laced template
versions = xml_get_versions(root)
#Create a contents object for each version specified in the file.
for v in versions:
if v not in self.contents:
self.contents[v] = TemplateContents()
self._xml_load_versions(root)
else:
msg.warn("The XML template {} is not".format(self.filepath) +
" a valid fortpy template file.")
def _xml_load_versions(self, root):
"""Loads the template from XML tracking important version information."""
#Creating a dictionary like this is a slick way to handle multiple cases.
methods = {
"preamble": self._xml_v_lineparent,
"body": self._xml_v_body,
"comments": self._xml_v_comments,
"comparisons": self._xml_v_comparisons,
"outcomes": self._xml_v_outcomes
}
#Run the extraction method for the relevant tag
for child in root:
methods[child.tag](child)
#There has to be a FieldComparer to process file comparisons. If one wasn't
#explicitly specified, create a default one that does regular comparisons.
defaultfc = FieldComparisons()
for vkey in self.contents:
if "default" not in self.contents[vkey].comparisons:
self.contents[vkey].comparisons["default"] = defaultfc
        #There also has to be a TemplateOutcomes.
defaultto = TemplateOutcomes()
for vkey in self.contents:
if "default" not in self.contents[vkey].outcomes:
self.contents[vkey].outcomes["default"] = defaultto
def _xml_v_outcomes(self, element):
"""Extracts outcome information from the template."""
versions = xml_get_versions(element)
modes = xml_get_modes(element)
to = TemplateOutcomes(element)
for v in versions:
for m in modes:
self.contents[v].outcomes[m] = to
def _xml_v_comparisons(self, element):
"""Extracts comparison information for specific fields from a comparison element."""
versions = xml_get_versions(element)
modes = xml_get_modes(element)
fc = FieldComparisons(element)
for v in versions:
for m in modes:
self.contents[v].comparisons[m] = fc
def _xml_v_body(self, element):
"""Extracts the body attributes and child lines."""
#We are looking for a specification on how to handle reading the body in
versions = xml_get_versions(element)
for v in versions:
if "stop" in element.attrib:
self.contents[v].stop = element.attrib["stop"]
if "count" in element.attrib:
self.contents[v]._count = element.attrib["count"]
if "key" in element.attrib:
self.contents[v].key = element.attrib["key"]
if "," in self.contents[v].key:
self.contents[v].key = re.split(",\s*", self.contents[v].key)
self._xml_v_lineparent(element)
def _xml_v_comments(self, element):
"""Extracts the comments information from the specified comments element."""
versions = xml_get_versions(element)
for v in versions:
self.contents[v].comments = element.text
def _xml_v_lineparent(self, element):
"""Extracts the line-type elements from the specified parent element."""
for child in element:
versions = xml_get_versions(child)
if (child.tag == "line" or child.tag == "lines") and "id" in child.attrib:
fline = FileLine(child, self)
elif child.tag == "group":
fline = LineGroup(child)
else:
msg.warn("non line-type tag in <{0}>\n{1}\n</{0}>".format(element.tag, element.text))
for v in versions:
if element.tag == "preamble":
self.contents[v].add_preamble(fline)
elif element.tag == "body":
self.contents[v].add_body(fline)
def xml_get_modes(element):
"""Returns a list of comparison modes declared in the XML element."""
if "mode" in element.attrib:
return re.split(",\s*", element.attrib["mode"])
else:
return [ "default" ]
def xml_get_versions(element):
"""Returns a list of versions referenced in the XML element."""
if "versions" in element.attrib:
return [ int(n.strip()) for n in element.attrib["versions"].split(",") ]
else:
#There are not multiple versions, so this is the first!
return [ 1 ]
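# A small sketch of the two module-level helpers above, applied to an
# illustrative body tag.
def _example_versions_and_modes():
    element = ET.fromstring('<body versions="1, 2" mode="default, strict"></body>')
    # Expected: ([1, 2], ['default', 'strict'])
    return xml_get_versions(element), xml_get_modes(element)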
|
|
# -*- coding: utf-8 -*-
#
# Ray documentation build configuration file, created by
# sphinx-quickstart on Fri Jul 1 13:19:58 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import glob
import shutil
import sys
import os
import urllib.parse
sys.path.insert(0, os.path.abspath('.'))
from custom_directives import CustomGalleryItemDirective
# These lines added to enable Sphinx to work without installing Ray.
import mock
class ChildClassMock(mock.MagicMock):
@classmethod
def __getattr__(cls, name):
return mock.Mock
MOCK_MODULES = [
"ax",
"ax.service.ax_client",
"blist",
"ConfigSpace",
"gym",
"gym.spaces",
"horovod",
"horovod.ray",
"kubernetes",
"mxnet",
"mxnet.model",
"psutil",
"ray._raylet",
"ray.core.generated",
"ray.core.generated.common_pb2",
"ray.core.generated.gcs_pb2",
"ray.core.generated.ray.protocol.Task",
"scipy.signal",
"scipy.stats",
"setproctitle",
"tensorflow_probability",
"tensorflow",
"tensorflow.contrib",
"tensorflow.contrib.all_reduce",
"tree",
"tensorflow.contrib.all_reduce.python",
"tensorflow.contrib.layers",
"tensorflow.contrib.rnn",
"tensorflow.contrib.slim",
"tensorflow.core",
"tensorflow.core.util",
"tensorflow.keras",
"tensorflow.python",
"tensorflow.python.client",
"tensorflow.python.util",
"torch",
"torch.distributed",
"torch.nn",
"torch.nn.parallel",
"torch.utils.data",
"torch.utils.data.distributed",
"wandb",
"xgboost",
"zoopt",
]
import scipy.stats
import scipy.linalg
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = mock.Mock()
# ray.rllib.models.action_dist.py and
# ray.rllib.models.lstm.py will use tf.VERSION
sys.modules["tensorflow"].VERSION = "9.9.9"
sys.modules["tensorflow.keras.callbacks"] = ChildClassMock()
sys.modules["pytorch_lightning"] = ChildClassMock()
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath("../../python/"))
import ray
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
'sphinx_click.ext',
'sphinx_tabs.tabs',
'sphinx-jsonschema',
'sphinx_gallery.gen_gallery',
'sphinxemoji.sphinxemoji',
'sphinx_copybutton',
'versionwarning.extension',
]
versionwarning_admonition_type = "tip"
versionwarning_messages = {
"master": (
"This document is for the master branch. "
'Visit the <a href="/en/latest/">latest pip release documentation here</a>.'
),
"latest": (
"This document is for the latest pip release. "
'Visit the <a href="/en/master/">master branch documentation here</a>.'
),
}
versionwarning_body_selector = "#main-content"
sphinx_gallery_conf = {
"examples_dirs": ["../examples",
"tune/_tutorials"], # path to example scripts
# path where to save generated examples
"gallery_dirs": ["auto_examples", "tune/tutorials"],
"ignore_pattern": "../examples/doc_code/",
"plot_gallery": "False",
# "filename_pattern": "tutorial.py",
# "backreferences_dir": "False",
# "show_memory': False,
# 'min_reported_time': False
}
for i in range(len(sphinx_gallery_conf["examples_dirs"])):
gallery_dir = sphinx_gallery_conf["gallery_dirs"][i]
source_dir = sphinx_gallery_conf["examples_dirs"][i]
try:
os.mkdir(gallery_dir)
except OSError:
pass
# Copy rst files from source dir to gallery dir.
for f in glob.glob(os.path.join(source_dir, '*.rst')):
shutil.copy(f, gallery_dir)
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
from recommonmark.parser import CommonMarkParser
# The suffix of source filenames.
source_suffix = ['.rst', '.md']
source_parsers = {
'.md': CommonMarkParser,
}
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Ray'
copyright = u'2019, The Ray Team'
author = u'The Ray Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
from ray import __version__ as version
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
exclude_patterns += sphinx_gallery_conf['examples_dirs']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_book_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"repository_url": "https://github.com/ray-project/ray",
"use_repository_button": True,
"use_issues_button": True,
"use_edit_page_button": True,
"path_to_docs": "doc/source",
"home_page_in_toc": True,
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = f"Ray v{release}"
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "images/ray_logo.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "_static/favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {'**': ['index.html']}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Raydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Ray.tex', u'Ray Documentation', u'The Ray Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, 'ray', u'Ray Documentation', [author], 1)]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Ray', u'Ray Documentation', author, 'Ray',
'One line description of project.', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# pcmoritz: To make the following work, you have to run
# sudo pip install recommonmark
# Python methods should be presented in source code order
autodoc_member_order = 'bysource'
# Taken from https://github.com/edx/edx-documentation
FEEDBACK_FORM_FMT = "https://github.com/ray-project/ray/issues/new?title={title}&labels=docs&body={body}"
def feedback_form_url(project, page):
"""Create a URL for feedback on a particular page in a project."""
return FEEDBACK_FORM_FMT.format(
title=urllib.parse.quote(
"[docs] Issue on `{page}.rst`".format(page=page)),
body=urllib.parse.quote(
"# Documentation Problem/Question/Comment\n"
"<!-- Describe your issue/question/comment below. -->\n"
"<!-- If there are typos or errors in the docs, feel free to create a pull-request. -->\n"
"\n\n\n\n"
"(Created directly from the docs)\n"))
def update_context(app, pagename, templatename, context, doctree):
"""Update the page rendering context to include ``feedback_form_url``."""
context['feedback_form_url'] = feedback_form_url(app.config.project,
pagename)
# see also http://searchvoidstar.tumblr.com/post/125486358368/making-pdfs-from-markdown-on-readthedocsorg-using
def setup(app):
app.connect('html-page-context', update_context)
app.add_stylesheet('css/custom.css')
# Custom directives
app.add_directive('customgalleryitem', CustomGalleryItemDirective)
|
|
import collections
import numpy
import six
import chainer
from chainer import backend
from chainer.backends import cuda
class Converter(object):
"""Base class of converters.
Converters receive batched data retrieved from iterators and perform
arbitrary transforms as well as device transfer.
Implementation should override the ``__call__`` method.
.. seealso::
:meth:`chainer.dataset.converter` --- a decorator to turn a converter
function into a ``Converter`` instance.
"""
def __call__(self, batch, device):
"""Performs conversion.
Args:
batch:
A batch. The type and value are arbitrary, depending on usage.
device(~chainer.backend.Device):
Device to which the converter is expected to send the batch.
Returns: A converted batch.
"""
raise NotImplementedError(
'Concrete class must implement __call__.')
class _ArbitraryCallableConverter(Converter):
"""Converter to wrap a callable with arbitrary arguments.
This class accepts arbitrary arguments and pass-through to the underlying
callable, with device argument replaced.
"""
def __init__(self, base_callable):
if not callable(base_callable):
raise TypeError(
'Can only wrap a callable. Actual: {}'.format(
type(base_callable)))
self.base_callable = base_callable
def __call__(self, *args, **kwargs):
base_callable = self.base_callable
# Normalize the 'device' argument
if len(args) >= 2:
# specified as a positional argument
args = list(args)
args[1] = _get_device(args[1])
elif 'device' in kwargs:
kwargs['device'] = _get_device(kwargs['device'])
return base_callable(*args, **kwargs)
def converter():
"""Decorator to make a converter.
This decorator turns a converter function into a
:class:`chainer.dataset.Converter` class instance, which also is a
callable.
This is required to use the converter function from an old module that
does not support :class:`chainer.backend.Device` instances
(See the **Device argument conversion** section below).
.. rubric:: Requirements of the target function
The target converter function must accept two positional arguments:
a batch and a device, and return a converted batch.
The type of the device argument is :class:`chainer.backend.Device`.
The types and values of the batches (the first argument and the return
value) are not specified: they depend on how the converter is used (e.g.
by updaters).
.. admonition:: Example
>>> @chainer.dataset.converter()
... def custom_converter(batch, device):
... assert isinstance(device, chainer.backend.Device)
... # do something with batch...
... return device.send(batch)
.. rubric:: Device argument conversion
For backward compatibility, the decorator wraps
the function so that if the converter is called with the device argument
with ``int`` type, it is converted to a :class:`chainer.backend.Device`
instance before calling the original function. The ``int`` value indicates
the CUDA device of the cupy backend.
Without the decorator, the converter cannot support ChainerX devices.
If the batch were requested to be converted to ChainerX with such
converters, :class:`RuntimeError` will be raised.
"""
def wrap(func):
return _ArbitraryCallableConverter(func)
return wrap
def _call_converter(converter, batch, device):
# Calls the converter.
# Converter can be either new-style (accepts chainer.backend.Device) or
# old-style (accepts int as device).
assert device is None or isinstance(device, backend.Device)
if isinstance(converter, Converter):
# New-style converter
return converter(batch, device)
# Old-style converter
if device is None:
return converter(batch, None)
if device.xp is numpy:
return converter(batch, -1)
if device.xp is cuda.cupy:
return converter(batch, device.device.id)
raise RuntimeError(
'Converter does not support ChainerX. '
'Use chainer.dataset.converter decorator.')
def to_device(device, x):
"""Send an array to a given device.
This method sends a given array to a given device. This method is used in
:func:`~chainer.dataset.concat_examples`.
You can also use this method in a custom converter method used in
:class:`~chainer.training.Updater` and :class:`~chainer.training.Extension`
such as :class:`~chainer.training.updaters.StandardUpdater` and
:class:`~chainer.training.extensions.Evaluator`.
See also :func:`chainer.dataset.concat_examples`.
Args:
device (None or int or device specifier): A device to which an array
is sent. If it is a negative integer, an array is sent to CPU.
If it is a positive integer, an array is sent to GPU with the
            given ID. If it is ``None``, an array is left in the original
device. Also, any of device specifiers described at
:class:`~chainer.backend.DeviceId` is accepted.
x (:ref:`ndarray`): An array to send.
Returns:
Converted array.
"""
device = _get_device(device)
if device is None:
return x
return device.send(x)
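# A minimal sketch of to_device. Passing None leaves the array where it is;
# the '@numpy' specifier (assumed to be supported by this Chainer version)
# sends it to the NumPy CPU device.
def _example_to_device():
    x = numpy.arange(3)
    same = to_device(None, x)        # left on the original device; same is x
    on_cpu = to_device('@numpy', x)  # sent to the NumPy (CPU) device
    return same, on_cpu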
def _get_device(device_spec):
    # Converts a device specifier to a chainer.Device instance.
# Additionally to chainer.get_device, this function supports None
if device_spec is None:
return None
return backend.get_device(device_spec)
# TODO(hvy): Write unit tests where batch elements contain Python lists.
@converter()
def concat_examples(batch, device=None, padding=None):
"""Concatenates a list of examples into array(s).
This function converts an "array of tuples" into a "tuple of arrays".
Specifically, given a list of examples each of which consists of
a list of elements, this function first makes an array
by taking the element in the same position from each example
and concatenates them along the newly-inserted first axis
(called `batch dimension`) into one array.
It repeats this for all positions and returns the resulting arrays.
The output type depends on the type of examples in ``batch``.
For instance, consider each example consists of two arrays ``(x, y)``.
Then, this function concatenates ``x`` 's into one array, and ``y`` 's
into another array, and returns a tuple of these two arrays. Another
example: consider each example is a dictionary of two entries whose keys
are ``'x'`` and ``'y'``, respectively, and values are arrays. Then, this
function concatenates ``x`` 's into one array, and ``y`` 's into another
array, and returns a dictionary with two entries ``x`` and ``y`` whose
values are the concatenated arrays.
When the arrays to concatenate have different shapes, the behavior depends
on the ``padding`` value. If ``padding`` is ``None`` (default), it raises
an error. Otherwise, it builds an array of the minimum shape that the
contents of all arrays can be substituted to. The padding value is then
    used for the extra elements of the resulting arrays.
.. admonition:: Example
>>> import numpy as np
>>> from chainer import dataset
>>> x = [([1, 2], 1),
... ([3, 4], 2),
... ([5, 6], 3)]
>>> dataset.concat_examples(x)
(array([[1, 2],
[3, 4],
[5, 6]]), array([1, 2, 3]))
>>>
>>> y = [(np.array([1, 2]), 0),
... (np.array([3]), 1),
... (np.array([]), 2)]
>>> dataset.concat_examples(y, padding=100)
(array([[ 1, 2],
[ 3, 100],
[100, 100]]), array([0, 1, 2]))
>>>
>>> z = [(np.array([1, 2]), np.array([0])),
... (np.array([3]), np.array([])),
... (np.array([]), np.array([2]))]
>>> dataset.concat_examples(z, padding=(100, 200))
(array([[ 1, 2],
[ 3, 100],
[100, 100]]), array([[ 0],
[200],
[ 2]]))
>>> w = [{'feature': np.array([1, 2]), 'label': 0},
... {'feature': np.array([3, 4]), 'label': 1},
... {'feature': np.array([5, 6]), 'label': 2}]
>>> dataset.concat_examples(w) # doctest: +SKIP
{'feature': array([[1, 2],
[3, 4],
[5, 6]]), 'label': array([0, 1, 2])}
Args:
batch (list): A list of examples. This is typically given by a dataset
iterator.
device (device specifier): A device to which each array is sent.
If it is omitted, all arrays are left in their original devices.
See :meth:`~chainer.dataset.convert.to_device` for more details.
padding: Scalar value for extra elements. If this is None (default),
an error is raised on shape mismatch. Otherwise, an array of
minimum dimensionalities that can accommodate all arrays is
created, and elements outside of the examples are padded by this
value.
Returns:
Array, a tuple of arrays, or a dictionary of arrays. The type depends
on the type of each example in the batch.
"""
assert device is None or isinstance(device, backend.Device)
if not batch:
raise ValueError('batch is empty')
first_elem = batch[0]
if isinstance(first_elem, tuple):
result = []
if not isinstance(padding, tuple):
padding = [padding] * len(first_elem)
for i in six.moves.range(len(first_elem)):
result.append(to_device(device, _concat_arrays(
[example[i] for example in batch], padding[i])))
return tuple(result)
elif isinstance(first_elem, dict):
result = {}
if not isinstance(padding, dict):
padding = {key: padding for key in first_elem}
for key in first_elem:
result[key] = to_device(device, _concat_arrays(
[example[key] for example in batch], padding[key]))
return result
else:
return to_device(device, _concat_arrays(batch, padding))
def _concat_arrays(arrays, padding):
# Convert `arrays` to numpy.ndarray if `arrays` consists of the built-in
# types such as int, float or list.
if not isinstance(arrays[0], chainer.get_array_types()):
arrays = numpy.asarray(arrays)
if padding is not None:
arr_concat = _concat_arrays_with_padding(arrays, padding)
else:
device = backend.get_device_from_array(arrays[0])
with chainer.using_device(device):
arr_concat = device.xp.concatenate(
[array[None] for array in arrays])
return arr_concat
def _concat_arrays_with_padding(arrays, padding):
shape = numpy.array(arrays[0].shape, dtype=int)
for array in arrays[1:]:
if numpy.any(shape != array.shape):
numpy.maximum(shape, array.shape, shape)
shape = tuple(numpy.insert(shape, 0, len(arrays)))
device = backend.get_device_from_array(arrays[0])
with chainer.using_device(device):
result = device.xp.full(shape, padding, dtype=arrays[0].dtype)
for i in six.moves.range(len(arrays)):
src = arrays[i]
slices = tuple(slice(dim) for dim in src.shape)
result[(i,) + slices] = src
return result
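# Hedged illustration (not part of the original module): a numpy-only sketch of
# the padding rule implemented by _concat_arrays_with_padding above -- compute
# the element-wise maximum shape, allocate a padded batch array, then copy each
# source array into its slot. The helper name and sample values are hypothetical.
def _example_pad_and_stack(padding=100):
    arrays = [numpy.array([1, 2]), numpy.array([3]), numpy.array([], dtype=int)]
    shape = numpy.array(arrays[0].shape, dtype=int)
    for array in arrays[1:]:
        numpy.maximum(shape, array.shape, shape)  # covering shape, per dimension
    result = numpy.full((len(arrays),) + tuple(shape), padding, dtype=arrays[0].dtype)
    for i, src in enumerate(arrays):
        result[(i,) + tuple(slice(dim) for dim in src.shape)] = src
    return result  # [[1, 2], [3, 100], [100, 100]]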
class ConcatWithAsyncTransfer(object):
"""Interface to concatenate data and transfer them to GPU asynchronously.
    It makes it possible to transfer the next batch of input data to the GPU
    while the GPU is still running kernels for training on the current batch.
    An instance of this class is mainly intended to be used as the converter
    function of an updater, as shown below.
.. doctest::
from chainer.dataset import convert
...
updater = chainer.training.updaters.StandardUpdater(
...,
converter=convert.ConcatWithAsyncTransfer(),
...)
Args:
stream (cupy.cuda.Stream): CUDA stream. If ``None``, a stream is
automatically created on the first call. Data transfer operation
is launched asynchronously using the stream.
compute_stream(cupy.cuda.Stream): CUDA stream used for compute kernels.
If not ``None``, CUDA events are created/used to avoid global
synchronization and overlap execution of compute kernels and data
transfers as much as possible. If ``None``, global synchronization
is used instead.
"""
def __init__(self, stream=None, compute_stream=None):
self._stream = stream
self.compute_stream = compute_stream
self._device = None
self._conveyor = collections.defaultdict(
lambda: Conveyor(self._device, self._stream))
if compute_stream is not None:
            # * event1 prevents the CPU thread from updating arrays that might
            #   still be in use by GPU kernels.
            # * event2 prevents GPU kernels from reading arrays that might
            #   still be being transferred to the GPU.
self._event1 = cuda.Event()
self._event2 = cuda.Event()
self._sync_get = False
else:
self._sync_get = True
def __call__(self, batch, device=None, padding=None):
"""Concatenate data and transfer them to GPU asynchronously.
See also :func:`chainer.dataset.concat_examples`.
Args:
batch (list): A list of examples.
device (int): Device ID to which each array is sent.
padding: Scalar value for extra elements.
Returns:
Array, a tuple of arrays, or a dictionary of arrays.
The type depends on the type of each example in the batch.
"""
if not batch:
raise ValueError('batch is empty')
first_elem = batch[0]
if not self._conveyor:
self._device = device # device is set at first call
if device is not None and device >= 0 and self._stream is None:
with cuda.get_device_from_id(device):
self._stream = cuda.Stream(non_blocking=True)
        if device != self._device:
raise ValueError('device is different')
if self.compute_stream is not None:
self._event1.synchronize()
self._event1.record(stream=self.compute_stream)
with cuda.get_device_from_id(device):
if isinstance(first_elem, tuple):
result = []
if not isinstance(padding, tuple):
padding = [padding] * len(first_elem)
for i in six.moves.range(len(first_elem)):
self._conveyor[i].put(_concat_arrays(
[example[i] for example in batch], padding[i]))
for i in six.moves.range(len(first_elem)):
result.append(self._conveyor[i].get(sync=self._sync_get))
if self.compute_stream is not None:
self._event2.record(stream=self._stream)
self.compute_stream.wait_event(self._event2)
return tuple(result)
elif isinstance(first_elem, dict):
result = {}
if not isinstance(padding, dict):
padding = {key: padding for key in first_elem}
for key in first_elem:
self._conveyor[key].put(_concat_arrays(
[example[key] for example in batch], padding[key]))
for key in first_elem:
result[key] = self._conveyor[key].get(sync=self._sync_get)
if self.compute_stream is not None:
self._event2.record(stream=self._stream)
self.compute_stream.wait_event(self._event2)
return result
else:
return to_device(device, _concat_arrays(batch, padding))
class Conveyor(object):
"""Interface to handle asynchronous data transfer using double buffering.
An asynchronous data transfer is initiated by :meth:`put`, and the result,
the array transferred to a target device, is obtained by :meth:`get`.
You should call :meth:`put` followed by :meth:`get`.
Args:
device (int): Device ID to which an array is sent. Negative value
indicates the host memory (CPU). If it is omitted, the array is
left in the original device. Asynchronous data transfer is used
only when device ID >= 0.
stream (cupy.cuda.Stream): CUDA stream. An array is sent to GPU
asynchronously using this stream. If ``None``, asynchronous data
transfer is not used.
"""
def __init__(self, device=None, stream=None):
self._device = device
self._stream = stream
self._array_set = [[None, None], [None, None]]
self._ret_array = []
def put(self, array):
"""Initiates asynchronous transfer of an array to a target device.
        This method assumes that the input array is a numpy array in host
        memory that is not page-locked. It first copies the data to
        page-locked host memory (so-called pinned memory), then initiates an
        asynchronous data transfer to the target device.
        The intermediate arrays in pinned memory and the cupy arrays on the
        target device are retained in ``self._array_set`` to reduce the number
        of memory allocations and releases; they are reused for subsequent
        data transfers as long as the sizes stay the same.
        A double-buffering scheme is used here, so the next data transfer can
        be initiated safely even while the current data is still in use on the
        target device.
"""
if self._device is None or self._device < 0 or self._stream is None:
self._ret_array.append(to_device(self._device, array))
return
pin_array, cp_array = self._array_set.pop(0)
if pin_array is not None:
if pin_array.nbytes != array.nbytes:
pin_array = None
with cuda.get_device_from_id(self._device):
if pin_array is None:
# The global synchronization below is necessary to ensure ALL
# operations including compute and data transfer submitted
# to GPU so far have been completed, in order to avoid possible
# memory corruption due to race condition among operations that
# use different CUDA streams.
# You can also solve this sort of race condition by preparing a
# memory pool for each CUDA stream and using it carefully.
cuda.cupy.cuda.runtime.deviceSynchronize()
pin_mem = cuda.cupy.cuda.alloc_pinned_memory(array.nbytes)
pin_array = numpy.frombuffer(pin_mem,
array.dtype,
array.size
).reshape(array.shape)
cp_array = cuda.cupy.empty(array.shape, array.dtype)
pin_array[...] = array # copy(CPU): paged -> pinned
cp_array.set(pin_array, self._stream) # copy: CPU to GPU
self._array_set.append([pin_array, cp_array])
self._ret_array.append(cp_array)
def get(self, sync=True):
"""Returns the array of data transferred to a target device asynchronously.
        If ``sync`` is ``True``, the data in the returned array is guaranteed
        to be visible to GPU kernels. If ``sync`` is ``False``, the data might
        still be being transferred to the GPU, so the calling function must
        handle synchronization carefully.
        Args:
            sync (bool): If ``True``, global synchronization is used to ensure
                that the asynchronous data transfer has completed, which is
                the safer option. If ``False``, the caller is assumed to
                handle synchronization correctly, and no global
                synchronization is performed.
"""
if (self._device is not None and self._device >= 0 and
self._stream is not None):
if sync:
cuda.cupy.cuda.runtime.deviceSynchronize()
return self._ret_array.pop(0)
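# Hedged usage sketch (not part of the original module): on the CPU-only code
# path (device=None, stream=None) Conveyor just forwards arrays through
# to_device, so put()/get() can be exercised without CUDA. This assumes
# to_device leaves arrays untouched when no device is given; the function name
# is hypothetical.
def _example_conveyor_cpu_roundtrip():
    conveyor = Conveyor(device=None, stream=None)
    conveyor.put(numpy.arange(4))
    conveyor.put(numpy.arange(4) * 2)  # double buffering: a second put is safe
    first = conveyor.get(sync=False)   # no GPU, so no synchronization happens
    second = conveyor.get(sync=False)
    return first, second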
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import sys
import shlex
import hashlib
import argparse
import subprocess
from difflib import unified_diff
class DocTests:
def __init__(self, args):
scriptpath = os.path.dirname(os.path.realpath(__file__))
self.ledger = os.path.abspath(args.ledger)
self.sourcepath = os.path.abspath(args.file)
self.verbose = args.verbose
self.tests = args.examples
self.examples = dict()
self.test_files = list()
self.testin_token = 'command'
self.testout_token = 'output'
self.testdat_token = 'input'
self.testfile_token = 'file'
self.validate_token = 'validate'
self.validate_cmd_token = 'validate-command'
self.validate_dat_token = 'validate-data'
self.testwithdat_token = 'with_input'
self.testwithfile_token = 'with_file'
def read_example(self):
endexample = re.compile(r'^@end\s+smallexample\s*$')
example = str()
while True:
line = self.file.readline()
self.current_line += 1
if len(line) <= 0 or endexample.match(line): break
# Replace special texinfo character sequences with their ASCII counterpart
example += re.sub(r'@([@{}])', r'\1', line)
return example
def test_id(self, example):
return hashlib.sha1(example.rstrip()).hexdigest()[0:7].upper()
def find_examples(self):
startexample = re.compile(r'^@smallexample\s+@c\s+(%s|%s|%s|%s)(?::([\dA-Fa-f]+|validate))?(?:,(.*))?'
% (self.testin_token, self.testout_token, self.testdat_token, self.testfile_token))
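        # Hedged illustration of the texinfo markup this regex matches; the id,
        # command and options below are hypothetical, not taken from the manual:
        #
        #   @smallexample @c command:ABC1234
        #   $ ledger -f sample.dat balance
        #   @end smallexample
        #
        #   @smallexample @c output:ABC1234
        #   ... expected report text for the command above ...
        #   @end smallexample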
while True:
line = self.file.readline()
self.current_line += 1
if len(line) <= 0: break
startmatch = startexample.match(line)
if (startmatch):
test_begin_pos = self.file.tell()
test_begin_line = self.current_line
test_kind = startmatch.group(1)
test_id = startmatch.group(2)
test_options = dict()
for pair in re.split(r',\s*', str(startmatch.group(3))):
kv = re.split(r':\s*', pair, 2)
try:
test_options[kv[0]] = kv[1]
except IndexError:
pass
example = self.read_example()
test_end_pos = self.file.tell()
test_end_line = self.current_line
if not test_id:
print >> sys.stderr, 'Example', test_kind, 'in line', test_begin_line, 'is missing id.'
test_id = self.test_id(example)
if test_kind == self.testin_token:
print >> sys.stderr, 'Use', self.test_id(example)
elif test_kind == self.testin_token and test_id != self.validate_token and test_id != self.test_id(example):
print >> sys.stderr, 'Expected test id', test_id, 'for example' \
, test_kind, 'on line', test_begin_line, 'to be', self.test_id(example)
if test_id == self.validate_token:
test_id = "Val-" + str(test_begin_line)
if test_kind == self.testin_token:
test_kind = self.validate_cmd_token
elif test_kind == self.testdat_token:
test_kind = self.validate_dat_token
try:
self.examples[test_id]
except KeyError:
self.examples[test_id] = dict()
try:
example = self.examples[test_id][test_kind][test_kind] + example
except KeyError:
pass
self.examples[test_id][test_kind] = {
'bpos': test_begin_pos,
'epos': test_end_pos,
'blin': test_begin_line,
'elin': test_end_line,
'opts': test_options,
test_kind: example,
}
def parse_command(self, test_id, example):
validate_command = False
try:
command = example[self.testin_token][self.testin_token]
except KeyError:
if self.validate_dat_token in example:
command = '$ ledger bal'
elif self.validate_cmd_token in example:
validate_command = True
command = example[self.validate_cmd_token][self.validate_cmd_token]
else:
return None
command = filter(lambda x: x != '\n', shlex.split(command))
if command[0] == '$': command.remove('$')
index = command.index('ledger')
command[index] = self.ledger
for i,argument in enumerate(shlex.split('--args-only --columns 80')):
command.insert(index+i+1, argument)
try:
findex = command.index('-f')
except ValueError:
try:
findex = command.index('--file')
except ValueError:
findex = index+1
command.insert(findex, '--file')
if validate_command:
command.insert(findex+1, 'sample.dat')
else:
command.insert(findex+1, test_id + '.dat')
return (command, findex+1)
def test_examples(self):
failed = set()
tests = self.examples.keys()
if self.tests:
tests = list(set(self.tests).intersection(tests))
temp = list(set(self.tests).difference(tests))
if len(temp) > 0:
print >> sys.stderr, 'Skipping non-existent examples: %s' % ', '.join(temp)
for test_id in tests:
validation = False
if self.validate_dat_token in self.examples[test_id] or self.validate_cmd_token in self.examples[test_id]:
validation = True
example = self.examples[test_id]
try:
(command, findex) = self.parse_command(test_id, example)
except TypeError:
failed.add(test_id)
continue
output = example.get(self.testout_token, {}).get(self.testout_token)
input = example.get(self.testdat_token, {}).get(self.testdat_token)
if not input:
with_input = example.get(self.testin_token, {}).get('opts', {}).get(self.testwithdat_token)
input = self.examples.get(with_input, {}).get(self.testdat_token, {}).get(self.testdat_token)
if not input:
input = example.get(self.validate_dat_token, {}).get(self.validate_dat_token)
if command and (output != None or validation):
test_file_created = False
if findex:
scriptpath = os.path.dirname(os.path.realpath(__file__))
test_input_dir = os.path.join(scriptpath, '..', 'test', 'input')
test_file = command[findex]
if not os.path.exists(test_file):
if input:
test_file_created = True
with open(test_file, 'w') as f:
f.write(input)
elif os.path.exists(os.path.join(test_input_dir, test_file)):
command[findex] = os.path.join(test_input_dir, test_file)
try:
convert_idx = command.index('convert')
convert_file = command[convert_idx+1]
convert_data = example[self.testfile_token][self.testfile_token]
if not os.path.exists(convert_file):
with open(convert_file, 'w') as f:
f.write(convert_data)
except ValueError:
pass
error = None
try:
verify = subprocess.check_output(command, stderr=subprocess.STDOUT)
valid = (output == verify) or (not error and validation)
except subprocess.CalledProcessError, e:
error = e.output
valid = False
failed.add(test_id)
if valid and test_file_created:
os.remove(test_file)
if self.verbose > 0:
print test_id, ':', 'Passed' if valid else 'FAILED: {}'.format(error) if error else 'FAILED'
else:
sys.stdout.write('.' if valid else 'E')
if not (valid or error):
failed.add(test_id)
if self.verbose > 1:
print ' '.join(command)
if not validation:
                            for line in unified_diff(output.split('\n'), verify.split('\n'), fromfile='expected', tofile='generated'):
print(line)
print
else:
if self.verbose > 0:
print test_id, ':', 'Skipped'
else:
sys.stdout.write('X')
if not self.verbose:
print
if len(failed) > 0:
print "\nThe following examples failed:"
print " ", "\n ".join(failed)
return len(failed)
def main(self):
self.file = open(self.sourcepath)
self.current_line = 0
self.find_examples()
failed_examples = self.test_examples()
self.file.close()
return failed_examples
if __name__ == "__main__":
def getargs():
parser = argparse.ArgumentParser(prog='DocTests',
description='Test and validate ledger examples from the texinfo manual')
parser.add_argument('-v', '--verbose',
dest='verbose',
action='count',
help='be verbose. Add -vv for more verbosity')
parser.add_argument('-l', '--ledger',
dest='ledger',
type=str,
action='store',
required=True,
help='the path to the ledger executable to test with')
parser.add_argument('-f', '--file',
dest='file',
type=str,
action='store',
required=True,
help='the texinfo documentation file to run the examples from')
parser.add_argument('examples',
metavar='EXAMPLE',
type=str,
nargs='*',
help='the examples to test')
return parser.parse_args()
args = getargs()
script = DocTests(args)
status = script.main()
sys.exit(status)
|
|
import Queue
import re
import thread
import traceback
thread.stack_size(1024 * 512) # reduce vm size
class Input(dict):
def __init__(self, conn, raw, prefix, command, params,
nick, user, host, paraml, msg):
chan = paraml[0].lower()
if chan == conn.nick.lower(): # is a PM
chan = nick
def say(msg):
conn.msg(chan, msg)
def reply(msg):
if chan == nick: # PMs don't need prefixes
self.say(msg)
else:
self.say(nick + ': ' + msg)
def pm(msg, nick=nick):
conn.msg(nick, msg)
def set_nick(nick):
conn.set_nick(nick)
def me(msg):
self.say("\x01%s %s\x01" % ("ACTION", msg))
def notice(msg):
conn.cmd('NOTICE', [nick, msg])
def kick(target=None, reason=None):
conn.cmd('KICK', [chan, target or nick, reason or ''])
def ban(target=None):
conn.cmd('MODE', [chan, '+b', target or host])
def unban(target=None):
conn.cmd('MODE', [chan, '-b', target or host])
dict.__init__(self, conn=conn, raw=raw, prefix=prefix, command=command,
params=params, nick=nick, user=user, host=host,
paraml=paraml, msg=msg, server=conn.server, chan=chan,
notice=notice, say=say, reply=reply, pm=pm, bot=bot,
kick=kick, ban=ban, unban=unban, me=me,
set_nick=set_nick, lastparam=paraml[-1])
# make dict keys accessible as attributes
def __getattr__(self, key):
return self[key]
def __setattr__(self, key, value):
self[key] = value
def run(func, input):
args = func._args
if 'inp' not in input:
input.inp = input.paraml
if args:
if 'db' in args and 'db' not in input:
input.db = get_db_connection(input.conn)
if 'input' in args:
input.input = input
if 0 in args:
out = func(input.inp, **input)
else:
kw = dict((key, input[key]) for key in args if key in input)
out = func(input.inp, **kw)
else:
out = func(input.inp)
if out is not None:
input.reply(unicode(out))
def do_sieve(sieve, bot, input, func, type, args):
try:
return sieve(bot, input, func, type, args)
except Exception:
print 'sieve error',
traceback.print_exc()
return None
class Handler(object):
'''Runs plugins in their own threads (ensures order)'''
def __init__(self, func):
self.func = func
self.input_queue = Queue.Queue()
thread.start_new_thread(self.start, ())
def start(self):
uses_db = 'db' in self.func._args
db_conns = {}
while True:
input = self.input_queue.get()
if input == StopIteration:
break
if uses_db:
db = db_conns.get(input.conn)
if db is None:
db = bot.get_db_connection(input.conn)
db_conns[input.conn] = db
input.db = db
try:
run(self.func, input)
except:
traceback.print_exc()
def stop(self):
self.input_queue.put(StopIteration)
def put(self, value):
self.input_queue.put(value)
def dispatch(input, kind, func, args, autohelp=False):
for sieve, in bot.plugs['sieve']:
input = do_sieve(sieve, bot, input, func, kind, args)
        if input is None:
return
if autohelp and args.get('autohelp', True) and not input.inp \
and func.__doc__ is not None:
input.reply(func.__doc__)
return
if hasattr(func, '_apikey'):
key = bot.config.get('api_keys', {}).get(func._apikey, None)
if key is None:
input.reply('error: missing api key')
return
input.api_key = key
if func._thread:
bot.threads[func].put(input)
else:
thread.start_new_thread(run, (func, input))
def match_command(command):
commands = list(bot.commands)
# do some fuzzy matching
prefix = filter(lambda x: x.startswith(command), commands)
if len(prefix) == 1:
return prefix[0]
elif prefix and command not in prefix:
return prefix
return command
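# Hedged illustration (not part of the original bot): the same prefix-matching
# rule as match_command above, restated over a plain list so it can be exercised
# without the global `bot` object. The command names are hypothetical.
def _example_fuzzy_match(command, commands=('weather', 'wiki', 'wolframalpha')):
    prefix = [c for c in commands if c.startswith(command)]
    if len(prefix) == 1:
        return prefix[0]   # unique prefix -> run that command
    elif prefix and command not in prefix:
        return prefix      # ambiguous -> caller asks "did you mean ...?"
    return command         # exact match or no match -> returned unchanged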
def main(conn, out):
inp = Input(conn, *out)
# EVENTS
for func, args in bot.events[inp.command] + bot.events['*']:
dispatch(Input(conn, *out), "event", func, args)
if inp.command == 'PRIVMSG':
# COMMANDS
bot_prefix = re.escape(bot.config.get("prefix", "."))
if inp.chan == inp.nick: # private message, no command prefix required
prefix = r'^(?:(?:'+bot_prefix+')?|'
else:
prefix = r'^(?:'+bot_prefix+'|'
command_re = prefix + inp.conn.nick
command_re += r'[:,]+\s+)(\w+)(?:$|\s+)(.*)'
m = re.match(command_re, inp.lastparam)
if m:
trigger = m.group(1).lower()
command = match_command(trigger)
if isinstance(command, list): # multiple potential matches
input = Input(conn, *out)
input.reply("did you mean %s or %s?" %
(', '.join(command[:-1]), command[-1]))
elif command in bot.commands:
input = Input(conn, *out)
input.trigger = trigger
input.inp_unstripped = m.group(2)
input.inp = input.inp_unstripped.strip()
func, args = bot.commands[command]
dispatch(input, "command", func, args, autohelp=True)
# REGEXES
for func, args in bot.plugs['regex']:
m = args['re'].search(inp.lastparam)
if m:
input = Input(conn, *out)
input.inp = m
dispatch(input, "regex", func, args)
|
|
import logging
import struct
import textwrap
from .exceptions import TockLoaderException
class TabApp:
'''
Representation of a Tock app for a specific architecture and board from a
TAB file. This is different from a TAB, since a TAB can include compiled
binaries for a range of architectures, or compiled for various scenarios,
which may not be applicable for a particular board.
A TabApp need not be a single (TBF header, binary) pair, as an app from a
TAB can include multiple (header, binary) pairs if the app was compiled
multiple times. This could be for any reason (e.g. it was signed with
different keys, or it uses different compiler optimizations), but typically
this is because it is compiled for specific addresses in flash and RAM, and
there are multiple linked versions present in the TAB. If so, there will be
multiple (header, binary) pairs included in this App object, and the correct
one for the board will be used later.
'''
def __init__ (self, tbfs):
'''
Create a `TabApp` from a list of (TBF header, app binary) pairs.
'''
self.tbfs = tbfs # A list of (TBF header, app binary) pairs.
def get_name (self):
'''
Return the app name.
'''
app_names = set([tbf[0].get_app_name() for tbf in self.tbfs])
if len(app_names) > 1:
raise TockLoaderException('Different names inside the same TAB?')
elif len(app_names) == 0:
raise TockLoaderException('No name in the TBF binaries')
return app_names.pop()
def is_modified (self):
'''
Returns whether this app needs to be flashed on to the board. Since this
is a TabApp, we did not get this app from the board and therefore we
have to flash this to the board.
'''
return True
def set_sticky (self):
'''
Mark this app as "sticky" in the app's header. This makes it harder to
accidentally remove this app if it is a core service or debug app.
'''
for tbfh,binary in self.tbfs:
tbfh.set_flag('sticky', True)
def get_header (self):
'''
Return a header if there is only one.
'''
if len(self.tbfs) == 1:
return self.tbfs[0][0]
return None
def get_size (self):
'''
Return the total size (including TBF header) of this app in bytes.
This is only valid if there is only one TBF.
'''
if len(self.tbfs) == 1:
return self.tbfs[0][0].get_app_size()
else:
raise TockLoaderException('Size only valid with one TBF')
def set_size (self, size):
'''
Force the entire app to be a certain size. If `size` is smaller than the
actual app an error will be thrown.
'''
for tbfh,app_binary in self.tbfs:
header_size = tbfh.get_header_size()
binary_size = len(app_binary)
current_size = header_size + binary_size
if size < current_size:
raise TockLoaderException('Cannot make app smaller. Current size: {} bytes'.format(current_size))
tbfh.set_app_size(size)
def set_minimum_size (self, size):
'''
Force each version of the entire app to be a certain size. If `size` is
smaller than the actual app nothing happens.
'''
for tbfh,app_binary in self.tbfs:
header_size = tbfh.get_header_size()
binary_size = len(app_binary)
current_size = header_size + binary_size
if size > current_size:
tbfh.set_app_size(size)
def set_size_constraint (self, constraint):
'''
Change the entire app size for each compilation and architecture based
on certain rules.
Valid rules:
- None: do nothing
- 'powers_of_two': make sure the entire size is a power of two.
'''
if constraint == 'powers_of_two':
# Make sure the total app size is a power of two.
for tbfh,app_binary in self.tbfs:
current_size = tbfh.get_app_size()
if (current_size & (current_size - 1)) != 0:
# This is not a power of two, but should be.
count = 0
while current_size != 0:
current_size >>= 1
count += 1
tbfh.set_app_size(1 << count)
logging.debug('Rounding app up to ^2 size ({} bytes)'.format(1 << count))
def has_fixed_addresses (self):
'''
Return true if any TBF binary in this app is compiled for a fixed
address. That likely implies _all_ binaries are compiled for a fixed
address.
'''
has_fixed_addresses = False
for tbfh,app_binary in self.tbfs:
if tbfh.has_fixed_addresses():
has_fixed_addresses = True
break
return has_fixed_addresses
def get_fixed_addresses_flash_and_sizes (self):
'''
Return a list of tuples of all addresses in flash this app is compiled
for and the size of the app at that address.
[(address, size), (address, size), ...]
'''
apps_in_flash = []
for tbfh,app_binary in self.tbfs:
apps_in_flash.append((tbfh.get_fixed_addresses()[1], tbfh.get_app_size()))
return apps_in_flash
def is_loadable_at_address (self, address):
'''
Check if it is possible to load this app at the given address. Returns
True if it is possible, False otherwise.
'''
if not self.has_fixed_addresses():
# No fixed addresses means we can put the app anywhere.
return True
# Otherwise, see if we have a TBF which can go at the requested address.
for tbfh,app_binary in self.tbfs:
fixed_flash_address = tbfh.get_fixed_addresses()[1]
tbf_header_length = tbfh.get_header_size()
# Ok, we have to be a little tricky here. What we actually care
# about is ensuring that the application binary itself ends up at
# the requested fixed address. However, what this function has to do
# is see if the start of the TBF header can go at the requested
# address. We have some flexibility, since we can make the header
# larger so that it pushes the application binary to the correct
# address. So, we want to see if we can reasonably do that. If we
# are within 128 bytes, we say that we can.
if fixed_flash_address >= (address + tbf_header_length) and\
(address + tbf_header_length + 128) > fixed_flash_address:
return True
return False
def fix_at_next_loadable_address (self, address):
'''
Calculate the next reasonable address where we can put this app where
the address is greater than or equal to `address`. The `address`
argument is the earliest address the app can be at, either the start of
apps or immediately after a previous app. Then return that address.
If we can't satisfy the request, return None.
The "fix" part means remove all TBFs except for the one that we used
to meet the address requirements.
If the app doesn't have a fixed address, then we can put it anywhere,
and we just return the address. If the app is compiled with fixed
addresses, then we need to calculate an address. We do a little bit of
"reasonable assuming" here. Fixed addresses are based on where the _app
binary_ must be located. Therefore, the start of the app where the TBF
header goes must be before that. This can be at any address (as long as
the header will fit), but we want to make this simpler, so we just
assume the TBF header should start on a 1024 byte alignment.
'''
if not self.has_fixed_addresses():
# No fixed addresses means we can put the app anywhere.
return address
def align_down_to(v, a):
'''
Calculate the address correctly aligned to `a` that is lower than or
equal to `v`.
'''
return v - (v % a)
# Find the binary with the lowest valid address that is above `address`.
best_address = None
best_index = None
for i,(tbfh,app_binary) in enumerate(self.tbfs):
fixed_flash_address = tbfh.get_fixed_addresses()[1]
# Align to get a reasonable address for this app.
wanted_address = align_down_to(fixed_flash_address, 1024)
if wanted_address >= address:
if best_address == None:
best_address = wanted_address
best_index = i
elif wanted_address < best_address:
best_address = wanted_address
best_index = i
if best_index != None:
self.tbfs = [self.tbfs[best_index]]
return best_address
else:
return None
def has_app_binary (self):
'''
Return true if we have an application binary with this app.
'''
# By definition, a TabApp will have an app binary.
return True
def get_binary (self, address):
'''
Return the binary array comprising the entire application.
This is only valid if there is one TBF file.
`address` is the address of flash the _start_ of the app will be placed
at. This means where the TBF header will go.
'''
if len(self.tbfs) == 1:
tbfh = self.tbfs[0][0]
app_binary = self.tbfs[0][1]
# If the TBF is not compiled for a fixed address, then we can just
# use it.
if tbfh.has_fixed_addresses() == False:
binary = tbfh.get_binary() + app_binary
else:
tbfh.adjust_starting_address(address)
binary = tbfh.get_binary() + app_binary
# Check that the binary is not longer than it is supposed to be. This
# might happen if the size was changed, but any code using this binary
# has no way to check. If the binary is too long, we truncate the actual
# binary blob (which should just be padding) to the correct length. If
# it is too short it is ok, since the board shouldn't care what is in
# the flash memory the app is not using.
size = self.get_size()
if len(binary) > size:
logging.info('Binary is larger than what it says in the header. Actual:{}, expected:{}'
.format(len(binary), size))
logging.info('Truncating binary to match.')
# Check on what we would be removing. If it is all zeros, we
# determine that it is OK to truncate.
to_remove = binary[size:]
if len(to_remove) != to_remove.count(0):
raise TockLoaderException('Error truncating binary. Not zero.')
binary = binary[0:size]
return binary
else:
			raise TockLoaderException('Only valid for one TBF file.')
def get_crt0_header_str (self):
'''
Return a string representation of the crt0 header some apps use for
doing PIC fixups. We assume this header is positioned immediately
after the TBF header (AKA at the beginning of the application binary).
'''
tbfh,app_binary = self.tbfs[0]
crt0 = struct.unpack('<IIIIIIIIII', app_binary[0:40])
# Also display the number of relocations in the binary.
reldata_start = crt0[8]
reldata_len = struct.unpack('<I', app_binary[reldata_start:reldata_start+4])[0]
out = ''
out += '{:<20}: {:>10} {:>#12x}\n'.format('got_sym_start', crt0[0], crt0[0])
out += '{:<20}: {:>10} {:>#12x}\n'.format('got_start', crt0[1], crt0[1])
out += '{:<20}: {:>10} {:>#12x}\n'.format('got_size', crt0[2], crt0[2])
out += '{:<20}: {:>10} {:>#12x}\n'.format('data_sym_start', crt0[3], crt0[3])
out += '{:<20}: {:>10} {:>#12x}\n'.format('data_start', crt0[4], crt0[4])
out += '{:<20}: {:>10} {:>#12x}\n'.format('data_size', crt0[5], crt0[5])
out += '{:<20}: {:>10} {:>#12x}\n'.format('bss_start', crt0[6], crt0[6])
out += '{:<20}: {:>10} {:>#12x}\n'.format('bss_size', crt0[7], crt0[7])
out += '{:<20}: {:>10} {:>#12x}\n'.format('reldata_start', crt0[8], crt0[8])
out += ' {:<18}: {:>10} {:>#12x}\n'.format('[reldata_len]', reldata_len, reldata_len)
out += '{:<20}: {:>10} {:>#12x}\n'.format('stack_size', crt0[9], crt0[9])
return out
def info (self, verbose=False):
'''
Get a string describing various properties of the app.
'''
out = ''
out += 'Name: {}\n'.format(self.get_name())
out += 'Total Size in Flash: {} bytes\n'.format(self.get_size())
if verbose:
for tbf in self.tbfs:
out += textwrap.indent(str(tbf[0]), ' ')
return out
def __str__ (self):
return self.get_name()
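# Hedged illustration (not part of tockloader): the size and address arithmetic
# used by set_size_constraint and fix_at_next_loadable_address above, restated
# as a standalone helper so the rounding rules are easy to check. The function
# name and inputs are hypothetical.
def _example_size_rules(app_size, fixed_flash_address):
	# 'powers_of_two' constraint: round a non-power-of-two size up.
	rounded = app_size
	if (app_size & (app_size - 1)) != 0:
		count = 0
		while app_size != 0:
			app_size >>= 1
			count += 1
		rounded = 1 << count
	# Fixed-address placement: start the TBF header on a 1024-byte boundary
	# at or below the address the app binary must occupy.
	header_start = fixed_flash_address - (fixed_flash_address % 1024)
	return rounded, header_start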
|
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import numpy as np
import torch
from fairseq.data import FairseqDataset, data_utils
logger = logging.getLogger(__name__)
def collate(
samples,
pad_idx,
eos_idx,
left_pad_source=True,
left_pad_target=False,
input_feeding=True,
pad_to_length=None,
pad_to_multiple=1,
):
if len(samples) == 0:
return {}
def merge(key, left_pad, move_eos_to_beginning=False, pad_to_length=None):
return data_utils.collate_tokens(
[s[key] for s in samples],
pad_idx,
eos_idx,
left_pad,
move_eos_to_beginning,
pad_to_length=pad_to_length,
pad_to_multiple=pad_to_multiple,
)
def check_alignment(alignment, src_len, tgt_len):
if alignment is None or len(alignment) == 0:
return False
if (
alignment[:, 0].max().item() >= src_len - 1
or alignment[:, 1].max().item() >= tgt_len - 1
):
logger.warning("alignment size mismatch found, skipping alignment!")
return False
return True
def compute_alignment_weights(alignments):
"""
Given a tensor of shape [:, 2] containing the source-target indices
corresponding to the alignments, a weight vector containing the
inverse frequency of each target index is computed.
        For example, if alignments = [[5, 7], [2, 3], [1, 3], [4, 2]], then
        a tensor containing [1., 0.5, 0.5, 1.] is returned, since target
        index 3 is repeated twice.
"""
align_tgt = alignments[:, 1]
_, align_tgt_i, align_tgt_c = torch.unique(
align_tgt, return_inverse=True, return_counts=True
)
align_weights = align_tgt_c[align_tgt_i[np.arange(len(align_tgt))]]
return 1.0 / align_weights.float()
id = torch.LongTensor([s["id"] for s in samples])
src_tokens = merge(
"source",
left_pad=left_pad_source,
pad_to_length=pad_to_length["source"] if pad_to_length is not None else None,
)
# sort by descending source length
src_lengths = torch.LongTensor(
[s["source"].ne(pad_idx).long().sum() for s in samples]
)
src_lengths, sort_order = src_lengths.sort(descending=True)
id = id.index_select(0, sort_order)
src_tokens = src_tokens.index_select(0, sort_order)
prev_output_tokens = None
target = None
if samples[0].get("target", None) is not None:
target = merge(
"target",
left_pad=left_pad_target,
pad_to_length=pad_to_length["target"]
if pad_to_length is not None
else None,
)
target = target.index_select(0, sort_order)
tgt_lengths = torch.LongTensor(
[s["target"].ne(pad_idx).long().sum() for s in samples]
).index_select(0, sort_order)
ntokens = tgt_lengths.sum().item()
if samples[0].get("prev_output_tokens", None) is not None:
prev_output_tokens = merge("prev_output_tokens", left_pad=left_pad_target)
elif input_feeding:
# we create a shifted version of targets for feeding the
# previous output token(s) into the next decoder step
prev_output_tokens = merge(
"target",
left_pad=left_pad_target,
move_eos_to_beginning=True,
pad_to_length=pad_to_length["target"]
if pad_to_length is not None
else None,
)
else:
ntokens = src_lengths.sum().item()
batch = {
"id": id,
"nsentences": len(samples),
"ntokens": ntokens,
"net_input": {
"src_tokens": src_tokens,
"src_lengths": src_lengths,
},
"target": target,
}
if prev_output_tokens is not None:
batch["net_input"]["prev_output_tokens"] = prev_output_tokens.index_select(
0, sort_order
)
if samples[0].get("alignment", None) is not None:
bsz, tgt_sz = batch["target"].shape
src_sz = batch["net_input"]["src_tokens"].shape[1]
offsets = torch.zeros((len(sort_order), 2), dtype=torch.long)
offsets[:, 1] += torch.arange(len(sort_order), dtype=torch.long) * tgt_sz
if left_pad_source:
offsets[:, 0] += src_sz - src_lengths
if left_pad_target:
offsets[:, 1] += tgt_sz - tgt_lengths
alignments = [
alignment + offset
for align_idx, offset, src_len, tgt_len in zip(
sort_order, offsets, src_lengths, tgt_lengths
)
for alignment in [samples[align_idx]["alignment"].view(-1, 2)]
if check_alignment(alignment, src_len, tgt_len)
]
if len(alignments) > 0:
alignments = torch.cat(alignments, dim=0)
align_weights = compute_alignment_weights(alignments)
batch["alignments"] = alignments
batch["align_weights"] = align_weights
if samples[0].get("constraints", None) is not None:
# Collate the packed constraints across the samples, padding to
# the length of the longest sample.
lens = [sample.get("constraints").size(0) for sample in samples]
max_len = max(lens)
        constraints = torch.zeros((len(samples), max_len)).long()
for i, sample in enumerate(samples):
constraints[i, 0 : lens[i]] = samples[i].get("constraints")
batch["constraints"] = constraints.index_select(0, sort_order)
return batch
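# Hedged illustration (not part of fairseq): the inverse-frequency weighting used
# by compute_alignment_weights inside collate(), repeated on a concrete tensor.
# The function name is hypothetical.
def _example_alignment_weights():
    alignments = torch.LongTensor([[5, 7], [2, 3], [1, 3], [4, 2]])
    align_tgt = alignments[:, 1]
    _, inverse, counts = torch.unique(
        align_tgt, return_inverse=True, return_counts=True
    )
    weights = 1.0 / counts[inverse].float()
    return weights  # tensor([1.0, 0.5, 0.5, 1.0]); target index 3 appears twice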
class LanguagePairDataset(FairseqDataset):
"""
A pair of torch.utils.data.Datasets.
Args:
src (torch.utils.data.Dataset): source dataset to wrap
src_sizes (List[int]): source sentence lengths
src_dict (~fairseq.data.Dictionary): source vocabulary
tgt (torch.utils.data.Dataset, optional): target dataset to wrap
tgt_sizes (List[int], optional): target sentence lengths
tgt_dict (~fairseq.data.Dictionary, optional): target vocabulary
left_pad_source (bool, optional): pad source tensors on the left side
(default: True).
left_pad_target (bool, optional): pad target tensors on the left side
(default: False).
shuffle (bool, optional): shuffle dataset elements before batching
(default: True).
input_feeding (bool, optional): create a shifted version of the targets
to be passed into the model for teacher forcing (default: True).
remove_eos_from_source (bool, optional): if set, removes eos from end
of source if it's present (default: False).
append_eos_to_target (bool, optional): if set, appends eos to end of
target if it's absent (default: False).
align_dataset (torch.utils.data.Dataset, optional): dataset
containing alignments.
constraints (Tensor, optional): 2d tensor with a concatenated, zero-
delimited list of constraints for each sentence.
append_bos (bool, optional): if set, appends bos to the beginning of
source/target sentence.
num_buckets (int, optional): if set to a value greater than 0, then
batches will be bucketed into the given number of batch shapes.
src_lang_id (int, optional): source language ID, if set, the collated batch
will contain a field 'src_lang_id' in 'net_input' which indicates the
source language of the samples.
tgt_lang_id (int, optional): target language ID, if set, the collated batch
will contain a field 'tgt_lang_id' which indicates the target language
of the samples.
"""
def __init__(
self,
src,
src_sizes,
src_dict,
tgt=None,
tgt_sizes=None,
tgt_dict=None,
left_pad_source=True,
left_pad_target=False,
shuffle=True,
input_feeding=True,
remove_eos_from_source=False,
append_eos_to_target=False,
align_dataset=None,
constraints=None,
append_bos=False,
eos=None,
num_buckets=0,
src_lang_id=None,
tgt_lang_id=None,
pad_to_multiple=1,
):
if tgt_dict is not None:
assert src_dict.pad() == tgt_dict.pad()
assert src_dict.eos() == tgt_dict.eos()
assert src_dict.unk() == tgt_dict.unk()
if tgt is not None:
assert len(src) == len(
tgt
), "Source and target must contain the same number of examples"
self.src = src
self.tgt = tgt
self.src_sizes = np.array(src_sizes)
self.tgt_sizes = np.array(tgt_sizes) if tgt_sizes is not None else None
self.sizes = (
np.vstack((self.src_sizes, self.tgt_sizes)).T
if self.tgt_sizes is not None
else self.src_sizes
)
self.src_dict = src_dict
self.tgt_dict = tgt_dict
self.left_pad_source = left_pad_source
self.left_pad_target = left_pad_target
self.shuffle = shuffle
self.input_feeding = input_feeding
self.remove_eos_from_source = remove_eos_from_source
self.append_eos_to_target = append_eos_to_target
self.align_dataset = align_dataset
if self.align_dataset is not None:
assert (
self.tgt_sizes is not None
), "Both source and target needed when alignments are provided"
self.constraints = constraints
self.append_bos = append_bos
self.eos = eos if eos is not None else src_dict.eos()
self.src_lang_id = src_lang_id
self.tgt_lang_id = tgt_lang_id
if num_buckets > 0:
from fairseq.data import BucketPadLengthDataset
self.src = BucketPadLengthDataset(
self.src,
sizes=self.src_sizes,
num_buckets=num_buckets,
pad_idx=self.src_dict.pad(),
left_pad=self.left_pad_source,
)
self.src_sizes = self.src.sizes
logger.info("bucketing source lengths: {}".format(list(self.src.buckets)))
if self.tgt is not None:
self.tgt = BucketPadLengthDataset(
self.tgt,
sizes=self.tgt_sizes,
num_buckets=num_buckets,
pad_idx=self.tgt_dict.pad(),
left_pad=self.left_pad_target,
)
self.tgt_sizes = self.tgt.sizes
logger.info(
"bucketing target lengths: {}".format(list(self.tgt.buckets))
)
# determine bucket sizes using self.num_tokens, which will return
# the padded lengths (thanks to BucketPadLengthDataset)
num_tokens = np.vectorize(self.num_tokens, otypes=[np.compat.long])
self.bucketed_num_tokens = num_tokens(np.arange(len(self.src)))
self.buckets = [
(None, num_tokens) for num_tokens in np.unique(self.bucketed_num_tokens)
]
else:
self.buckets = None
self.pad_to_multiple = pad_to_multiple
def get_batch_shapes(self):
return self.buckets
def __getitem__(self, index):
tgt_item = self.tgt[index] if self.tgt is not None else None
src_item = self.src[index]
        # Append EOS to the end of the tgt sentence if it does not have one, and
        # remove EOS from the end of the src sentence if it is present. This is
        # useful when we reuse existing datasets for the opposite direction,
        # i.e., when we want to use tgt_dataset as src_dataset and vice versa.
if self.append_eos_to_target:
eos = self.tgt_dict.eos() if self.tgt_dict else self.src_dict.eos()
if self.tgt and self.tgt[index][-1] != eos:
tgt_item = torch.cat([self.tgt[index], torch.LongTensor([eos])])
if self.append_bos:
bos = self.tgt_dict.bos() if self.tgt_dict else self.src_dict.bos()
if self.tgt and self.tgt[index][0] != bos:
tgt_item = torch.cat([torch.LongTensor([bos]), self.tgt[index]])
bos = self.src_dict.bos()
if self.src[index][0] != bos:
src_item = torch.cat([torch.LongTensor([bos]), self.src[index]])
if self.remove_eos_from_source:
eos = self.src_dict.eos()
if self.src[index][-1] == eos:
src_item = self.src[index][:-1]
example = {
"id": index,
"source": src_item,
"target": tgt_item,
}
if self.align_dataset is not None:
example["alignment"] = self.align_dataset[index]
if self.constraints is not None:
example["constraints"] = self.constraints[index]
return example
def __len__(self):
return len(self.src)
def collater(self, samples, pad_to_length=None):
"""Merge a list of samples to form a mini-batch.
Args:
samples (List[dict]): samples to collate
pad_to_length (dict, optional): a dictionary of
{'source': source_pad_to_length, 'target': target_pad_to_length}
to indicate the max length to pad to in source and target respectively.
Returns:
dict: a mini-batch with the following keys:
- `id` (LongTensor): example IDs in the original input order
- `ntokens` (int): total number of tokens in the batch
- `net_input` (dict): the input to the Model, containing keys:
- `src_tokens` (LongTensor): a padded 2D Tensor of tokens in
the source sentence of shape `(bsz, src_len)`. Padding will
appear on the left if *left_pad_source* is ``True``.
- `src_lengths` (LongTensor): 1D Tensor of the unpadded
lengths of each source sentence of shape `(bsz)`
- `prev_output_tokens` (LongTensor): a padded 2D Tensor of
tokens in the target sentence, shifted right by one
position for teacher forcing, of shape `(bsz, tgt_len)`.
This key will not be present if *input_feeding* is
``False``. Padding will appear on the left if
*left_pad_target* is ``True``.
- `src_lang_id` (LongTensor): a long Tensor which contains source
language IDs of each sample in the batch
- `target` (LongTensor): a padded 2D Tensor of tokens in the
target sentence of shape `(bsz, tgt_len)`. Padding will appear
on the left if *left_pad_target* is ``True``.
- `tgt_lang_id` (LongTensor): a long Tensor which contains target language
IDs of each sample in the batch
"""
res = collate(
samples,
pad_idx=self.src_dict.pad(),
eos_idx=self.eos,
left_pad_source=self.left_pad_source,
left_pad_target=self.left_pad_target,
input_feeding=self.input_feeding,
pad_to_length=pad_to_length,
pad_to_multiple=self.pad_to_multiple,
)
if self.src_lang_id is not None or self.tgt_lang_id is not None:
src_tokens = res["net_input"]["src_tokens"]
bsz = src_tokens.size(0)
if self.src_lang_id is not None:
res["net_input"]["src_lang_id"] = (
torch.LongTensor([[self.src_lang_id]]).expand(bsz, 1).to(src_tokens)
)
if self.tgt_lang_id is not None:
res["tgt_lang_id"] = (
torch.LongTensor([[self.tgt_lang_id]]).expand(bsz, 1).to(src_tokens)
)
return res
def num_tokens(self, index):
"""Return the number of tokens in a sample. This value is used to
enforce ``--max-tokens`` during batching."""
return max(
self.src_sizes[index],
self.tgt_sizes[index] if self.tgt_sizes is not None else 0,
)
def num_tokens_vec(self, indices):
"""Return the number of tokens for a set of positions defined by indices.
This value is used to enforce ``--max-tokens`` during batching."""
sizes = self.src_sizes[indices]
if self.tgt_sizes is not None:
sizes = np.maximum(sizes, self.tgt_sizes[indices])
return sizes
def size(self, index):
"""Return an example's size as a float or tuple. This value is used when
filtering a dataset with ``--max-positions``."""
return (
self.src_sizes[index],
self.tgt_sizes[index] if self.tgt_sizes is not None else 0,
)
def ordered_indices(self):
"""Return an ordered list of indices. Batches will be constructed based
on this order."""
if self.shuffle:
indices = np.random.permutation(len(self)).astype(np.int64)
else:
indices = np.arange(len(self), dtype=np.int64)
if self.buckets is None:
# sort by target length, then source length
if self.tgt_sizes is not None:
indices = indices[np.argsort(self.tgt_sizes[indices], kind="mergesort")]
return indices[np.argsort(self.src_sizes[indices], kind="mergesort")]
else:
# sort by bucketed_num_tokens, which is:
# max(padded_src_len, padded_tgt_len)
return indices[
np.argsort(self.bucketed_num_tokens[indices], kind="mergesort")
]
@property
def supports_prefetch(self):
return getattr(self.src, "supports_prefetch", False) and (
getattr(self.tgt, "supports_prefetch", False) or self.tgt is None
)
def prefetch(self, indices):
self.src.prefetch(indices)
if self.tgt is not None:
self.tgt.prefetch(indices)
if self.align_dataset is not None:
self.align_dataset.prefetch(indices)
def filter_indices_by_size(self, indices, max_sizes):
"""Filter a list of sample indices. Remove those that are longer
than specified in max_sizes.
Args:
indices (np.array): original array of sample indices
max_sizes (int or list[int] or tuple[int]): max sample size,
can be defined separately for src and tgt (then list or tuple)
Returns:
np.array: filtered sample array
list: list of removed indices
"""
return data_utils.filter_paired_dataset_indices_by_size(
self.src_sizes,
self.tgt_sizes,
indices,
max_sizes,
)
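# Hedged illustration (not part of fairseq): the two-pass stable sort used by
# ordered_indices when bucketing is disabled, shown with plain numpy arrays.
# The function name and sizes are hypothetical.
def _example_length_ordering():
    src_sizes = np.array([5, 3, 7, 3])
    tgt_sizes = np.array([2, 9, 2, 1])
    indices = np.arange(len(src_sizes))
    indices = indices[np.argsort(tgt_sizes[indices], kind="mergesort")]
    indices = indices[np.argsort(src_sizes[indices], kind="mergesort")]
    return indices  # array([3, 1, 0, 2]): by source length, ties by target length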
|
|
import base64
import datetime
import json
from json import JSONEncoder
from google.protobuf.json_format import MessageToJson, ParseDict
from google.protobuf.descriptor import FieldDescriptor
from mlflow.exceptions import MlflowException
from collections import defaultdict
from functools import partial
_PROTOBUF_INT64_FIELDS = [
FieldDescriptor.TYPE_INT64,
FieldDescriptor.TYPE_UINT64,
FieldDescriptor.TYPE_FIXED64,
FieldDescriptor.TYPE_SFIXED64,
FieldDescriptor.TYPE_SINT64,
]
def _mark_int64_fields_for_proto_maps(proto_map, value_field_type):
"""Converts a proto map to JSON, preserving only int64-related fields."""
json_dict = {}
for key, value in proto_map.items():
# The value of a protobuf map can only be a scalar or a message (not a map or repeated
# field).
if value_field_type == FieldDescriptor.TYPE_MESSAGE:
json_dict[key] = _mark_int64_fields(value)
elif value_field_type in _PROTOBUF_INT64_FIELDS:
json_dict[key] = int(value)
elif isinstance(key, int):
json_dict[key] = value
return json_dict
def _mark_int64_fields(proto_message):
"""Converts a proto message to JSON, preserving only int64-related fields."""
json_dict = {}
for field, value in proto_message.ListFields():
if (
# These three conditions check if this field is a protobuf map.
# See the official implementation: https://bit.ly/3EMx1rl
field.type == FieldDescriptor.TYPE_MESSAGE
and field.message_type.has_options
and field.message_type.GetOptions().map_entry
):
# Deal with proto map fields separately in another function.
json_dict[field.name] = _mark_int64_fields_for_proto_maps(
value, field.message_type.fields_by_name["value"].type
)
continue
if field.type == FieldDescriptor.TYPE_MESSAGE:
ftype = partial(_mark_int64_fields)
elif field.type in _PROTOBUF_INT64_FIELDS:
ftype = int
else:
# Skip all non-int64 fields.
continue
json_dict[field.name] = (
[ftype(v) for v in value]
if field.label == FieldDescriptor.LABEL_REPEATED
else ftype(value)
)
return json_dict
def _merge_json_dicts(from_dict, to_dict):
"""Merges the json elements of from_dict into to_dict. Only works for json dicts
converted from proto messages
"""
for key, value in from_dict.items():
if isinstance(key, int) and str(key) in to_dict:
# When the key (i.e. the proto field name) is an integer, it must be a proto map field
# with integer as the key. For example:
# from_dict is {'field_map': {1: '2', 3: '4'}}
# to_dict is {'field_map': {'1': '2', '3': '4'}}
# So we need to replace the str keys with int keys in to_dict.
to_dict[key] = to_dict[str(key)]
del to_dict[str(key)]
if key not in to_dict:
continue
if isinstance(value, dict):
_merge_json_dicts(from_dict[key], to_dict[key])
elif isinstance(value, list):
for i, v in enumerate(value):
if isinstance(v, dict):
_merge_json_dicts(v, to_dict[key][i])
else:
to_dict[key][i] = v
else:
to_dict[key] = from_dict[key]
return to_dict
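# Hedged illustration (not part of mlflow): how _merge_json_dicts restores int64
# values on top of MessageToJson's string rendering, using plain dicts so no
# proto message is needed. The field names and values are hypothetical.
def _example_merge_int64_dicts():
    from_dict = {"run": {"start_time": 1650000000000}}  # int64 field kept as a number
    to_dict = {"run": {"start_time": "1650000000000", "name": "demo"}}
    return _merge_json_dicts(from_dict, to_dict)
    # -> {"run": {"start_time": 1650000000000, "name": "demo"}}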
def message_to_json(message):
"""Converts a message to JSON, using snake_case for field names."""
# Google's MessageToJson API converts int64 proto fields to JSON strings.
# For more info, see https://github.com/protocolbuffers/protobuf/issues/2954
json_dict_with_int64_as_str = json.loads(
MessageToJson(message, preserving_proto_field_name=True)
)
# We convert this proto message into a JSON dict where only int64 proto fields
# are preserved, and they are treated as JSON numbers, not strings.
json_dict_with_int64_fields_only = _mark_int64_fields(message)
# By merging these two JSON dicts, we end up with a JSON dict where int64 proto fields are not
# converted to JSON strings. Int64 keys in proto maps will always be converted to JSON strings
# because JSON doesn't support non-string keys.
json_dict_with_int64_as_numbers = _merge_json_dicts(
json_dict_with_int64_fields_only, json_dict_with_int64_as_str
)
return json.dumps(json_dict_with_int64_as_numbers, indent=2)
def _stringify_all_experiment_ids(x):
"""Converts experiment_id fields which are defined as ints into strings in the given json.
This is necessary for backwards- and forwards-compatibility with MLflow clients/servers
running MLflow 0.9.0 and below, as experiment_id was changed from an int to a string.
To note, the Python JSON serializer is happy to auto-convert strings into ints (so a
server or client that sees the new format is fine), but is unwilling to convert ints
to strings. Therefore, we need to manually perform this conversion.
    This code can be removed after MLflow 1.0, once users have been given reasonable time to
upgrade clients and servers to MLflow 0.9.1+.
"""
if isinstance(x, dict):
items = x.items()
for k, v in items:
if k == "experiment_id":
x[k] = str(v)
elif k == "experiment_ids":
x[k] = [str(w) for w in v]
elif k == "info" and isinstance(v, dict) and "experiment_id" in v and "run_uuid" in v:
# shortcut for run info
v["experiment_id"] = str(v["experiment_id"])
elif k not in ("params", "tags", "metrics"): # skip run data
_stringify_all_experiment_ids(v)
elif isinstance(x, list):
for y in x:
_stringify_all_experiment_ids(y)
def parse_dict(js_dict, message):
"""Parses a JSON dictionary into a message proto, ignoring unknown fields in the JSON."""
_stringify_all_experiment_ids(js_dict)
ParseDict(js_dict=js_dict, message=message, ignore_unknown_fields=True)
class NumpyEncoder(JSONEncoder):
"""Special json encoder for numpy types.
    Note that some numpy types do not have a native Python equivalent, so
    json.dumps will raise a TypeError for them. In that case, the numpy
    values need to be converted into their closest Python equivalents.
"""
def try_convert(self, o):
import numpy as np
import pandas as pd
def encode_binary(x):
return base64.encodebytes(x).decode("ascii")
if isinstance(o, np.ndarray):
if o.dtype == object:
return [self.try_convert(x)[0] for x in o.tolist()], True
elif o.dtype == np.bytes_:
return np.vectorize(encode_binary)(o), True
else:
return o.tolist(), True
if isinstance(o, np.generic):
return o.item(), True
if isinstance(o, bytes) or isinstance(o, bytearray):
return encode_binary(o), True
if isinstance(o, np.datetime64):
return np.datetime_as_string(o), True
if isinstance(o, (pd.Timestamp, datetime.date)):
return o.isoformat(), True
return o, False
def default(self, o): # pylint: disable=E0202
res, converted = self.try_convert(o)
if converted:
return res
else:
return super().default(o)
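# Hedged usage sketch (not part of mlflow): NumpyEncoder plugged into json.dumps
# so numpy scalars, arrays and raw bytes serialize cleanly. The payload below is
# hypothetical.
def _example_numpy_encoder():
    import numpy as np
    payload = {"score": np.float32(0.75), "ids": np.arange(3), "raw": b"\x00\x01"}
    return json.dumps(payload, cls=NumpyEncoder)
    # scalars become numbers, arrays become lists, bytes are base64-encoded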
def _dataframe_from_json(
path_or_str, schema=None, pandas_orient: str = "split", precise_float=False
):
"""
Parse json into pandas.DataFrame. User can pass schema to ensure correct type parsing and to
make any necessary conversions (e.g. string -> binary for binary columns).
:param path_or_str: Path to a json file or a json string.
:param schema: Mlflow schema used when parsing the data.
:param pandas_orient: pandas data frame convention used to store the data.
:return: pandas.DataFrame.
"""
import pandas as pd
from mlflow.types import DataType
if schema is not None:
if schema.is_tensor_spec():
# The schema can be either:
# - a single tensor: attempt to parse all columns with the same dtype
# - a dictionary of tensors: each column gets the type from an equally named tensor
if len(schema.inputs) == 1:
dtypes = schema.numpy_types()[0]
else:
dtypes = dict(zip(schema.input_names(), schema.numpy_types()))
else:
dtypes = dict(zip(schema.input_names(), schema.pandas_types()))
df = pd.read_json(
path_or_str,
orient=pandas_orient,
dtype=dtypes,
precise_float=precise_float,
convert_dates=False,
)
if not schema.is_tensor_spec():
actual_cols = set(df.columns)
for type_, name in zip(schema.input_types(), schema.input_names()):
if type_ == DataType.binary and name in actual_cols:
df[name] = df[name].map(lambda x: base64.decodebytes(bytes(x, "utf8")))
return df
else:
return pd.read_json(
path_or_str, orient=pandas_orient, dtype=False, precise_float=precise_float
)
def _get_jsonable_obj(data, pandas_orient="records"):
"""Attempt to make the data json-able via standard library.
Look for some commonly used types that are not jsonable and convert them into json-able ones.
Unknown data types are returned as is.
:param data: data to be converted, works with pandas and numpy, rest will be returned as is.
:param pandas_orient: If `data` is a Pandas DataFrame, it will be converted to a JSON
dictionary using this Pandas serialization orientation.
"""
import numpy as np
import pandas as pd
if isinstance(data, np.ndarray):
return data.tolist()
if isinstance(data, pd.DataFrame):
return data.to_dict(orient=pandas_orient)
if isinstance(data, pd.Series):
return pd.DataFrame(data).to_dict(orient=pandas_orient)
else: # by default just return whatever this is and hope for the best
return data
def parse_tf_serving_input(inp_dict, schema=None):
"""
:param inp_dict: A dict deserialized from a JSON string formatted as described in TF's
serving API doc
(https://www.tensorflow.org/tfx/serving/api_rest#request_format_2)
:param schema: Mlflow schema used when parsing the data.
"""
import numpy as np
def cast_schema_type(input_data):
if schema is not None:
if schema.has_input_names():
input_names = schema.input_names()
if len(input_names) == 1 and isinstance(input_data, list):
# for schemas with a single column, match input with column
input_data = {input_names[0]: input_data}
if not isinstance(input_data, dict):
raise MlflowException(
"Failed to parse input data. This model contains a tensor-based model"
" signature with input names, which suggests a dictionary input mapping"
" input name to tensor, but an input of type {0} was found.".format(
type(input_data)
)
)
type_dict = dict(zip(schema.input_names(), schema.numpy_types()))
for col_name in input_data.keys():
input_data[col_name] = np.array(
input_data[col_name], dtype=type_dict.get(col_name)
)
else:
if not isinstance(input_data, list):
raise MlflowException(
"Failed to parse input data. This model contains an un-named tensor-based"
" model signature which expects a single n-dimensional array as input,"
" however, an input of type {0} was found.".format(type(input_data))
)
input_data = np.array(input_data, dtype=schema.numpy_types()[0])
else:
if isinstance(input_data, dict):
input_data = {k: np.array(v) for k, v in input_data.items()}
else:
input_data = np.array(input_data)
return input_data
# pylint: disable=broad-except
if "signature_name" in inp_dict:
raise MlflowException(
'Failed to parse data as TF serving input. "signature_name" is currently'
" not supported."
)
if not (list(inp_dict.keys()) == ["instances"] or list(inp_dict.keys()) == ["inputs"]):
raise MlflowException(
'Failed to parse data as TF serving input. One of "instances" and'
' "inputs" must be specified (not both or any other keys).'
)
# Read the JSON
try:
if "instances" in inp_dict:
items = inp_dict["instances"]
if len(items) > 0 and isinstance(items[0], dict):
# convert items to column format (map column/input name to tensor)
data = defaultdict(list)
for item in items:
for k, v in item.items():
data[k].append(v)
data = cast_schema_type(data)
else:
data = cast_schema_type(items)
else:
# items already in column format, convert values to tensor
items = inp_dict["inputs"]
data = cast_schema_type(items)
except Exception:
raise MlflowException(
"Failed to parse data as TF serving input. Ensure that the input is"
" a valid JSON-formatted string that conforms to the request body for"
" TF serving's Predict API as documented at"
" https://www.tensorflow.org/tfx/serving/api_rest#request_format_2"
)
# Sanity check inputted data
if isinstance(data, dict):
# ensure all columns have the same number of items
expected_len = len(list(data.values())[0])
if not all(len(v) == expected_len for v in data.values()):
raise MlflowException(
"Failed to parse data as TF serving input. The length of values for"
" each input/column name are not the same"
)
return data
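# A minimal usage sketch (not part of the original module), assuming no schema
# is supplied. Row-oriented "instances" payloads are pivoted into a column
# dict; "inputs" payloads are treated as already column-oriented. Either way,
# each value ends up as a numpy array.
#
#   row_req = {"instances": [{"a": 1, "b": 10}, {"a": 2, "b": 20}]}
#   col_req = {"inputs": {"a": [1, 2], "b": [10, 20]}}
#   parse_tf_serving_input(row_req)  # {"a": array([1, 2]), "b": array([10, 20])}
#   parse_tf_serving_input(col_req)  # same result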
|
|
"""
An example using Amazon's Thread example for motivation
http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/SampleTablesAndData.html
"""
import logging
from pynamodb.models import Model
from pynamodb.attributes import (
ListAttribute, UnicodeAttribute, NumberAttribute, UnicodeSetAttribute, UTCDateTimeAttribute
)
from datetime import datetime
logging.basicConfig()
log = logging.getLogger("pynamodb")
log.setLevel(logging.DEBUG)
log.propagate = True
class Thread(Model):
class Meta:
read_capacity_units = 1
write_capacity_units = 1
table_name = "Thread"
host = "http://localhost:8000"
forum_name = UnicodeAttribute(hash_key=True)
subject = UnicodeAttribute(range_key=True)
views = NumberAttribute(default=0)
replies = NumberAttribute(default=0)
answered = NumberAttribute(default=0)
tags = UnicodeSetAttribute()
last_post_datetime = UTCDateTimeAttribute(null=True)
notes = ListAttribute(default=list) # type: ignore # todo: add ability for basic list types
# Delete the table
# print(Thread.delete_table())
# Create the table
if not Thread.exists():
Thread.create_table(wait=True)
# Create a thread
thread_item = Thread(
'Some Forum',
'Some Subject',
tags=['foo', 'bar'],
last_post_datetime=datetime.now()
)
# try:
# Thread.get('does not', 'exist')
# except Thread.DoesNotExist:
# pass
# Save the thread
thread_item.save()
# Batch write operation
with Thread.batch_write() as batch:
threads = []
for x in range(100):
thread = Thread('forum-{0}'.format(x), 'subject-{0}'.format(x))
thread.tags = {'tag1', 'tag2'}
thread.last_post_datetime = datetime.now()
threads.append(thread)
for thread in threads:
batch.save(thread)
# Get table count
print(Thread.count())
# Count based on a filter
print(Thread.count('forum-1'))
# Batch get
item_keys = [('forum-{0}'.format(x), 'subject-{0}'.format(x)) for x in range(100)]
for item in Thread.batch_get(item_keys):
print(item)
# Scan
for item in Thread.scan():
print(item)
# Query
for item in Thread.query('forum-1', Thread.subject.startswith('subject')):
print(item)
print("-"*80)
# A model that uses aliased attribute names
class AliasedModel(Model):
class Meta:
table_name = "AliasedModel"
host = "http://localhost:8000"
forum_name = UnicodeAttribute(hash_key=True, attr_name='fn')
subject = UnicodeAttribute(range_key=True, attr_name='s')
views = NumberAttribute(default=0, attr_name='v')
replies = NumberAttribute(default=0, attr_name='rp')
answered = NumberAttribute(default=0, attr_name='an')
tags = UnicodeSetAttribute(attr_name='t')
last_post_datetime = UTCDateTimeAttribute(attr_name='lp')
if not AliasedModel.exists():
AliasedModel.create_table(read_capacity_units=1, write_capacity_units=1, wait=True)
# Create an aliased model
aliased_item = AliasedModel(
'Some Forum',
'Some Subject',
tags=['foo', 'bar'],
last_post_datetime=datetime.now()
)
# Save the aliased model
aliased_item.save()
# Batch write operation
with AliasedModel.batch_write() as batch:
aliased_items = []
for x in range(100):
aliased_item = AliasedModel('forum-{0}'.format(x), 'subject-{0}'.format(x))
aliased_item.tags = {'tag1', 'tag2'}
aliased_item.last_post_datetime = datetime.now()
aliased_items.append(aliased_item)
for aliased_item in aliased_items:
batch.save(aliased_item)
# Batch get
item_keys = [('forum-{0}'.format(x), 'subject-{0}'.format(x)) for x in range(100)]
for aliased_item in AliasedModel.batch_get(item_keys):
print("Batch get item: {0}".format(aliased_item))
# Scan
for aliased_item in AliasedModel.scan():
print("Scanned item: {0}".format(aliased_item))
# Query
for aliased_item in AliasedModel.query('forum-1', AliasedModel.subject.startswith('subject')):
print("Query using aliased attribute: {0}".format(aliased_item))
# Query with filters
for item in Thread.query('forum-1', (Thread.views == 0) | (Thread.replies == 0)):
print("Query result: {0}".format(item))
# Scan with filters
for item in Thread.scan(Thread.subject.startswith('subject') & (Thread.views == 0)):
print("Scanned item: {0} {1}".format(item.subject, item.views))
# Scan with null filter
for item in Thread.scan(Thread.subject.startswith('subject') & Thread.last_post_datetime.does_not_exist()):
print("Scanned item: {0} {1}".format(item.subject, item.views))
# Conditionally save an item
thread_item = Thread(
'Some Forum',
'Some Subject',
tags=['foo', 'bar'],
last_post_datetime=datetime.now()
)
# DynamoDB will only save the item if forum_name exists
print(thread_item.save(Thread.forum_name.exists()))
# DynamoDB will update the item, by adding 1 to the views attribute,
# if the forum_name attribute equals 'Some Forum' or the subject attribute exists
print(thread_item.update(
actions=[
Thread.views.add(1)
],
condition=(
(Thread.forum_name == 'Some Forum') | Thread.subject.exists()
)
))
# DynamoDB will atomically update the attributes `replies` (increase value by 1),
# and `last_post_datetime` (set value to the current datetime)
print(thread_item.update(actions=[
Thread.replies.add(1),
Thread.last_post_datetime.set(datetime.now()),
]))
# DynamoDB will delete the item only if the views attribute equals one;
# otherwise the conditional check fails and an exception is raised.
try:
    print(thread_item.delete(Thread.views == 1))
except Exception:
    pass
# Remove an item's attribute
print(thread_item.update(actions=[
Thread.tags.remove()
]))
# Update list attribute
print(thread_item.update(actions=[
Thread.notes.set(
Thread.notes.append(["new note"])
)
]))
# Print the size of the table
print("Table size: {}".format(Thread.describe_table().get('ItemCount')))
# Optionally Delete all table items
# Commented out for safety
# for item in Thread.scan():
# item.delete()
print("Table size: {}".format(Thread.describe_table().get('ItemCount')))
|
|
#!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi ([email protected])
# Eric Anderson ([email protected])
# module_check: supported
# Avi Version: 17.1.1
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_serviceenginegroup
author: Gaurav Rastogi ([email protected])
short_description: Module for setup of ServiceEngineGroup Avi RESTful Object
description:
- This module is used to configure ServiceEngineGroup object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
active_standby:
description:
- Service engines in active/standby mode for ha failover.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
advertise_backend_networks:
description:
            - Advertise reachability of backend server networks via adc through bgp for default gateway feature.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
aggressive_failure_detection:
description:
- Enable aggressive failover configuration for ha.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
algo:
description:
- In compact placement, virtual services are placed on existing ses until max_vs_per_se limit is reached.
- Enum options - PLACEMENT_ALGO_PACKED, PLACEMENT_ALGO_DISTRIBUTED.
- Default value when not specified in API or module is interpreted by Avi Controller as PLACEMENT_ALGO_PACKED.
archive_shm_limit:
description:
- Amount of se memory in gb until which shared memory is collected in core archive.
- Field introduced in 17.1.3.
- Default value when not specified in API or module is interpreted by Avi Controller as 8.
async_ssl:
description:
- Ssl handshakes will be handled by dedicated ssl threads.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
version_added: "2.4"
async_ssl_threads:
description:
- Number of async ssl threads per se_dp.
- Allowed values are 1-4.
- Default value when not specified in API or module is interpreted by Avi Controller as 1.
version_added: "2.4"
auto_rebalance:
description:
- If set, virtual services will be automatically migrated when load on an se is less than minimum or more than maximum thresholds.
- Only alerts are generated when the auto_rebalance is not set.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
auto_rebalance_interval:
description:
- Frequency of rebalance, if 'auto rebalance' is enabled.
- Default value when not specified in API or module is interpreted by Avi Controller as 300.
auto_redistribute_active_standby_load:
description:
- Redistribution of virtual services from the takeover se to the replacement se can cause momentary traffic loss.
- If the auto-redistribute load option is left in its default off state, any desired rebalancing requires calls to rest api.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
buffer_se:
description:
- Excess service engine capacity provisioned for ha failover.
- Default value when not specified in API or module is interpreted by Avi Controller as 1.
cloud_ref:
description:
- It is a reference to an object of type cloud.
connection_memory_percentage:
description:
- Percentage of memory for connection state.
- This will come at the expense of memory used for http in-memory cache.
- Allowed values are 10-90.
- Default value when not specified in API or module is interpreted by Avi Controller as 50.
cpu_reserve:
description:
- Boolean flag to set cpu_reserve.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
cpu_socket_affinity:
description:
- Allocate all the cpu cores for the service engine virtual machines on the same cpu socket.
- Applicable only for vcenter cloud.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
custom_securitygroups_data:
description:
- Custom security groups to be associated with data vnics for se instances in openstack and aws clouds.
- Field introduced in 17.1.3.
custom_securitygroups_mgmt:
description:
- Custom security groups to be associated with management vnic for se instances in openstack and aws clouds.
- Field introduced in 17.1.3.
custom_tag:
description:
- Custom tag will be used to create the tags for se instance in aws.
- Note this is not the same as the prefix for se name.
dedicated_dispatcher_core:
description:
- Dedicate the core that handles packet receive/transmit from the network to just the dispatching function.
- Don't use it for tcp/ip and ssl functions.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
description:
description:
- User defined description for the object.
disk_per_se:
description:
- Amount of disk space for each of the service engine virtual machines.
- Default value when not specified in API or module is interpreted by Avi Controller as 10.
distribute_load_active_standby:
description:
- Use both the active and standby service engines for virtual service placement in the legacy active standby ha mode.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
enable_routing:
description:
            - Enable routing for this serviceenginegroup.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
enable_vip_on_all_interfaces:
description:
- Enable vip on all interfaces of se.
- Field introduced in 17.1.1.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
enable_vmac:
description:
- Use virtual mac address for interfaces on which floating interface ips are placed.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
extra_config_multiplier:
description:
- Multiplier for extra config to support large vs/pool config.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.0.
extra_shared_config_memory:
description:
- Extra config memory to support large geo db configuration.
- Field introduced in 17.1.1.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.
floating_intf_ip:
description:
- If serviceenginegroup is configured for legacy 1+1 active standby ha mode, floating ip's will be advertised only by the active se in the pair.
- Virtual services in this group must be disabled/enabled for any changes to the floating ip's to take effect.
- Only active se hosting vs tagged with active standby se 1 tag will advertise this floating ip when manual load distribution is enabled.
floating_intf_ip_se_2:
description:
- If serviceenginegroup is configured for legacy 1+1 active standby ha mode, floating ip's will be advertised only by the active se in the pair.
- Virtual services in this group must be disabled/enabled for any changes to the floating ip's to take effect.
- Only active se hosting vs tagged with active standby se 2 tag will advertise this floating ip when manual load distribution is enabled.
ha_mode:
description:
- High availability mode for all the virtual services using this service engine group.
- Enum options - HA_MODE_SHARED_PAIR, HA_MODE_SHARED, HA_MODE_LEGACY_ACTIVE_STANDBY.
- Default value when not specified in API or module is interpreted by Avi Controller as HA_MODE_SHARED.
hardwaresecuritymodulegroup_ref:
description:
- It is a reference to an object of type hardwaresecuritymodulegroup.
hm_on_standby:
description:
- Enable active health monitoring from the standby se for all placed virtual services.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
host_attribute_key:
description:
- Key of a (key, value) pair identifying a label for a set of nodes usually in container clouds.
- Needs to be specified together with host_attribute_value.
- Ses can be configured differently including ha modes across different se groups.
- May also be used for isolation between different classes of virtualservices.
- Virtualservices' se group may be specified via annotations/labels.
            - An openshift/kubernetes namespace may be annotated with a matching se group label as openshift.io/node-selector apptype=prod.
            - When multiple se groups are used in a cloud with host attributes specified, just a single se group can exist as a match-all se group without a
            - host_attribute_key.
host_attribute_value:
description:
- Value of a (key, value) pair identifying a label for a set of nodes usually in container clouds.
- Needs to be specified together with host_attribute_key.
hypervisor:
description:
- Override default hypervisor.
- Enum options - DEFAULT, VMWARE_ESX, KVM, VMWARE_VSAN, XEN.
instance_flavor:
description:
- Instance/flavor type for se instance.
iptables:
description:
- Iptable rules.
least_load_core_selection:
description:
- Select core with least load for new flow.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
log_disksz:
description:
- Maximum disk capacity (in mb) to be allocated to an se.
- This is exclusively used for debug and log data.
- Default value when not specified in API or module is interpreted by Avi Controller as 10000.
max_cpu_usage:
description:
- When cpu usage on an se exceeds this threshold, virtual services hosted on this se may be rebalanced to other ses to reduce load.
- A new se may be created as part of this process.
- Allowed values are 40-90.
- Default value when not specified in API or module is interpreted by Avi Controller as 80.
max_scaleout_per_vs:
description:
- Maximum number of active service engines for the virtual service.
- Allowed values are 1-64.
- Default value when not specified in API or module is interpreted by Avi Controller as 4.
max_se:
description:
- Maximum number of services engines in this group.
- Allowed values are 0-1000.
- Default value when not specified in API or module is interpreted by Avi Controller as 10.
max_vs_per_se:
description:
- Maximum number of virtual services that can be placed on a single service engine.
- East west virtual services are excluded from this limit.
- Allowed values are 1-1000.
- Default value when not specified in API or module is interpreted by Avi Controller as 10.
mem_reserve:
description:
- Boolean flag to set mem_reserve.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
memory_per_se:
description:
- Amount of memory for each of the service engine virtual machines.
- Default value when not specified in API or module is interpreted by Avi Controller as 2048.
mgmt_network_ref:
description:
- Management network to use for avi service engines.
- It is a reference to an object of type network.
mgmt_subnet:
description:
- Management subnet to use for avi service engines.
min_cpu_usage:
description:
- When cpu usage on an se falls below the minimum threshold, virtual services hosted on the se may be consolidated onto other underutilized ses.
- After consolidation, unused service engines may then be eligible for deletion.
- Allowed values are 20-60.
- Default value when not specified in API or module is interpreted by Avi Controller as 30.
min_scaleout_per_vs:
description:
- Minimum number of active service engines for the virtual service.
- Allowed values are 1-64.
- Default value when not specified in API or module is interpreted by Avi Controller as 1.
name:
description:
- Name of the object.
required: true
non_significant_log_throttle:
description:
- This setting limits the number of non-significant logs generated per second per core on this se.
- Default is 100 logs per second.
- Set it to zero (0) to disable throttling.
- Field introduced in 17.1.3.
- Default value when not specified in API or module is interpreted by Avi Controller as 100.
num_flow_cores_sum_changes_to_ignore:
description:
- Number of changes in num flow cores sum to ignore.
- Default value when not specified in API or module is interpreted by Avi Controller as 8.
openstack_availability_zone:
description:
- Field deprecated in 17.1.1.
openstack_availability_zones:
description:
- Field introduced in 17.1.1.
openstack_mgmt_network_name:
description:
- Avi management network name.
openstack_mgmt_network_uuid:
description:
- Management network uuid.
os_reserved_memory:
description:
- Amount of extra memory to be reserved for use by the operating system on a service engine.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.
per_app:
description:
- Per-app se mode is designed for deploying dedicated load balancers per app (vs).
- In this mode, each se is limited to a max of 2 vss.
- Vcpus in per-app ses count towards licensing usage at 25% rate.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
placement_mode:
description:
- If placement mode is 'auto', virtual services are automatically placed on service engines.
- Enum options - PLACEMENT_MODE_AUTO.
- Default value when not specified in API or module is interpreted by Avi Controller as PLACEMENT_MODE_AUTO.
realtime_se_metrics:
description:
- Enable or disable real time se metrics.
se_deprovision_delay:
description:
- Duration to preserve unused service engine virtual machines before deleting them.
- If traffic to a virtual service were to spike up abruptly, this se would still be available to be utilized again rather than creating a new se.
- If this value is set to 0, controller will never delete any ses and administrator has to manually cleanup unused ses.
- Allowed values are 0-525600.
- Default value when not specified in API or module is interpreted by Avi Controller as 120.
se_dos_profile:
description:
- Dosthresholdprofile settings for serviceenginegroup.
se_ipc_udp_port:
description:
- Udp port for se_dp ipc in docker bridge mode.
- Field introduced in 17.1.2.
- Default value when not specified in API or module is interpreted by Avi Controller as 1500.
version_added: "2.4"
se_name_prefix:
description:
- Prefix to use for virtual machine name of service engines.
- Default value when not specified in API or module is interpreted by Avi Controller as Avi.
se_remote_punt_udp_port:
description:
- Udp port for punted packets in docker bridge mode.
- Field introduced in 17.1.2.
- Default value when not specified in API or module is interpreted by Avi Controller as 1501.
version_added: "2.4"
se_thread_multiplier:
description:
- Multiplier for se threads based on vcpu.
- Allowed values are 1-10.
- Default value when not specified in API or module is interpreted by Avi Controller as 1.
se_tunnel_mode:
description:
- Determines if dsr from secondary se is active or not.
- 0 automatically determine based on hypervisor type.
- 1 disable dsr unconditionally.
- ~[0,1] enable dsr unconditionally.
- Field introduced in 17.1.1.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.
se_tunnel_udp_port:
description:
- Udp port for tunneled packets from secondary to primary se in docker bridge mode.
- Field introduced in 17.1.3.
- Default value when not specified in API or module is interpreted by Avi Controller as 1550.
se_udp_encap_ipc:
description:
- Determines if se-se ipc messages are encapsulated in an udp header.
- 0 automatically determine based on hypervisor type.
- 1 use udp encap unconditionally.
- ~[0,1] don't use udp encap.
- Field introduced in 17.1.2.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.
version_added: "2.4"
se_vs_hb_max_pkts_in_batch:
description:
- Maximum number of aggregated vs heartbeat packets to send in a batch.
- Allowed values are 1-256.
- Field introduced in 17.1.1.
- Default value when not specified in API or module is interpreted by Avi Controller as 8.
se_vs_hb_max_vs_in_pkt:
description:
- Maximum number of virtualservices for which heartbeat messages are aggregated in one packet.
- Allowed values are 1-1024.
- Field introduced in 17.1.1.
- Default value when not specified in API or module is interpreted by Avi Controller as 256.
service_ip_subnets:
description:
- Subnets assigned to the se group.
- Required for vs group placement.
- Field introduced in 17.1.1.
significant_log_throttle:
description:
- This setting limits the number of significant logs generated per second per core on this se.
- Default is 100 logs per second.
- Set it to zero (0) to disable throttling.
- Field introduced in 17.1.3.
- Default value when not specified in API or module is interpreted by Avi Controller as 100.
tenant_ref:
description:
- It is a reference to an object of type tenant.
udf_log_throttle:
description:
- This setting limits the number of udf logs generated per second per core on this se.
- Udf logs are generated due to the configured client log filters or the rules with logging enabled.
- Default is 100 logs per second.
- Set it to zero (0) to disable throttling.
- Field introduced in 17.1.3.
- Default value when not specified in API or module is interpreted by Avi Controller as 100.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
vcenter_clusters:
description:
- Vcenterclusters settings for serviceenginegroup.
vcenter_datastore_mode:
description:
- Enum options - vcenter_datastore_any, vcenter_datastore_local, vcenter_datastore_shared.
- Default value when not specified in API or module is interpreted by Avi Controller as VCENTER_DATASTORE_ANY.
vcenter_datastores:
description:
- List of vcenterdatastore.
vcenter_datastores_include:
description:
- Boolean flag to set vcenter_datastores_include.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
vcenter_folder:
description:
- Folder to place all the service engine virtual machines in vcenter.
- Default value when not specified in API or module is interpreted by Avi Controller as AviSeFolder.
vcenter_hosts:
description:
- Vcenterhosts settings for serviceenginegroup.
vcpus_per_se:
description:
- Number of vcpus for each of the service engine virtual machines.
- Default value when not specified in API or module is interpreted by Avi Controller as 1.
vs_host_redundancy:
description:
- Ensure primary and secondary service engines are deployed on different physical hosts.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
vs_scalein_timeout:
description:
- Time to wait for the scaled in se to drain existing flows before marking the scalein done.
- Default value when not specified in API or module is interpreted by Avi Controller as 30.
vs_scalein_timeout_for_upgrade:
description:
- During se upgrade, time to wait for the scaled-in se to drain existing flows before marking the scalein done.
- Default value when not specified in API or module is interpreted by Avi Controller as 30.
vs_scaleout_timeout:
description:
- Time to wait for the scaled out se to become ready before marking the scaleout done.
- Default value when not specified in API or module is interpreted by Avi Controller as 30.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create ServiceEngineGroup object
avi_serviceenginegroup:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_serviceenginegroup
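# A hypothetical second example (not from the original module documentation)
# showing a few of the documented options; the values are illustrative only.
- name: Example to create ServiceEngineGroup object with HA and sizing options
  avi_serviceenginegroup:
    controller: 10.10.25.42
    username: admin
    password: something
    state: present
    name: sample_serviceenginegroup_ha
    ha_mode: HA_MODE_SHARED_PAIR
    max_se: 4
    vcpus_per_se: 2
    memory_per_se: 4096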
"""
RETURN = '''
obj:
description: ServiceEngineGroup (api/serviceenginegroup) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
active_standby=dict(type='bool',),
advertise_backend_networks=dict(type='bool',),
aggressive_failure_detection=dict(type='bool',),
algo=dict(type='str',),
archive_shm_limit=dict(type='int',),
async_ssl=dict(type='bool',),
async_ssl_threads=dict(type='int',),
auto_rebalance=dict(type='bool',),
auto_rebalance_interval=dict(type='int',),
auto_redistribute_active_standby_load=dict(type='bool',),
buffer_se=dict(type='int',),
cloud_ref=dict(type='str',),
connection_memory_percentage=dict(type='int',),
cpu_reserve=dict(type='bool',),
cpu_socket_affinity=dict(type='bool',),
custom_securitygroups_data=dict(type='list',),
custom_securitygroups_mgmt=dict(type='list',),
custom_tag=dict(type='list',),
dedicated_dispatcher_core=dict(type='bool',),
description=dict(type='str',),
disk_per_se=dict(type='int',),
distribute_load_active_standby=dict(type='bool',),
enable_routing=dict(type='bool',),
enable_vip_on_all_interfaces=dict(type='bool',),
enable_vmac=dict(type='bool',),
extra_config_multiplier=dict(type='float',),
extra_shared_config_memory=dict(type='int',),
floating_intf_ip=dict(type='list',),
floating_intf_ip_se_2=dict(type='list',),
ha_mode=dict(type='str',),
hardwaresecuritymodulegroup_ref=dict(type='str',),
hm_on_standby=dict(type='bool',),
host_attribute_key=dict(type='str',),
host_attribute_value=dict(type='str',),
hypervisor=dict(type='str',),
instance_flavor=dict(type='str',),
iptables=dict(type='list',),
least_load_core_selection=dict(type='bool',),
log_disksz=dict(type='int',),
max_cpu_usage=dict(type='int',),
max_scaleout_per_vs=dict(type='int',),
max_se=dict(type='int',),
max_vs_per_se=dict(type='int',),
mem_reserve=dict(type='bool',),
memory_per_se=dict(type='int',),
mgmt_network_ref=dict(type='str',),
mgmt_subnet=dict(type='dict',),
min_cpu_usage=dict(type='int',),
min_scaleout_per_vs=dict(type='int',),
name=dict(type='str', required=True),
non_significant_log_throttle=dict(type='int',),
num_flow_cores_sum_changes_to_ignore=dict(type='int',),
openstack_availability_zone=dict(type='str',),
openstack_availability_zones=dict(type='list',),
openstack_mgmt_network_name=dict(type='str',),
openstack_mgmt_network_uuid=dict(type='str',),
os_reserved_memory=dict(type='int',),
per_app=dict(type='bool',),
placement_mode=dict(type='str',),
realtime_se_metrics=dict(type='dict',),
se_deprovision_delay=dict(type='int',),
se_dos_profile=dict(type='dict',),
se_ipc_udp_port=dict(type='int',),
se_name_prefix=dict(type='str',),
se_remote_punt_udp_port=dict(type='int',),
se_thread_multiplier=dict(type='int',),
se_tunnel_mode=dict(type='int',),
se_tunnel_udp_port=dict(type='int',),
se_udp_encap_ipc=dict(type='int',),
se_vs_hb_max_pkts_in_batch=dict(type='int',),
se_vs_hb_max_vs_in_pkt=dict(type='int',),
service_ip_subnets=dict(type='list',),
significant_log_throttle=dict(type='int',),
tenant_ref=dict(type='str',),
udf_log_throttle=dict(type='int',),
url=dict(type='str',),
uuid=dict(type='str',),
vcenter_clusters=dict(type='dict',),
vcenter_datastore_mode=dict(type='str',),
vcenter_datastores=dict(type='list',),
vcenter_datastores_include=dict(type='bool',),
vcenter_folder=dict(type='str',),
vcenter_hosts=dict(type='dict',),
vcpus_per_se=dict(type='int',),
vs_host_redundancy=dict(type='bool',),
vs_scalein_timeout=dict(type='int',),
vs_scalein_timeout_for_upgrade=dict(type='int',),
vs_scaleout_timeout=dict(type='int',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'serviceenginegroup',
set([]))
if __name__ == '__main__':
main()
|
|
"""Test script for the gzip module.
"""
import unittest
from test import test_support
import os
import io
import struct
import tempfile
gzip = test_support.import_module('gzip')
data1 = """ int length=DEFAULTALLOC, err = Z_OK;
PyObject *RetVal;
int flushmode = Z_FINISH;
unsigned long start_total_out;
"""
data2 = """/* zlibmodule.c -- gzip-compatible data compression */
/* See http://www.gzip.org/zlib/
/* See http://www.winimage.com/zLibDll for Windows */
"""
class TestGzip(unittest.TestCase):
filename = test_support.TESTFN
def setUp(self):
test_support.unlink(self.filename)
def tearDown(self):
test_support.unlink(self.filename)
def write_and_read_back(self, data, mode='b'):
b_data = memoryview(data).tobytes()
with gzip.GzipFile(self.filename, 'w'+mode) as f:
l = f.write(data)
self.assertEqual(l, len(b_data))
with gzip.GzipFile(self.filename, 'r'+mode) as f:
self.assertEqual(f.read(), b_data)
@test_support.requires_unicode
def test_unicode_filename(self):
unicode_filename = test_support.TESTFN_UNICODE
try:
unicode_filename.encode(test_support.TESTFN_ENCODING)
except (UnicodeError, TypeError):
self.skipTest("Requires unicode filenames support")
self.filename = unicode_filename
with gzip.GzipFile(unicode_filename, "wb") as f:
f.write(data1 * 50)
with gzip.GzipFile(unicode_filename, "rb") as f:
self.assertEqual(f.read(), data1 * 50)
# Sanity check that we are actually operating on the right file.
with open(unicode_filename, 'rb') as fobj, \
gzip.GzipFile(fileobj=fobj, mode="rb") as f:
self.assertEqual(f.read(), data1 * 50)
def test_write(self):
with gzip.GzipFile(self.filename, 'wb') as f:
f.write(data1 * 50)
# Try flush and fileno.
f.flush()
f.fileno()
if hasattr(os, 'fsync'):
os.fsync(f.fileno())
f.close()
# Test multiple close() calls.
f.close()
# The following test_write_xy methods test that write accepts
# the corresponding bytes-like object type as input
# and that the data written equals bytes(xy) in all cases.
def test_write_memoryview(self):
self.write_and_read_back(memoryview(data1 * 50))
def test_write_incompatible_type(self):
# Test that non-bytes-like types raise TypeError.
# Issue #21560: attempts to write incompatible types
# should not affect the state of the fileobject
with gzip.GzipFile(self.filename, 'wb') as f:
with self.assertRaises(UnicodeEncodeError):
f.write(u'\xff')
with self.assertRaises(TypeError):
f.write([1])
f.write(data1)
with gzip.GzipFile(self.filename, 'rb') as f:
self.assertEqual(f.read(), data1)
def test_read(self):
self.test_write()
# Try reading.
with gzip.GzipFile(self.filename, 'r') as f:
d = f.read()
self.assertEqual(d, data1*50)
def test_read_universal_newlines(self):
# Issue #5148: Reading breaks when mode contains 'U'.
self.test_write()
with gzip.GzipFile(self.filename, 'rU') as f:
d = f.read()
self.assertEqual(d, data1*50)
def test_io_on_closed_object(self):
# Test that I/O operations on closed GzipFile objects raise a
# ValueError, just like the corresponding functions on file objects.
# Write to a file, open it for reading, then close it.
self.test_write()
f = gzip.GzipFile(self.filename, 'r')
f.close()
with self.assertRaises(ValueError):
f.read(1)
with self.assertRaises(ValueError):
f.seek(0)
with self.assertRaises(ValueError):
f.tell()
# Open the file for writing, then close it.
f = gzip.GzipFile(self.filename, 'w')
f.close()
with self.assertRaises(ValueError):
f.write('')
with self.assertRaises(ValueError):
f.flush()
def test_append(self):
self.test_write()
# Append to the previous file
with gzip.GzipFile(self.filename, 'ab') as f:
f.write(data2 * 15)
with gzip.GzipFile(self.filename, 'rb') as f:
d = f.read()
self.assertEqual(d, (data1*50) + (data2*15))
def test_many_append(self):
# Bug #1074261 was triggered when reading a file that contained
# many, many members. Create such a file and verify that reading it
# works.
with gzip.open(self.filename, 'wb', 9) as f:
f.write('a')
for i in range(0, 200):
with gzip.open(self.filename, "ab", 9) as f: # append
f.write('a')
# Try reading the file
with gzip.open(self.filename, "rb") as zgfile:
contents = ""
while 1:
ztxt = zgfile.read(8192)
contents += ztxt
if not ztxt: break
self.assertEqual(contents, 'a'*201)
def test_buffered_reader(self):
# Issue #7471: a GzipFile can be wrapped in a BufferedReader for
# performance.
self.test_write()
with gzip.GzipFile(self.filename, 'rb') as f:
with io.BufferedReader(f) as r:
lines = [line for line in r]
self.assertEqual(lines, 50 * data1.splitlines(True))
def test_readline(self):
self.test_write()
# Try .readline() with varying line lengths
with gzip.GzipFile(self.filename, 'rb') as f:
line_length = 0
while 1:
L = f.readline(line_length)
if not L and line_length != 0: break
self.assertTrue(len(L) <= line_length)
line_length = (line_length + 1) % 50
def test_readlines(self):
self.test_write()
# Try .readlines()
with gzip.GzipFile(self.filename, 'rb') as f:
L = f.readlines()
with gzip.GzipFile(self.filename, 'rb') as f:
while 1:
L = f.readlines(150)
if L == []: break
def test_seek_read(self):
self.test_write()
# Try seek, read test
with gzip.GzipFile(self.filename) as f:
while 1:
oldpos = f.tell()
line1 = f.readline()
if not line1: break
newpos = f.tell()
f.seek(oldpos) # negative seek
if len(line1)>10:
amount = 10
else:
amount = len(line1)
line2 = f.read(amount)
self.assertEqual(line1[:amount], line2)
f.seek(newpos) # positive seek
def test_seek_whence(self):
self.test_write()
# Try seek(whence=1), read test
with gzip.GzipFile(self.filename) as f:
f.read(10)
f.seek(10, whence=1)
y = f.read(10)
self.assertEqual(y, data1[20:30])
def test_seek_write(self):
# Try seek, write test
with gzip.GzipFile(self.filename, 'w') as f:
for pos in range(0, 256, 16):
f.seek(pos)
f.write('GZ\n')
def test_mode(self):
self.test_write()
with gzip.GzipFile(self.filename, 'r') as f:
self.assertEqual(f.myfileobj.mode, 'rb')
def test_1647484(self):
for mode in ('wb', 'rb'):
with gzip.GzipFile(self.filename, mode) as f:
self.assertTrue(hasattr(f, "name"))
self.assertEqual(f.name, self.filename)
def test_mtime(self):
mtime = 123456789
with gzip.GzipFile(self.filename, 'w', mtime = mtime) as fWrite:
fWrite.write(data1)
with gzip.GzipFile(self.filename) as fRead:
dataRead = fRead.read()
self.assertEqual(dataRead, data1)
self.assertTrue(hasattr(fRead, 'mtime'))
self.assertEqual(fRead.mtime, mtime)
def test_metadata(self):
mtime = 123456789
with gzip.GzipFile(self.filename, 'w', mtime = mtime) as fWrite:
fWrite.write(data1)
with open(self.filename, 'rb') as fRead:
# see RFC 1952: http://www.faqs.org/rfcs/rfc1952.html
idBytes = fRead.read(2)
self.assertEqual(idBytes, '\x1f\x8b') # gzip ID
cmByte = fRead.read(1)
self.assertEqual(cmByte, '\x08') # deflate
flagsByte = fRead.read(1)
self.assertEqual(flagsByte, '\x08') # only the FNAME flag is set
mtimeBytes = fRead.read(4)
self.assertEqual(mtimeBytes, struct.pack('<i', mtime)) # little-endian
xflByte = fRead.read(1)
self.assertEqual(xflByte, '\x02') # maximum compression
osByte = fRead.read(1)
self.assertEqual(osByte, '\xff') # OS "unknown" (OS-independent)
# Since the FNAME flag is set, the zero-terminated filename follows.
# RFC 1952 specifies that this is the name of the input file, if any.
# However, the gzip module defaults to storing the name of the output
# file in this field.
expected = self.filename.encode('Latin-1') + '\x00'
nameBytes = fRead.read(len(expected))
self.assertEqual(nameBytes, expected)
# Since no other flags were set, the header ends here.
# Rather than process the compressed data, let's seek to the trailer.
fRead.seek(os.stat(self.filename).st_size - 8)
crc32Bytes = fRead.read(4) # CRC32 of uncompressed data [data1]
self.assertEqual(crc32Bytes, '\xaf\xd7d\x83')
isizeBytes = fRead.read(4)
self.assertEqual(isizeBytes, struct.pack('<i', len(data1)))
def test_with_open(self):
# GzipFile supports the context management protocol
with gzip.GzipFile(self.filename, "wb") as f:
f.write(b"xxx")
f = gzip.GzipFile(self.filename, "rb")
f.close()
try:
with f:
pass
except ValueError:
pass
else:
self.fail("__enter__ on a closed file didn't raise an exception")
try:
with gzip.GzipFile(self.filename, "wb") as f:
1 // 0
except ZeroDivisionError:
pass
else:
self.fail("1 // 0 didn't raise an exception")
def test_zero_padded_file(self):
with gzip.GzipFile(self.filename, "wb") as f:
f.write(data1 * 50)
# Pad the file with zeroes
with open(self.filename, "ab") as f:
f.write("\x00" * 50)
with gzip.GzipFile(self.filename, "rb") as f:
d = f.read()
self.assertEqual(d, data1 * 50, "Incorrect data in file")
def test_fileobj_from_fdopen(self):
# Issue #13781: Creating a GzipFile using a fileobj from os.fdopen()
# should not embed the fake filename "<fdopen>" in the output file.
fd = os.open(self.filename, os.O_WRONLY | os.O_CREAT)
with os.fdopen(fd, "wb") as f:
with gzip.GzipFile(fileobj=f, mode="w") as g:
self.assertEqual(g.name, "")
def test_fileobj_from_io_open(self):
fd = os.open(self.filename, os.O_WRONLY | os.O_CREAT)
with io.open(fd, "wb") as f:
with gzip.GzipFile(fileobj=f, mode="w") as g:
self.assertEqual(g.name, "")
def test_fileobj_mode(self):
gzip.GzipFile(self.filename, "wb").close()
with open(self.filename, "r+b") as f:
with gzip.GzipFile(fileobj=f, mode='r') as g:
self.assertEqual(g.mode, gzip.READ)
with gzip.GzipFile(fileobj=f, mode='w') as g:
self.assertEqual(g.mode, gzip.WRITE)
with gzip.GzipFile(fileobj=f, mode='a') as g:
self.assertEqual(g.mode, gzip.WRITE)
with self.assertRaises(IOError):
gzip.GzipFile(fileobj=f, mode='z')
for mode in "rb", "r+b":
with open(self.filename, mode) as f:
with gzip.GzipFile(fileobj=f) as g:
self.assertEqual(g.mode, gzip.READ)
for mode in "wb", "ab":
with open(self.filename, mode) as f:
with gzip.GzipFile(fileobj=f) as g:
self.assertEqual(g.mode, gzip.WRITE)
def test_read_with_extra(self):
# Gzip data with an extra field
gzdata = (b'\x1f\x8b\x08\x04\xb2\x17cQ\x02\xff'
b'\x05\x00Extra'
b'\x0bI-.\x01\x002\xd1Mx\x04\x00\x00\x00')
with gzip.GzipFile(fileobj=io.BytesIO(gzdata)) as f:
self.assertEqual(f.read(), b'Test')
def test_fileobj_without_name(self):
# Issue #33038: GzipFile should not assume that file objects that have
# a .name attribute use a non-None value.
with tempfile.SpooledTemporaryFile() as f:
with gzip.GzipFile(fileobj=f, mode='wb') as archive:
archive.write(b'data')
self.assertEqual(archive.name, '')
def test_main(verbose=None):
test_support.run_unittest(TestGzip)
if __name__ == "__main__":
test_main(verbose=True)
|
|
"""
26 Jan 2016
last_edited: 08 Jun 2017
Authors: Sai Nudurupati & Erkan Istanbulluoglu
Ref 1: source_tracking_algorithm_user_manual. @
https://github.com/RondaStrauch/pub_strauch_etal_esurf/blob/master/SourceTrackingAlgorithm_RefManual_draft.docx
Ref 2: 'The Landlab LandslideProbability Component User Manual' @
https://github.com/RondaStrauch/pub_strauch_etal_esurf/blob/master/LandslideComponentUsersManual.docx
MD - Modeling Domain - Raster grid that is being analyzed/worked upon.
HSD - Hydrologic Source Domain - Grid that is at least as coarse as MD. For
more info, refer Ref 1
"""
# %%
# Import required libraries
import numpy as np
import copy
from collections import Counter
# %%
def convert_arc_flow_directions_to_landlab_node_ids(grid, flow_dir_arc):
"""Convert Arc flow_directions to RasterModelGrid node ids
This function receives flow directions (D8) from ESRI ArcGIS and converts
them to Landlab's RasterModelGrid node id. ESRI ArcGIS D8 flow directions
    take one of eight valid output values, each relating to one of the eight
    adjacent cells into which flow could travel. The valid output values
    are powers of 2, starting from 2^0 (1) at the Eastern neighbor and going
    clockwise to 2^7 (128) at the Northeastern neighbor. For more information,
    refer to 'http://pro.arcgis.com/en/pro-app/tool-reference/spatial-analyst/
how-flow-direction-works.htm'
Parameters:
----------
grid: RasterModelGrid
A grid.
flow_dir_arc: ndarray of int, shape (n_nodes, )
        flow directions derived from ESRI ArcGIS.
Returns:
-------
receiver_nodes: ndarray of int, shape (n_nodes, )
downstream node at each node. Note that this array gives the
receiver nodes only for the core nodes. For non-core
nodes, a zero is used.
"""
r_arc_raw = np.log2(flow_dir_arc)
r_arc_raw = r_arc_raw.astype('int')
neigh_ = grid.neighbors_at_node
diag_ = grid.diagonals_at_node
neigh_ = np.fliplr(neigh_)
diag_ = np.fliplr(diag_)
a_n = np.hsplit(neigh_, 4)
a_d = np.hsplit(diag_, 4)
neighbors = np.hstack((a_n[-1], a_d[0], a_n[0], a_d[1], a_n[1], a_d[2],
a_n[2], a_d[3]))
# Now neighbors has node ids of neighboring nodes in cw order starting at
# right, hence the order of neighbors = [r, br, b, bl, l, tl, t, tr]
receiver_nodes = np.zeros(grid.number_of_nodes, dtype=int)
receiver_nodes[grid.core_nodes] = np.choose(r_arc_raw[grid.core_nodes],
np.transpose(neighbors[grid.core_nodes]))
return (receiver_nodes)
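# A minimal usage sketch (not part of the original file), assuming Landlab's
# RasterModelGrid is importable. ESRI D8 codes (1=E, 2=SE, 4=S, 8=SW, 16=W,
# 32=NW, 64=N, 128=NE) are mapped to receiver node ids; non-core nodes get 0.
#
#   from landlab import RasterModelGrid
#   grid = RasterModelGrid((4, 4))
#   flow_dir_arc = np.ones(grid.number_of_nodes, dtype=int)  # every node flows East
#   receivers = convert_arc_flow_directions_to_landlab_node_ids(grid, flow_dir_arc)
#   # for each core node, receivers[node] is the node id of its Eastern neighbor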
# %%
# Source Routing Algorithm
# Note 1: This algorithm works on core nodes only because core nodes
# have neighbors that are real values and not -1s.
# Note 2: Nodes in the following comments in this section refer to core nodes.
def track_source(grid, hsd_ids, flow_directions=None):
"""Track all contributing upstream core nodes for each core node
This algorithm traverses the grid based on information of flow directions
at nodes and at every node identifies all the nodes upstream of a given
node. The algorithm creates a dictionary with an entry for each node;
a node's entry in the dictionary will contain a list with the node_ids
of all upstream nodes. Thus this method permits identification of the
source area contributing to each and every node in the model grid. This
function is different from a standard flow accumulation routine in that
it not only calculates the amount of flow at each node, but records the
IDs of all upstream nodes. However, similar to a standard
flow accumulation routine, it produces an at_node array of the amount
of flow passing through the node. It also differs from a standard
    flow accumulation routine in that it permits the mapping of flow inputs
    from a coarser grid to a finer model grid.
In its present implementation, the algorithm has not been optimized
for efficient time use. Its methods are brute force and it should be
expected to be time intensive. It is not recommended to be run frequently
in a modeling exercise. Due to its intensive nature, this algorithm may
    fail with large watersheds (at present, the development team has not
derived a maximum stable watershed size).
This function was initially developed to find contributing area of a
30 m grid (MD), where the quantitative data that we were interested in was
available in significantly coarser resolution (called Hydrologic Source
Domain (HSD)). Therefore, we started working with re-sampled HSD,
that is at the same resolution as MD, and represents exactly the same
landscape. Alternatively, one can use the node ids of MD
(grid.nodes.flatten()) as input for hsd_ids.
For more information, refer Ref 1 and Ref 2.
Parameters:
----------
grid: RasterModelGrid
A grid.
hsd_ids: ndarray of int, shape (n_nodes, )
array that maps the nodes of the grid to, possibly coarser,
Hydrologic Source Domain (HSD) grid ids.
flow_directions: ndarray of int, shape (n_nodes, ), optional.
downstream node at each node. Alternatively, this data can be
provided as a nodal field 'flow__receiver_node' on the grid.
Returns:
-------
(hsd_upstr, flow_accum): (dictionary, ndarray of shape (n_nodes))
'hsd_upstr' maps each grid node to corresponding
contributing upstream hsd_ids. hsd_upstr.keys() will return
node_ids of the grid. hsd_upstr.values() will return lists of
        all upstream contributing hsd_ids, including repetitions of hsd_ids,
at corresponding node_ids.
'flow_accum' is an array of the number of upstream contributing
nodes at each node.
"""
if flow_directions is None:
r = grid.at_node['flow__receiver_node']
else:
r = flow_directions
z = grid.at_node['topographic__elevation']
core_nodes = grid.core_nodes
core_elev = z[core_nodes]
# Sort all nodes in the descending order of elevation
sor_z = core_nodes[np.argsort(core_elev)[::-1]]
# Create a list to record all nodes that have been visited
# To store nodes that have already been counted
alr_counted = []
flow_accum = np.zeros(grid.number_of_nodes, dtype=int)
hsd_upstr = {}
# Loop through all nodes
for i in sor_z:
# Check 1: Check if this node has been visited earlier. If yes,
# then skip to next node
if i in alr_counted:
continue
# Check 2: If the visited node is a sink
if r[i] == i:
hsd_upstr.update({i: [hsd_ids[i]]})
flow_accum[i] += 1.
alr_counted.append(i)
continue
# Check 3: Now, if the node is not a sink and hasn't been visited, it
# belongs to a stream segment. Hence, all the nodes in the stream will
        # have to be traversed.
# stream_buffer is a list that will hold the upstream contributing
# node information for that particular segment until reaching outlet.
stream_buffer = []
j = i
switch_i = True
a = 0.
# Loop below will traverse the segment of the stream until an outlet
# is reached.
while True:
# Following if loop is to execute the contents once the first node
# in the segment is visited.
if not switch_i:
j = r[j]
if j not in core_nodes:
break
# If this node is being visited for the first time,
            # this 'if statement' will be executed.
if flow_accum[j] == 0.:
a += 1.
alr_counted.append(j)
stream_buffer.append(hsd_ids[j])
# Update number of upstream nodes.
flow_accum[j] += a
# If the node is being visited for the first time, the dictionary
# 'hsd_upstr' will be updated.
if j in hsd_upstr.keys():
hsd_upstr[j] += copy.copy(stream_buffer)
# If the node has been already visited, then the upstream segment
# that was not accounted for in the main stem, would be added to
# all downstream nodes, one by one, until the outlet is reached.
else:
hsd_upstr.update({j: copy.copy(stream_buffer)})
# If the outlet is reached, the 'while' loop will be exited.
if r[j] == j:
break
# This will be executed only for the first node of the
# stream segment.
if switch_i:
switch_i = False
return (hsd_upstr, flow_accum)
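# Worked trace (added for clarity, not part of the original file): for a simple
# three-node chain a -> b -> c where c is the outlet (r[c] == c) and a is the
# highest node, the loop above yields
#   hsd_upstr == {a: [hsd[a]], b: [hsd[a], hsd[b]], c: [hsd[a], hsd[b], hsd[c]]}
#   flow_accum[a], flow_accum[b], flow_accum[c] == 1, 2, 3
# i.e. each node's entry lists the (possibly repeated) HSD ids of every node
# upstream of it, itself included, and flow_accum counts those contributors.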
# %%
# Algorithm to calculate coefficients of each upstream HSD ID
def find_unique_upstream_hsd_ids_and_fractions(hsd_upstr):
"""Finds unique entries in hsd_upstr.values()
This function operates on hsd_upstr.values(), that are lists of hsd_ids.
Two new Python dictionaries, 'unique_ids' and 'fractions' are created.
unique_ids.keys() = hsd_upstr.keys()
unique_ids.values()[i] = list of unique entries in hsd_upstr.values()[i]
fractions.keys() = hsd_upstr.keys()
    fractions.values()[i] = (number of entries of each unique_ids.values()[i] /
        length of hsd_upstr.values()[i]) for each unique_ids.values()[i] in the
        same order.
Note that 'hsd_upstr' is the output of track_source(). You can use
an alternative input. In that case, please refer to the documentation
of track_source() or refer source_tracking_algorithm_user_manual for
more information.
Parameters:
----------
hsd_upstr: dictionary
'hsd_upstr' maps each MD grid node to corresponding
contributing upstream HSD ids.
Returns:
-------
(unique_ids, fractions): (dictionary, dictionary)
Tuple of data. 'unique_ids' maps each MD node with all upstream HSD
        ids without repetition. 'fractions' maps each MD node with the
        fractions of contributions of the corresponding upstream HSD ids in
        the same order as unique_ids[node_id].
"""
unique_ids = {} # Holds unique upstream HSD ids
C = {} # Holds corresponding total numbers
fractions = {} # Holds corresponding fractions of contribution
for ke in hsd_upstr.keys():
cnt = Counter()
for num in hsd_upstr[ke]:
cnt[num] += 1
unique_ids.update({ke: cnt.keys()})
buf = []
for k in cnt.keys():
buf.append(cnt[k])
C.update({ke: buf})
e = [s/float(sum(buf)) for s in buf]
fractions.update({ke: e})
return (unique_ids, fractions)
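# A minimal end-to-end sketch (not part of the original file) tying the three
# functions together. The field and variable names below are illustrative and
# assume a Landlab grid with elevations and exported ArcGIS D8 directions.
#
#   from landlab import RasterModelGrid
#   grid = RasterModelGrid((5, 5))
#   grid.add_field('topographic__elevation', np.random.rand(grid.number_of_nodes), at='node')
#   flow_dir_arc = ...  # one D8 code per node, exported from ArcGIS
#   r = convert_arc_flow_directions_to_landlab_node_ids(grid, flow_dir_arc)
#   hsd_ids = grid.nodes.flatten()  # 1:1 mapping when MD and HSD coincide
#   hsd_upstr, flow_accum = track_source(grid, hsd_ids, flow_directions=r)
#   unique_ids, fractions = find_unique_upstream_hsd_ids_and_fractions(hsd_upstr)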
|