# -*- coding:utf-8 -*-
# --
# Copyright (c) 2012-2014 Net-ng.
# All rights reserved.
#
# This software is licensed under the BSD License, as described in
# the file LICENSE.txt, which you should have received as part of
# this distribution.
#--
from nagare.i18n import _
from nagare import ajax, component, presentation, security, var
from kansha.toolbox import popin
from .comp import CardsCounter, Column, NewColumnEditor
@presentation.render_for(Column)
def render(self, h, comp, *args):
"""Render the column"""
column_class = 'span-auto list'
if self.is_archive:
column_class += ' archive'
with h.div(class_=column_class, id=self.id):
h << comp.render(h.AsyncRenderer(), 'content')
return h.root
@presentation.render_for(Column, 'content')
def render_content(self, h, comp, model):
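    """Render the column content: header, body, card counter and card filter"""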
h << comp.render(h.AsyncRenderer(), 'header')
h << comp.render(h, 'body')
h << self.card_counter.render(h, 'footer')
h << component.Component(self.card_filter, 'footer')
return h.root
@presentation.render_for(Column, 'calendar')
def render_column_calendar(self, h, comp, *args):
return [card.render(h.AsyncRenderer(), 'calendar') for card in self.cards]
@presentation.render_for(Column, 'new')
def render_column_new(self, h, comp, *args):
return h.div(comp.becomes(self, 'dnd'), class_='new')
@presentation.render_for(Column, model='dropdown')
def render_column_dropdown(self, h, comp, *args):
"""Render the column menu"""
with h.div(class_="dropdown menu"):
with h.ul:
if not self.is_archive:
with h.li:
onclick = (
u"if (confirm(%(message)s)){"
u" window.location='%(callback)s';"
u"}" %
{
'message': ajax.py2js(
_(u'The list will be deleted. Are you sure?')
).decode('UTF-8'),
'callback': h.SyncRenderer().a.action(
self.actions, 'delete', comp
).get('href')
}
)
h << h.a(_(u'Delete this list'), onclick=onclick)
if self.cards:
with h.li:
onclick = (
u"if (confirm(%(message)s)){"
u" window.location='%(callback)s';"
u"}" %
{
'message': ajax.py2js(
_(u'All the cards will be archived. Are you sure?')
).decode('UTF-8'),
'callback': h.SyncRenderer().a.action(
self.actions, 'empty', comp
).get('href')
}
)
h << h.a(_(u'Empty this list'), onclick=onclick)
h << self.card_counter.render(h, 'menu-entry')
elif self.cards:
with h.li:
onclick = "if (confirm(%(message)s)){window.location='%(purge_func)s';}" % {
'message': ajax.py2js(
_(u'All cards will be deleted. Are you sure?')
).decode('UTF-8'),
'purge_func': h.SyncRenderer().a.action(
self.actions, 'purge', comp
).get('href')
}
h << h.a(_('Purge the cards'), onclick=onclick)
return h.root
@presentation.render_for(Column, model='header')
def render_column_header(self, h, comp, *args):
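    """Render the column header: title, card counter and actions dropdown"""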
with h.div(class_='list-header', id=self.id + '_header'):
h << h.a(class_='hidden', id=self.id + '_refresh').action(ajax.Update())
with h.div(class_='list-title'):
with h.div(class_='title'):
h << self.title.render(h.AsyncRenderer(), 0 if security.has_permissions('edit', self) and not self.is_archive else 'readonly')
h << self.card_counter.render(h, 'header')
with h.div(class_='list-actions with-dropdown'):
if security.has_permissions('edit', self):
h << h.a(h.i(class_='icon-dot-menu'),
href='#', class_="toggle-dropdown",
onclick="YAHOO.kansha.app.toggleMenu(this)") << ' '
h << comp.render(h, 'dropdown')
return h.root
@presentation.render_for(Column, model='title')
def render_column_title(self, h, comp, *args):
with h.div(class_='title'):
h << self.title.render(h, 'readonly')
return h.root
@presentation.render_for(Column, 'dnd')
def render_column_dnd(self, h, comp, *args):
"""DnD wrapper for column"""
h << comp.render(h, None)
h << h.script(
"YAHOO.util.Event.onDOMReady(function() {"
" YAHOO.kansha.dnd.initList(%(list_id)s);"
"})" % {'list_id': ajax.py2js(self.id)}
)
return h.root
@presentation.render_for(Column, 'body')
def render_column_body(self, h, comp, *args):
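    """Render the column body: the cards (with drag and drop when editable) and the new card form"""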
model = 'dnd' if security.has_permissions('edit', self) else "no_dnd"
id_ = h.generate_id()
with h.div(class_='list-body', id=id_):
h << [card.on_answer(self.handle_event, comp).render(h, model=model) for card in self.cards]
h << h.script("YAHOO.kansha.dnd.initTargetCard(%s)" % ajax.py2js(id_))
# h << self.card_counter.render(h, 'body')
kw = {}
if not security.has_permissions('edit', self):
kw['style'] = 'width: 0px'
with h.div(class_='list-footer', id=self.id + '_footer', **kw):
# This hack forces Nagare to insert its div; otherwise it breaks the layout.
if self.is_archive or not security.has_permissions('edit', self):
h << {'style': 'display: none'}
h << h.div(self.new_card.on_answer(self.ui_create_card, comp))
return h.root
@presentation.render_for(NewColumnEditor)
def render_NewColumnEditor(self, h, comp, *args):
"""Render column creator"""
h << h.h2(_(u'Add list'))
with h.form:
with h.div:
id_ = h.generate_id()
h << h.label(_('Name'), for_=id_)
h << h.input(id=id_, type='text', value=self.title(),
placeholder=_('List title'),
autofocus=True).error(self.title.error).action(self.title)
with h.div:
id_ = h.generate_id()
h << h.label(_('Position'), for_=id_)
with h.select(id=id_).error(self.index.error).action(self.index):
for i in xrange(1, self.columns_count + 2):
h << h.option(i, value=i - 1).selected(i)
# FIXME
with h.div:
id_ = h.generate_id()
h << h.label(_('Maximum number of cards'), for_=id_)
h << h.input(id=id_, type='text', value=self.nb_cards()).error(self.nb_cards.error).action(self.nb_cards)
h << h.script(
"""YAHOO.util.Event.on(%s, 'keyup', function (e) {
var result = this.value.replace(/[^0-9]/g, '')
if (this.value != result) {
this.value = result;
}
})""" % ajax.py2js(id_)
)
with h.div(class_='buttons'):
h << h.button(_('Add'), class_=('btn btn-primary')).action(self.commit, comp)
h << ' '
h << h.a(_('Cancel'), class_='btn').action(self.cancel, comp)
return h.root
@presentation.render_for(CardsCounter)
def render_CardsCounter(self, h, comp, *args):
with h.div(class_='list-counter'):
self.error = None
visibility = ' hidden'
if self.column.nb_max_cards:
visibility = '' if self.check_add() else ' limitReached'
with h.div(class_='cardCounter' + visibility, id=self.id):
with h.a().action(comp.call, self, 'edit'):
h << self.column.count_cards << '/' << (self.column.nb_max_cards or 0)
h << h.script(
"YAHOO.kansha.app.saveLimit(%(list_id)s, %(limit)s);"
"YAHOO.kansha.app.countCards(%(list_id)s);" %
{
'list_id': ajax.py2js(self.column.id),
'limit': ajax.py2js(self.column.nb_max_cards or 0)
}
)
return h.root
@presentation.render_for(CardsCounter, 'header')
def render_CardsCounter_header(self, h, comp, model):
h << self.editable_counter
return h.root
@presentation.render_for(CardsCounter, 'menu-entry')
def render_CardsCounter_menu(self, h, comp, model):
with h.li:
h << h.a(_(u'Set card limit')).action(self.editable_counter.call, self, 'edit')
return h.root
@presentation.render_for(CardsCounter, 'body')
def render_CardsCounter_body(self, h, comp, model):
with h.div(class_='no-drop'):
h << h.i(class_='icon-blocked huge') << h.br
h << _(u"This list already holds its maximum amount of cards")
h << h.script("YAHOO.kansha.app.countCards(%s)" % ajax.py2js(self.column.id))
return h.root
@presentation.render_for(CardsCounter, 'footer')
def render_CardsCounter_footer(self, h, comp, model):
h << h.script(
"YAHOO.kansha.app.countCards(%(list_id)s);" %
{
'list_id': ajax.py2js(self.column.id),
}
)
return h.root
@presentation.render_for(CardsCounter, model='edit')
def render_CardsCounter_edit(self, h, comp, *args):
"""Render the title of the associated object"""
text = var.Var(self.text)
with h.div(class_='list-counter'):
with h.div(class_='cardCounter'):
with h.form(onsubmit='return false;'):
action = h.input(type='submit').action(lambda: self.validate(text(), comp)).get('onclick')
id_ = h.generate_id()
h << h.input(id=id_, type='text', value=self.column.nb_max_cards or '', onblur=action).action(text)
h << h.script(
"""YAHOO.util.Event.on(%s, 'keyup', function (e) {
if (e.keyCode == 13) {
e.preventDefault();
this.blur();
}
var result = this.value.replace(/[^0-9]/g, '')
if (this.value != result) {
this.value = result;
}
});""" % ajax.py2js(id_)
)
h << h.script(
"YAHOO.kansha.app.selectElement(%s);" % ajax.py2js(id_)
)
if self.error is not None:
with h.div(class_='nagare-error-message'):
h << self.error
return h.root
|
|
"""
FlexGet build and development utilities - unfortunately this file is somewhat messy
"""
import os
import sys
from paver.easy import *
import paver.virtual
import paver.setuputils
from paver.setuputils import setup, find_package_data, find_packages
sphinxcontrib = False
try:
from sphinxcontrib import paverutils
sphinxcontrib = True
except ImportError:
pass
sys.path.insert(0, '')
options = environment.options
# There is a bug in sqlalchemy 0.9.0, see gh#127
install_requires = ['FeedParser>=5.1.3', 'SQLAlchemy >=0.7, !=0.9.0, <0.9.99', 'PyYAML',
# There is a bug in beautifulsoup 4.2.0 that breaks imdb parsing, see http://flexget.com/ticket/2091
'beautifulsoup4>=4.1, !=4.2.0, <4.4', 'html5lib>=0.11', 'PyRSS2Gen', 'pynzb', 'progressbar', 'rpyc',
'jinja2', 'requests>=1.0, <2.99', 'python-dateutil!=2.0, !=2.2', 'jsonschema>=2.0', 'python-tvrage',
'tmdb3']
if sys.version_info < (2, 7):
# argparse is part of the standard library in python 2.7+
install_requires.append('argparse')
entry_points = {'console_scripts': ['flexget = flexget:main']}
# Provide an alternate exe on windows which does not cause a pop-up when scheduled
if sys.platform.startswith('win'):
entry_points.setdefault('gui_scripts', []).append('flexget-headless = flexget:main')
with open("README.rst") as readme:
long_description = readme.read()
setup(
name='FlexGet',
version='1.2', # our tasks append the .1234 (current build number) to the version number
description='FlexGet is a program that automates downloading and processing content (torrents, podcasts, etc.) '
            'from different sources such as RSS feeds, HTML pages, various sites and more.',
long_description=long_description,
author='Marko Koivusalo',
author_email='[email protected]',
license='MIT',
url='http://flexget.com',
download_url='http://download.flexget.com',
install_requires=install_requires,
packages=find_packages(exclude=['tests']),
package_data=find_package_data('flexget', package='flexget',
exclude=['FlexGet.egg-info', '*.pyc'],
only_in_packages=False), # NOTE: the exclude does not seem to work
zip_safe=False,
test_suite='nose.collector',
extras_require={
'memusage': ['guppy'],
'NZB': ['pynzb'],
'TaskTray': ['pywin32'],
'webui': ['flask>=0.7', 'cherrypy']
},
entry_points=entry_points,
classifiers=[
"Development Status :: 5 - Production/Stable",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
]
)
options(
minilib=Bunch(
# 'version' is included as workaround to https://github.com/paver/paver/issues/112, TODO: remove
extra_files=['virtual', 'svn', 'version']
),
virtualenv=Bunch(
paver_command_line='develop'
),
# sphinxcontrib.paverutils
sphinx=Bunch(
docroot='docs',
builddir='build',
builder='html',
confdir='docs'
),
)
def set_init_version(ver):
"""Replaces the version with ``ver`` in __init__.py"""
import fileinput
for line in fileinput.FileInput('flexget/__init__.py', inplace=1):
if line.startswith('__version__ = '):
line = "__version__ = '%s'\n" % ver
print line,
@task
@cmdopts([
('online', None, 'Run online tests')
])
def test(options):
"""Run FlexGet unit tests"""
options.setdefault('test', Bunch())
import nose
from nose.plugins.manager import DefaultPluginManager
cfg = nose.config.Config(plugins=DefaultPluginManager(), verbosity=2)
args = []
# Adding the -v flag makes the tests fail in python 2.7
#args.append('-v')
args.append('--processes=4')
args.append('-x')
if not options.test.get('online'):
args.append('--attr=!online')
args.append('--where=tests')
# Store current path since --where changes it, restore when leaving
cwd = os.getcwd()
try:
return nose.run(argv=args, config=cfg)
finally:
os.chdir(cwd)
@task
def clean():
"""Cleans up the virtualenv"""
import os
import glob
for p in ('bin', 'Scripts', 'build', 'dist', 'include', 'lib', 'man',
'share', 'FlexGet.egg-info', 'paver-minilib.zip', 'setup.py'):
pth = path(p)
if pth.isdir():
pth.rmtree()
elif pth.isfile():
pth.remove()
for pkg in set(options.setup.packages) | set(('tests',)):
for filename in glob.glob(pkg.replace('.', os.sep) + "/*.py[oc~]"):
path(filename).remove()
@task
@cmdopts([
('dist-dir=', 'd', 'directory to put final built distributions in'),
('revision=', 'r', 'minor revision number of this build')
], share_with=['make_egg'])
def sdist(options):
"""Build tar.gz distribution package"""
if not options.sdist.get('revision'):
print 'Revision number required.'
sys.exit(1)
revision = options.sdist.pop('revision')
print 'Revision: %s' % revision
# clean previous build
print 'Cleaning build...'
for p in ['build']:
pth = path(p)
if pth.isdir():
pth.rmtree()
elif pth.isfile():
pth.remove()
else:
print 'Unable to remove %s' % pth
# remove pre-compiled pycs from tests, I don't know why paver even tries to include them ...
# seems to happen only with sdist though
for pyc in path('tests/').files('*.pyc'):
pyc.remove()
ver = '%s.%s' % (options['version'], revision)
print 'Building %s' % ver
# replace version number
set_init_version(ver)
# hack version number into setup( ... options='1.0' ...)
from paver import tasks
setup_section = tasks.environment.options.setdefault("setup", Bunch())
setup_section.update(version=ver)
for t in ['minilib', 'generate_setup', 'setuptools.command.sdist']:
call_task(t)
# restore version ...
set_init_version('{git}')
return ver
@task
@cmdopts([
('dist-dir=', 'd', 'directory to put final built distributions in'),
('revision=', 'r', 'minor revision number of this build')
], share_with=['sdist'])
def make_egg(options):
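    """Build egg distribution package"""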
# naming this task bdist_egg would make egg installation fail
if not options.make_egg.get('revision'):
print 'Revision number required.'
sys.exit(1)
revision = options.make_egg.revision
ver = '%s.%s' % (options['version'], revision)
# hack version number into setup( ... options='1.0-svn' ...)
from paver import tasks
setup_section = tasks.environment.options.setdefault("setup", Bunch())
setup_section.update(version=ver)
# replace version number
set_init_version(ver)
print 'Making egg release'
import shutil
shutil.copytree('FlexGet.egg-info', 'FlexGet.egg-info-backup')
options.setdefault('bdist_egg', Bunch())['dist_dir'] = options.make_egg.get('dist_dir')
for t in ["minilib", "generate_setup", "setuptools.command.bdist_egg"]:
call_task(t)
# restore version ...
set_init_version('{git}')
# restore egg info from backup
print 'Removing FlexGet.egg-info ...'
shutil.rmtree('FlexGet.egg-info')
print 'Restoring FlexGet.egg-info'
shutil.move('FlexGet.egg-info-backup', 'FlexGet.egg-info')
return ver
@task
def coverage():
"""Make coverage.flexget.com"""
# --with-coverage --cover-package=flexget --cover-html --cover-html-dir /var/www/flexget_coverage/
import nose
from nose.plugins.manager import DefaultPluginManager
cfg = nose.config.Config(plugins=DefaultPluginManager(), verbosity=2)
argv = ['bin/paver']
argv.extend(['--attr=!online'])
argv.append('--with-coverage')
argv.append('--cover-html')
argv.extend(['--cover-package', 'flexget'])
argv.extend(['--cover-html-dir', '/var/www/flexget_coverage/'])
nose.run(argv=argv, config=cfg)
print 'Coverage generated'
@task
@cmdopts([
('docs-dir=', 'd', 'directory to put the documentation in')
])
def docs():
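    """Build the Sphinx documentation (requires sphinxcontrib-paverutils)"""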
if not sphinxcontrib:
print 'ERROR: requires sphinxcontrib-paverutils'
sys.exit(1)
from paver import tasks
if not os.path.exists('build'):
os.mkdir('build')
if not os.path.exists(os.path.join('build', 'sphinx')):
os.mkdir(os.path.join('build', 'sphinx'))
setup_section = tasks.environment.options.setdefault("sphinx", Bunch())
setup_section.update(outdir=options.docs.get('docs_dir', 'build/sphinx'))
call_task('html')
@task
@might_call('test', 'sdist', 'make_egg')
@cmdopts([
('no-tests', None, 'skips unit tests'),
('type=', None, 'type of release (src | egg)'),
('ver-file=', None, 'java properties file to create with version number FG_VERSION')
])
def release(options):
"""Make a FlexGet release. Same as bdist_egg but adds version information."""
if options.release.get('type') not in ['src', 'egg']:
print 'Invalid --type, must be src or egg'
sys.exit(1)
print 'Cleaning build...'
for p in ['build']:
pth = path(p)
if pth.isdir():
pth.rmtree()
elif pth.isfile():
pth.remove()
else:
print 'Unable to remove %s' % pth
# run unit tests
if not options.release.get('no_tests'):
if not test():
print 'Unit tests did not pass'
sys.exit(1)
if options.release.get('type') == 'egg':
print 'Making egg release'
ver = make_egg()
else:
print 'Making src release'
ver = sdist()
if getattr(options.release, 'ver_file', False):
with open(options.release.ver_file, 'w') as ver_file:
ver_file.write('FG_VERSION=%s' % ver)
@task
def install_tools():
"""Install development / jenkins tools and dependencies"""
try:
import pip
except ImportError:
print 'FATAL: Unable to import pip, please install it and run this again!'
sys.exit(1)
try:
import sphinxcontrib
print 'sphinxcontrib INSTALLED'
except ImportError:
pip.main(['install', 'sphinxcontrib-paverutils'])
pip.main(['install', '-r', 'jenkins-requirements.txt'])
@task
def clean_compiled():
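    """Remove compiled .pyc/.pyo and coverage files from the flexget package"""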
for root, dirs, files in os.walk('flexget'):
for name in files:
fqn = os.path.join(root, name)
if fqn[-3:] == 'pyc' or fqn[-3:] == 'pyo' or fqn[-5:] == 'cover':
print 'Deleting %s' % fqn
os.remove(fqn)
@task
@consume_args
def pep8(args):
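    """Run pep8 style checks over the flexget package"""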
try:
import pep8
except ImportError:
print 'Run bin/paver install_tools'
sys.exit(1)
# Ignoring certain errors
ignore = [
'E711', 'E712', # These are comparisons to singletons i.e. == False, and == None. We need these for sqlalchemy.
'W291', 'W293', 'E261',
'E128' # E128 continuation line under-indented for visual indent
]
styleguide = pep8.StyleGuide(show_source=True, ignore=ignore, repeat=1, max_line_length=120,
parse_argv=args)
styleguide.input_dir('flexget')
|
|
"""
Create a GUID for all non-GUID database records. If record already has a GUID,
skip; if record has an ID but not a GUID, create a GUID matching the ID. Newly
created records will have optimistically generated GUIDs.
"""
import time
import collections
from framework.mongo import StoredObject
from website import models
from website.app import init_app
app = init_app('website.settings', set_backends=True, routes=True)
def count_values(values):
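    """Return a mapping from each value to its number of occurrences."""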
counts = collections.defaultdict(int)
for value in values:
counts[value] += 1
return counts
def check_conflicts(conflict_models):
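    """Collect primary keys across the given models and return two lists:
    keys that collide as-is (case conflicts) and keys that collide only
    after lower-casing (no-case conflicts).
    """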
ids = []
for model in conflict_models:
ids += list(model.find().__iter__(raw=True))
if len(set(ids)) != len(ids):
print(
'Conflict among models {}'.format(
', '.join([model._name for model in conflict_models])
)
)
counts = count_values(ids)
case_conflicts = [
_id
for _id in counts
if counts[_id] > 1
]
ids = [
_id.lower()
for _id in ids
if _id
]
counts = count_values(ids)
no_case_conflicts = [
_id
for _id in counts
if counts[_id] > 1
]
return case_conflicts, no_case_conflicts
guid_models = [models.Node, models.User, models.NodeFile,
models.NodeWikiPage, models.MetaData]
def migrate_guid(conflict_models):
"""Check GUID models for conflicts, then migrate records that are not in
conflict. Lower-case primary keys; ensure GUIDs for each record; delete
outdated GUIDs.
"""
case_conflicts, no_case_conflicts = check_conflicts(conflict_models)
print 'Case conflicts', case_conflicts
print 'No-case conflicts', no_case_conflicts
if case_conflicts:
raise Exception('Unavoidable conflicts')
for model in conflict_models:
print 'Working on model', model._name
for obj in model.find():
if obj is None:
continue
# Check for existing GUID
guid = models.Guid.load(obj._primary_key)
print obj._primary_key
if guid is not None:
# Skip if GUID is already lower-cased
if guid._primary_key == guid._primary_key.lower():
continue
# Skip if GUID in no-case conflicts
if guid._primary_key.lower() in no_case_conflicts:
continue
# Delete GUID record
guid.remove_one(guid)
# Lower-case if not in no-case conflicts
if obj._primary_key.lower() not in no_case_conflicts:
obj._primary_key = obj._primary_key.lower()
obj.save()
# Update GUID
obj._ensure_guid()
check_pk_change(obj)
def check_pk_change(obj):
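    """Sanity-check backrefs after a primary-key change: verify that every
    backref and foreign field still points back at ``obj``, printing an
    error for each inconsistency found.
    """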
for backref in obj._backrefs_flat:
pk = backref[1]
if backref[0][1] == 'guid':
continue
Schema = StoredObject.get_collection(backref[0][1])
record = Schema.load(pk)
if record is None:
    print 'Error: Backref {} not found'.format(pk)
    continue
field = getattr(record, backref[0][2])
if isinstance(field, list):
if obj not in field:
print 'Error: Object {} not in backref list'.format(pk)
else:
if field != obj:
print 'Error: Object {} not equal to backref'.format(pk)
for fname, fobj in obj._fields.items():
if fobj._is_foreign:
if fobj._list:
key = fobj._field_instance._backref_field_name
else:
key = fobj._backref_field_name
if not key:
continue
backref_key = '__'.join([
obj._name,
key,
fname,
])
value = getattr(obj, fname)
if not value:
continue
if fobj._list:
for item in value:
if item is None:
continue
if obj not in getattr(item, backref_key):
print 'Error: Obj {} not in backrefs of referent {}'.format(
obj._primary_key, fname
)
else:
if obj not in getattr(value, backref_key):
print 'Error: Obj {} not in backrefs of referent {}'.format(
obj._primary_key, fname
)
def migrate_guid_log(log):
"""Migrate non-reference fields containing primary keys on logs.
"""
for key in ['project', 'node']:
if key in log.params:
value = log.params[key] or ''
record = models.Node.load(value.lower())
if record is not None:
log.params[key] = record._primary_key
if 'contributor' in log.params:
if isinstance(log.params['contributor'], basestring):
record = models.User.load(log.params['contributor'].lower())
if record:
log.params['contributor'] = record._primary_key
if 'contributors' in log.params:
for idx, uid in enumerate(log.params['contributors']):
if isinstance(uid, basestring):
record = models.User.load(uid.lower())
if record:
log.params['contributors'][idx] = record._primary_key
# Shouldn't have to do this, but the user field on some logs wasn't
# correctly migrated; this may be due to inconsistent backrefs
data = log.to_storage()
if data['user']:
record = models.User.load(data['user'].lower())
if record:
log.user = record
log.save()
def migrate_guid_node(node):
"""Migrate non-reference fields containing primary keys on nodes.
"""
for idx, contributor in enumerate(node.contributor_list):
if 'id' in contributor:
record = models.User.load(contributor['id'].lower())
if record:
node.contributor_list[idx]['id'] = record._primary_key
for idx, fork in enumerate(node.node__forked):
if isinstance(fork, basestring):
record = models.Node.load(fork.lower())
if record:
node.node__forked[idx] = record._primary_key
for idx, registration in enumerate(node.node__registrations):
if isinstance(registration, basestring):
record = models.Node.load(registration.lower())
if record:
node.node__registrations[idx] = record._primary_key
for page in node.wiki_pages_current:
record = models.NodeWikiPage.load(str(node.wiki_pages_current[page]).lower())
if record:
node.wiki_pages_current[page] = record._primary_key
for page in node.wiki_pages_versions:
for idx, wid in enumerate(node.wiki_pages_versions[page]):
record = models.NodeWikiPage.load(str(wid).lower())
if record:
node.wiki_pages_versions[page][idx] = record._primary_key
for fname in node.files_current:
record = models.NodeFile.load(str(node.files_current[fname]).lower())
if record:
node.files_current[fname] = record._primary_key
for fname in node.files_versions:
for idx, fid in enumerate(node.files_versions[fname]):
record = models.NodeFile.load(str(fid).lower())
if record:
node.files_versions[fname][idx] = record._primary_key
node.save()
def migrate_guid_wiki(wiki):
"""Migrate non-reference fields containing primary keys on wiki pages.
"""
data = wiki.to_storage()
uid = data.get('user')
if uid:
record = models.User.load(uid.lower())
if record:
wiki.user = record
pid = data.get('node')
if pid:
record = models.Node.load(pid.lower())
if record:
wiki.node = record
wiki.save()
if __name__ == '__main__':
t0 = time.time()
# Lower-case PKs and ensure GUIDs
migrate_guid(guid_models)
# Manual migrations
for node in models.Node.find():
#print 'Migrating node', node._primary_key
migrate_guid_node(node)
for log in models.NodeLog.find():
#print 'Migrating log', log._primary_key
migrate_guid_log(log)
for wiki in models.NodeWikiPage.find():
#print 'Migrating wiki', wiki._primary_key
migrate_guid_wiki(wiki)
print 'Took {}'.format(time.time() - t0)
|
|
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from oslotest import mockpatch
from tempest.common import credentials_factory as credentials
from tempest.common import dynamic_creds
from tempest import config
from tempest.lib.common import rest_client
from tempest.lib import exceptions as lib_exc
from tempest.lib.services.identity.v2 import identity_client as v2_iden_client
from tempest.lib.services.identity.v2 import roles_client as v2_roles_client
from tempest.lib.services.identity.v2 import tenants_client as \
v2_tenants_client
from tempest.lib.services.identity.v2 import token_client as v2_token_client
from tempest.lib.services.identity.v2 import users_client as v2_users_client
from tempest.lib.services.identity.v3 import identity_client as v3_iden_client
from tempest.lib.services.identity.v3 import projects_client as \
v3_projects_client
from tempest.lib.services.identity.v3 import roles_client as v3_roles_client
from tempest.lib.services.identity.v3 import token_client as v3_token_client
from tempest.lib.services.identity.v3 import users_client as \
v3_users_client
from tempest.lib.services.network import routers_client
from tempest.services.identity.v3.json import domains_client
from tempest.tests import base
from tempest.tests import fake_config
from tempest.tests.lib import fake_http
from tempest.tests.lib import fake_identity
class TestDynamicCredentialProvider(base.TestCase):
fixed_params = {'name': 'test class',
'identity_version': 'v2',
'admin_role': 'admin'}
token_client = v2_token_client
iden_client = v2_iden_client
roles_client = v2_roles_client
tenants_client = v2_tenants_client
users_client = v2_users_client
token_client_class = token_client.TokenClient
fake_response = fake_identity._fake_v2_response
tenants_client_class = tenants_client.TenantsClient
delete_tenant = 'delete_tenant'
def setUp(self):
super(TestDynamicCredentialProvider, self).setUp()
self.useFixture(fake_config.ConfigFixture())
self.patchobject(config, 'TempestConfigPrivate',
fake_config.FakePrivate)
self.patchobject(self.token_client_class, 'raw_request',
self.fake_response)
cfg.CONF.set_default('operator_role', 'FakeRole',
group='object-storage')
self._mock_list_ec2_credentials('fake_user_id', 'fake_tenant_id')
self.fixed_params.update(
admin_creds=self._get_fake_admin_creds())
def test_tempest_client(self):
creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
self.assertIsInstance(creds.identity_admin_client,
self.iden_client.IdentityClient)
def _get_fake_admin_creds(self):
return credentials.get_credentials(
fill_in=False,
identity_version=self.fixed_params['identity_version'],
username='fake_username', password='fake_password',
tenant_name='fake_tenant')
def _mock_user_create(self, id, name):
user_fix = self.useFixture(mockpatch.PatchObject(
self.users_client.UsersClient,
'create_user',
return_value=(rest_client.ResponseBody
(200, {'user': {'id': id, 'name': name}}))))
return user_fix
def _mock_tenant_create(self, id, name):
tenant_fix = self.useFixture(mockpatch.PatchObject(
self.tenants_client.TenantsClient,
'create_tenant',
return_value=(rest_client.ResponseBody
(200, {'tenant': {'id': id, 'name': name}}))))
return tenant_fix
def _mock_list_roles(self, id, name):
roles_fix = self.useFixture(mockpatch.PatchObject(
self.roles_client.RolesClient,
'list_roles',
return_value=(rest_client.ResponseBody
(200,
{'roles': [{'id': id, 'name': name},
{'id': '1', 'name': 'FakeRole'},
{'id': '2', 'name': 'Member'}]}))))
return roles_fix
def _mock_list_2_roles(self):
roles_fix = self.useFixture(mockpatch.PatchObject(
self.roles_client.RolesClient,
'list_roles',
return_value=(rest_client.ResponseBody
(200,
{'roles': [{'id': '1234', 'name': 'role1'},
{'id': '1', 'name': 'FakeRole'},
{'id': '12345', 'name': 'role2'}]}))))
return roles_fix
def _mock_assign_user_role(self):
tenant_fix = self.useFixture(mockpatch.PatchObject(
self.roles_client.RolesClient,
'create_user_role_on_project',
return_value=(rest_client.ResponseBody
(200, {}))))
return tenant_fix
def _mock_list_role(self):
roles_fix = self.useFixture(mockpatch.PatchObject(
self.roles_client.RolesClient,
'list_roles',
return_value=(rest_client.ResponseBody
(200, {'roles': [
{'id': '1', 'name': 'FakeRole'},
{'id': '2', 'name': 'Member'}]}))))
return roles_fix
def _mock_list_ec2_credentials(self, user_id, tenant_id):
ec2_creds_fix = self.useFixture(mockpatch.PatchObject(
self.users_client.UsersClient,
'list_user_ec2_credentials',
return_value=(rest_client.ResponseBody
(200, {'credentials': [{
'access': 'fake_access',
'secret': 'fake_secret',
'tenant_id': tenant_id,
'user_id': user_id,
'trust_id': None}]}))))
return ec2_creds_fix
def _mock_network_create(self, iso_creds, id, name):
net_fix = self.useFixture(mockpatch.PatchObject(
iso_creds.networks_admin_client,
'create_network',
return_value={'network': {'id': id, 'name': name}}))
return net_fix
def _mock_subnet_create(self, iso_creds, id, name):
subnet_fix = self.useFixture(mockpatch.PatchObject(
iso_creds.subnets_admin_client,
'create_subnet',
return_value={'subnet': {'id': id, 'name': name}}))
return subnet_fix
def _mock_router_create(self, id, name):
router_fix = self.useFixture(mockpatch.PatchObject(
routers_client.RoutersClient,
'create_router',
return_value={'router': {'id': id, 'name': name}}))
return router_fix
@mock.patch('tempest.lib.common.rest_client.RestClient')
def test_primary_creds(self, MockRestClient):
creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
self._mock_assign_user_role()
self._mock_list_role()
self._mock_tenant_create('1234', 'fake_prim_tenant')
self._mock_user_create('1234', 'fake_prim_user')
primary_creds = creds.get_primary_creds()
self.assertEqual(primary_creds.username, 'fake_prim_user')
self.assertEqual(primary_creds.tenant_name, 'fake_prim_tenant')
# Verify IDs
self.assertEqual(primary_creds.tenant_id, '1234')
self.assertEqual(primary_creds.user_id, '1234')
@mock.patch('tempest.lib.common.rest_client.RestClient')
def test_admin_creds(self, MockRestClient):
creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
self._mock_list_roles('1234', 'admin')
self._mock_user_create('1234', 'fake_admin_user')
self._mock_tenant_create('1234', 'fake_admin_tenant')
user_mock = mock.patch.object(self.roles_client.RolesClient,
'create_user_role_on_project')
user_mock.start()
self.addCleanup(user_mock.stop)
with mock.patch.object(self.roles_client.RolesClient,
'create_user_role_on_project') as user_mock:
admin_creds = creds.get_admin_creds()
user_mock.assert_has_calls([
mock.call('1234', '1234', '1234')])
self.assertEqual(admin_creds.username, 'fake_admin_user')
self.assertEqual(admin_creds.tenant_name, 'fake_admin_tenant')
# Verify IDs
self.assertEqual(admin_creds.tenant_id, '1234')
self.assertEqual(admin_creds.user_id, '1234')
@mock.patch('tempest.lib.common.rest_client.RestClient')
def test_role_creds(self, MockRestClient):
creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
self._mock_list_2_roles()
self._mock_user_create('1234', 'fake_role_user')
self._mock_tenant_create('1234', 'fake_role_tenant')
user_mock = mock.patch.object(self.roles_client.RolesClient,
'create_user_role_on_project')
user_mock.start()
self.addCleanup(user_mock.stop)
with mock.patch.object(self.roles_client.RolesClient,
'create_user_role_on_project') as user_mock:
role_creds = creds.get_creds_by_roles(
roles=['role1', 'role2'])
calls = user_mock.mock_calls
# Assert that the role creation is called with the 2 specified roles
self.assertEqual(len(calls), 2)
args = map(lambda x: x[1], calls)
args = list(args)
self.assertIn(('1234', '1234', '1234'), args)
self.assertIn(('1234', '1234', '12345'), args)
self.assertEqual(role_creds.username, 'fake_role_user')
self.assertEqual(role_creds.tenant_name, 'fake_role_tenant')
# Verify IDs
self.assertEqual(role_creds.tenant_id, '1234')
self.assertEqual(role_creds.user_id, '1234')
@mock.patch('tempest.lib.common.rest_client.RestClient')
def test_all_cred_cleanup(self, MockRestClient):
creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
self._mock_assign_user_role()
self._mock_list_role()
self._mock_tenant_create('1234', 'fake_prim_tenant')
self._mock_user_create('1234', 'fake_prim_user')
creds.get_primary_creds()
self._mock_tenant_create('12345', 'fake_alt_tenant')
self._mock_user_create('12345', 'fake_alt_user')
creds.get_alt_creds()
self._mock_tenant_create('123456', 'fake_admin_tenant')
self._mock_user_create('123456', 'fake_admin_user')
self._mock_list_roles('123456', 'admin')
creds.get_admin_creds()
user_mock = self.patchobject(self.users_client.UsersClient,
'delete_user')
tenant_mock = self.patchobject(self.tenants_client_class,
self.delete_tenant)
creds.clear_creds()
# Verify user delete calls
calls = user_mock.mock_calls
self.assertEqual(len(calls), 3)
args = map(lambda x: x[1][0], calls)
args = list(args)
self.assertIn('1234', args)
self.assertIn('12345', args)
self.assertIn('123456', args)
# Verify tenant delete calls
calls = tenant_mock.mock_calls
self.assertEqual(len(calls), 3)
args = map(lambda x: x[1][0], calls)
args = list(args)
self.assertIn('1234', args)
self.assertIn('12345', args)
self.assertIn('123456', args)
@mock.patch('tempest.lib.common.rest_client.RestClient')
def test_alt_creds(self, MockRestClient):
creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
self._mock_assign_user_role()
self._mock_list_role()
self._mock_user_create('1234', 'fake_alt_user')
self._mock_tenant_create('1234', 'fake_alt_tenant')
alt_creds = creds.get_alt_creds()
self.assertEqual(alt_creds.username, 'fake_alt_user')
self.assertEqual(alt_creds.tenant_name, 'fake_alt_tenant')
# Verify IDs
self.assertEqual(alt_creds.tenant_id, '1234')
self.assertEqual(alt_creds.user_id, '1234')
@mock.patch('tempest.lib.common.rest_client.RestClient')
def test_no_network_creation_with_config_set(self, MockRestClient):
creds = dynamic_creds.DynamicCredentialProvider(
neutron_available=True, create_networks=False,
project_network_cidr='10.100.0.0/16', project_network_mask_bits=28,
**self.fixed_params)
self._mock_assign_user_role()
self._mock_list_role()
self._mock_user_create('1234', 'fake_prim_user')
self._mock_tenant_create('1234', 'fake_prim_tenant')
net = mock.patch.object(creds.networks_admin_client,
'delete_network')
net_mock = net.start()
subnet = mock.patch.object(creds.subnets_admin_client,
'delete_subnet')
subnet_mock = subnet.start()
router = mock.patch.object(creds.routers_admin_client,
'delete_router')
router_mock = router.start()
primary_creds = creds.get_primary_creds()
self.assertEqual(net_mock.mock_calls, [])
self.assertEqual(subnet_mock.mock_calls, [])
self.assertEqual(router_mock.mock_calls, [])
network = primary_creds.network
subnet = primary_creds.subnet
router = primary_creds.router
self.assertIsNone(network)
self.assertIsNone(subnet)
self.assertIsNone(router)
@mock.patch('tempest.lib.common.rest_client.RestClient')
def test_network_creation(self, MockRestClient):
creds = dynamic_creds.DynamicCredentialProvider(
neutron_available=True,
project_network_cidr='10.100.0.0/16', project_network_mask_bits=28,
**self.fixed_params)
self._mock_assign_user_role()
self._mock_list_role()
self._mock_user_create('1234', 'fake_prim_user')
self._mock_tenant_create('1234', 'fake_prim_tenant')
self._mock_network_create(creds, '1234', 'fake_net')
self._mock_subnet_create(creds, '1234', 'fake_subnet')
self._mock_router_create('1234', 'fake_router')
router_interface_mock = self.patch(
'tempest.lib.services.network.routers_client.RoutersClient.'
'add_router_interface')
primary_creds = creds.get_primary_creds()
router_interface_mock.assert_called_once_with('1234', subnet_id='1234')
network = primary_creds.network
subnet = primary_creds.subnet
router = primary_creds.router
self.assertEqual(network['id'], '1234')
self.assertEqual(network['name'], 'fake_net')
self.assertEqual(subnet['id'], '1234')
self.assertEqual(subnet['name'], 'fake_subnet')
self.assertEqual(router['id'], '1234')
self.assertEqual(router['name'], 'fake_router')
@mock.patch('tempest.lib.common.rest_client.RestClient')
def test_network_cleanup(self, MockRestClient):
def side_effect(**args):
return {"security_groups": [{"tenant_id": args['tenant_id'],
"name": args['name'],
"description": args['name'],
"security_group_rules": [],
"id": "sg-%s" % args['tenant_id']}]}
creds = dynamic_creds.DynamicCredentialProvider(
neutron_available=True,
project_network_cidr='10.100.0.0/16', project_network_mask_bits=28,
**self.fixed_params)
# Create primary tenant and network
self._mock_assign_user_role()
self._mock_list_role()
self._mock_user_create('1234', 'fake_prim_user')
self._mock_tenant_create('1234', 'fake_prim_tenant')
self._mock_network_create(creds, '1234', 'fake_net')
self._mock_subnet_create(creds, '1234', 'fake_subnet')
self._mock_router_create('1234', 'fake_router')
router_interface_mock = self.patch(
'tempest.lib.services.network.routers_client.RoutersClient.'
'add_router_interface')
creds.get_primary_creds()
router_interface_mock.assert_called_once_with('1234', subnet_id='1234')
router_interface_mock.reset_mock()
# Create alternate tenant and network
self._mock_user_create('12345', 'fake_alt_user')
self._mock_tenant_create('12345', 'fake_alt_tenant')
self._mock_network_create(creds, '12345', 'fake_alt_net')
self._mock_subnet_create(creds, '12345', 'fake_alt_subnet')
self._mock_router_create('12345', 'fake_alt_router')
creds.get_alt_creds()
router_interface_mock.assert_called_once_with('12345',
subnet_id='12345')
router_interface_mock.reset_mock()
# Create admin tenant and networks
self._mock_user_create('123456', 'fake_admin_user')
self._mock_tenant_create('123456', 'fake_admin_tenant')
self._mock_network_create(creds, '123456', 'fake_admin_net')
self._mock_subnet_create(creds, '123456', 'fake_admin_subnet')
self._mock_router_create('123456', 'fake_admin_router')
self._mock_list_roles('123456', 'admin')
creds.get_admin_creds()
self.patchobject(self.users_client.UsersClient, 'delete_user')
self.patchobject(self.tenants_client_class, self.delete_tenant)
net = mock.patch.object(creds.networks_admin_client, 'delete_network')
net_mock = net.start()
subnet = mock.patch.object(creds.subnets_admin_client, 'delete_subnet')
subnet_mock = subnet.start()
router = mock.patch.object(creds.routers_admin_client, 'delete_router')
router_mock = router.start()
remove_router_interface_mock = self.patch(
'tempest.lib.services.network.routers_client.RoutersClient.'
'remove_router_interface')
return_values = ({'status': 200}, {'ports': []})
port_list_mock = mock.patch.object(creds.ports_admin_client,
'list_ports',
return_value=return_values)
port_list_mock.start()
secgroup_list_mock = mock.patch.object(
creds.security_groups_admin_client,
'list_security_groups',
side_effect=side_effect)
secgroup_list_mock.start()
return_values = fake_http.fake_http_response({}, status=204), ''
remove_secgroup_mock = self.patch(
'tempest.lib.services.network.security_groups_client.'
'SecurityGroupsClient.delete', return_value=return_values)
creds.clear_creds()
# Verify default security group delete
calls = remove_secgroup_mock.mock_calls
self.assertEqual(len(calls), 3)
args = map(lambda x: x[1][0], calls)
args = list(args)
self.assertIn('v2.0/security-groups/sg-1234', args)
self.assertIn('v2.0/security-groups/sg-12345', args)
self.assertIn('v2.0/security-groups/sg-123456', args)
# Verify remove router interface calls
calls = remove_router_interface_mock.mock_calls
self.assertEqual(len(calls), 3)
args = map(lambda x: (x[1][0], x[2]), calls)
args = list(args)
self.assertIn(('1234', {'subnet_id': '1234'}), args)
self.assertIn(('12345', {'subnet_id': '12345'}), args)
self.assertIn(('123456', {'subnet_id': '123456'}), args)
# Verify network delete calls
calls = net_mock.mock_calls
self.assertEqual(len(calls), 3)
args = map(lambda x: x[1][0], calls)
args = list(args)
self.assertIn('1234', args)
self.assertIn('12345', args)
self.assertIn('123456', args)
# Verify subnet delete calls
calls = subnet_mock.mock_calls
self.assertEqual(len(calls), 3)
args = map(lambda x: x[1][0], calls)
args = list(args)
self.assertIn('1234', args)
self.assertIn('12345', args)
self.assertIn('123456', args)
# Verify router delete calls
calls = router_mock.mock_calls
self.assertEqual(len(calls), 3)
args = map(lambda x: x[1][0], calls)
args = list(args)
self.assertIn('1234', args)
self.assertIn('12345', args)
self.assertIn('123456', args)
@mock.patch('tempest.lib.common.rest_client.RestClient')
def test_network_alt_creation(self, MockRestClient):
creds = dynamic_creds.DynamicCredentialProvider(
neutron_available=True,
project_network_cidr='10.100.0.0/16', project_network_mask_bits=28,
**self.fixed_params)
self._mock_assign_user_role()
self._mock_list_role()
self._mock_user_create('1234', 'fake_alt_user')
self._mock_tenant_create('1234', 'fake_alt_tenant')
self._mock_network_create(creds, '1234', 'fake_alt_net')
self._mock_subnet_create(creds, '1234', 'fake_alt_subnet')
self._mock_router_create('1234', 'fake_alt_router')
router_interface_mock = self.patch(
'tempest.lib.services.network.routers_client.RoutersClient.'
'add_router_interface')
alt_creds = creds.get_alt_creds()
router_interface_mock.assert_called_once_with('1234', subnet_id='1234')
network = alt_creds.network
subnet = alt_creds.subnet
router = alt_creds.router
self.assertEqual(network['id'], '1234')
self.assertEqual(network['name'], 'fake_alt_net')
self.assertEqual(subnet['id'], '1234')
self.assertEqual(subnet['name'], 'fake_alt_subnet')
self.assertEqual(router['id'], '1234')
self.assertEqual(router['name'], 'fake_alt_router')
@mock.patch('tempest.lib.common.rest_client.RestClient')
def test_network_admin_creation(self, MockRestClient):
creds = dynamic_creds.DynamicCredentialProvider(
neutron_available=True,
project_network_cidr='10.100.0.0/16', project_network_mask_bits=28,
**self.fixed_params)
self._mock_assign_user_role()
self._mock_user_create('1234', 'fake_admin_user')
self._mock_tenant_create('1234', 'fake_admin_tenant')
self._mock_network_create(creds, '1234', 'fake_admin_net')
self._mock_subnet_create(creds, '1234', 'fake_admin_subnet')
self._mock_router_create('1234', 'fake_admin_router')
router_interface_mock = self.patch(
'tempest.lib.services.network.routers_client.RoutersClient.'
'add_router_interface')
self._mock_list_roles('123456', 'admin')
admin_creds = creds.get_admin_creds()
router_interface_mock.assert_called_once_with('1234', subnet_id='1234')
network = admin_creds.network
subnet = admin_creds.subnet
router = admin_creds.router
self.assertEqual(network['id'], '1234')
self.assertEqual(network['name'], 'fake_admin_net')
self.assertEqual(subnet['id'], '1234')
self.assertEqual(subnet['name'], 'fake_admin_subnet')
self.assertEqual(router['id'], '1234')
self.assertEqual(router['name'], 'fake_admin_router')
@mock.patch('tempest.lib.common.rest_client.RestClient')
def test_no_network_resources(self, MockRestClient):
net_dict = {
'network': False,
'router': False,
'subnet': False,
'dhcp': False,
}
creds = dynamic_creds.DynamicCredentialProvider(
neutron_available=True,
project_network_cidr='10.100.0.0/16', project_network_mask_bits=28,
network_resources=net_dict,
**self.fixed_params)
self._mock_assign_user_role()
self._mock_list_role()
self._mock_user_create('1234', 'fake_prim_user')
self._mock_tenant_create('1234', 'fake_prim_tenant')
net = mock.patch.object(creds.networks_admin_client,
'delete_network')
net_mock = net.start()
subnet = mock.patch.object(creds.subnets_admin_client,
'delete_subnet')
subnet_mock = subnet.start()
router = mock.patch.object(creds.routers_admin_client,
'delete_router')
router_mock = router.start()
primary_creds = creds.get_primary_creds()
self.assertEqual(net_mock.mock_calls, [])
self.assertEqual(subnet_mock.mock_calls, [])
self.assertEqual(router_mock.mock_calls, [])
network = primary_creds.network
subnet = primary_creds.subnet
router = primary_creds.router
self.assertIsNone(network)
self.assertIsNone(subnet)
self.assertIsNone(router)
@mock.patch('tempest.lib.common.rest_client.RestClient')
def test_router_without_network(self, MockRestClient):
net_dict = {
'network': False,
'router': True,
'subnet': False,
'dhcp': False,
}
creds = dynamic_creds.DynamicCredentialProvider(
neutron_available=True,
project_network_cidr='10.100.0.0/16', project_network_mask_bits=28,
network_resources=net_dict,
**self.fixed_params)
self._mock_assign_user_role()
self._mock_list_role()
self._mock_user_create('1234', 'fake_prim_user')
self._mock_tenant_create('1234', 'fake_prim_tenant')
self.assertRaises(lib_exc.InvalidConfiguration,
creds.get_primary_creds)
@mock.patch('tempest.lib.common.rest_client.RestClient')
def test_subnet_without_network(self, MockRestClient):
net_dict = {
'network': False,
'router': False,
'subnet': True,
'dhcp': False,
}
creds = dynamic_creds.DynamicCredentialProvider(
neutron_available=True,
project_network_cidr='10.100.0.0/16', project_network_mask_bits=28,
network_resources=net_dict,
**self.fixed_params)
self._mock_assign_user_role()
self._mock_list_role()
self._mock_user_create('1234', 'fake_prim_user')
self._mock_tenant_create('1234', 'fake_prim_tenant')
self.assertRaises(lib_exc.InvalidConfiguration,
creds.get_primary_creds)
@mock.patch('tempest.lib.common.rest_client.RestClient')
def test_dhcp_without_subnet(self, MockRestClient):
net_dict = {
'network': False,
'router': False,
'subnet': False,
'dhcp': True,
}
creds = dynamic_creds.DynamicCredentialProvider(
neutron_available=True,
project_network_cidr='10.100.0.0/16', project_network_mask_bits=28,
network_resources=net_dict,
**self.fixed_params)
self._mock_assign_user_role()
self._mock_list_role()
self._mock_user_create('1234', 'fake_prim_user')
self._mock_tenant_create('1234', 'fake_prim_tenant')
self.assertRaises(lib_exc.InvalidConfiguration,
creds.get_primary_creds)
class TestDynamicCredentialProviderV3(TestDynamicCredentialProvider):
fixed_params = {'name': 'test class',
'identity_version': 'v3',
'admin_role': 'admin'}
token_client = v3_token_client
iden_client = v3_iden_client
roles_client = v3_roles_client
tenants_client = v3_projects_client
users_client = v3_users_client
token_client_class = token_client.V3TokenClient
fake_response = fake_identity._fake_v3_response
tenants_client_class = tenants_client.ProjectsClient
delete_tenant = 'delete_project'
def setUp(self):
super(TestDynamicCredentialProviderV3, self).setUp()
self.useFixture(fake_config.ConfigFixture())
self.useFixture(mockpatch.PatchObject(
domains_client.DomainsClient, 'list_domains',
return_value=dict(domains=[dict(id='default',
name='Default')])))
self.patchobject(self.roles_client.RolesClient,
'create_user_role_on_domain')
def _mock_list_ec2_credentials(self, user_id, tenant_id):
pass
def _mock_tenant_create(self, id, name):
project_fix = self.useFixture(mockpatch.PatchObject(
self.tenants_client.ProjectsClient,
'create_project',
return_value=(rest_client.ResponseBody
(200, {'project': {'id': id, 'name': name}}))))
return project_fix
@mock.patch('tempest.lib.common.rest_client.RestClient')
def test_member_role_creation_with_duplicate(self, rest_client_mock):
creds = dynamic_creds.DynamicCredentialProvider(**self.fixed_params)
creds.creds_client = mock.MagicMock()
creds.creds_client.create_user_role.side_effect = lib_exc.Conflict
with mock.patch('tempest.common.dynamic_creds.LOG') as log_mock:
creds._create_creds()
log_mock.warning.assert_called_once_with(
"Member role already exists, ignoring conflict.")
creds.creds_client.assign_user_role.assert_called_once_with(
mock.ANY, mock.ANY, 'Member')
|
|
#!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
#
# This is a thin wrapper for native LD. This is not meant to be
# used by the user, but is called from pnacl-translate.
# This implements the native linking part of translation.
#
# All inputs must be native objects or linker scripts.
#
# --pnacl-sb will cause the sandboxed LD to be used.
# The bulk of this file is logic to invoke the sandboxed translator.
import subprocess
from driver_tools import CheckTranslatorPrerequisites, GetArch, ParseArgs, \
Run, UnrecognizedOption
from driver_env import env
from driver_log import Log
import ldtools
import pathtools
EXTRA_ENV = {
'INPUTS' : '',
'OUTPUT' : '',
# the INPUTS file coming from the llc translation step
'LLC_TRANSLATED_FILE' : '',
'SPLIT_MODULE' : '0',
'USE_STDLIB': '1',
# Determine if we should build nexes compatible with the IRT.
'USE_IRT' : '1',
# Upstream gold has the segment gap built in, but the gap can be modified
# when not using the IRT. The gap does need to be at least one bundle so the
# halt sled can be added for the TCB in case the segment ends up being a
# multiple of 64k.
# --eh-frame-hdr asks the linker to generate an .eh_frame_hdr section,
# which is a presorted list of registered frames. This section is
# used by libgcc_eh/libgcc_s to avoid doing the sort during runtime.
# http://www.airs.com/blog/archives/462
#
# BE CAREFUL: anything added to LD_FLAGS should be synchronized with
# flags used by the in-browser translator.
# See: binutils/gold/nacl_file.cc
'LD_FLAGS' : '-nostdlib ' +
# Only relevant for ARM where it suppresses a warning.
# Ignored for other archs.
'--no-fix-cortex-a8 ' +
'-m ${LD_EMUL} ' +
'--eh-frame-hdr ' +
'${NONSFI_NACL ? -pie : -static} ' +
# "_begin" allows a PIE to find its load address in
# order to apply dynamic relocations.
'${NONSFI_NACL ? -defsym=_begin=0} ' +
# Give an error if any TEXTRELs occur.
'-z text ' +
'--build-id ' +
# Give non-IRT builds 12MB of text before starting rodata
# instead of the larger default gap. The gap cannot be
# too small (e.g., 0) because sel_ldr requires space for
# adding a halt sled.
'${!USE_IRT ? --rosegment-gap=0xc00000}',
'LD_EMUL' : '${LD_EMUL_%BASE_ARCH%}',
'LD_EMUL_ARM' : 'armelf_nacl',
'LD_EMUL_X8632' : 'elf_nacl',
'LD_EMUL_X8664' : 'elf64_nacl',
'LD_EMUL_MIPS32' : 'elf32ltsmip_nacl',
'SEARCH_DIRS' : '${SEARCH_DIRS_USER} ${SEARCH_DIRS_BUILTIN}',
'SEARCH_DIRS_USER' : '',
'SEARCH_DIRS_BUILTIN': '${USE_STDLIB ? ${LIBS_ARCH}/}',
'LIBS_ARCH' : '${LIBS_%ARCH%}',
'LIBS_ARM' : '${BASE_LIB_NATIVE}arm',
'LIBS_ARM_NONSFI' : '${BASE_LIB_NATIVE}arm-nonsfi',
'LIBS_X8632' : '${BASE_LIB_NATIVE}x86-32',
'LIBS_X8632_NONSFI': '${BASE_LIB_NATIVE}x86-32-nonsfi',
'LIBS_X8664' : '${BASE_LIB_NATIVE}x86-64',
'LIBS_MIPS32' : '${BASE_LIB_NATIVE}mips32',
# Note: this is only used in the unsandboxed case
'RUN_LD' : '${LD} ${LD_FLAGS} ${inputs} -o ${output}'
}
def PassThrough(*args):
env.append('LD_FLAGS', *args)
LDPatterns = [
( '-o(.+)', "env.set('OUTPUT', pathtools.normalize($0))"),
( ('-o', '(.+)'), "env.set('OUTPUT', pathtools.normalize($0))"),
( '--noirt', "env.set('USE_IRT', '0')"),
( '-static', "env.set('STATIC', '1')"),
( '-nostdlib', "env.set('USE_STDLIB', '0')"),
( '-L(.+)',
"env.append('SEARCH_DIRS_USER', pathtools.normalize($0))"),
( ('-L', '(.*)'),
"env.append('SEARCH_DIRS_USER', pathtools.normalize($0))"),
# Note: we do not yet support all the combinations of flags which affect
# layout of the various sections and segments because the corner cases in gold
# may not all be worked out yet. They can be added (and tested!) as needed.
( ('(-Ttext=.*)'), PassThrough),
( ('(-Trodata=.*)'), PassThrough),
( ('(-Ttext-segment=.*)'), PassThrough),
( ('(-Trodata-segment=.*)'), PassThrough),
( ('(--section-start)', '(.+)'),PassThrough),
( ('(--section-start=.*)'), PassThrough),
( ('(-e)','(.*)'), PassThrough),
( '(--entry=.*)', PassThrough),
( '(-M)', PassThrough),
( '(-t)', PassThrough),
( ('-y','(.*)'), PassThrough),
( ('(-defsym)','(.*)'), PassThrough),
( '-export-dynamic', PassThrough),
( '(--print-gc-sections)', PassThrough),
( '(--gc-sections)', PassThrough),
( '(--unresolved-symbols=.*)', PassThrough),
( '(--dynamic-linker=.*)', PassThrough),
( '(-g)', PassThrough),
( '(--build-id)', PassThrough),
( '-melf_nacl', "env.set('ARCH', 'X8632')"),
( ('-m','elf_nacl'), "env.set('ARCH', 'X8632')"),
( '-melf64_nacl', "env.set('ARCH', 'X8664')"),
( ('-m','elf64_nacl'), "env.set('ARCH', 'X8664')"),
( '-marmelf_nacl', "env.set('ARCH', 'ARM')"),
( ('-m','armelf_nacl'), "env.set('ARCH', 'ARM')"),
( '-mmipselelf_nacl', "env.set('ARCH', 'MIPS32')"),
( ('-m','mipselelf_nacl'), "env.set('ARCH', 'MIPS32')"),
# Inputs and options that need to be kept in order
( '(--no-as-needed)', "env.append('INPUTS', $0)"),
( '(--as-needed)', "env.append('INPUTS', $0)"),
( '(--start-group)', "env.append('INPUTS', $0)"),
( '(--end-group)', "env.append('INPUTS', $0)"),
( '(-Bstatic)', "env.append('INPUTS', $0)"),
( '(-Bdynamic)', "env.append('INPUTS', $0)"),
# This is the file passed from llc during translation (used to be via shmem)
( ('--llc-translated-file=(.*)'), "env.append('INPUTS', $0)\n"
"env.set('LLC_TRANSLATED_FILE', $0)"),
( '-split-module=([0-9]+)', "env.set('SPLIT_MODULE', $0)"),
( '(--(no-)?whole-archive)', "env.append('INPUTS', $0)"),
( '(-l.*)', "env.append('INPUTS', $0)"),
( '(--undefined=.*)', "env.append('INPUTS', $0)"),
( '(-.*)', UnrecognizedOption),
( '(.*)', "env.append('INPUTS', pathtools.normalize($0))"),
]
def main(argv):
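    """Parse the linker arguments, expand the inputs, and run the native LD,
    either directly or through the sandboxed translator."""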
env.update(EXTRA_ENV)
ParseArgs(argv, LDPatterns)
GetArch(required=True)
inputs = env.get('INPUTS')
output = env.getone('OUTPUT')
if output == '':
output = pathtools.normalize('a.out')
# Expand all parameters
# This resolves -lfoo into actual filenames,
# and expands linker scripts into command-line arguments.
inputs = ldtools.ExpandInputs(inputs,
env.get('SEARCH_DIRS'),
env.getbool('STATIC'),
ldtools.LibraryTypes.NATIVE)
env.push()
env.set('inputs', *inputs)
env.set('output', output)
if env.getbool('SANDBOXED'):
RunLDSandboxed()
else:
Run('${RUN_LD}')
env.pop()
# only reached in case of no errors
return 0
def IsFlag(arg):
return arg.startswith('-')
def RunLDSandboxed():
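    """Invoke the sandboxed LD (LD.nexe) under sel_universal to link the
    translated object files and native libraries into the output nexe."""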
if not env.getbool('USE_STDLIB'):
Log.Fatal('-nostdlib is not supported by the sandboxed translator')
CheckTranslatorPrerequisites()
# The "main" input file is the application's combined object file.
all_inputs = env.get('inputs')
main_input = env.getone('LLC_TRANSLATED_FILE')
if not main_input:
Log.Fatal("Sandboxed LD requires one shm input file")
outfile = env.getone('output')
modules = int(env.getone('SPLIT_MODULE'))
if modules > 1:
first_mainfile = all_inputs.index(main_input)
first_extra = all_inputs.index(main_input) + modules
# Just the split module files
llc_outputs = all_inputs[first_mainfile:first_extra]
# everything else
all_inputs = all_inputs[:first_mainfile] + all_inputs[first_extra:]
else:
llc_outputs = [main_input]
files = LinkerFiles(all_inputs)
ld_flags = env.get('LD_FLAGS')
script = MakeSelUniversalScriptForLD(ld_flags,
llc_outputs,
files,
outfile)
Run('${SEL_UNIVERSAL_PREFIX} ${SEL_UNIVERSAL} ' +
'${SEL_UNIVERSAL_FLAGS} -- ${LD_SB}',
stdin_contents=script,
# stdout/stderr will be automatically dumped
# upon failure
redirect_stderr=subprocess.PIPE,
redirect_stdout=subprocess.PIPE)
def MakeSelUniversalScriptForLD(ld_flags,
llc_outputs,
files,
outfile):
""" Return sel_universal script text for invoking LD.nexe with the
given ld_flags, llc_outputs (which are treated specially), and
other input files (for native libraries). The output will be written
to outfile. """
script = []
# Open the output file.
script.append('readwrite_file nexefile %s' % outfile)
files_to_map = list(files)
# Create a reverse-service mapping for each input file and add it to
# the sel universal script.
for f in files_to_map:
basename = pathtools.basename(f)
# If we are using the dummy shim, map it with the filename of the real
# shim, so the baked-in commandline will work.
if basename == 'libpnacl_irt_shim_dummy.a':
basename = 'libpnacl_irt_shim.a'
script.append('reverse_service_add_manifest_mapping files/%s %s' %
(basename, f))
modules = len(llc_outputs)
script.extend(['readonly_file objfile%d %s' % (i, f)
for i, f in zip(range(modules), llc_outputs)])
script.append('rpc RunWithSplit i(%d) ' % modules +
' '.join(['h(objfile%s)' % m for m in range(modules)] +
['h(invalid)' for x in range(modules, 16)]) +
' h(nexefile) *')
script.append('echo "ld complete"')
script.append('')
return '\n'.join(script)
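# Illustrative sketch only (not invoked by the driver); every path below is
# hypothetical. It shows the kind of script the function above emits for a
# single-module link, roughly:
#   readwrite_file nexefile /tmp/a.out
#   reverse_service_add_manifest_mapping files/libfoo.a /tmp/libfoo.a
#   readonly_file objfile0 /tmp/split0.o
#   rpc RunWithSplit i(1) h(objfile0) h(invalid) ... h(invalid) h(nexefile) *
#   echo "ld complete"
def _ExampleLDScript():
  return MakeSelUniversalScriptForLD(['-static'],
                                     ['/tmp/split0.o'],
                                     ['/tmp/libfoo.a'],
                                     '/tmp/a.out')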
# Given linker arguments (including -L, -l, and filenames),
# returns the list of files which are pulled by the linker,
# with real path names, which are used to set up the real -> flat name mapping.
def LinkerFiles(args):
ret = []
for f in args:
if IsFlag(f):
continue
else:
if not pathtools.exists(f):
Log.Fatal("Unable to open '%s'", pathtools.touser(f))
ret.append(f)
return ret
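# Example (hypothetical arguments): LinkerFiles(['-Bstatic', '/tmp/libfoo.a'])
# returns ['/tmp/libfoo.a']; flags are skipped, and any remaining path that
# does not exist on disk aborts the link via Log.Fatal.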
|
|
"""Tests the singleton class of pyexperiment
Written by Peter Duerr
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
import unittest
from itertools import count as count_up
from pyexperiment.utils.Singleton import Singleton
from pyexperiment.utils.Singleton import DefaultSingleton
from pyexperiment.utils.Singleton import delegate_singleton
from pyexperiment.utils.HierarchicalMapping import HierarchicalOrderedDict
class TestSingleton(unittest.TestCase):
"""Test the Singleton class/mixin
"""
def test_singleton_subclass(self):
"""Test that subclasses of Singleton always return same instance
"""
class SingletonTest(Singleton):
"""Singleton test class
"""
pass
singleton_a = SingletonTest.get_instance()
singleton_b = SingletonTest.get_instance()
self.assertEqual(singleton_a, singleton_b)
def test_singleton_resets(self):
"""Test that singletons properly reset themselves.
"""
class SingletonTest(Singleton):
"""Singleton test class
"""
def __init__(self):
"""Initializer
"""
self.memory = []
def add(self, number):
"""Append a number to the memory
"""
self.memory.append(number)
singleton_a = SingletonTest.get_instance()
singleton_a.add(12)
self.assertEqual(singleton_a.memory, [12])
SingletonTest.reset_instance()
singleton_b = SingletonTest.get_instance()
self.assertNotEqual(singleton_a, singleton_b)
self.assertEqual(singleton_b.memory, [])
class TestDefaultSingleton(unittest.TestCase):
"""Test the DefaultSingleton class
"""
def test_default_subclass(self):
"""Test that subclasses of DefaultSingleton return same instance
"""
class SingletonTest(DefaultSingleton):
"""Singleton test class
"""
@classmethod
def _get_pseudo_instance(cls):
return None
singleton_a = SingletonTest.get_instance()
singleton_b = SingletonTest.get_instance()
self.assertEqual(singleton_a, singleton_b)
def test_need_pseudo_instance(self):
"""Test that subclasses without _get_pseudo_instance raise error
"""
# pylint: disable=abstract-method
class SingletonTest(DefaultSingleton):
"""Singleton test class
"""
pass
self.assertRaises(NotImplementedError, SingletonTest.get_instance)
class TestDelegatedSingleton(unittest.TestCase):
"""Test delegating singletons
"""
def test_delegated_singleton_calls(self):
"""Test if the delegated singleton calls the singleton correctly
"""
class SingletonTest(Singleton):
"""Singleton test class
"""
def __init__(self):
self.memory = []
def add(self, number):
"""Add a number to the memory
"""
self.memory.append(number)
def reset(self):
"""Reset the memory
"""
memory = self.memory
self.memory = []
return memory
delegated = delegate_singleton(SingletonTest)
delegated.add(12)
self.assertEqual(delegated.memory, [12])
memory = delegated.reset()
self.assertEqual(memory, [12])
self.assertEqual(delegated.memory, [])
def test_delegated_get_instance(self):
"""Test if the delegated singleton can use get_instance
"""
class SingletonTest(Singleton):
"""Singleton test class
"""
def __init__(self):
"""Initializer
"""
self.foo_str = "foo"
def get_foo(self):
"""Returns the foo string
"""
return self.foo_str
delegated = delegate_singleton(SingletonTest)
direct = delegated.get_instance()
self.assertEqual(direct.get_foo(), "foo")
def test_delegated_reset_instance(self):
"""Test if the delegated singleton can use reset_instance
"""
class SingletonTest(Singleton):
"""Singleton test class
"""
def __init__(self):
"""Initializer
"""
self.memory = []
def add(self, number):
"""Add a number to the memory
"""
self.memory.append(number)
delegated = delegate_singleton(SingletonTest)
delegated.add(12)
self.assertEqual(SingletonTest.get_instance().memory, [12])
delegated.reset_instance()
self.assertEqual(SingletonTest.get_instance().memory, [])
def test_delegate_singleton_repr(self):
"""Test calling the repr method on a delegated singleton
"""
class FooSingleton(Singleton):
"""Singleton test class
"""
def __repr__(self):
"""Returns foo
"""
return "foo"
singleton = delegate_singleton(FooSingleton)
self.assertEqual(singleton.__repr__(), "foo")
def test_delegate_singleton_dir(self):
"""Test calling the dir method on a delegated singleton
"""
class FooSingleton(Singleton):
"""Singleton test class
"""
@staticmethod
def bla():
"""Returns foo
"""
return "foo"
singleton = delegate_singleton(FooSingleton)
self.assertIn('bla', dir(singleton))
def test_delegate_singleton_iter(self):
"""Test iterating over a delegated singleton
"""
class FooSingleton(Singleton, list):
"""Iterable Singleton
"""
pass
singleton = delegate_singleton(FooSingleton)
for i in range(10):
singleton.append(i)
for item, expected in zip(singleton, count_up()):
self.assertEqual(item, expected)
    def test_delegate_singleton_next(self):
"""Test using a delegated singleton as an iterator
"""
class FooSingleton(Singleton):
"""Singleton Iterator
"""
def __init__(self):
"""Initializer
"""
self.state = 0
def __iter__(self):
"""Make FooSingleton an iterator...
"""
return self
def __next__(self):
"""Returns the next value
"""
if self.state < 3:
self.state += 1
return self.state
else:
raise StopIteration
def next(self):
"""For python 2.x compatibility"""
return self.__next__()
singleton = delegate_singleton(FooSingleton)
for item, expected in zip(singleton, count_up(1)):
self.assertEqual(item, expected)
def test_delegate_hierarchical(self):
"""Test using a singleton HierarchicalOrderedDict
"""
# pylint: disable=too-many-ancestors
        class SingletonDict(HierarchicalOrderedDict, Singleton):
            """Singleton dictionary
            """
pass
singleton = delegate_singleton(SingletonDict)
singleton['a'] = 12
singleton['b.c'] = 13
self.assertIn('a', singleton)
self.assertIn('b.c', singleton)
self.assertEqual(singleton['a'], 12)
self.assertEqual(singleton['b.c'], 13)
    def test_delegate_default(self):
        """Test delegating a DefaultSingleton
        """
memory = []
class Default(DefaultSingleton):
"""Default Singleton
"""
def __init__(self):
"""Initializer
"""
self.memory = []
def append(self, value):
"""Append to the memory
"""
self.memory.append(value)
@classmethod
def _get_pseudo_instance(cls):
"""Return the external memory
"""
return memory
singleton = delegate_singleton(Default)
singleton.append(12)
singleton.append(13)
self.assertEqual(memory, [12, 13])
singleton.initialize()
singleton.append(14)
singleton.append(15)
self.assertEqual(memory, [12, 13])
self.assertEqual(singleton.memory, [14, 15])
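# Minimal usage sketch (illustrative only; `SomeSingleton` is a hypothetical
# subclass). Application code typically keeps a module-level delegated handle
# instead of calling get_instance() everywhere:
#
#     state = delegate_singleton(SomeSingleton)
#     state.add(42)                     # forwarded to the single instance
#     SomeSingleton.reset_instance()    # drop the instance between test runs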
|
|
"""
Adapters for the cheat sheets from the Learn X in Y project
Configuration parameters:
log.level
"""
# pylint: disable=relative-import
from __future__ import print_function
import os
import re
from config import CONFIG
from .git_adapter import GitRepositoryAdapter
class LearnXinY(GitRepositoryAdapter):
"""
Adapter for the LearnXinY project
"""
_adapter_name = 'learnxiny'
_output_format = 'code'
_cache_needed = True
_repository_url = "https://github.com/adambard/learnxinyminutes-docs"
def __init__(self):
self.adapters = _ADAPTERS
GitRepositoryAdapter.__init__(self)
def _get_page(self, topic, request_options=None):
"""
Return cheat sheet for `topic`
or empty string if nothing found
"""
lang, topic = topic.split('/', 1)
if lang not in self.adapters:
return ''
return self.adapters[lang].get_page(topic)
def _get_list(self, prefix=None):
"""
Return list of all learnxiny topics
"""
answer = []
for language_adapter in self.adapters.values():
answer += language_adapter.get_list(prefix=True)
return answer
def is_found(self, topic):
"""
Return whether `topic` is a valid learnxiny topic
"""
if '/' not in topic:
return False
lang, topic = topic.split('/', 1)
if lang not in self.adapters:
return False
return self.adapters[lang].is_valid(topic)
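# Illustrative sketch only: topics are addressed as "<language>/<topic>"
# (e.g. "python/Functions"); the special topics ":list" and ":learn" are
# handled per language by LearnXYAdapter.get_page() below.
def _example_learnxiny_query(adapter, topic="python/:list"):
    """Hypothetical helper showing how the adapter above is queried."""
    if not adapter.is_found(topic):
        return ''
    return adapter._get_page(topic)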
class LearnXYAdapter(object):
"""
Parent class of all languages adapters
"""
_learn_xy_path = LearnXinY.local_repository_location()
_replace_with = {}
_filename = ''
prefix = ''
_replace_with = {}
_splitted = True
_block_cut_start = 2
_block_cut_end = 0
def __init__(self):
self._whole_cheatsheet = self._read_cheatsheet()
self._blocks = self._extract_blocks()
self._topics_list = [x for x, _ in self._blocks]
if "Comments" in self._topics_list:
self._topics_list = [x for x in self._topics_list if x != "Comments"] + ["Comments"]
self._topics_list += [":learn", ":list"]
if self._whole_cheatsheet and CONFIG.get("log.level") >= 5:
print(self.prefix, self._topics_list)
def _is_block_separator(self, before, now, after):
if (re.match(r'////////*', before)
and re.match(r'// ', now)
and re.match(r'////////*', after)):
block_name = re.sub(r'//\s*', '', now).replace('(', '').replace(')', '')
block_name = '_'.join(block_name.strip(", ").split())
for character in '/,':
block_name = block_name.replace(character, '')
for k in self._replace_with:
if k in block_name:
block_name = self._replace_with[k]
return block_name
return None
    def _cut_block(self, block, start_block=False):
        if start_block:
            return block
        # cut `_block_cut_start` lines from the head and `_block_cut_end`
        # lines from the tail (0 means "cut nothing from the end")
        end = -self._block_cut_end if self._block_cut_end else None
        answer = block[self._block_cut_start:end]
        if answer == []:
            return answer
        if answer[0].strip() == '':
            answer = answer[1:]
        if answer and answer[-1].strip() == '':
            answer = answer[:-1]
        return answer
def _read_cheatsheet(self):
filename = os.path.join(self._learn_xy_path, self._filename)
# if cheat sheets are not there (e.g. were not yet fetched),
# just skip it
if not os.path.exists(filename):
return None
with open(filename) as f_cheat_sheet:
code_mode = False
answer = []
for line in f_cheat_sheet.readlines():
if line.startswith('```'):
if not code_mode:
code_mode = True
continue
else:
code_mode = False
if code_mode:
answer.append(line.rstrip('\n'))
return answer
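    # Example (made-up markdown fragment): from
    #     intro text
    #     ```python
    #     print("hi")
    #     ```
    # only the line `print("hi")` is kept; everything outside the code fences
    # is dropped before block extraction.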
def _extract_blocks(self):
if not self._splitted:
return []
lines = self._whole_cheatsheet
if lines is None:
return []
answer = []
block = []
block_name = "Comments"
for before, now, after in zip([""]+lines, lines, lines[1:]):
new_block_name = self._is_block_separator(before, now, after)
if new_block_name:
if block_name:
block_text = self._cut_block(block)
if block_text != []:
answer.append((block_name, block_text))
block_name = new_block_name
block = []
continue
else:
block.append(before)
answer.append((block_name, self._cut_block(block)))
return answer
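    # Illustrative example of the default (C-style) separator matched above,
    # from a made-up learnxinyminutes fragment:
    #     /////////////////////////////////
    #     // 5. Functions
    #     /////////////////////////////////
    # For that fragment _is_block_separator() returns "5._Functions" and
    # _extract_blocks() starts a new block under that name.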
def is_valid(self, name):
"""
Check whether topic `name` is valid.
"""
        return name in self._topics_list
def get_list(self, prefix=None):
"""
Get list of topics for `prefix`
"""
if prefix:
return ["%s/%s" % (self.prefix, x) for x in self._topics_list]
return self._topics_list
def get_page(self, name, partial=False):
"""
Return specified cheat sheet `name` for the language.
If `partial`, cheat sheet name may be shortened
"""
if name == ":list":
return "\n".join(self.get_list()) + "\n"
if name == ":learn":
return "\n".join(self._whole_cheatsheet) + "\n"
if partial:
possible_names = []
for block_name, _ in self._blocks:
if block_name.startswith(name):
possible_names.append(block_name)
if possible_names == [] or len(possible_names) > 1:
return None
name = possible_names[0]
for block_name, block_contents in self._blocks:
if block_name == name:
return "\n".join(block_contents)
return None
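    # Example with hypothetical block names "Functions" and "Flow_Control":
    # get_page("Func", partial=True) resolves the prefix to "Functions" and
    # returns that block; an ambiguous ("F") or unknown prefix yields None.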
#
# Per-language LearnXY cheat sheet configurations.
# For the moment this section contains a lot of code; ideally it should hold
# data only and eventually be replaced with YAML.
#
class LearnAwkAdapter(LearnXYAdapter):
"Learn AWK in Y Minutes"
prefix = "awk"
_filename = "awk.html.markdown"
_splitted = False
class LearnBashAdapter(LearnXYAdapter):
"Learn Bash in Y Minutes"
prefix = "bash"
_filename = "bash.html.markdown"
_splitted = False
class LearnBfAdapter(LearnXYAdapter):
"Learn Brainfuck in Y Minutes"
prefix = "bf"
_filename = "bf.html.markdown"
_splitted = False
class LearnCAdapter(LearnXYAdapter):
"Learn C in Y Minutes"
prefix = "c"
_filename = "c.html.markdown"
_splitted = False
class LearnChapelAdapter(LearnXYAdapter):
"Learn Chapel in Y Minutes"
prefix = "chapel"
_filename = "chapel.html.markdown"
_splitted = False
class LearnClojureAdapter(LearnXYAdapter):
"""
Learn Clojure in Y Minutes
"""
prefix = "clojure"
_filename = "clojure.html.markdown"
def _is_block_separator(self, before, now, after):
if (re.match(r'\s*$', before)
and re.match(r';\s*', now)
and re.match(r';;;;;;+', after)):
block_name = re.sub(r';\s*', '', now)
block_name = '_'.join([x.strip(",&:") for x in block_name.strip(", ").split()])
return block_name
return None
@staticmethod
def _cut_block(block, start_block=False):
if not start_block:
answer = block[2:]
if answer[0].split() == '':
answer = answer[1:]
if answer[-1].split() == '':
answer = answer[:1]
return answer
class LearnCoffeeScriptAdapter(LearnXYAdapter):
"Learn coffeescript in Y Minutes"
prefix = "coffee"
_filename = "coffeescript.html.markdown"
_splitted = False
class LearnCppAdapter(LearnXYAdapter):
"""
Learn C++ in Y Minutes
"""
prefix = "cpp"
_filename = "c++.html.markdown"
_replace_with = {
'More_about_Objects': 'Prototypes',
}
def _is_block_separator(self, before, now, after):
if (re.match(r'////////*', before)
and re.match(r'// ', now)
and re.match(r'////////*', after)):
block_name = re.sub(r'//\s*', '', now).replace('(', '').replace(')', '')
block_name = '_'.join(block_name.strip(", ").split())
for character in '/,':
block_name = block_name.replace(character, '')
for k in self._replace_with:
if k in block_name:
block_name = self._replace_with[k]
return block_name
return None
@staticmethod
def _cut_block(block, start_block=False):
answer = block[2:-1]
if answer == []:
return answer
if answer[0].split() == '':
answer = answer[1:]
if answer[-1].split() == '':
answer = answer[:1]
return answer
class LearnCsharpAdapter(LearnXYAdapter):
"Learn C# in Y Minutes"
prefix = "csharp"
_filename = "csharp.html.markdown"
_splitted = False
class LearnDAdapter(LearnXYAdapter):
"Learn D in Y Minutes"
prefix = "d"
_filename = "d.html.markdown"
_splitted = False
class LearnDartAdapter(LearnXYAdapter):
"Learn Dart in Y Minutes"
prefix = "dart"
_filename = "dart.html.markdown"
_splitted = False
class LearnFactorAdapter(LearnXYAdapter):
"Learn Factor in Y Minutes"
prefix = "factor"
_filename = "factor.html.markdown"
_splitted = False
class LearnForthAdapter(LearnXYAdapter):
"Learn Forth in Y Minutes"
prefix = "forth"
_filename = "forth.html.markdown"
_splitted = False
class LearnFsharpAdapter(LearnXYAdapter):
"Learn F# in Y Minutes"
prefix = "fsharp"
_filename = "fsharp.html.markdown"
_splitted = False
class LearnElispAdapter(LearnXYAdapter):
"Learn Elisp in Y Minutes"
prefix = "elisp"
_filename = "elisp.html.markdown"
_splitted = False
class LearnElixirAdapter(LearnXYAdapter):
"""
Learn Elixir in Y Minutes
"""
prefix = "elixir"
_filename = "elixir.html.markdown"
_replace_with = {
'More_about_Objects': 'Prototypes',
}
def _is_block_separator(self, before, now, after):
if (re.match(r'## ---*', before)
and re.match(r'## --', now)
and re.match(r'## ---*', after)):
block_name = re.sub(r'## --\s*', '', now)
block_name = '_'.join(block_name.strip(", ").split())
for character in '/,':
block_name = block_name.replace(character, '')
for k in self._replace_with:
if k in block_name:
block_name = self._replace_with[k]
return block_name
return None
@staticmethod
def _cut_block(block, start_block=False):
answer = block[2:-1]
if answer[0].split() == '':
answer = answer[1:]
if answer[-1].split() == '':
answer = answer[:1]
return answer
class LearnElmAdapter(LearnXYAdapter):
"""
Learn Elm in Y Minutes
"""
prefix = "elm"
_filename = "elm.html.markdown"
_replace_with = {
'More_about_Objects': 'Prototypes',
}
def _is_block_separator(self, before, now, after):
if (re.match(r'\s*', before)
and re.match(r'\{--.*--\}', now)
and re.match(r'\s*', after)):
block_name = re.sub(r'\{--+\s*', '', now)
block_name = re.sub(r'--\}', '', block_name)
block_name = '_'.join(block_name.strip(", ").split())
for character in '/,':
block_name = block_name.replace(character, '')
for k in self._replace_with:
if k in block_name:
block_name = self._replace_with[k]
return block_name
return None
@staticmethod
def _cut_block(block, start_block=False):
answer = block[2:-1]
if answer[0].split() == '':
answer = answer[1:]
if answer[-1].split() == '':
answer = answer[:1]
return answer
class LearnErlangAdapter(LearnXYAdapter):
"""
Learn Erlang in Y Minutes
"""
prefix = "erlang"
_filename = "erlang.html.markdown"
def _is_block_separator(self, before, now, after):
if (re.match('%%%%%%+', before)
and re.match(r'%%\s+[0-9]+\.', now)
and re.match('%%%%%%+', after)):
block_name = re.sub(r'%%+\s+[0-9]+\.\s*', '', now)
block_name = '_'.join(block_name.strip('.').strip().split())
return block_name
return None
@staticmethod
def _cut_block(block, start_block=False):
answer = block[2:-1]
if answer[0].split() == '':
answer = answer[1:]
if answer[-1].split() == '':
answer = answer[:1]
return answer
class LearnFortranAdapter(LearnXYAdapter):
"Learn Fortran in Y Minutes"
prefix = "fortran"
_filename = "fortran95.html.markdown"
_splitted = False
class LearnGoAdapter(LearnXYAdapter):
"Learn Go in Y Minutes"
prefix = "go"
_filename = "go.html.markdown"
_splitted = False
class LearnGroovyAdapter(LearnXYAdapter):
"Learn Groovy in Y Minutes"
prefix = "groovy"
_filename = "groovy.html.markdown"
_splitted = False
class LearnJavaAdapter(LearnXYAdapter):
"Learn Java in Y Minutes"
prefix = "java"
_filename = "java.html.markdown"
_splitted = False
class LearnJavaScriptAdapter(LearnXYAdapter):
"""
Learn JavaScript in Y Minutes
"""
prefix = "js"
_filename = "javascript.html.markdown"
_replace_with = {
'More_about_Objects': 'Prototypes',
}
def _is_block_separator(self, before, now, after):
if (re.match('//////+', before)
and re.match(r'//+\s+[0-9]+\.', now)
and re.match(r'\s*', after)):
block_name = re.sub(r'//+\s+[0-9]+\.\s*', '', now)
block_name = '_'.join(block_name.strip(", ").split())
for k in self._replace_with:
if k in block_name:
block_name = self._replace_with[k]
return block_name
return None
@staticmethod
def _cut_block(block, start_block=False):
answer = block[2:-1]
if answer[0].split() == '':
answer = answer[1:]
if answer[-1].split() == '':
answer = answer[:1]
return answer
class LearnJuliaAdapter(LearnXYAdapter):
"""
Learn Julia in Y Minutes
"""
prefix = "julia"
_filename = "julia.html.markdown"
def _is_block_separator(self, before, now, after):
if (re.match('####+', before)
and re.match(r'##\s*', now)
and re.match('####+', after)):
block_name = re.sub(r'##\s+[0-9]+\.\s*', '', now)
block_name = '_'.join(block_name.strip(", ").split())
return block_name
return None
@staticmethod
def _cut_block(block, start_block=False):
answer = block[2:-1]
if answer[0].split() == '':
answer = answer[1:]
if answer[-1].split() == '':
answer = answer[:1]
return answer
class LearnHaskellAdapter(LearnXYAdapter):
"""
Learn Haskell in Y Minutes
"""
prefix = "haskell"
_filename = "haskell.html.markdown"
_replace_with = {
'More_about_Objects': 'Prototypes',
}
def _is_block_separator(self, before, now, after):
if (re.match('------+', before)
and re.match(r'--+\s+[0-9]+\.', now)
and re.match('------+', after)):
block_name = re.sub(r'--+\s+[0-9]+\.\s*', '', now)
block_name = '_'.join(block_name.strip(", ").split())
for k in self._replace_with:
if k in block_name:
block_name = self._replace_with[k]
return block_name
return None
@staticmethod
def _cut_block(block, start_block=False):
answer = block[2:-1]
if answer[0].split() == '':
answer = answer[1:]
if answer[-1].split() == '':
answer = answer[:1]
return answer
class LearnLispAdapter(LearnXYAdapter):
"Learn Lisp in Y Minutes"
prefix = "lisp"
_filename = "common-lisp.html.markdown"
_splitted = False
class LearnLuaAdapter(LearnXYAdapter):
"""
Learn Lua in Y Minutes
"""
prefix = "lua"
_filename = "lua.html.markdown"
_replace_with = {
'1_Metatables_and_metamethods': 'Metatables',
'2_Class-like_tables_and_inheritance': 'Class-like_tables',
'Variables_and_flow_control': 'Flow_control',
}
def _is_block_separator(self, before, now, after):
if (re.match('-----+', before)
and re.match('-------+', after)
and re.match(r'--\s+[0-9]+\.', now)):
block_name = re.sub(r'--+\s+[0-9]+\.\s*', '', now)
block_name = '_'.join(block_name.strip('.').strip().split())
if block_name in self._replace_with:
block_name = self._replace_with[block_name]
return block_name
return None
@staticmethod
def _cut_block(block, start_block=False):
answer = block[2:-1]
if answer[0].split() == '':
answer = answer[1:]
if answer[-1].split() == '':
answer = answer[:1]
return answer
class LearnMathematicaAdapter(LearnXYAdapter):
"Learn Mathematica in Y Minutes"
prefix = "mathematica"
_filename = "wolfram.html.markdown"
_splitted = False
class LearnMatlabAdapter(LearnXYAdapter):
"Learn Matlab in Y Minutes"
prefix = "matlab"
_filename = "matlab.html.markdown"
_splitted = False
class LearnOctaveAdapter(LearnXYAdapter):
"Learn Octave in Y Minutes"
prefix = "octave"
_filename = "matlab.html.markdown"
_splitted = False
class LearnKotlinAdapter(LearnXYAdapter):
"""
Learn Kotlin in Y Minutes
"""
prefix = "kotlin"
_filename = "kotlin.html.markdown"
def _is_block_separator(self, before, now, after):
if (re.match('#######+', before)
and re.match('#######+', after)
and re.match(r'#+\s+[0-9]+\.', now)):
block_name = re.sub(r'#+\s+[0-9]+\.\s*', '', now)
block_name = '_'.join(block_name.strip().split())
return block_name
return None
@staticmethod
def _cut_block(block, start_block=False):
answer = block[2:-1]
if answer[0].split() == '':
answer = answer[1:]
if answer[-1].split() == '':
answer = answer[:1]
return answer
class LearnObjectiveCAdapter(LearnXYAdapter):
"Learn Objective C in Y Minutes"
prefix = "objective-c"
_filename = "objective-c.html.markdown"
_splitted = False
class LearnOCamlAdapter(LearnXYAdapter):
"""
Learn OCaml in Y Minutes
"""
prefix = "ocaml"
_filename = "ocaml.html.markdown"
_replace_with = {
'More_about_Objects': 'Prototypes',
}
def _is_block_separator(self, before, now, after):
if (re.match(r'\s*', before)
and re.match(r'\(\*\*\*+', now)
and re.match(r'\s*', after)):
block_name = re.sub(r'\(\*\*\*+\s*', '', now)
block_name = re.sub(r'\s*\*\*\*\)', '', block_name)
block_name = '_'.join(block_name.strip(", ").split())
for k in self._replace_with:
if k in block_name:
block_name = self._replace_with[k]
return block_name
return None
@staticmethod
def _cut_block(block, start_block=False):
answer = block[2:-1]
if answer[0].split() == '':
answer = answer[1:]
if answer[-1].split() == '':
answer = answer[:1]
return answer
class LearnPerlAdapter(LearnXYAdapter):
"""
Learn Perl in Y Minutes
"""
prefix = "perl"
_filename = "perl.html.markdown"
_replace_with = {
'Conditional_and_looping_constructs': 'Control_Flow',
'Perl_variable_types': 'Types',
'Files_and_I/O': 'Files',
'Writing_subroutines': 'Subroutines',
}
def _is_block_separator(self, before, now, after):
if re.match(r'####+\s+', now):
block_name = re.sub(r'#+\s', '', now)
block_name = '_'.join(block_name.strip().split())
if block_name in self._replace_with:
block_name = self._replace_with[block_name]
return block_name
else:
return None
@staticmethod
def _cut_block(block, start_block=False):
if not start_block:
answer = block[2:]
if answer == []:
return answer
if answer[0].split() == '':
answer = answer[1:]
if answer[-1].split() == '':
answer = answer[:1]
return answer
class LearnPerl6Adapter(LearnXYAdapter):
"Learn Perl 6 in Y Minutes"
prefix = "perl6"
_filename = "perl6.html.markdown"
_splitted = False
class LearnPHPAdapter(LearnXYAdapter):
"""
Learn PHP in Y Minutes
"""
prefix = "php"
_filename = "php.html.markdown"
def _is_block_separator(self, before, now, after):
if (re.match(r'/\*\*\*\*\*+', before)
and re.match(r'\s*\*/', after)
and re.match(r'\s*\*\s*', now)):
block_name = re.sub(r'\s*\*\s*', '', now)
block_name = re.sub(r'&', '', block_name)
block_name = '_'.join(block_name.strip().split())
return block_name
return None
@staticmethod
def _cut_block(block, start_block=False):
return block[2:]
class LearnPythonAdapter(LearnXYAdapter):
"""
Learn Python in Y Minutes
"""
prefix = "python"
_filename = "python.html.markdown"
def _is_block_separator(self, before, now, after):
if (re.match('#######+', before)
and re.match('#######+', after)
and re.match(r'#+\s+[0-9]+\.', now)):
block_name = re.sub(r'#+\s+[0-9]+\.\s*', '', now)
block_name = '_'.join(block_name.strip().split())
return block_name
return None
@staticmethod
def _cut_block(block, start_block=False):
answer = block[2:-1]
if answer[0].split() == '':
answer = answer[1:]
if answer[-1].split() == '':
answer = answer[:1]
return answer
class LearnPython3Adapter(LearnXYAdapter):
"Learn Python 3 in Y Minutes"
prefix = "python3"
_filename = "python3.html.markdown"
_splitted = False
class LearnRAdapter(LearnXYAdapter):
"Learn R in Y Minutes"
prefix = "r"
_filename = "r.html.markdown"
_splitted = False
class LearnRacketAdapter(LearnXYAdapter):
"Learn Racket in Y Minutes"
prefix = "racket"
_filename = "racket.html.markdown"
_splitted = False
class LearnRubyAdapter(LearnXYAdapter):
"""
Learn Ruby in Y Minutes
    The format of the file was changed, so the parsing function had to be
    fixed too. This is a good candidate for a health check: if the number of
    extracted cheat sheets suddenly drops to 1, one should check the markup.
"""
prefix = "ruby"
_filename = "ruby.html.markdown"
def _is_block_separator(self, before, now, after):
if (re.match('#######+', before)
and re.match('#######+', after)
and re.match(r'#+\s+[0-9]+\.', now)):
block_name = re.sub(r'#+\s+[0-9]+\.\s*', '', now)
block_name = '_'.join(block_name.strip().split())
return block_name
return None
@staticmethod
def _cut_block(block, start_block=False):
answer = block[2:-1]
if answer[0].split() == '':
answer = answer[1:]
if answer[-1].split() == '':
answer = answer[:1]
return answer
class LearnRustAdapter(LearnXYAdapter):
"Learn Rust in Y Minutes"
prefix = "rust"
_filename = "rust.html.markdown"
_splitted = False
class LearnSolidityAdapter(LearnXYAdapter):
"Learn Solidity in Y Minutes"
prefix = "solidity"
_filename = "solidity.html.markdown"
_splitted = False
class LearnSwiftAdapter(LearnXYAdapter):
"Learn Swift in Y Minutes"
prefix = "swift"
_filename = "swift.html.markdown"
_splitted = False
class LearnTclAdapter(LearnXYAdapter):
"Learn Tcl in Y Minutes"
prefix = "tcl"
_filename = "tcl.html.markdown"
_splitted = False
class LearnTcshAdapter(LearnXYAdapter):
"Learn Tcsh in Y Minutes"
prefix = "tcsh"
_filename = "tcsh.html.markdown"
_splitted = False
class LearnVisualBasicAdapter(LearnXYAdapter):
"Learn Visual Basic in Y Minutes"
prefix = "vb"
_filename = "visualbasic.html.markdown"
_splitted = False
class LearnCMakeAdapter(LearnXYAdapter):
"Learn CMake in Y Minutes"
prefix = "cmake"
_filename = "cmake.html.markdown"
_splitted = False
class LearnNimAdapter(LearnXYAdapter):
"Learn Nim in Y Minutes"
prefix = "nim"
_filename = "nim.html.markdown"
_splitted = False
class LearnGitAdapter(LearnXYAdapter):
"Learn Git in Y Minutes"
prefix = "git"
_filename = "git.html.markdown"
_splitted = False
class LearnLatexAdapter(LearnXYAdapter):
    "Learn LaTeX in Y Minutes"
prefix = "latex"
_filename = "latex.html.markdown"
_splitted = False
_ADAPTERS = {cls.prefix: cls() for cls in vars()['LearnXYAdapter'].__subclasses__()}
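# Resulting mapping (illustrative excerpt): _ADAPTERS maps each adapter prefix
# to an already-constructed instance, e.g.
#     {'awk': <LearnAwkAdapter>, 'bash': <LearnBashAdapter>, ..., 'latex': <LearnLatexAdapter>}
# so every "<prefix>/<topic>" query from LearnXinY is routed to one of them.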
|
|
from __future__ import annotations
from typing import (
TYPE_CHECKING,
Any,
Literal,
Sequence,
TypeVar,
overload,
)
import warnings
import numpy as np
from pandas._libs import (
lib,
missing as libmissing,
)
from pandas._typing import (
ArrayLike,
AstypeArg,
DtypeObj,
NpDtype,
PositionalIndexer,
Scalar,
ScalarIndexer,
SequenceIndexer,
Shape,
npt,
)
from pandas.errors import AbstractMethodError
from pandas.util._decorators import doc
from pandas.util._validators import validate_fillna_kwargs
from pandas.core.dtypes.astype import astype_nansafe
from pandas.core.dtypes.base import ExtensionDtype
from pandas.core.dtypes.common import (
is_bool,
is_bool_dtype,
is_datetime64_dtype,
is_dtype_equal,
is_float_dtype,
is_integer_dtype,
is_list_like,
is_object_dtype,
is_scalar,
is_string_dtype,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import BaseMaskedDtype
from pandas.core.dtypes.inference import is_array_like
from pandas.core.dtypes.missing import (
array_equivalent,
is_valid_na_for_dtype,
isna,
notna,
)
from pandas.core import (
arraylike,
missing,
nanops,
ops,
)
from pandas.core.algorithms import (
factorize_array,
isin,
take,
)
from pandas.core.array_algos import masked_reductions
from pandas.core.array_algos.quantile import quantile_with_mask
from pandas.core.arraylike import OpsMixin
from pandas.core.arrays import ExtensionArray
from pandas.core.construction import ensure_wrapped_if_datetimelike
from pandas.core.indexers import check_array_indexer
from pandas.core.ops import invalid_comparison
if TYPE_CHECKING:
from pandas import Series
from pandas.core.arrays import BooleanArray
from pandas._typing import (
NumpySorter,
NumpyValueArrayLike,
)
from pandas.compat.numpy import function as nv
BaseMaskedArrayT = TypeVar("BaseMaskedArrayT", bound="BaseMaskedArray")
class BaseMaskedArray(OpsMixin, ExtensionArray):
"""
Base class for masked arrays (which use _data and _mask to store the data).
numpy based
"""
# The value used to fill '_data' to avoid upcasting
_internal_fill_value: Scalar
# our underlying data and mask are each ndarrays
_data: np.ndarray
_mask: npt.NDArray[np.bool_]
# Fill values used for any/all
_truthy_value = Scalar # bool(_truthy_value) = True
_falsey_value = Scalar # bool(_falsey_value) = False
def __init__(
self, values: np.ndarray, mask: npt.NDArray[np.bool_], copy: bool = False
) -> None:
# values is supposed to already be validated in the subclass
if not (isinstance(mask, np.ndarray) and mask.dtype == np.bool_):
raise TypeError(
"mask should be boolean numpy array. Use "
"the 'pd.array' function instead"
)
if values.shape != mask.shape:
raise ValueError("values.shape must match mask.shape")
if copy:
values = values.copy()
mask = mask.copy()
self._data = values
self._mask = mask
@classmethod
def _from_sequence(
cls: type[BaseMaskedArrayT], scalars, *, dtype=None, copy: bool = False
) -> BaseMaskedArrayT:
values, mask = cls._coerce_to_array(scalars, dtype=dtype, copy=copy)
return cls(values, mask)
@property
def dtype(self) -> BaseMaskedDtype:
raise AbstractMethodError(self)
@overload
def __getitem__(self, item: ScalarIndexer) -> Any:
...
@overload
def __getitem__(self: BaseMaskedArrayT, item: SequenceIndexer) -> BaseMaskedArrayT:
...
def __getitem__(
self: BaseMaskedArrayT, item: PositionalIndexer
) -> BaseMaskedArrayT | Any:
item = check_array_indexer(self, item)
newmask = self._mask[item]
if is_bool(newmask):
# This is a scalar indexing
if newmask:
return self.dtype.na_value
return self._data[item]
return type(self)(self._data[item], newmask)
@doc(ExtensionArray.fillna)
def fillna(
self: BaseMaskedArrayT, value=None, method=None, limit=None
) -> BaseMaskedArrayT:
value, method = validate_fillna_kwargs(value, method)
mask = self._mask
if is_array_like(value):
if len(value) != len(self):
raise ValueError(
f"Length of 'value' does not match. Got ({len(value)}) "
f" expected {len(self)}"
)
value = value[mask]
if mask.any():
if method is not None:
func = missing.get_fill_func(method, ndim=self.ndim)
npvalues = self._data.copy().T
new_mask = mask.copy().T
func(npvalues, limit=limit, mask=new_mask)
return type(self)(npvalues.T, new_mask.T)
else:
# fill with value
new_values = self.copy()
new_values[mask] = value
else:
new_values = self.copy()
return new_values
@classmethod
def _coerce_to_array(
cls, values, *, dtype: DtypeObj, copy: bool = False
) -> tuple[np.ndarray, np.ndarray]:
raise AbstractMethodError(cls)
def _validate_setitem_value(self, value):
"""
Check if we have a scalar that we can cast losslessly.
Raises
------
TypeError
"""
kind = self.dtype.kind
# TODO: get this all from np_can_hold_element?
if kind == "b":
if lib.is_bool(value):
return value
elif kind == "f":
if lib.is_integer(value) or lib.is_float(value):
return value
else:
if lib.is_integer(value) or (lib.is_float(value) and value.is_integer()):
return value
# TODO: unsigned checks
# Note: without the "str" here, the f-string rendering raises in
# py38 builds.
raise TypeError(f"Invalid value '{str(value)}' for dtype {self.dtype}")
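    # Example of the lossless-cast rule above (hypothetical values): for an
    # Int64-backed array, 3 and 3.0 are accepted while 3.5 raises TypeError;
    # for a boolean-backed array only True/False pass validation.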
def __setitem__(self, key, value) -> None:
key = check_array_indexer(self, key)
if is_scalar(value):
if is_valid_na_for_dtype(value, self.dtype):
self._mask[key] = True
else:
value = self._validate_setitem_value(value)
self._data[key] = value
self._mask[key] = False
return
value, mask = self._coerce_to_array(value, dtype=self.dtype)
self._data[key] = value
self._mask[key] = mask
def __iter__(self):
if self.ndim == 1:
for i in range(len(self)):
if self._mask[i]:
yield self.dtype.na_value
else:
yield self._data[i]
else:
for i in range(len(self)):
yield self[i]
def __len__(self) -> int:
return len(self._data)
@property
def shape(self) -> Shape:
return self._data.shape
@property
def ndim(self) -> int:
return self._data.ndim
def swapaxes(self: BaseMaskedArrayT, axis1, axis2) -> BaseMaskedArrayT:
data = self._data.swapaxes(axis1, axis2)
mask = self._mask.swapaxes(axis1, axis2)
return type(self)(data, mask)
def delete(self: BaseMaskedArrayT, loc, axis: int = 0) -> BaseMaskedArrayT:
data = np.delete(self._data, loc, axis=axis)
mask = np.delete(self._mask, loc, axis=axis)
return type(self)(data, mask)
def reshape(self: BaseMaskedArrayT, *args, **kwargs) -> BaseMaskedArrayT:
data = self._data.reshape(*args, **kwargs)
mask = self._mask.reshape(*args, **kwargs)
return type(self)(data, mask)
def ravel(self: BaseMaskedArrayT, *args, **kwargs) -> BaseMaskedArrayT:
# TODO: need to make sure we have the same order for data/mask
data = self._data.ravel(*args, **kwargs)
mask = self._mask.ravel(*args, **kwargs)
return type(self)(data, mask)
@property
def T(self: BaseMaskedArrayT) -> BaseMaskedArrayT:
return type(self)(self._data.T, self._mask.T)
def round(self, decimals: int = 0, *args, **kwargs):
"""
        Round each value in the array to the given number of decimals.
Parameters
----------
decimals : int, default 0
Number of decimal places to round to. If decimals is negative,
it specifies the number of positions to the left of the decimal point.
*args, **kwargs
Additional arguments and keywords have no effect but might be
accepted for compatibility with NumPy.
Returns
-------
NumericArray
Rounded values of the NumericArray.
See Also
--------
numpy.around : Round values of an np.array.
DataFrame.round : Round values of a DataFrame.
Series.round : Round values of a Series.
"""
nv.validate_round(args, kwargs)
values = np.round(self._data, decimals=decimals, **kwargs)
# Usually we'll get same type as self, but ndarray[bool] casts to float
return self._maybe_mask_result(values, self._mask.copy())
# ------------------------------------------------------------------
# Unary Methods
def __invert__(self: BaseMaskedArrayT) -> BaseMaskedArrayT:
return type(self)(~self._data, self._mask.copy())
def __neg__(self):
return type(self)(-self._data, self._mask.copy())
def __pos__(self):
return self.copy()
def __abs__(self):
return type(self)(abs(self._data), self._mask.copy())
# ------------------------------------------------------------------
def to_numpy(
self,
dtype: npt.DTypeLike | None = None,
copy: bool = False,
na_value: Scalar | lib.NoDefault | libmissing.NAType = lib.no_default,
) -> np.ndarray:
"""
Convert to a NumPy Array.
By default converts to an object-dtype NumPy array. Specify the `dtype` and
`na_value` keywords to customize the conversion.
Parameters
----------
dtype : dtype, default object
The numpy dtype to convert to.
copy : bool, default False
            Whether to ensure that the returned value is not a view on
            the array. Note that ``copy=False`` does not *ensure* that
            ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensures that
a copy is made, even if not strictly necessary. This is typically
only possible when no missing values are present and `dtype`
is the equivalent numpy dtype.
na_value : scalar, optional
Scalar missing value indicator to use in numpy array. Defaults
to the native missing value indicator of this array (pd.NA).
Returns
-------
numpy.ndarray
Examples
--------
An object-dtype is the default result
>>> a = pd.array([True, False, pd.NA], dtype="boolean")
>>> a.to_numpy()
array([True, False, <NA>], dtype=object)
When no missing values are present, an equivalent dtype can be used.
>>> pd.array([True, False], dtype="boolean").to_numpy(dtype="bool")
array([ True, False])
>>> pd.array([1, 2], dtype="Int64").to_numpy("int64")
array([1, 2])
However, requesting such dtype will raise a ValueError if
missing values are present and the default missing value :attr:`NA`
is used.
>>> a = pd.array([True, False, pd.NA], dtype="boolean")
>>> a
<BooleanArray>
[True, False, <NA>]
Length: 3, dtype: boolean
>>> a.to_numpy(dtype="bool")
Traceback (most recent call last):
...
ValueError: cannot convert to bool numpy array in presence of missing values
Specify a valid `na_value` instead
>>> a.to_numpy(dtype="bool", na_value=False)
array([ True, False, False])
"""
if na_value is lib.no_default:
na_value = libmissing.NA
if dtype is None:
dtype = object
if self._hasna:
if (
not is_object_dtype(dtype)
and not is_string_dtype(dtype)
and na_value is libmissing.NA
):
raise ValueError(
f"cannot convert to '{dtype}'-dtype NumPy array "
"with missing values. Specify an appropriate 'na_value' "
"for this dtype."
)
# don't pass copy to astype -> always need a copy since we are mutating
data = self._data.astype(dtype)
data[self._mask] = na_value
else:
data = self._data.astype(dtype, copy=copy)
return data
@overload
def astype(self, dtype: npt.DTypeLike, copy: bool = ...) -> np.ndarray:
...
@overload
def astype(self, dtype: ExtensionDtype, copy: bool = ...) -> ExtensionArray:
...
@overload
def astype(self, dtype: AstypeArg, copy: bool = ...) -> ArrayLike:
...
def astype(self, dtype: AstypeArg, copy: bool = True) -> ArrayLike:
dtype = pandas_dtype(dtype)
if is_dtype_equal(dtype, self.dtype):
if copy:
return self.copy()
return self
# if we are astyping to another nullable masked dtype, we can fastpath
if isinstance(dtype, BaseMaskedDtype):
# TODO deal with NaNs for FloatingArray case
data = self._data.astype(dtype.numpy_dtype, copy=copy)
# mask is copied depending on whether the data was copied, and
# not directly depending on the `copy` keyword
mask = self._mask if data is self._data else self._mask.copy()
cls = dtype.construct_array_type()
return cls(data, mask, copy=False)
if isinstance(dtype, ExtensionDtype):
eacls = dtype.construct_array_type()
return eacls._from_sequence(self, dtype=dtype, copy=copy)
na_value: float | np.datetime64 | lib.NoDefault
# coerce
if is_float_dtype(dtype):
# In astype, we consider dtype=float to also mean na_value=np.nan
na_value = np.nan
elif is_datetime64_dtype(dtype):
na_value = np.datetime64("NaT")
else:
na_value = lib.no_default
# to_numpy will also raise, but we get somewhat nicer exception messages here
if is_integer_dtype(dtype) and self._hasna:
raise ValueError("cannot convert NA to integer")
if is_bool_dtype(dtype) and self._hasna:
# careful: astype_nansafe converts np.nan to True
raise ValueError("cannot convert float NaN to bool")
data = self.to_numpy(dtype=dtype, na_value=na_value, copy=copy)
if self.dtype.kind == "f":
# TODO: make this consistent between IntegerArray/FloatingArray,
# see test_astype_str
return astype_nansafe(data, dtype, copy=False)
return data
__array_priority__ = 1000 # higher than ndarray so ops dispatch to us
def __array__(self, dtype: NpDtype | None = None) -> np.ndarray:
"""
the array interface, return my values
We return an object array here to preserve our scalar values
"""
return self.to_numpy(dtype=dtype)
_HANDLED_TYPES: tuple[type, ...]
def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):
# For MaskedArray inputs, we apply the ufunc to ._data
# and mask the result.
out = kwargs.get("out", ())
for x in inputs + out:
if not isinstance(x, self._HANDLED_TYPES + (BaseMaskedArray,)):
return NotImplemented
# for binary ops, use our custom dunder methods
result = ops.maybe_dispatch_ufunc_to_dunder_op(
self, ufunc, method, *inputs, **kwargs
)
if result is not NotImplemented:
return result
if "out" in kwargs:
# e.g. test_ufunc_with_out
return arraylike.dispatch_ufunc_with_out(
self, ufunc, method, *inputs, **kwargs
)
if method == "reduce":
result = arraylike.dispatch_reduction_ufunc(
self, ufunc, method, *inputs, **kwargs
)
if result is not NotImplemented:
return result
mask = np.zeros(len(self), dtype=bool)
inputs2 = []
for x in inputs:
if isinstance(x, BaseMaskedArray):
mask |= x._mask
inputs2.append(x._data)
else:
inputs2.append(x)
def reconstruct(x):
# we don't worry about scalar `x` here, since we
# raise for reduce up above.
from pandas.core.arrays import (
BooleanArray,
FloatingArray,
IntegerArray,
)
if is_bool_dtype(x.dtype):
m = mask.copy()
return BooleanArray(x, m)
elif is_integer_dtype(x.dtype):
m = mask.copy()
return IntegerArray(x, m)
elif is_float_dtype(x.dtype):
m = mask.copy()
if x.dtype == np.float16:
# reached in e.g. np.sqrt on BooleanArray
# we don't support float16
x = x.astype(np.float32)
return FloatingArray(x, m)
else:
x[mask] = np.nan
return x
result = getattr(ufunc, method)(*inputs2, **kwargs)
if ufunc.nout > 1:
# e.g. np.divmod
return tuple(reconstruct(x) for x in result)
elif method == "reduce":
# e.g. np.add.reduce; test_ufunc_reduce_raises
if self._mask.any():
return self._na_value
return result
else:
return reconstruct(result)
def __arrow_array__(self, type=None):
"""
Convert myself into a pyarrow Array.
"""
import pyarrow as pa
return pa.array(self._data, mask=self._mask, type=type)
@property
def _hasna(self) -> bool:
        # Note: this is expensive right now! The hope is that we can
        # make this faster by having an optional mask, without having to
        # change source code using it.
# error: Incompatible return value type (got "bool_", expected "bool")
return self._mask.any() # type: ignore[return-value]
def _propagate_mask(
self, mask: npt.NDArray[np.bool_] | None, other
) -> npt.NDArray[np.bool_]:
if mask is None:
mask = self._mask.copy() # TODO: need test for BooleanArray needing a copy
if other is libmissing.NA:
# GH#45421 don't alter inplace
mask = mask | True
else:
mask = self._mask | mask
return mask
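    # Example (hypothetical masks): with self._mask == [False, True, False],
    # other=pd.NA yields an all-True mask, while another masked operand whose
    # mask is [True, False, False] yields [True, True, False].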
def _arith_method(self, other, op):
op_name = op.__name__
omask = None
if isinstance(other, BaseMaskedArray):
other, omask = other._data, other._mask
elif is_list_like(other):
if not isinstance(other, ExtensionArray):
other = np.asarray(other)
if other.ndim > 1:
raise NotImplementedError("can only perform ops with 1-d structures")
# We wrap the non-masked arithmetic logic used for numpy dtypes
# in Series/Index arithmetic ops.
other = ops.maybe_prepare_scalar_for_op(other, (len(self),))
pd_op = ops.get_array_op(op)
other = ensure_wrapped_if_datetimelike(other)
if op_name in {"pow", "rpow"} and isinstance(other, np.bool_):
# Avoid DeprecationWarning: In future, it will be an error
# for 'np.bool_' scalars to be interpreted as an index
# e.g. test_array_scalar_like_equivalence
other = bool(other)
mask = self._propagate_mask(omask, other)
if other is libmissing.NA:
result = np.ones_like(self._data)
if self.dtype.kind == "b":
if op_name in {
"floordiv",
"rfloordiv",
"pow",
"rpow",
"truediv",
"rtruediv",
}:
# GH#41165 Try to match non-masked Series behavior
# This is still imperfect GH#46043
raise NotImplementedError(
f"operator '{op_name}' not implemented for bool dtypes"
)
elif op_name in {"mod", "rmod"}:
dtype = "int8"
else:
dtype = "bool"
result = result.astype(dtype)
elif "truediv" in op_name and self.dtype.kind != "f":
# The actual data here doesn't matter since the mask
# will be all-True, but since this is division, we want
# to end up with floating dtype.
result = result.astype(np.float64)
else:
# Make sure we do this before the "pow" mask checks
# to get an expected exception message on shape mismatch.
if self.dtype.kind in ["i", "u"] and op_name in ["floordiv", "mod"]:
# TODO(GH#30188) ATM we don't match the behavior of non-masked
# types with respect to floordiv-by-zero
pd_op = op
with np.errstate(all="ignore"):
result = pd_op(self._data, other)
if op_name == "pow":
# 1 ** x is 1.
mask = np.where((self._data == 1) & ~self._mask, False, mask)
# x ** 0 is 1.
if omask is not None:
mask = np.where((other == 0) & ~omask, False, mask)
elif other is not libmissing.NA:
mask = np.where(other == 0, False, mask)
elif op_name == "rpow":
# 1 ** x is 1.
if omask is not None:
mask = np.where((other == 1) & ~omask, False, mask)
elif other is not libmissing.NA:
mask = np.where(other == 1, False, mask)
# x ** 0 is 1.
mask = np.where((self._data == 0) & ~self._mask, False, mask)
return self._maybe_mask_result(result, mask)
_logical_method = _arith_method
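    # Example of the pow/rpow special-casing above (hypothetical data):
    # pd.array([1, 2, pd.NA], dtype="Int64") ** pd.NA evaluates to
    # [1, <NA>, <NA>], because 1 ** x is 1 even for a missing exponent.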
def _cmp_method(self, other, op) -> BooleanArray:
from pandas.core.arrays import BooleanArray
mask = None
if isinstance(other, BaseMaskedArray):
other, mask = other._data, other._mask
elif is_list_like(other):
other = np.asarray(other)
if other.ndim > 1:
raise NotImplementedError("can only perform ops with 1-d structures")
if len(self) != len(other):
raise ValueError("Lengths must match to compare")
if other is libmissing.NA:
# numpy does not handle pd.NA well as "other" scalar (it returns
# a scalar False instead of an array)
# This may be fixed by NA.__array_ufunc__. Revisit this check
# once that's implemented.
result = np.zeros(self._data.shape, dtype="bool")
mask = np.ones(self._data.shape, dtype="bool")
else:
with warnings.catch_warnings():
# numpy may show a FutureWarning:
# elementwise comparison failed; returning scalar instead,
# but in the future will perform elementwise comparison
# before returning NotImplemented. We fall back to the correct
# behavior today, so that should be fine to ignore.
warnings.filterwarnings("ignore", "elementwise", FutureWarning)
with np.errstate(all="ignore"):
method = getattr(self._data, f"__{op.__name__}__")
result = method(other)
if result is NotImplemented:
result = invalid_comparison(self._data, other, op)
mask = self._propagate_mask(mask, other)
return BooleanArray(result, mask, copy=False)
def _maybe_mask_result(self, result, mask):
"""
Parameters
----------
result : array-like or tuple[array-like]
mask : array-like bool
"""
if isinstance(result, tuple):
# i.e. divmod
div, mod = result
return (
self._maybe_mask_result(div, mask),
self._maybe_mask_result(mod, mask),
)
if is_float_dtype(result.dtype):
from pandas.core.arrays import FloatingArray
return FloatingArray(result, mask, copy=False)
elif is_bool_dtype(result.dtype):
from pandas.core.arrays import BooleanArray
return BooleanArray(result, mask, copy=False)
elif result.dtype == "timedelta64[ns]":
# e.g. test_numeric_arr_mul_tdscalar_numexpr_path
from pandas.core.arrays import TimedeltaArray
if not isinstance(result, TimedeltaArray):
result = TimedeltaArray._simple_new(result)
result[mask] = result.dtype.type("NaT")
return result
elif is_integer_dtype(result.dtype):
from pandas.core.arrays import IntegerArray
return IntegerArray(result, mask, copy=False)
else:
result[mask] = np.nan
return result
def isna(self) -> np.ndarray:
return self._mask.copy()
@property
def _na_value(self):
return self.dtype.na_value
@property
def nbytes(self) -> int:
return self._data.nbytes + self._mask.nbytes
@classmethod
def _concat_same_type(
cls: type[BaseMaskedArrayT],
to_concat: Sequence[BaseMaskedArrayT],
axis: int = 0,
) -> BaseMaskedArrayT:
data = np.concatenate([x._data for x in to_concat], axis=axis)
mask = np.concatenate([x._mask for x in to_concat], axis=axis)
return cls(data, mask)
def take(
self: BaseMaskedArrayT,
indexer,
*,
allow_fill: bool = False,
fill_value: Scalar | None = None,
axis: int = 0,
) -> BaseMaskedArrayT:
# we always fill with 1 internally
# to avoid upcasting
data_fill_value = self._internal_fill_value if isna(fill_value) else fill_value
result = take(
self._data,
indexer,
fill_value=data_fill_value,
allow_fill=allow_fill,
axis=axis,
)
mask = take(
self._mask, indexer, fill_value=True, allow_fill=allow_fill, axis=axis
)
# if we are filling
# we only fill where the indexer is null
# not existing missing values
# TODO(jreback) what if we have a non-na float as a fill value?
if allow_fill and notna(fill_value):
fill_mask = np.asarray(indexer) == -1
result[fill_mask] = fill_value
mask = mask ^ fill_mask
return type(self)(result, mask, copy=False)
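    # Example (hypothetical): arr.take([0, -1], allow_fill=True, fill_value=9)
    # fills position 1 with 9 and marks it valid, while positions taken from
    # existing elements keep their original mask.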
# error: Return type "BooleanArray" of "isin" incompatible with return type
# "ndarray" in supertype "ExtensionArray"
def isin(self, values) -> BooleanArray: # type: ignore[override]
from pandas.core.arrays import BooleanArray
# algorithms.isin will eventually convert values to an ndarray, so no extra
# cost to doing it here first
values_arr = np.asarray(values)
result = isin(self._data, values_arr)
if self._hasna:
values_have_NA = is_object_dtype(values_arr.dtype) and any(
val is self.dtype.na_value for val in values_arr
)
# For now, NA does not propagate so set result according to presence of NA,
# see https://github.com/pandas-dev/pandas/pull/38379 for some discussion
result[self._mask] = values_have_NA
mask = np.zeros(self._data.shape, dtype=bool)
return BooleanArray(result, mask, copy=False)
def copy(self: BaseMaskedArrayT) -> BaseMaskedArrayT:
data, mask = self._data, self._mask
data = data.copy()
mask = mask.copy()
return type(self)(data, mask, copy=False)
@doc(ExtensionArray.searchsorted)
def searchsorted(
self,
value: NumpyValueArrayLike | ExtensionArray,
side: Literal["left", "right"] = "left",
sorter: NumpySorter = None,
) -> npt.NDArray[np.intp] | np.intp:
if self._hasna:
raise ValueError(
"searchsorted requires array to be sorted, which is impossible "
"with NAs present."
)
if isinstance(value, ExtensionArray):
value = value.astype(object)
# Base class searchsorted would cast to object, which is *much* slower.
return self._data.searchsorted(value, side=side, sorter=sorter)
@doc(ExtensionArray.factorize)
def factorize(self, na_sentinel: int = -1) -> tuple[np.ndarray, ExtensionArray]:
arr = self._data
mask = self._mask
codes, uniques = factorize_array(arr, na_sentinel=na_sentinel, mask=mask)
# check that factorize_array correctly preserves dtype.
assert uniques.dtype == self.dtype.numpy_dtype, (uniques.dtype, self.dtype)
uniques_ea = type(self)(uniques, np.zeros(len(uniques), dtype=bool))
return codes, uniques_ea
@doc(ExtensionArray._values_for_argsort)
def _values_for_argsort(self) -> np.ndarray:
return self._data
def value_counts(self, dropna: bool = True) -> Series:
"""
Returns a Series containing counts of each unique value.
Parameters
----------
dropna : bool, default True
Don't include counts of missing values.
Returns
-------
counts : Series
See Also
--------
Series.value_counts
"""
from pandas import (
Index,
Series,
)
from pandas.arrays import IntegerArray
# compute counts on the data with no nans
data = self._data[~self._mask]
value_counts = Index(data).value_counts()
index = value_counts.index
# if we want nans, count the mask
if dropna:
counts = value_counts._values
else:
counts = np.empty(len(value_counts) + 1, dtype="int64")
counts[:-1] = value_counts
counts[-1] = self._mask.sum()
index = index.insert(len(index), self.dtype.na_value)
index = index.astype(self.dtype)
mask = np.zeros(len(counts), dtype="bool")
counts = IntegerArray(counts, mask)
return Series(counts, index=index)
@doc(ExtensionArray.equals)
def equals(self, other) -> bool:
if type(self) != type(other):
return False
if other.dtype != self.dtype:
return False
# GH#44382 if e.g. self[1] is np.nan and other[1] is pd.NA, we are NOT
# equal.
if not np.array_equal(self._mask, other._mask):
return False
left = self._data[~self._mask]
right = other._data[~other._mask]
return array_equivalent(left, right, dtype_equal=True)
def _quantile(
self: BaseMaskedArrayT, qs: npt.NDArray[np.float64], interpolation: str
) -> BaseMaskedArrayT:
"""
Dispatch to quantile_with_mask, needed because we do not have
_from_factorized.
Notes
-----
We assume that all impacted cases are 1D-only.
"""
mask = np.atleast_2d(np.asarray(self.isna()))
npvalues: np.ndarray = np.atleast_2d(np.asarray(self))
res = quantile_with_mask(
npvalues,
mask=mask,
fill_value=self.dtype.na_value,
qs=qs,
interpolation=interpolation,
)
assert res.ndim == 2
assert res.shape[0] == 1
res = res[0]
try:
out = type(self)._from_sequence(res, dtype=self.dtype)
except TypeError:
# GH#42626: not able to safely cast Int64
# for floating point output
# error: Incompatible types in assignment (expression has type
# "ndarray[Any, dtype[floating[_64Bit]]]", variable has type
# "BaseMaskedArrayT")
out = np.asarray(res, dtype=np.float64) # type: ignore[assignment]
return out
# ------------------------------------------------------------------
# Reductions
def _reduce(self, name: str, *, skipna: bool = True, **kwargs):
if name in {"any", "all", "min", "max", "sum", "prod"}:
return getattr(self, name)(skipna=skipna, **kwargs)
data = self._data
mask = self._mask
if name in {"mean"}:
op = getattr(masked_reductions, name)
result = op(data, mask, skipna=skipna, **kwargs)
return result
# coerce to a nan-aware float if needed
# (we explicitly use NaN within reductions)
if self._hasna:
data = self.to_numpy("float64", na_value=np.nan)
# median, var, std, skew, kurt, idxmin, idxmax
op = getattr(nanops, "nan" + name)
result = op(data, axis=0, skipna=skipna, mask=mask, **kwargs)
if np.isnan(result):
return libmissing.NA
return result
def _wrap_reduction_result(self, name: str, result, skipna, **kwargs):
if isinstance(result, np.ndarray):
axis = kwargs["axis"]
if skipna:
# we only retain mask for all-NA rows/columns
mask = self._mask.all(axis=axis)
else:
mask = self._mask.any(axis=axis)
return self._maybe_mask_result(result, mask)
return result
def sum(self, *, skipna=True, min_count=0, axis: int | None = 0, **kwargs):
nv.validate_sum((), kwargs)
# TODO: do this in validate_sum?
if "out" in kwargs:
# np.sum; test_floating_array_numpy_sum
if kwargs["out"] is not None:
raise NotImplementedError
kwargs.pop("out")
result = masked_reductions.sum(
self._data,
self._mask,
skipna=skipna,
min_count=min_count,
axis=axis,
)
return self._wrap_reduction_result(
"sum", result, skipna=skipna, axis=axis, **kwargs
)
def prod(self, *, skipna=True, min_count=0, axis: int | None = 0, **kwargs):
nv.validate_prod((), kwargs)
result = masked_reductions.prod(
self._data,
self._mask,
skipna=skipna,
min_count=min_count,
axis=axis,
)
return self._wrap_reduction_result(
"prod", result, skipna=skipna, axis=axis, **kwargs
)
def min(self, *, skipna=True, axis: int | None = 0, **kwargs):
nv.validate_min((), kwargs)
return masked_reductions.min(
self._data,
self._mask,
skipna=skipna,
axis=axis,
)
def max(self, *, skipna=True, axis: int | None = 0, **kwargs):
nv.validate_max((), kwargs)
return masked_reductions.max(
self._data,
self._mask,
skipna=skipna,
axis=axis,
)
def any(self, *, skipna: bool = True, **kwargs):
"""
Return whether any element is truthy.
Returns False unless there is at least one element that is truthy.
By default, NAs are skipped. If ``skipna=False`` is specified and
missing values are present, similar :ref:`Kleene logic <boolean.kleene>`
is used as for logical operations.
.. versionchanged:: 1.4.0
Parameters
----------
skipna : bool, default True
Exclude NA values. If the entire array is NA and `skipna` is
True, then the result will be False, as for an empty array.
If `skipna` is False, the result will still be True if there is
at least one element that is truthy, otherwise NA will be returned
if there are NA's present.
**kwargs : any, default None
Additional keywords have no effect but might be accepted for
compatibility with NumPy.
Returns
-------
bool or :attr:`pandas.NA`
See Also
--------
numpy.any : Numpy version of this method.
BaseMaskedArray.all : Return whether all elements are truthy.
Examples
--------
The result indicates whether any element is truthy (and by default
skips NAs):
>>> pd.array([True, False, True]).any()
True
>>> pd.array([True, False, pd.NA]).any()
True
>>> pd.array([False, False, pd.NA]).any()
False
>>> pd.array([], dtype="boolean").any()
False
>>> pd.array([pd.NA], dtype="boolean").any()
False
>>> pd.array([pd.NA], dtype="Float64").any()
False
With ``skipna=False``, the result can be NA if this is logically
required (whether ``pd.NA`` is True or False influences the result):
>>> pd.array([True, False, pd.NA]).any(skipna=False)
True
>>> pd.array([1, 0, pd.NA]).any(skipna=False)
True
>>> pd.array([False, False, pd.NA]).any(skipna=False)
<NA>
>>> pd.array([0, 0, pd.NA]).any(skipna=False)
<NA>
"""
kwargs.pop("axis", None)
nv.validate_any((), kwargs)
values = self._data.copy()
# Argument 3 to "putmask" has incompatible type "object"; expected
# "Union[_SupportsArray[dtype[Any]], _NestedSequence[
# _SupportsArray[dtype[Any]]], bool, int, float, complex, str, bytes, _Nested
# Sequence[Union[bool, int, float, complex, str, bytes]]]" [arg-type]
np.putmask(values, self._mask, self._falsey_value) # type: ignore[arg-type]
result = values.any()
if skipna:
return result
else:
if result or len(self) == 0 or not self._mask.any():
return result
else:
return self.dtype.na_value
def all(self, *, skipna: bool = True, **kwargs):
"""
Return whether all elements are truthy.
Returns True unless there is at least one element that is falsey.
By default, NAs are skipped. If ``skipna=False`` is specified and
missing values are present, similar :ref:`Kleene logic <boolean.kleene>`
is used as for logical operations.
.. versionchanged:: 1.4.0
Parameters
----------
skipna : bool, default True
Exclude NA values. If the entire array is NA and `skipna` is
True, then the result will be True, as for an empty array.
If `skipna` is False, the result will still be False if there is
at least one element that is falsey, otherwise NA will be returned
if there are NA's present.
**kwargs : any, default None
Additional keywords have no effect but might be accepted for
compatibility with NumPy.
Returns
-------
bool or :attr:`pandas.NA`
See Also
--------
numpy.all : Numpy version of this method.
BooleanArray.any : Return whether any element is truthy.
Examples
--------
The result indicates whether all elements are truthy (and by default
skips NAs):
>>> pd.array([True, True, pd.NA]).all()
True
>>> pd.array([1, 1, pd.NA]).all()
True
>>> pd.array([True, False, pd.NA]).all()
False
>>> pd.array([], dtype="boolean").all()
True
>>> pd.array([pd.NA], dtype="boolean").all()
True
>>> pd.array([pd.NA], dtype="Float64").all()
True
With ``skipna=False``, the result can be NA if this is logically
required (whether ``pd.NA`` is True or False influences the result):
>>> pd.array([True, True, pd.NA]).all(skipna=False)
<NA>
>>> pd.array([1, 1, pd.NA]).all(skipna=False)
<NA>
>>> pd.array([True, False, pd.NA]).all(skipna=False)
False
>>> pd.array([1, 0, pd.NA]).all(skipna=False)
False
"""
kwargs.pop("axis", None)
nv.validate_all((), kwargs)
values = self._data.copy()
# Argument 3 to "putmask" has incompatible type "object"; expected
# "Union[_SupportsArray[dtype[Any]], _NestedSequence[
# _SupportsArray[dtype[Any]]], bool, int, float, complex, str, bytes, _Neste
# dSequence[Union[bool, int, float, complex, str, bytes]]]" [arg-type]
np.putmask(values, self._mask, self._truthy_value) # type: ignore[arg-type]
result = values.all()
if skipna:
return result
else:
if not result or len(self) == 0 or not self._mask.any():
return result
else:
return self.dtype.na_value
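# ---------------------------------------------------------------------------
# Hedged usage sketch (editor's addition, not part of the class above): shows
# the skipna / Kleene semantics documented in ``any``/``all`` and the
# ``min_count`` behaviour of ``sum`` through the public ``pd.array`` API.
# Assumes a pandas version with masked ("boolean"/"Int64") extension arrays.
if __name__ == "__main__":
    import pandas as pd

    bools = pd.array([True, True, pd.NA], dtype="boolean")
    print(bools.all())               # True  -> NA values are skipped by default
    print(bools.all(skipna=False))   # <NA>  -> Kleene logic: the NA could be False
    print(bools.any(skipna=False))   # True  -> a definite True decides the result

    ints = pd.array([1, 2, pd.NA], dtype="Int64")
    print(ints.sum())                # 3     -> NA skipped
    print(ints.sum(min_count=3))     # <NA>  -> fewer than 3 valid values present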
|
|
'''
ModernGL extension
'''
import logging
import struct
import ModernGL
import numpy as np
import PIL as Pillow
import PIL.Image  # 'import PIL' alone does not load the Image submodule used below
__all__ = [
'set_default_context',
'load',
'image',
'show',
]
__version__ = '0.2.0'
log = logging.getLogger('ModernGL.ext.textools')
lookup_components = {
'L': 1,
'RGB': 3,
'RGBA': 4,
}
lookup_mode = {
1: 'L',
2: 'RGB',
3: 'RGB',
4: 'RGBA',
}
default_context = None
def clampi(x):
if x < 0:
return 0
if x > 255:
return 255
return int(x)
def clampf(x):
if x < 0.0:
return 0
if x > 1.0:
return 255
return int(x * 255.0)
def set_default_context(ctx) -> None:
'''
Set the default context.
Args:
ctx (:py:class:`ModernGL.Context`): The Context to use when needed.
Examples:
.. code-block:: python
import ModernGL
from ModernGL.ext import textools
ctx = ModernGL.create_standalone_context()
# ctx = ModernGL.create_context()
textools.set_default_context(ctx)
texture = textools.load('brick.jpg')
textools.show(texture)
'''
global default_context
default_context = ctx
def load(filename, convert=None, ctx=None) -> ModernGL.Texture:
'''
Load a texture. If ctx is ``None`` the default_context is used.
Args:
filename (str): The name of the file to load.
Keyword Args:
convert (str): Convert the texture before loading. Possible values are: ``L``, ``RGB`` and ``RGBA``
ctx (:py:class:`ModernGL.Context`): The Context to use for loading the texture.
Returns:
:py:class:`ModernGL.Texture`: The texture.
Examples:
.. code-block:: python
import ModernGL
from ModernGL.ext import textools
ctx = ModernGL.create_standalone_context()
# ctx = ModernGL.create_context()
texture = textools.load('brick.jpg', ctx=ctx)
texture.use()
'''
if ctx is None:
ctx = default_context
if ctx is None:
raise Exception('no context')
img = Pillow.Image.open(filename)
if convert is not None:
img = img.convert(convert)
components = lookup_components.get(img.mode, None)
if not components:
img = img.convert('RGB')
components = 3
return ctx.texture(img.size, components, img.tobytes())
def image(texture, modify=None, ctx=None) -> Pillow.Image:
'''
Read a texture to a Pillow Image. If ctx is ``None`` the default_context is used.
Args:
texture (:py:class:`ModernGL.Texture`): The texture to read.
Keyword Args:
modify (lambda): Modify the color values before storing them in the Image.
ctx (:py:class:`ModernGL.Context`): The Context to use for loading the texture.
Returns:
:py:class:`Pillow.Image`: The image.
Examples:
.. code-block:: python
import ModernGL
from ModernGL.ext import textools
ctx = ModernGL.create_standalone_context()
# ctx = ModernGL.create_context()
texture = textools.load('brick.jpg', ctx=ctx)
img = textools.image(texture)
img.save('texture.png')
'''
if ctx is None:
ctx = default_context
if not texture.samples:
if modify is None:
modify = lambda x: x
mode = lookup_mode[texture.components]
if texture.floats:
array = np.frombuffer(texture.read(), 'float32')
clamp = clampf
else:
array = np.frombuffer(texture.read(), 'uint8')
clamp = clampi
pixels = np.array([clamp(modify(x)) for x in array], dtype='uint8').tobytes()
if texture.components == 2:
pixels = b''.join(pixels[i : i + 2] + b'\x00' for i in range(0, len(pixels), 2))
return Pillow.Image.frombytes(mode, texture.size, pixels)
else:
if ctx is None:
raise Exception('no context')
if texture.depth:
raise NotImplementedError('not yet implemented')
new_texture = ctx.texture(texture.size, texture.components, floats=texture.floats)
fbo1 = ctx.framebuffer(texture)
fbo2 = ctx.framebuffer(new_texture)
ctx.copy_framebuffer(fbo2, fbo1)
result = image(new_texture, modify)
fbo1.release()
fbo2.release()
new_texture.release()
return result
def show(texture, modify=None, ctx=None) -> None:
'''
Show the texture using Pillow. If ctx is ``None`` the default_context is used.
Args:
texture (:py:class:`ModernGL.Texture`): The texture to show.
Keyword Args:
modify (lambda): Modify the color values before storing them in the Image.
ctx (:py:class:`ModernGL.Context`): The Context to use for loading the texture.
Examples:
.. code-block:: python
import ModernGL
from ModernGL.ext import textools
ctx = ModernGL.create_standalone_context()
# ctx = ModernGL.create_context()
texture = textools.load('brick.jpg', ctx=ctx)
textools.show(texture)
'''
image(texture, modify=modify, ctx=ctx).show()
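# ---------------------------------------------------------------------------
# Hedged usage sketch (editor's addition, not part of the module above): shows
# the ``modify`` callback of ``image`` by inverting each 8-bit channel of a
# texture before saving it. The filenames 'brick.jpg' and 'brick_inverted.png'
# are placeholders; a standalone context is assumed to be creatable here.
if __name__ == '__main__':
    ctx = ModernGL.create_standalone_context()
    set_default_context(ctx)
    texture = load('brick.jpg', convert='RGB')
    # each channel value x in [0, 255] is passed through the callback and then
    # clamped by clampi before being written into the Pillow image
    inverted = image(texture, modify=lambda x: 255 - x)
    inverted.save('brick_inverted.png')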
|
|
# encoding: utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
import operator
import warnings
from django.utils import six
from haystack import connection_router, connections
from haystack.backends import SQ
from haystack.constants import DEFAULT_OPERATOR, ITERATOR_LOAD_PER_QUERY, REPR_OUTPUT_SIZE
from haystack.exceptions import NotHandled
from haystack.inputs import AutoQuery, Clean, Raw
from haystack.utils import log as logging
class SearchQuerySet(object):
"""
Provides a way to specify search parameters and lazily load results.
Supports chaining (a la QuerySet) to narrow the search.
"""
def __init__(self, using=None, query=None):
# ``_using`` should only ever be a value other than ``None`` if it's
# been forced with the ``.using`` method.
self._using = using
self.query = None
self._determine_backend()
# If ``query`` is present, it should override even what the routers
# think.
if query is not None:
self.query = query
self._result_cache = []
self._result_count = None
self._cache_full = False
self._load_all = False
self._ignored_result_count = 0
self.log = logging.getLogger('haystack')
def _determine_backend(self):
from haystack import connections
# A backend has been manually selected. Use it instead.
if self._using is not None:
self.query = connections[self._using].get_query()
return
# No backend, so rely on the routers to figure out what's right.
hints = {}
if self.query:
hints['models'] = self.query.models
backend_alias = connection_router.for_read(**hints)
if isinstance(backend_alias, (list, tuple)) and len(backend_alias):
# We can only effectively read from one engine.
backend_alias = backend_alias[0]
# The ``SearchQuery`` might swap itself out for a different variant
# here.
if self.query:
self.query = self.query.using(backend_alias)
else:
self.query = connections[backend_alias].get_query()
def __getstate__(self):
"""
For pickling.
"""
len(self)
obj_dict = self.__dict__.copy()
obj_dict['_iter'] = None
obj_dict['log'] = None
return obj_dict
def __setstate__(self, data_dict):
"""
For unpickling.
"""
self.__dict__ = data_dict
self.log = logging.getLogger('haystack')
def __repr__(self):
data = list(self[:REPR_OUTPUT_SIZE])
if len(self) > REPR_OUTPUT_SIZE:
data[-1] = "...(remaining elements truncated)..."
return repr(data)
def __len__(self):
if not self._result_count:
self._result_count = self.query.get_count()
# Some backends give weird, false-y values here. Convert to zero.
if not self._result_count:
self._result_count = 0
# This needs to return the actual number of hits, not what's in the cache.
return self._result_count - self._ignored_result_count
def __iter__(self):
if self._cache_is_full():
# We've got a fully populated cache. Let Python do the hard work.
return iter(self._result_cache)
return self._manual_iter()
def __and__(self, other):
if isinstance(other, EmptySearchQuerySet):
return other._clone()
combined = self._clone()
combined.query.combine(other.query, SQ.AND)
return combined
def __or__(self, other):
combined = self._clone()
if isinstance(other, EmptySearchQuerySet):
return combined
combined.query.combine(other.query, SQ.OR)
return combined
def _cache_is_full(self):
if not self.query.has_run():
return False
if len(self) <= 0:
return True
try:
self._result_cache.index(None)
return False
except ValueError:
# No ``None``s found in the results. Check the length of the cache.
return len(self._result_cache) > 0
def _manual_iter(self):
# If we're here, our cache isn't fully populated.
# For efficiency, fill the cache as we go if we run out of results.
# Also, this can't be part of the __iter__ method due to Python's rules
# about generator functions.
current_position = 0
current_cache_max = 0
while True:
if len(self._result_cache) > 0:
try:
current_cache_max = self._result_cache.index(None)
except ValueError:
current_cache_max = len(self._result_cache)
while current_position < current_cache_max:
yield self._result_cache[current_position]
current_position += 1
if self._cache_is_full():
return  # PEP 479: a plain return ends the generator; raising StopIteration is an error in Python 3.7+
# We've run out of results and haven't hit our limit.
# Fill more of the cache.
if not self._fill_cache(current_position, current_position + ITERATOR_LOAD_PER_QUERY):
return
def _fill_cache(self, start, end, **kwargs):
# Tell the query where to start from and how many we'd like.
self.query._reset()
self.query.set_limits(start, end)
results = self.query.get_results(**kwargs)
if results is None or len(results) == 0:
return False
# Setup the full cache now that we know how many results there are.
# We need the ``None``s as placeholders to know what parts of the
# cache we have/haven't filled.
# Using ``None`` like this takes up very little memory. In testing,
# an array of 100,000 ``None``s consumed less than .5 Mb, which ought
# to be an acceptable loss for consistent and more efficient caching.
if len(self._result_cache) == 0:
self._result_cache = [None] * self.query.get_count()
if start is None:
start = 0
if end is None:
end = self.query.get_count()
to_cache = self.post_process_results(results)
# Assign by slice.
self._result_cache[start:start + len(to_cache)] = to_cache
return True
def post_process_results(self, results):
to_cache = []
# Check if we wish to load all objects.
if self._load_all:
models_pks = {}
loaded_objects = {}
# Remember the search position for each result so we don't have to resort later.
for result in results:
models_pks.setdefault(result.model, []).append(result.pk)
# Load the objects for each model in turn.
for model in models_pks:
try:
ui = connections[self.query._using].get_unified_index()
index = ui.get_index(model)
objects = index.read_queryset(using=self.query._using)
loaded_objects[model] = objects.in_bulk(models_pks[model])
except NotHandled:
self.log.warning("Model '%s' not handled by the routers", model)
# Revert to old behaviour
loaded_objects[model] = model._default_manager.in_bulk(models_pks[model])
for result in results:
if self._load_all:
# We have to deal with integer keys being cast from strings
model_objects = loaded_objects.get(result.model, {})
if result.pk not in model_objects:
try:
result.pk = int(result.pk)
except ValueError:
pass
try:
result._object = model_objects[result.pk]
except KeyError:
# The object was either deleted since we indexed or should
# be ignored; fail silently.
self._ignored_result_count += 1
continue
to_cache.append(result)
return to_cache
def __getitem__(self, k):
"""
Retrieves an item or slice from the set of results.
"""
if not isinstance(k, (slice, six.integer_types)):
raise TypeError
assert ((not isinstance(k, slice) and (k >= 0))
or (isinstance(k, slice) and (k.start is None or k.start >= 0)
and (k.stop is None or k.stop >= 0))), \
"Negative indexing is not supported."
# Remember if it's a slice or not. We're going to treat everything as
# a slice to simplify the logic and will `.pop()` at the end as needed.
if isinstance(k, slice):
is_slice = True
start = k.start
if k.stop is not None:
bound = int(k.stop)
else:
bound = None
else:
is_slice = False
start = k
bound = k + 1
# We need to check whether we need to populate more of the cache.
if len(self._result_cache) <= 0 or (None in self._result_cache[start:bound] and not self._cache_is_full()):
try:
self._fill_cache(start, bound)
except StopIteration:
# There's nothing left, even though the bound is higher.
pass
# Cache should be full enough for our needs.
if is_slice:
return self._result_cache[start:bound]
else:
return self._result_cache[start]
# Methods that return a SearchQuerySet.
def all(self):
"""Returns all results for the query."""
return self._clone()
def none(self):
"""Returns an empty result list for the query."""
return self._clone(klass=EmptySearchQuerySet)
def filter(self, *args, **kwargs):
"""Narrows the search based on certain attributes and the default operator."""
if DEFAULT_OPERATOR == 'OR':
return self.filter_or(*args, **kwargs)
else:
return self.filter_and(*args, **kwargs)
def exclude(self, *args, **kwargs):
"""Narrows the search by ensuring certain attributes are not included."""
clone = self._clone()
clone.query.add_filter(~SQ(*args, **kwargs))
return clone
def filter_and(self, *args, **kwargs):
"""Narrows the search by looking for (and including) certain attributes."""
clone = self._clone()
clone.query.add_filter(SQ(*args, **kwargs))
return clone
def filter_or(self, *args, **kwargs):
"""Narrows the search by ensuring certain attributes are not included."""
clone = self._clone()
clone.query.add_filter(SQ(*args, **kwargs), use_or=True)
return clone
def order_by(self, *args):
"""Alters the order in which the results should appear."""
clone = self._clone()
for field in args:
clone.query.add_order_by(field)
return clone
def highlight(self):
"""Adds highlighting to the results."""
clone = self._clone()
clone.query.add_highlight()
return clone
def models(self, *models):
"""Accepts an arbitrary number of Model classes to include in the search."""
clone = self._clone()
for model in models:
if model not in connections[self.query._using].get_unified_index().get_indexed_models():
warnings.warn('The model %r is not registered for search.' % (model,))
clone.query.add_model(model)
return clone
def result_class(self, klass):
"""
Allows specifying a different class to use for results.
Overrides any previous usages. If ``None`` is provided, Haystack will
revert back to the default ``SearchResult`` object.
"""
clone = self._clone()
clone.query.set_result_class(klass)
return clone
def boost(self, term, boost):
"""Boosts a certain aspect of the query."""
clone = self._clone()
clone.query.add_boost(term, boost)
return clone
def facet(self, field, **options):
"""Adds faceting to a query for the provided field."""
clone = self._clone()
clone.query.add_field_facet(field, **options)
return clone
def within(self, field, point_1, point_2):
"""Spatial: Adds a bounding box search to the query."""
clone = self._clone()
clone.query.add_within(field, point_1, point_2)
return clone
def dwithin(self, field, point, distance):
"""Spatial: Adds a distance-based search to the query."""
clone = self._clone()
clone.query.add_dwithin(field, point, distance)
return clone
def stats(self, field):
"""Adds stats to a query for the provided field."""
return self.stats_facet(field, facet_fields=None)
def stats_facet(self, field, facet_fields=None):
"""Adds stats facet for the given field and facet_fields represents
the faceted fields."""
clone = self._clone()
stats_facets = []
try:
stats_facets.append(sum(facet_fields,[]))
except TypeError:
if facet_fields: stats_facets.append(facet_fields)
clone.query.add_stats_query(field,stats_facets)
return clone
def distance(self, field, point):
"""
Spatial: Denotes results must have distance measurements from the
provided point.
"""
clone = self._clone()
clone.query.add_distance(field, point)
return clone
def date_facet(self, field, start_date, end_date, gap_by, gap_amount=1):
"""Adds faceting to a query for the provided field by date."""
clone = self._clone()
clone.query.add_date_facet(field, start_date, end_date, gap_by, gap_amount=gap_amount)
return clone
def query_facet(self, field, query):
"""Adds faceting to a query for the provided field with a custom query."""
clone = self._clone()
clone.query.add_query_facet(field, query)
return clone
def narrow(self, query):
"""Pushes existing facet choices into the search."""
if isinstance(query, SQ):
# produce query string using empty query of the same class
empty_query = self.query._clone()
empty_query._reset()
query = query.as_query_string(empty_query.build_query_fragment)
clone = self._clone()
clone.query.add_narrow_query(query)
return clone
def raw_search(self, query_string, **kwargs):
"""Passes a raw query directly to the backend."""
return self.filter(content=Raw(query_string, **kwargs))
def load_all(self):
"""Efficiently populates the objects in the search results."""
clone = self._clone()
clone._load_all = True
return clone
def auto_query(self, query_string, fieldname='content'):
"""
Performs a best guess constructing the search query.
This method is somewhat naive but works well enough for the simple,
common cases.
"""
kwargs = {
fieldname: AutoQuery(query_string)
}
return self.filter(**kwargs)
def autocomplete(self, **kwargs):
"""
A shortcut method to perform an autocomplete search.
Must be run against fields that are either ``NgramField`` or
``EdgeNgramField``.
"""
clone = self._clone()
query_bits = []
for field_name, query in kwargs.items():
for word in query.split(' '):
bit = clone.query.clean(word.strip())
if bit:
kwargs = {
field_name: bit,
}
query_bits.append(SQ(**kwargs))
return clone.filter(six.moves.reduce(operator.__and__, query_bits))
def using(self, connection_name):
"""
Allows switching which connection the ``SearchQuerySet`` uses to
search in.
"""
clone = self._clone()
clone.query = self.query.using(connection_name)
clone._using = connection_name
return clone
# Methods that do not return a SearchQuerySet.
def count(self):
"""Returns the total number of matching results."""
return len(self)
def best_match(self):
"""Returns the best/top search result that matches the query."""
return self[0]
def latest(self, date_field):
"""Returns the most recent search result that matches the query."""
clone = self._clone()
clone.query.clear_order_by()
clone.query.add_order_by("-%s" % date_field)
return clone.best_match()
def more_like_this(self, model_instance):
"""Finds similar results to the object passed in."""
clone = self._clone()
clone.query.more_like_this(model_instance)
return clone
def facet_counts(self):
"""
Returns the facet counts found by the query.
This will cause the query to execute and should generally be used when
presenting the data.
"""
if self.query.has_run():
return self.query.get_facet_counts()
else:
clone = self._clone()
return clone.query.get_facet_counts()
def stats_results(self):
"""
Returns the stats results found by the query.
"""
if self.query.has_run():
return self.query.get_stats()
else:
clone = self._clone()
return clone.query.get_stats()
def spelling_suggestion(self, preferred_query=None):
"""
Returns the spelling suggestion found by the query.
To work, you must set ``INCLUDE_SPELLING`` within your connection's
settings dictionary to ``True``. Otherwise, ``None`` will be returned.
This will cause the query to execute and should generally be used when
presenting the data.
"""
if self.query.has_run():
return self.query.get_spelling_suggestion(preferred_query)
else:
clone = self._clone()
return clone.query.get_spelling_suggestion(preferred_query)
def values(self, *fields):
"""
Returns a list of dictionaries, each containing the key/value pairs for
the result, exactly like Django's ``ValuesQuerySet``.
"""
qs = self._clone(klass=ValuesSearchQuerySet)
qs._fields.extend(fields)
return qs
def values_list(self, *fields, **kwargs):
"""
Returns a list of field values as tuples, exactly like Django's
``QuerySet.values``.
Optionally accepts a ``flat=True`` kwarg, which in the case of a
single field being provided, will return a flat list of that field
rather than a list of tuples.
"""
flat = kwargs.pop("flat", False)
if flat and len(fields) > 1:
raise TypeError("'flat' is not valid when values_list is called with more than one field.")
qs = self._clone(klass=ValuesListSearchQuerySet)
qs._fields.extend(fields)
qs._flat = flat
return qs
# Utility methods.
def _clone(self, klass=None):
if klass is None:
klass = self.__class__
query = self.query._clone()
clone = klass(query=query)
clone._load_all = self._load_all
return clone
class EmptySearchQuerySet(SearchQuerySet):
"""
A stubbed SearchQuerySet that behaves as normal but always returns no
results.
"""
def __len__(self):
return 0
def _cache_is_full(self):
# Pretend the cache is always full with no results.
return True
def _clone(self, klass=None):
clone = super(EmptySearchQuerySet, self)._clone(klass=klass)
clone._result_cache = []
return clone
def _fill_cache(self, start, end):
return False
def facet_counts(self):
return {}
class ValuesListSearchQuerySet(SearchQuerySet):
"""
A ``SearchQuerySet`` which returns a list of field values as tuples, exactly
like Django's ``ValuesListQuerySet``.
"""
def __init__(self, *args, **kwargs):
super(ValuesListSearchQuerySet, self).__init__(*args, **kwargs)
self._flat = False
self._fields = []
# Removing this dependency would require refactoring much of the backend
# code (_process_results, etc.) and these aren't large enough to make it
# an immediate priority:
self._internal_fields = ['id', 'django_ct', 'django_id', 'score']
def _clone(self, klass=None):
clone = super(ValuesListSearchQuerySet, self)._clone(klass=klass)
clone._fields = self._fields
clone._flat = self._flat
return clone
def _fill_cache(self, start, end):
query_fields = set(self._internal_fields)
query_fields.update(self._fields)
kwargs = {
'fields': query_fields
}
return super(ValuesListSearchQuerySet, self)._fill_cache(start, end, **kwargs)
def post_process_results(self, results):
to_cache = []
if self._flat:
accum = to_cache.extend
else:
accum = to_cache.append
for result in results:
accum([getattr(result, i, None) for i in self._fields])
return to_cache
class ValuesSearchQuerySet(ValuesListSearchQuerySet):
"""
A ``SearchQuerySet`` which returns a list of dictionaries, each containing
the key/value pairs for the result, exactly like Django's
``ValuesQuerySet``.
"""
def _fill_cache(self, start, end):
query_fields = set(self._internal_fields)
query_fields.update(self._fields)
kwargs = {
'fields': query_fields
}
return super(ValuesListSearchQuerySet, self)._fill_cache(start, end, **kwargs)
def post_process_results(self, results):
to_cache = []
for result in results:
to_cache.append(dict((i, getattr(result, i, None)) for i in self._fields))
return to_cache
class RelatedSearchQuerySet(SearchQuerySet):
"""
A variant of the SearchQuerySet that can handle `load_all_queryset`s.
This differs mainly in the `_fill_cache` method: it is far less efficient,
because the cache has to be filled in order to keep the results consistent.
"""
def __init__(self, *args, **kwargs):
super(RelatedSearchQuerySet, self).__init__(*args, **kwargs)
self._load_all_querysets = {}
self._result_cache = []
def _cache_is_full(self):
return len(self._result_cache) >= len(self)
def _manual_iter(self):
# If we're here, our cache isn't fully populated.
# For efficiency, fill the cache as we go if we run out of results.
# Also, this can't be part of the __iter__ method due to Python's rules
# about generator functions.
current_position = 0
current_cache_max = 0
while True:
current_cache_max = len(self._result_cache)
while current_position < current_cache_max:
yield self._result_cache[current_position]
current_position += 1
if self._cache_is_full():
return  # PEP 479: a plain return ends the generator; raising StopIteration is an error in Python 3.7+
# We've run out of results and haven't hit our limit.
# Fill more of the cache.
start = current_position + self._ignored_result_count
if not self._fill_cache(start, start + ITERATOR_LOAD_PER_QUERY):
return
def _fill_cache(self, start, end):
# Tell the query where to start from and how many we'd like.
self.query._reset()
self.query.set_limits(start, end)
results = self.query.get_results()
if len(results) == 0:
return False
if start is None:
start = 0
if end is None:
end = self.query.get_count()
# Check if we wish to load all objects.
if self._load_all:
models_pks = {}
loaded_objects = {}
# Remember the search position for each result so we don't have to resort later.
for result in results:
models_pks.setdefault(result.model, []).append(result.pk)
# Load the objects for each model in turn.
for model in models_pks:
if model in self._load_all_querysets:
# Use the overriding queryset.
loaded_objects[model] = self._load_all_querysets[model].in_bulk(models_pks[model])
else:
# Check the SearchIndex for the model for an override.
try:
index = connections[self.query._using].get_unified_index().get_index(model)
qs = index.load_all_queryset()
loaded_objects[model] = qs.in_bulk(models_pks[model])
except NotHandled:
# The model returned doesn't seem to be handled by the
# routers. We should silently fail and populate
# nothing for those objects.
loaded_objects[model] = []
if len(results) + len(self._result_cache) < len(self) and len(results) < ITERATOR_LOAD_PER_QUERY:
self._ignored_result_count += ITERATOR_LOAD_PER_QUERY - len(results)
for result in results:
if self._load_all:
# We have to deal with integer keys being cast from strings; if this
# fails we've got a character pk.
try:
result.pk = int(result.pk)
except ValueError:
pass
try:
result._object = loaded_objects[result.model][result.pk]
except (KeyError, IndexError):
# The object was either deleted since we indexed or should
# be ignored; fail silently.
self._ignored_result_count += 1
continue
self._result_cache.append(result)
return True
def __getitem__(self, k):
"""
Retrieves an item or slice from the set of results.
"""
if not isinstance(k, (slice, six.integer_types)):
raise TypeError
assert ((not isinstance(k, slice) and (k >= 0))
or (isinstance(k, slice) and (k.start is None or k.start >= 0)
and (k.stop is None or k.stop >= 0))), \
"Negative indexing is not supported."
# Remember if it's a slice or not. We're going to treat everything as
# a slice to simplify the logic and will `.pop()` at the end as needed.
if isinstance(k, slice):
is_slice = True
start = k.start
if k.stop is not None:
bound = int(k.stop)
else:
bound = None
else:
is_slice = False
start = k
bound = k + 1
# We need to check whether we need to populate more of the cache.
if len(self._result_cache) <= 0 or not self._cache_is_full():
try:
while len(self._result_cache) < bound and not self._cache_is_full():
current_max = len(self._result_cache) + self._ignored_result_count
self._fill_cache(current_max, current_max + ITERATOR_LOAD_PER_QUERY)
except StopIteration:
# There's nothing left, even though the bound is higher.
pass
# Cache should be full enough for our needs.
if is_slice:
return self._result_cache[start:bound]
else:
return self._result_cache[start]
def load_all_queryset(self, model, queryset):
"""
Allows for specifying a custom ``QuerySet`` that changes how ``load_all``
will fetch records for the provided model.
This is useful for post-processing the results from the query, enabling
things like adding ``select_related`` or filtering certain data.
"""
clone = self._clone()
clone._load_all_querysets[model] = queryset
return clone
def _clone(self, klass=None):
if klass is None:
klass = self.__class__
query = self.query._clone()
clone = klass(query=query)
clone._load_all = self._load_all
clone._load_all_querysets = self._load_all_querysets
return clone
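# ---------------------------------------------------------------------------
# Hedged usage sketch (editor's addition, not part of the module above): a
# typical chained query against the lazily evaluated SearchQuerySet. ``Note``,
# its ``pub_date`` field and the ``myapp`` module are hypothetical; a
# configured Haystack backend is assumed.
def _example_search():
    from myapp.models import Note  # hypothetical indexed model
    sqs = (SearchQuerySet()
           .models(Note)
           .filter(content='django')
           .exclude(content='flask')
           .order_by('-pub_date')
           .load_all())
    # slicing triggers execution and fills the result cache
    for result in sqs[:10]:
        print(result.score, result.object)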
|
|
#
#
#
from __future__ import absolute_import, print_function, unicode_literals
from base64 import b64encode
from datetime import datetime, timedelta
from django.core.paginator import Page
from django.test import TestCase
from performant_pagination.pagination import PerformantPaginator
from performant_pagination.tests.models import RelatedModel, SimpleModel, \
TimedModel
class TestBasicPagination(TestCase):
maxDiff = None
def setUp(self):
SimpleModel.objects.bulk_create(
[SimpleModel(name='object {0}'.format(i)) for i in range(33)]
)
def test_page_1_work_around(self):
objects = SimpleModel.objects.order_by('pk')
# defaults
paginator = PerformantPaginator(SimpleModel.objects.all())
self.assertTrue(paginator)
# first page
page = paginator.page(1)
self.assertTrue(page)
self.assertIsInstance(page, Page)
# make sure we got the first page
self.assertEquals(list(objects[:25]), list(page))
# and the expected next tokens
self.assertEquals(None, page.token)
self.assertEquals(b64encode(str(objects[24].pk)), page.next_token)
self.assertEquals(None, page.previous_token)
def test_allow_count(self):
objects = SimpleModel.objects.order_by('pk')
# defaults, no count
paginator = PerformantPaginator(SimpleModel.objects.all())
self.assertTrue(paginator)
self.assertEquals(None, paginator.count())
# allow_count
paginator = PerformantPaginator(SimpleModel.objects.all(),
allow_count=True)
self.assertTrue(paginator)
self.assertEquals(len(objects), paginator.count())
def test_default_page_number(self):
paginator = PerformantPaginator(None)
self.assertEquals(None, paginator.default_page_number())
def test_validate_number(self):
paginator = PerformantPaginator(None)
for number in (None, '', 42, 1, -1, 'something'):
self.assertEquals(number, paginator.validate_number(number))
def test_has_other_pages(self):
# defaults
paginator = PerformantPaginator(SimpleModel.objects.all())
self.assertTrue(paginator)
page = paginator.page()
self.assertTrue(page.has_other_pages())
while page.has_next():
self.assertTrue(page.has_other_pages())
page = paginator.page(page.next_page_number())
# per_page > total count so no next/prev
paginator = PerformantPaginator(SimpleModel.objects.all(),
per_page=999)
self.assertTrue(paginator)
page = paginator.page()
self.assertFalse(page.has_other_pages())
def test_basic(self):
objects = SimpleModel.objects.order_by('pk')
# defaults
paginator = PerformantPaginator(SimpleModel.objects.all())
self.assertTrue(paginator)
self.assertTrue(str(paginator))
# first page
page = paginator.page()
self.assertTrue(page)
self.assertTrue(str(page))
# make sure we got the expected data
self.assertEquals(list(objects[:25]), list(page))
# and the expected next tokens
self.assertEquals(None, page.token)
self.assertEquals(b64encode(str(objects[24].pk)), page.next_token)
self.assertEquals(None, page.previous_token)
# check the page's methods
self.assertFalse(page.has_previous())
self.assertEquals(None, page.previous_page_number())
self.assertTrue(page.has_next())
self.assertEquals(b64encode(str(objects[24].pk)),
page.next_page_number())
# these guys are not applicable/implemented with our system
self.assertFalse(page.start_index())
self.assertFalse(page.end_index())
# now lets check the 2nd page
page = paginator.page(page.next_page_number())
self.assertTrue(page)
# make sure we got the expected data
self.assertEquals(list(objects[25:]), list(page))
# and the expected next tokens
self.assertEquals(b64encode(str(objects[24].pk)), page.token)
self.assertEquals(None, page.next_token)
self.assertEquals('', page.previous_token)
# check the page's methods
self.assertTrue(page.has_previous())
self.assertEquals('', page.previous_page_number())
self.assertFalse(page.has_next())
self.assertEquals(None, page.next_page_number())
def test_reversed(self):
objects = SimpleModel.objects.order_by('-pk')
# defaults
paginator = PerformantPaginator(SimpleModel.objects.all(),
ordering='-pk')
self.assertTrue(paginator)
# first page
page = paginator.page()
self.assertTrue(page)
# make sure we got the expected data
self.assertEquals(list(objects[:25]), list(page))
# and the expected next tokens
self.assertEquals(None, page.token)
self.assertEquals(b64encode(str(objects[24].pk)), page.next_token)
self.assertEquals(None, page.previous_token)
# check the page's methods
self.assertFalse(page.has_previous())
self.assertEquals(None, page.previous_page_number())
self.assertTrue(page.has_next())
self.assertEquals(b64encode(str(objects[24].pk)),
page.next_page_number())
# these guys are not applicable/implemented with our system
self.assertFalse(page.start_index())
self.assertFalse(page.end_index())
# now lets check the 2nd page
page = paginator.page(page.next_page_number())
self.assertTrue(page)
# make sure we got the expected data
self.assertEquals(list(objects[25:]), list(page))
# and the expected next tokens
self.assertEquals(b64encode(str(objects[24].pk)), page.token)
self.assertEquals(None, page.next_token)
self.assertEquals('', page.previous_token)
# check the page's methods
self.assertTrue(page.has_previous())
self.assertEquals('', page.previous_page_number())
self.assertFalse(page.has_next())
self.assertEquals(None, page.next_page_number())
def test_non_pk(self):
objects = SimpleModel.objects.order_by('name')
# defaults
paginator = PerformantPaginator(SimpleModel.objects.all(),
ordering='name')
self.assertTrue(paginator)
# first page
page = paginator.page()
self.assertTrue(page)
# make sure we got the expected data
self.assertEquals(list(objects[:25]), list(page))
# and the expected next tokens
self.assertEquals(None, page.token)
self.assertEquals(b64encode(objects[24].name), page.next_token)
self.assertEquals(None, page.previous_token)
# check the page's methods
self.assertFalse(page.has_previous())
self.assertEquals(None, page.previous_page_number())
self.assertTrue(page.has_next())
self.assertEquals(b64encode(objects[24].name), page.next_page_number())
# these guys are not applicable/implemented with our system
self.assertFalse(page.start_index())
self.assertFalse(page.end_index())
# now lets check the 2nd page
page = paginator.page(page.next_page_number())
self.assertTrue(page)
# make sure we got the expected data
self.assertEquals(list(objects[25:]), list(page))
# and the expected next tokens
self.assertEquals(b64encode(objects[24].name), page.token)
self.assertEquals(None, page.next_token)
self.assertEquals('', page.previous_token)
# check the page's methods
self.assertTrue(page.has_previous())
self.assertEquals('', page.previous_page_number())
self.assertFalse(page.has_next())
self.assertEquals(None, page.next_page_number())
def test_page_sizes(self):
objects = SimpleModel.objects.order_by('pk')
# defaults
paginator = PerformantPaginator(SimpleModel.objects.all(),
per_page=11)
self.assertTrue(paginator)
# first page
page = paginator.page()
self.assertTrue(page)
# make sure we got the expected data
self.assertEquals(list(objects[:11]), list(page))
# and the expected next tokens
self.assertEquals(None, page.token)
self.assertEquals(b64encode(str(objects[10].pk)), page.next_token)
self.assertEquals(None, page.previous_token)
# check the page's methods
self.assertFalse(page.has_previous())
self.assertEquals(None, page.previous_page_number())
self.assertTrue(page.has_next())
self.assertEquals(b64encode(str(objects[10].pk)),
page.next_page_number())
# these guys are not applicable/implemented with our system
self.assertFalse(page.start_index())
self.assertFalse(page.end_index())
# now lets check the 2nd page
page = paginator.page(page.next_page_number())
self.assertTrue(page)
# make sure we got the expected data
self.assertEquals(list(objects[11:22]), list(page))
# and the expected next tokens
self.assertEquals(b64encode(str(objects[10].pk)), page.token)
self.assertEquals(b64encode(str(objects[21].pk)), page.next_token)
self.assertEquals('', page.previous_token)
# check the page's methods
self.assertTrue(page.has_previous())
self.assertEquals('', page.previous_page_number())
self.assertTrue(page.has_next())
self.assertEquals(b64encode(str(objects[21].pk)),
page.next_page_number())
# and finally the 3rd page
page = paginator.page(page.next_page_number())
self.assertTrue(page)
# make sure we got the expected data
self.assertEquals(list(objects[22:]), list(page))
# and the expected next tokens
self.assertEquals(b64encode(str(objects[21].pk)), page.token)
self.assertEquals(None, page.next_token)
self.assertEquals(b64encode(str(objects[10].pk)), page.previous_token)
# check the page's methods
self.assertTrue(page.has_previous())
self.assertEquals(b64encode(str(objects[10].pk)),
page.previous_page_number())
self.assertFalse(page.has_next())
self.assertEquals(None, page.next_page_number())
class TestLarger(TestCase):
def setUp(self):
SimpleModel.objects.bulk_create(
[SimpleModel(name='object {0}'.format(i)) for i in range(333)]
)
def test_larger(self):
objects = list(SimpleModel.objects.order_by('pk'))
paginator = PerformantPaginator(SimpleModel.objects.all(),
per_page=50)
page = paginator.page()
self.assertFalse(page.previous_page_number())
off = 0
while page.has_next():
self.assertEquals(50, len(page))
self.assertEquals(list(objects[off:off + 50]), list(page))
page = paginator.page(page.next_page_number())
off += 50
# last page
self.assertTrue(page.previous_page_number() is not None)
self.assertEquals(33, len(page))
self.assertEquals(list(objects[off:off + 33]), list(page))
self.assertEquals(None, page.next_page_number())
# now let's go backwards
page = paginator.page(page.previous_page_number())
while page.has_previous():
off -= 50
self.assertEquals(50, len(page))
self.assertEquals(list(objects[off:off + 50]), list(page))
page = paginator.page(page.previous_page_number())
class TestRelationships(TestCase):
def setUp(self):
SimpleModel.objects.bulk_create(
[SimpleModel(name='object {0}'.format(i)) for i in range(27)]
)
relateds = []
for simple in SimpleModel.objects.all():
relateds.extend(
[RelatedModel(number=i, simple=simple) for i in range(5)]
)
RelatedModel.objects.bulk_create(relateds)
def test_related_order(self):
objects = RelatedModel.objects.order_by('simple__name')
# defaults
paginator = PerformantPaginator(RelatedModel.objects.all(),
ordering='simple__name')
self.assertTrue(paginator)
self.assertTrue(str(paginator))
# first page
page = paginator.page()
self.assertTrue(page)
self.assertTrue(str(page))
# make sure we got the expected data
self.assertEquals(list(objects[:25]), list(page))
# and the expected next tokens
self.assertEquals(None, page.token)
self.assertEquals(b64encode(objects[24].simple.name), page.next_token)
self.assertEquals(None, page.previous_token)
# page 2
page = paginator.page(page.next_token)
self.assertTrue(page)
# make sure we got the expected data
self.assertEquals(list(objects[25:50]), list(page))
# and the expected next tokens
self.assertEquals('', page.previous_token)
self.assertEquals(b64encode(objects[24].simple.name), page.token)
self.assertEquals(b64encode(objects[49].simple.name), page.next_token)
def test_related_reverse(self):
objects = RelatedModel.objects.order_by('-simple__name')
# defaults
paginator = PerformantPaginator(RelatedModel.objects.all(),
ordering='-simple__name')
self.assertTrue(paginator)
self.assertTrue(str(paginator))
# first page
page = paginator.page()
self.assertTrue(page)
self.assertTrue(str(page))
# make sure we got the expected data
self.assertEquals(list(objects[:25]), list(page))
# and the expected next tokens
self.assertEquals(None, page.token)
self.assertEquals(b64encode(objects[24].simple.name), page.next_token)
self.assertEquals(None, page.previous_token)
class TestDateTime(TestCase):
maxDiff = None
@classmethod
def setUpClass(cls):
# build up the test objects
timed_models = []
base = datetime(2013, 10, 27, 8, 44, 0)
for i in range(65):
# with days and minutes datetime, date, and time will have
# interesting differences
when = base - timedelta(days=i, minutes=i)
# two objects at each time
timed_models.append(TimedModel(when_datetime=when, when_date=when,
when_time=when))
# create them
TimedModel.objects.bulk_create(timed_models)
def test_date_time(self):
# fetch them all in our order, now that they're created
ordering = 'when_datetime'
timed_models = list(TimedModel.objects.order_by(ordering))
paginator = PerformantPaginator(TimedModel.objects.all(),
ordering=ordering)
self.assertTrue(paginator)
# first page
page = paginator.page()
self.assertTrue(page)
self.assertEquals(None, page.previous_token)
self.assertEquals(None, page.token)
datetime_format = '%Y-%m-%dT%H:%M:%S'
def tokenize_datetime(dt):
return b64encode(dt.strftime(datetime_format))
self.assertEquals(tokenize_datetime(timed_models[24].when_datetime),
page.next_token)
self.assertEquals(timed_models[:25], list(page))
# second page
page = paginator.page(page.next_token)
self.assertTrue(page)
self.assertEquals('', page.previous_token)
self.assertEquals(tokenize_datetime(timed_models[24].when_datetime),
page.token)
self.assertEquals(tokenize_datetime(timed_models[49].when_datetime),
page.next_token)
self.assertEquals(timed_models[25:50], list(page))
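# ---------------------------------------------------------------------------
# Hedged usage sketch (editor's addition, not part of the test module above):
# how the paginator exercised by these tests is typically driven from a Django
# view. The view name, template name and the 'page' GET parameter are
# illustrative only.
def _example_view(request):
    from django.shortcuts import render
    paginator = PerformantPaginator(SimpleModel.objects.all(), per_page=25)
    token = paginator.validate_number(
        request.GET.get('page', paginator.default_page_number()))
    page = paginator.page(token)
    # page.next_page_number() / page.previous_page_number() yield the opaque
    # tokens used to build next/previous links in the template
    return render(request, 'simple_list.html', {'page': page})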
|
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Removing unique constraint on 'Submission', fields ['title']
db.delete_unique('challenges_submission', ['title'])
def backwards(self, orm):
# Adding unique constraint on 'Submission', fields ['title']
db.create_unique('challenges_submission', ['title'])
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'challenges.category': {
'Meta': {'object_name': 'Category'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '60', 'db_index': 'True'})
},
'challenges.challenge': {
'Meta': {'object_name': 'Challenge'},
'allow_voting': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.TextField', [], {}),
'end_date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'moderate': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['projects.Project']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '60', 'db_index': 'True'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.utcnow'}),
'summary': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'})
},
'challenges.exclusionflag': {
'Meta': {'object_name': 'ExclusionFlag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'submission': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['challenges.Submission']"})
},
'challenges.externallink': {
'Meta': {'object_name': 'ExternalLink'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'submission': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['challenges.Submission']", 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '255'})
},
'challenges.judgeassignment': {
'Meta': {'unique_together': "(('submission', 'judge'),)", 'object_name': 'JudgeAssignment'},
'created_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.utcnow'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'judge': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['users.Profile']"}),
'submission': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['challenges.Submission']"})
},
'challenges.judgement': {
'Meta': {'unique_together': "(('submission', 'judge'),)", 'object_name': 'Judgement'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'judge': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['users.Profile']"}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'submission': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['challenges.Submission']"})
},
'challenges.judginganswer': {
'Meta': {'unique_together': "(('judgement', 'criterion'),)", 'object_name': 'JudgingAnswer'},
'criterion': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['challenges.JudgingCriterion']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'judgement': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'to': "orm['challenges.Judgement']"}),
'rating': ('django.db.models.fields.IntegerField', [], {})
},
'challenges.judgingcriterion': {
'Meta': {'ordering': "('id',)", 'object_name': 'JudgingCriterion'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_value': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
'phases': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'judgement_criteria'", 'blank': 'True', 'through': "orm['challenges.PhaseCriterion']", 'to': "orm['challenges.Phase']"}),
'question': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '250'})
},
'challenges.phase': {
'Meta': {'ordering': "('order',)", 'unique_together': "(('challenge', 'name'),)", 'object_name': 'Phase'},
'challenge': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'phases'", 'to': "orm['challenges.Challenge']"}),
'end_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 9, 15, 14, 39, 16, 646545)'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order': ('django.db.models.fields.IntegerField', [], {}),
'start_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.utcnow'})
},
'challenges.phasecriterion': {
'Meta': {'unique_together': "(('phase', 'criterion'),)", 'object_name': 'PhaseCriterion'},
'criterion': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['challenges.JudgingCriterion']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'phase': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['challenges.Phase']"}),
'weight': ('django.db.models.fields.DecimalField', [], {'default': '10', 'max_digits': '4', 'decimal_places': '2'})
},
'challenges.phaseround': {
'Meta': {'object_name': 'PhaseRound'},
'end_date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'phase': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['challenges.Phase']"}),
'slug': ('django_extensions.db.fields.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '50', 'separator': "u'-'", 'blank': 'True', 'populate_from': "'name'", 'overwrite': 'False', 'db_index': 'True'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {})
},
'challenges.submission': {
'Meta': {'ordering': "['-id']", 'object_name': 'Submission'},
'brief_description': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['challenges.Category']"}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['users.Profile']"}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.utcnow'}),
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_winner': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'phase': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['challenges.Phase']"}),
'phase_round': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['challenges.PhaseRound']", 'null': 'True', 'blank': 'True'}),
'sketh_note': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'updated_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
},
'challenges.submissionparent': {
'Meta': {'ordering': "('-created',)", 'object_name': 'SubmissionParent'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'submission': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['challenges.Submission']"})
},
'challenges.submissionversion': {
'Meta': {'unique_together': "(('submission', 'parent'),)", 'object_name': 'SubmissionVersion'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['challenges.SubmissionParent']"}),
'submission': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['challenges.Submission']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'projects.project': {
'Meta': {'object_name': 'Project'},
'allow_participation': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'allow_sub_projects': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'featured_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'followers': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'projects_following'", 'symmetrical': 'False', 'to': "orm['users.Profile']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'long_description': ('django.db.models.fields.TextField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'parent_project_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'sub_project_label': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'team_members': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['users.Profile']", 'symmetrical': 'False'}),
'topics': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['topics.Topic']", 'symmetrical': 'False'})
},
'taggit.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'})
},
'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_items'", 'to': "orm['taggit.Tag']"})
},
'topics.topic': {
'Meta': {'object_name': 'Topic'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'draft': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'long_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'})
},
'users.profile': {
'Meta': {'object_name': 'Profile'},
'avatar': ('django.db.models.fields.files.ImageField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'bio': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'featured_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'})
}
}
complete_apps = ['challenges']
|
|
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Multiprocess file credential storage.
This module provides file-based storage that supports multiple credentials and
cross-thread and process access.
This module supersedes the functionality previously found in `multistore_file`.
This module provides :class:`MultiprocessFileStorage` which:
* Is tied to a single credential via a user-specified key. This key can be
used to distinguish between multiple users, client ids, and/or scopes.
* Can be safely accessed and refreshed across threads and processes.
Process & thread safety guarantees the following behavior:
* If one thread or process refreshes a credential, subsequent refreshes
from other processes will re-fetch the credentials from the file instead
of performing an http request.
* If two processes or threads attempt to refresh concurrently, only one
will be able to acquire the lock and refresh, with the deadlock caveat
below.
    * The interprocess lock will not deadlock; instead, if a process cannot
      acquire the interprocess lock within ``INTERPROCESS_LOCK_DEADLINE``, it
      will allow refreshing the credential but will not write the updated
      credential to disk. This logic happens during every lock cycle: if the
      credentials are refreshed again, it will retry locking and writing as
      normal.
Usage
=====
Before using the storage, you need to decide how you want to key the
credentials. A few common strategies include:
* If you're storing credentials for multiple users in a single file, use
a unique identifier for each user as the key.
* If you're storing credentials for multiple client IDs in a single file,
use the client ID as the key.
* If you're storing multiple credentials for one user, use the scopes as
the key.
* If you have a complicated setup, use a compound key. For example, you
can use a combination of the client ID and scopes as the key.
Create an instance of :class:`MultiprocessFileStorage` for each credential you
want to store, for example::
filename = 'credentials'
key = '{}-{}'.format(client_id, user_id)
storage = MultiprocessFileStorage(filename, key)
To store the credentials::
storage.put(credentials)
If you're going to continue to use the credentials after storing them, be sure
to call :func:`set_store`::
credentials.set_store(storage)
To retrieve the credentials::
storage.get(credentials)
"""
import base64
import json
import logging
import os
import threading
import fasteners
from six import iteritems
from oauth2client import _helpers
from oauth2client import client
#: The maximum amount of time, in seconds, to wait when acquiring the
#: interprocess lock before falling back to read-only mode.
INTERPROCESS_LOCK_DEADLINE = 1
logger = logging.getLogger(__name__)
_backends = {}
_backends_lock = threading.Lock()
def _create_file_if_needed(filename):
"""Creates the an empty file if it does not already exist.
Returns:
True if the file was created, False otherwise.
"""
if os.path.exists(filename):
return False
else:
old_umask = os.umask(0o177)
try:
# Equivalent to "touch".
open(filename, 'a+b').close()
finally:
os.umask(old_umask)
logger.info('Credential file {0} created'.format(filename))
return True
def _load_credentials_file(credentials_file):
"""Load credentials from the given file handle.
The file is expected to be in this format:
{
"file_version": 2,
"credentials": {
"key": "base64 encoded json representation of credentials."
}
}
This function will warn and return empty credentials instead of raising
exceptions.
Args:
credentials_file: An open file handle.
Returns:
A dictionary mapping user-defined keys to an instance of
:class:`oauth2client.client.Credentials`.
"""
try:
credentials_file.seek(0)
data = json.load(credentials_file)
except Exception:
logger.warning(
'Credentials file could not be loaded, will ignore and '
'overwrite.')
return {}
if data.get('file_version') != 2:
logger.warning(
'Credentials file is not version 2, will ignore and '
'overwrite.')
return {}
credentials = {}
for key, encoded_credential in iteritems(data.get('credentials', {})):
try:
credential_json = base64.b64decode(encoded_credential)
credential = client.Credentials.new_from_json(credential_json)
credentials[key] = credential
        except Exception:
logger.warning(
'Invalid credential {0} in file, ignoring.'.format(key))
return credentials
def _write_credentials_file(credentials_file, credentials):
"""Writes credentials to a file.
Refer to :func:`_load_credentials_file` for the format.
Args:
credentials_file: An open file handle, must be read/write.
credentials: A dictionary mapping user-defined keys to an instance of
:class:`oauth2client.client.Credentials`.
"""
data = {'file_version': 2, 'credentials': {}}
for key, credential in iteritems(credentials):
credential_json = credential.to_json()
encoded_credential = _helpers._from_bytes(base64.b64encode(
_helpers._to_bytes(credential_json)))
data['credentials'][key] = encoded_credential
credentials_file.seek(0)
json.dump(data, credentials_file)
credentials_file.truncate()
class _MultiprocessStorageBackend(object):
"""Thread-local backend for multiprocess storage.
Each process has only one instance of this backend per file. All threads
share a single instance of this backend. This ensures that all threads
use the same thread lock and process lock when accessing the file.
"""
def __init__(self, filename):
self._file = None
self._filename = filename
self._process_lock = fasteners.InterProcessLock(
'{0}.lock'.format(filename))
self._thread_lock = threading.Lock()
self._read_only = False
self._credentials = {}
def _load_credentials(self):
"""(Re-)loads the credentials from the file."""
if not self._file:
return
loaded_credentials = _load_credentials_file(self._file)
self._credentials.update(loaded_credentials)
logger.debug('Read credential file')
def _write_credentials(self):
if self._read_only:
logger.debug('In read-only mode, not writing credentials.')
return
_write_credentials_file(self._file, self._credentials)
logger.debug('Wrote credential file {0}.'.format(self._filename))
def acquire_lock(self):
self._thread_lock.acquire()
locked = self._process_lock.acquire(timeout=INTERPROCESS_LOCK_DEADLINE)
if locked:
_create_file_if_needed(self._filename)
self._file = open(self._filename, 'r+')
self._read_only = False
else:
            logger.warning(
'Failed to obtain interprocess lock for credentials. '
'If a credential is being refreshed, other processes may '
'not see the updated access token and refresh as well.')
if os.path.exists(self._filename):
self._file = open(self._filename, 'r')
else:
self._file = None
self._read_only = True
self._load_credentials()
def release_lock(self):
if self._file is not None:
self._file.close()
self._file = None
if not self._read_only:
self._process_lock.release()
self._thread_lock.release()
def _refresh_predicate(self, credentials):
if credentials is None:
return True
elif credentials.invalid:
return True
elif credentials.access_token_expired:
return True
else:
return False
def locked_get(self, key):
# Check if the credential is already in memory.
credentials = self._credentials.get(key, None)
# Use the refresh predicate to determine if the entire store should be
# reloaded. This basically checks if the credentials are invalid
# or expired. This covers the situation where another process has
# refreshed the credentials and this process doesn't know about it yet.
# In that case, this process won't needlessly refresh the credentials.
if self._refresh_predicate(credentials):
self._load_credentials()
credentials = self._credentials.get(key, None)
return credentials
def locked_put(self, key, credentials):
self._load_credentials()
self._credentials[key] = credentials
self._write_credentials()
def locked_delete(self, key):
self._load_credentials()
self._credentials.pop(key, None)
self._write_credentials()
def _get_backend(filename):
"""A helper method to get or create a backend with thread locking.
This ensures that only one backend is used per-file per-process, so that
thread and process locks are appropriately shared.
Args:
filename: The full path to the credential storage file.
Returns:
An instance of :class:`_MultiprocessStorageBackend`.
"""
filename = os.path.abspath(filename)
with _backends_lock:
if filename not in _backends:
_backends[filename] = _MultiprocessStorageBackend(filename)
return _backends[filename]
class MultiprocessFileStorage(client.Storage):
"""Multiprocess file credential storage.
Args:
filename: The path to the file where credentials will be stored.
key: An arbitrary string used to uniquely identify this set of
credentials. For example, you may use the user's ID as the key or
a combination of the client ID and user ID.
"""
def __init__(self, filename, key):
self._key = key
self._backend = _get_backend(filename)
def acquire_lock(self):
self._backend.acquire_lock()
def release_lock(self):
self._backend.release_lock()
def locked_get(self):
"""Retrieves the current credentials from the store.
Returns:
An instance of :class:`oauth2client.client.Credentials` or `None`.
"""
credential = self._backend.locked_get(self._key)
if credential is not None:
credential.set_store(self)
return credential
def locked_put(self, credentials):
"""Writes the given credentials to the store.
Args:
credentials: an instance of
:class:`oauth2client.client.Credentials`.
"""
return self._backend.locked_put(self._key, credentials)
def locked_delete(self):
"""Deletes the current credentials from the store."""
return self._backend.locked_delete(self._key)
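# Illustrative sketch, not part of the original module: a minimal
# "load or create" helper that follows the usage described in the module
# docstring above. `client_id`, `user_id`, and `make_credentials` are
# assumptions made for the example; `make_credentials` stands in for whatever
# OAuth2 flow produces an oauth2client Credentials object in your application.
def _example_load_or_create(filename, client_id, user_id, make_credentials):
    """Returns stored credentials for the key, creating them if needed."""
    key = '{0}-{1}'.format(client_id, user_id)
    storage = MultiprocessFileStorage(filename, key)
    credentials = storage.get()
    if credentials is None or credentials.invalid:
        credentials = make_credentials()
        storage.put(credentials)
    # Attach the store so future refreshes are written back to the shared file.
    credentials.set_store(storage)
    return credentials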
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""for_loop and pfor ops."""
# pylint: disable=g-direct-tensorflow-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops.parallel_for.pfor import PFor
from tensorflow.python.ops.parallel_for.pfor import PForConfig
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import tf_export
def for_loop(loop_fn, loop_fn_dtypes, iters, parallel_iterations=None):
"""Runs `loop_fn` `iters` times and stacks the outputs.
Runs `loop_fn` `iters` times, with input values from 0 to `iters - 1`, and
stacks corresponding outputs of the different runs.
Args:
loop_fn: A function that takes an int32 scalar tf.Tensor object representing
the iteration number, and returns a possibly nested structure of tensor
objects. The shape of these outputs should not depend on the input.
loop_fn_dtypes: dtypes for the outputs of loop_fn.
iters: Number of iterations for which to run loop_fn.
parallel_iterations: The number of iterations that can be dispatched in
parallel. This knob can be used to control the total memory usage.
Returns:
Returns a nested structure of stacked output tensor objects with the same
nested structure as the output of `loop_fn`.
"""
flat_loop_fn_dtypes = nest.flatten(loop_fn_dtypes)
is_none_list = []
def while_body(i, *ta_list):
"""Body of while loop."""
fn_output = nest.flatten(loop_fn(i))
if len(fn_output) != len(flat_loop_fn_dtypes):
raise ValueError(
"Number of expected outputs, %d, does not match the number of "
"actual outputs, %d, from loop_fn" % (len(flat_loop_fn_dtypes),
len(fn_output)))
outputs = []
del is_none_list[:]
is_none_list.extend([x is None for x in fn_output])
for out, ta in zip(fn_output, ta_list):
# TODO(agarwal): support returning Operation objects from loop_fn.
if out is not None:
# out may be a ref tensor, wrap it in identity to get a non-ref tensor.
ta = ta.write(i, array_ops.expand_dims(out, 0))
outputs.append(ta)
return tuple([i + 1] + outputs)
if parallel_iterations is not None:
extra_args = {"parallel_iterations": parallel_iterations}
else:
extra_args = {}
ta_list = control_flow_ops.while_loop(
lambda i, *ta: i < iters,
while_body,
[0] + [tensor_array_ops.TensorArray(dtype.base_dtype, iters)
for dtype in flat_loop_fn_dtypes],
**extra_args)[1:]
# TODO(rachelim): enable this for sparse tensors
output = [None if is_none else ta.concat()
for ta, is_none in zip(ta_list, is_none_list)]
assert len(output) in (0, len(flat_loop_fn_dtypes))
if not output:
# This may happen for the case where iters == 0.
return None
else:
return nest.pack_sequence_as(loop_fn_dtypes, output)
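# Usage sketch added for illustration (not part of the original file):
# `for_loop` runs the callable once per iteration inside a tf.while_loop and
# stacks the per-iteration outputs, so squaring the loop index yields a
# rank-1 int32 tensor.
def _example_for_loop_squares():
  """Returns a tensor equal to [0, 1, 4, 9] (illustrative sketch)."""
  return for_loop(lambda i: math_ops.square(i), dtypes.int32, 4)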
def _flatten_first_two_dims(x):
"""Flattens the first two dimensions of x into a single dimension."""
old_shape = array_ops.shape(x)
new_shape = array_ops.concat([[old_shape[0] * old_shape[1]], old_shape[2:]],
axis=0)
return array_ops.reshape(x, new_shape)
PFOR_CONFIG_ARG = "pfor_config"
def _is_under_xla_context():
"""Check if we are currently inside an XLA compile context."""
g = ops.get_default_graph()
while g is not None:
control_flow_context = g._get_control_flow_context() # pylint: disable=protected-access
while control_flow_context is not None:
if control_flow_context.IsXLAContext():
return True
else:
control_flow_context = control_flow_context.outer_context
# If g is a FuncGraph, get its outer_graph.
g = getattr(g, "outer_graph", None)
return False
def pfor(loop_fn, iters, parallel_iterations=None):
"""Equivalent to running `loop_fn` `iters` times and stacking the outputs.
`pfor` has functionality similar to `for_loop`, i.e. running `loop_fn` `iters`
times, with input from 0 to `iters - 1`, and stacking corresponding output of
each iteration. However the implementation does not use a tf.while_loop.
Instead it adds new operations to the graph that collectively compute the same
value as what running `loop_fn` in a loop would compute.
This is an experimental feature and currently has a lot of limitations:
- There should be no data dependency between the different iterations. For
example, a future iteration should not depend on a value or side-effect of
a previous iteration.
- Stateful kernels may mostly not be supported since these often imply a
data dependency or ordering of the iterations. We do support a limited set
of such stateful kernels though (like RandomFoo, Variable operations like
reads, etc).
- Conversion works only on a limited set of kernels for which a converter
has been registered.
- loop_fn has limited support for control flow operations. tf.cond in
particular is not supported.
- `loop_fn` should return nested structure of Tensors or Operations. However
if an Operation is returned, it should have zero outputs.
- The shape and dtype of `loop_fn` outputs should not depend on the input
to loop_fn.
Args:
loop_fn: A function that takes an int32 scalar tf.Tensor object representing
the iteration number, and optionally a keyword argument `pfor_config` set
to a PForConfig object. It returns a possibly nested structure of Tensor
or Operation objects. Note that if setting `parallel_iterations` argument
to something other than None, `loop_fn` may be called more than once
during graph construction. So it may need to avoid mutating global state.
iters: Number of iterations for which to run loop_fn.
parallel_iterations: A knob to control how many iterations are vectorized
and dispatched in parallel. The default value of None corresponds to
vectorizing all the iterations. If `parallel_iterations` is smaller than
`iters`, then chunks of at most that many iterations are dispatched in
sequence. This knob can be used to control the total memory usage.
Returns:
Returns a nested structure of stacked tensor objects with the same nested
structure as the output of `loop_fn`.
Raises:
ValueError: If parallel_iterations is not None and not an integer > 1.
"""
def f():
return _pfor_impl(loop_fn, iters, parallel_iterations=parallel_iterations)
# Note that we wrap into a tf.function if in eager execution mode or under
# XLA compilation. The latter is so that we don't compile operations like
# tf.placeholder that are created by the loop body.
if context.executing_eagerly() or _is_under_xla_context():
f = function.defun(f)
return f()
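# Usage sketch added for illustration (not part of the original file): `pfor`
# vectorizes the loop body instead of running it sequentially, so a body that
# reads row `i` of `x` and reduces it becomes a single batched computation
# over all rows.
def _example_pfor_row_sums(x):
  """Returns the per-row sums of a rank-2 tensor `x` (illustrative sketch)."""
  return pfor(lambda i: math_ops.reduce_sum(array_ops.gather(x, i)),
              array_ops.shape(x)[0])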
def _loop_fn_has_config(loop_fn):
"""Test if `loop_fn` has a `pfor_config` argument."""
if tf_inspect.isfunction(loop_fn):
argspec = tf_inspect.getargspec(loop_fn)
return PFOR_CONFIG_ARG in argspec.args
elif isinstance(loop_fn, functools.partial):
fn = loop_fn.func
argspec = tf_inspect.getargspec(fn)
return (PFOR_CONFIG_ARG in argspec.args and
PFOR_CONFIG_ARG not in loop_fn.keywords)
else:
loop_class = tf_decorator.unwrap(loop_fn)[1]
if not hasattr(loop_class, "__call__"):
raise ValueError("loop_fn object did not have a __call__ method")
argspec = tf_inspect.getargspec(loop_class.__call__)
return PFOR_CONFIG_ARG in argspec.args
def _pfor_impl(loop_fn, iters, parallel_iterations=None, pfor_config=None):
"""Implementation of pfor."""
loop_fn_has_config = _loop_fn_has_config(loop_fn)
existing_ops = set(ops.get_default_graph().get_operations())
# Run the loop body
with ops.name_scope("loop_body"):
loop_var = array_ops.placeholder(dtypes.int32, shape=[])
if loop_fn_has_config:
if pfor_config is None:
pfor_config = PForConfig()
pfor_config._set_iters(iters) # pylint: disable=protected-access
loop_fn_outputs = loop_fn(loop_var, **{PFOR_CONFIG_ARG: pfor_config})
else:
assert pfor_config is None
loop_fn_outputs = loop_fn(loop_var)
# Convert outputs to Tensor if needed.
tmp_loop_fn_outputs = []
for loop_fn_output in nest.flatten(loop_fn_outputs):
if (loop_fn_output is not None and not isinstance(
loop_fn_output,
(ops.Operation, ops.Tensor, sparse_tensor.SparseTensor))):
if isinstance(loop_fn_output, indexed_slices.IndexedSlices):
logging.warn("Converting %s to a dense representation may make it slow."
" Alternatively, output the indices and values of the"
" IndexedSlices separately, and handle the vectorized"
" outputs directly." % loop_fn_output)
loop_fn_output = ops.convert_to_tensor(loop_fn_output)
tmp_loop_fn_outputs.append(loop_fn_output)
loop_fn_outputs = nest.pack_sequence_as(loop_fn_outputs, tmp_loop_fn_outputs)
new_ops = set(ops.get_default_graph().get_operations()) - existing_ops
iters = ops.convert_to_tensor(iters)
if parallel_iterations is not None:
if parallel_iterations < 1:
raise ValueError("parallel_iterations must be None or a positive integer")
if parallel_iterations == 1:
raise ValueError("Found parallel_iterations == 1. Use for_loop instead.")
iters_value = tensor_util.constant_value(iters)
if iters_value is not None and iters_value < parallel_iterations:
parallel_iterations = None
if parallel_iterations is None:
with ops.name_scope("pfor"):
converter = PFor(loop_var, iters, new_ops, pfor_config=pfor_config)
outputs = []
for loop_fn_output in nest.flatten(loop_fn_outputs):
outputs.append(converter.convert(loop_fn_output))
return nest.pack_sequence_as(loop_fn_outputs, outputs)
else:
if pfor_config is not None and pfor_config._has_reductions(): # pylint: disable=protected-access
raise ValueError("Setting parallel_iterations currently unsupported if"
" reductions across iterations are performed.")
num_tiled_iterations = iters // parallel_iterations
num_remaining_iterations = iters % parallel_iterations
# TODO(agarwal): Avoid calling loop_fn twice. Generate the loop body inside
# a tf.function and extract the graph from there to vectorize it.
with ops.name_scope("pfor_untiled"):
converter = PFor(loop_var, num_remaining_iterations, new_ops,
pfor_config=pfor_config)
remaining_outputs = []
flattened_loop_fn_outputs = nest.flatten(loop_fn_outputs)
for loop_fn_output in flattened_loop_fn_outputs:
remaining_outputs.append(converter.convert(loop_fn_output))
with ops.name_scope("pfor_tiled"):
loop_fn_dtypes = [ops.convert_to_tensor(x).dtype
for x in flattened_loop_fn_outputs]
def tiled_loop_body(j):
offset = j * parallel_iterations + num_remaining_iterations
def tiled_loop_fn(i, pfor_config=None):
if loop_fn_has_config:
return nest.flatten(loop_fn(i + offset, pfor_config=pfor_config))
else:
return nest.flatten(loop_fn(i + offset))
return _pfor_impl(
tiled_loop_fn, parallel_iterations, pfor_config=pfor_config)
tiled_outputs = for_loop(tiled_loop_body, loop_fn_dtypes,
num_tiled_iterations, parallel_iterations=1)
tiled_outputs = [_flatten_first_two_dims(y) for y in tiled_outputs]
with ops.name_scope("pfor"):
iters_value = tensor_util.constant_value(iters)
if iters_value is None or iters_value % parallel_iterations:
outputs = control_flow_ops.cond(
math_ops.equal(num_remaining_iterations, 0),
lambda: tiled_outputs,
lambda: [array_ops.concat([x, y], axis=0)
for x, y in zip(remaining_outputs, tiled_outputs)])
else:
outputs = tiled_outputs
return nest.pack_sequence_as(loop_fn_outputs, nest.flatten(outputs))
@tf_export("vectorized_map")
def vectorized_map(fn, elems):
"""Parallel map on the list of tensors unpacked from `elems` on dimension 0.
  This method works similarly to tf.map_fn but is optimized to run much faster,
possibly with a much larger memory footprint. The speedups are obtained by
vectorization (see https://arxiv.org/pdf/1903.04243.pdf). The idea behind
vectorization is to semantically launch all the invocations of `fn` in
parallel and fuse corresponding operations across all these invocations. This
fusion is done statically at graph generation time and the generated code is
often similar in performance to a manually fused version.
Because `tf.vectorized_map` fully parallelizes the batch, this method will
generally be significantly faster than using `tf.map_fn`, especially in eager
mode. However this is an experimental feature and currently has a lot of
limitations:
- There should be no data dependency between the different semantic
invocations of `fn`, i.e. it should be safe to map the elements of the
inputs in any order.
- Stateful kernels may mostly not be supported since these often imply a
data dependency. We do support a limited set of such stateful kernels
though (like RandomFoo, Variable operations like reads, etc).
- `fn` has limited support for control flow operations. `tf.cond` in
particular is not supported.
- `fn` should return nested structure of Tensors or Operations. However
if an Operation is returned, it should have zero outputs.
- The shape and dtype of any intermediate or output tensors in the
computation of `fn` should not depend on the input to `fn`.
Examples:
```python
def outer_product(a):
return tf.tensordot(a, a, 0)
batch_size = 100
a = tf.ones((batch_size, 32, 32))
c = tf.vectorized_map(outer_product, a)
assert c.shape == (batch_size, 32, 32, 32, 32)
```
```python
# Computing per-example gradients
batch_size = 10
num_features = 32
layer = tf.keras.layers.Dense(1)
def model_fn(arg):
with tf.GradientTape() as g:
inp, label = arg
inp = tf.expand_dims(inp, 0)
label = tf.expand_dims(label, 0)
prediction = layer(inp)
loss = tf.nn.l2_loss(label - prediction)
return g.gradient(loss, (layer.kernel, layer.bias))
inputs = tf.random_uniform([batch_size, num_features])
labels = tf.random_uniform([batch_size, 1])
per_example_gradients = tf.vectorized_map(model_fn, (inputs, labels))
assert per_example_gradients[0].shape == (batch_size, num_features, 1)
assert per_example_gradients[1].shape == (batch_size, 1)
```
Args:
fn: The callable to be performed. It accepts one argument, which will have
the same (possibly nested) structure as `elems`, and returns a possibly
nested structure of Tensors and Operations, which may be different than
the structure of `elems`.
elems: A tensor or (possibly nested) sequence of tensors, each of which will
be unpacked along their first dimension. The nested sequence of the
resulting slices will be mapped over by `fn`.
Returns:
A tensor or (possibly nested) sequence of tensors. Each tensor packs the
results of applying fn to tensors unpacked from elems along the first
dimension, from first to last.
"""
def loop_fn(i):
gathered_elems = nest.map_structure(lambda x: array_ops.gather(x, i), elems)
return fn(gathered_elems)
batch_size = None
first_elem_shape = nest.flatten(elems)[0].shape
if first_elem_shape.rank is not None:
batch_size = first_elem_shape.as_list()[0]
if batch_size is None:
    batch_size = array_ops.shape(nest.flatten(elems)[0])[0]
return pfor(loop_fn, batch_size)
|
|
from flask import Blueprint
from CTFd.models import (
ChallengeFiles,
Challenges,
Fails,
Flags,
Hints,
Solves,
Tags,
db,
)
from CTFd.plugins import register_plugin_assets_directory
from CTFd.plugins.flags import FlagException, get_flag_class
from CTFd.utils.uploads import delete_file
from CTFd.utils.user import get_ip
class BaseChallenge(object):
id = None
name = None
templates = {}
scripts = {}
challenge_model = Challenges
@classmethod
def create(cls, request):
"""
This method is used to process the challenge creation request.
:param request:
:return:
"""
data = request.form or request.get_json()
challenge = cls.challenge_model(**data)
db.session.add(challenge)
db.session.commit()
return challenge
@classmethod
def read(cls, challenge):
"""
        This method is used to access the data of a challenge in a format processable by the front end.
:param challenge:
:return: Challenge object, data dictionary to be returned to the user
"""
data = {
"id": challenge.id,
"name": challenge.name,
"value": challenge.value,
"description": challenge.description,
"category": challenge.category,
"state": challenge.state,
"max_attempts": challenge.max_attempts,
"type": challenge.type,
"type_data": {
"id": cls.id,
"name": cls.name,
"templates": cls.templates,
"scripts": cls.scripts,
},
}
return data
@classmethod
def update(cls, challenge, request):
"""
This method is used to update the information associated with a challenge. This should be kept strictly to the
Challenges table and any child tables.
:param challenge:
:param request:
:return:
"""
data = request.form or request.get_json()
for attr, value in data.items():
setattr(challenge, attr, value)
db.session.commit()
return challenge
@classmethod
def delete(cls, challenge):
"""
This method is used to delete the resources used by a challenge.
:param challenge:
:return:
"""
Fails.query.filter_by(challenge_id=challenge.id).delete()
Solves.query.filter_by(challenge_id=challenge.id).delete()
Flags.query.filter_by(challenge_id=challenge.id).delete()
files = ChallengeFiles.query.filter_by(challenge_id=challenge.id).all()
for f in files:
delete_file(f.id)
ChallengeFiles.query.filter_by(challenge_id=challenge.id).delete()
Tags.query.filter_by(challenge_id=challenge.id).delete()
Hints.query.filter_by(challenge_id=challenge.id).delete()
Challenges.query.filter_by(id=challenge.id).delete()
cls.challenge_model.query.filter_by(id=challenge.id).delete()
db.session.commit()
@classmethod
def attempt(cls, challenge, request):
"""
This method is used to check whether a given input is right or wrong. It does not make any changes and should
return a boolean for correctness and a string to be shown to the user. It is also in charge of parsing the
user's input from the request itself.
:param challenge: The Challenge object from the database
:param request: The request the user submitted
:return: (boolean, string)
"""
data = request.form or request.get_json()
submission = data["submission"].strip()
flags = Flags.query.filter_by(challenge_id=challenge.id).all()
for flag in flags:
try:
if get_flag_class(flag.type).compare(flag, submission):
return True, "Correct"
except FlagException as e:
return False, e.message
return False, "Incorrect"
@classmethod
def solve(cls, user, team, challenge, request):
"""
This method is used to insert Solves into the database in order to mark a challenge as solved.
        :param user: The User object from the database
        :param team: The Team object from the database
        :param challenge: The Challenge object from the database
:param request: The request the user submitted
:return:
"""
data = request.form or request.get_json()
submission = data["submission"].strip()
solve = Solves(
user_id=user.id,
team_id=team.id if team else None,
challenge_id=challenge.id,
ip=get_ip(req=request),
provided=submission,
)
db.session.add(solve)
db.session.commit()
@classmethod
def fail(cls, user, team, challenge, request):
"""
This method is used to insert Fails into the database in order to mark an answer incorrect.
        :param user: The User object from the database
        :param team: The Team object from the database
        :param challenge: The Challenge object from the database
:param request: The request the user submitted
:return:
"""
data = request.form or request.get_json()
submission = data["submission"].strip()
wrong = Fails(
user_id=user.id,
team_id=team.id if team else None,
challenge_id=challenge.id,
ip=get_ip(request),
provided=submission,
)
db.session.add(wrong)
db.session.commit()
class CTFdStandardChallenge(BaseChallenge):
id = "standard" # Unique identifier used to register challenges
name = "standard" # Name of a challenge type
templates = { # Templates used for each aspect of challenge editing & viewing
"create": "/plugins/challenges/assets/create.html",
"update": "/plugins/challenges/assets/update.html",
"view": "/plugins/challenges/assets/view.html",
}
scripts = { # Scripts that are loaded when a template is loaded
"create": "/plugins/challenges/assets/create.js",
"update": "/plugins/challenges/assets/update.js",
"view": "/plugins/challenges/assets/view.js",
}
# Route at which files are accessible. This must be registered using register_plugin_assets_directory()
route = "/plugins/challenges/assets/"
# Blueprint used to access the static_folder directory.
blueprint = Blueprint(
"standard", __name__, template_folder="templates", static_folder="assets"
)
challenge_model = Challenges
def get_chal_class(class_id):
"""
Utility function used to get the corresponding class from a class ID.
:param class_id: String representing the class ID
:return: Challenge class
"""
cls = CHALLENGE_CLASSES.get(class_id)
if cls is None:
raise KeyError
return cls
"""
Global dictionary used to hold all the Challenge Type classes used by CTFd. Insert into this dictionary to register
your Challenge Type.
"""
CHALLENGE_CLASSES = {"standard": CTFdStandardChallenge}
def load(app):
register_plugin_assets_directory(app, base_path="/plugins/challenges/assets/")
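# Illustrative sketch, not part of the original plugin: how a separate plugin
# could register its own challenge type through CHALLENGE_CLASSES, as the
# comment above describes. `_ExampleCustomChallenge`, the "custom" identifier
# and the asset path are assumptions made for the example.
class _ExampleCustomChallenge(BaseChallenge):
    id = "custom"  # Unique identifier used to register this challenge type
    name = "custom"
    challenge_model = Challenges
def _example_load(app):
    """Sketch of a plugin load() hook registering the custom type."""
    CHALLENGE_CLASSES["custom"] = _ExampleCustomChallenge
    register_plugin_assets_directory(app, base_path="/plugins/my_plugin/assets/")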
|
|
import base64
import json
import os
import platform
from tempfile import NamedTemporaryFile
from unittest import TestCase
import pytest
import responses
from semantic_release.errors import ImproperConfigurationError
from semantic_release.hvcs import (
Gitea,
Github,
Gitlab,
check_build_status,
check_token,
get_hvcs,
post_changelog,
)
from . import mock, wrapped_config_get
from .mocks.mock_gitlab import mock_gitlab
temp_dir = (
os.path.join(os.path.abspath(os.path.dirname(__file__)), "tmp")
if platform.system() == "Windows"
else "/tmp/"
)
@mock.patch("semantic_release.hvcs.config.get", wrapped_config_get(hvcs="github"))
def test_get_hvcs_should_return_github():
assert get_hvcs() == Github
@mock.patch("semantic_release.hvcs.config.get", wrapped_config_get(hvcs="gitlab"))
def test_get_hvcs_should_return_gitlab():
assert get_hvcs() == Gitlab
@mock.patch("semantic_release.hvcs.config.get", wrapped_config_get(hvcs="gitea"))
def test_get_hvcs_should_return_gitea():
assert get_hvcs() == Gitea
@mock.patch("semantic_release.hvcs.config.get", lambda *x: "doesnotexist")
def test_get_hvcs_should_raise_improper_config():
pytest.raises(ImproperConfigurationError, get_hvcs)
@mock.patch("semantic_release.hvcs.Github.check_build_status")
def test_check_build_status(mock_github_helper):
check_build_status("owner", "name", "ref")
mock_github_helper.assert_called_once_with("owner", "name", "ref")
@mock.patch("os.environ", {"GH_TOKEN": "token"})
def test_check_token_should_return_true():
assert check_token() is True
@mock.patch("os.environ", {})
def test_check_token_should_return_false():
assert check_token() is False
###############################
# test custom token variables #
###############################
@mock.patch("os.environ", {"CUSTOM_GH_TOKEN": "token"})
@mock.patch(
"semantic_release.hvcs.config.get",
wrapped_config_get(github_token_var="CUSTOM_GH_TOKEN"),
)
def test_check_custom_github_token_var_should_return_true():
assert check_token() is True
@mock.patch("os.environ", {"GH_TOKEN": "token"})
@mock.patch(
"semantic_release.hvcs.config.get",
wrapped_config_get(github_token_var="CUSTOM_TOKEN"),
)
def test_check_custom_github_token_should_return_false():
assert check_token() is False
@mock.patch("os.environ", {"GITEA_TOKEN": "token"})
@mock.patch(
"semantic_release.hvcs.config.get",
wrapped_config_get(gitea_token_var="CUSTOM_TOKEN"),
)
def test_check_custom_gitea_token_should_return_false():
assert check_token() is False
@mock.patch("os.environ", {"GITEA_CUSTOM_TOKEN": "token"})
@mock.patch(
"semantic_release.hvcs.config.get",
wrapped_config_get(gitea_token_var="GITEA_CUSTOM_TOKEN"),
)
def test_check_custom_gitea_token_var_should_return_false():
assert check_token() is False
@mock.patch("os.environ", {"CUSTOM_GL_TOKEN": "token"})
@mock.patch(
"semantic_release.hvcs.config.get",
wrapped_config_get(github_token_var="CUSTOM_GL_TOKEN"),
)
def test_check_custom_gitlab_token_var_should_return_true():
assert check_token() is True
@mock.patch("os.environ", {"GL_TOKEN": "token"})
@mock.patch(
"semantic_release.hvcs.config.get",
wrapped_config_get(gitlab_token_var="CUSTOM_TOKEN"),
)
def test_check_custom_gitlab_token_should_return_false():
assert check_token() is False
@pytest.mark.parametrize(
"hvcs,hvcs_domain,expected_domain,expected_api_url,ci_server_host,hvcs_api_domain",
[
("github", None, "github.com", "https://api.github.com", None, None),
("gitlab", None, "gitlab.com", "https://gitlab.com", None, None),
(
"gitea",
"git.grassrootseconomics.net",
"git.grassrootseconomics.net",
"https://git.grassrootseconomics.net/api/v1",
None,
None,
),
(
"github",
"github.example.com",
"github.example.com",
"https://api.github.com",
None,
None,
),
(
"github",
"github.example.com",
"github.example.com",
"https://api.github.enterprise",
None,
"api.github.enterprise",
),
(
"gitlab",
"example.gitlab.com",
"example.gitlab.com",
"https://example.gitlab.com",
None,
None,
),
(
"gitlab",
"example.gitlab.com/path",
"example.gitlab.com/path",
"https://example.gitlab.com/path",
None,
None,
),
(
"gitlab",
"example2.gitlab.com",
"example2.gitlab.com",
"https://example2.gitlab.com",
"ciserverhost.gitlab.com",
None,
),
],
)
@mock.patch("os.environ", {"GL_TOKEN": "token"})
def test_get_domain_should_have_expected_domain(
hvcs,
hvcs_domain,
expected_domain,
expected_api_url,
ci_server_host,
hvcs_api_domain,
):
with mock.patch(
"semantic_release.hvcs.config.get",
wrapped_config_get(
hvcs_domain=hvcs_domain, hvcs=hvcs, hvcs_api_domain=hvcs_api_domain
),
):
with mock.patch(
"os.environ",
{
"GL_TOKEN": "token",
"GH_TOKEN": "token",
"GITEA_TOKEN": "token",
"CI_SERVER_HOST": ci_server_host,
},
):
assert get_hvcs().domain() == expected_domain
assert get_hvcs().api_url() == expected_api_url
@mock.patch("semantic_release.hvcs.config.get", wrapped_config_get(hvcs="github"))
@mock.patch(
"os.environ",
{
"GITHUB_API_URL": "api.github.enterprise",
"GITHUB_SERVER_URL": "github.enterprise",
},
)
def test_ghe_domain_should_be_retrieved_from_env():
assert get_hvcs().domain() == "github.enterprise"
assert get_hvcs().api_url() == "https://api.github.enterprise"
@mock.patch("semantic_release.hvcs.config.get", wrapped_config_get(hvcs="gitlab"))
@mock.patch("os.environ", {"GL_TOKEN": "token"})
def test_get_token():
assert get_hvcs().token() == "token"
class GithubCheckBuildStatusTests(TestCase):
url = (
"https://api.github.com/repos/relekang/rmoq/commits/"
"6dcb09b5b57875f334f61aebed695e2e4193db5e/status"
)
def get_response(self, status):
return json.dumps(
{
"state": status,
"sha": "6dcb09b5b57875f334f61aebed695e2e4193db5e",
"total_count": 2,
}
)
@responses.activate
def test_should_return_false_if_pending(self):
responses.add(
responses.GET,
self.url,
body=self.get_response("pending"),
content_type="application/json",
)
self.assertFalse(
Github.check_build_status(
"relekang", "rmoq", "6dcb09b5b57875f334f61aebed695e2e4193db5e"
)
)
@responses.activate
def test_should_return_false_if_failure(self):
responses.add(
responses.GET,
self.url,
body=self.get_response("failure"),
content_type="application/json",
)
self.assertFalse(
Github.check_build_status(
"relekang", "rmoq", "6dcb09b5b57875f334f61aebed695e2e4193db5e"
)
)
@responses.activate
def test_should_return_true_if_success(self):
responses.add(
responses.GET,
self.url,
body=self.get_response("success"),
content_type="application/json",
)
self.assertTrue(
Github.check_build_status(
"relekang", "rmoq", "6dcb09b5b57875f334f61aebed695e2e4193db5e"
)
)
class GiteaCheckBuildStatusTests(TestCase):
url = "https://gitea.com/api/v1/repos/gitea/tea/statuses/268aa061795d53a1914b734c479b7c1f62cfd7af"
def get_response(self, status):
return json.dumps(
{
"status": status,
}
)
@responses.activate
def test_should_return_false_if_pending(self):
responses.add(
responses.GET,
self.url,
body=self.get_response("pending"),
content_type="application/json",
)
self.assertFalse(
Gitea.check_build_status(
"gitea", "tea", "268aa061795d53a1914b734c479b7c1f62cfd7af"
)
)
@responses.activate
def test_should_return_false_if_failure(self):
responses.add(
responses.GET,
self.url,
body=self.get_response("failure"),
content_type="application/json",
)
self.assertFalse(
Gitea.check_build_status(
"gitea", "tea", "268aa061795d53a1914b734c479b7c1f62cfd7af"
)
)
@responses.activate
def test_should_return_true_if_success(self):
responses.add(
responses.GET,
self.url,
body=self.get_response("success"),
content_type="application/json",
)
self.assertTrue(
Gitea.check_build_status(
"gitea", "tea", "268aa061795d53a1914b734c479b7c1f62cfd7af"
)
)
class GitlabCheckBuildStatusTests(TestCase):
@mock_gitlab("pending")
def test_should_return_false_if_pending(self, mock_auth, mock_project):
self.assertFalse(check_build_status("owner", "repo", "my_ref"))
@mock_gitlab("failure")
def test_should_return_false_if_failure(self, mock_auth, mock_project):
self.assertFalse(check_build_status("owner", "repo", "my_ref"))
@mock_gitlab("allow_failure")
def test_should_return_true_if_allow_failure(self, mock_auth, mock_project):
self.assertTrue(check_build_status("owner", "repo", "my_ref"))
@mock_gitlab("success")
def test_should_return_true_if_success(self, mock_auth, mock_project):
self.assertTrue(check_build_status("owner", "repo", "my_ref"))
class GithubReleaseTests(TestCase):
url = "https://api.github.com/repos/relekang/rmoq/releases"
edit_url = "https://api.github.com/repos/relekang/rmoq/releases/1"
get_url = "https://api.github.com/repos/relekang/rmoq/releases/tags/v1.0.0"
asset_url = "https://uploads.github.com/repos/relekang/rmoq/releases/1/assets"
asset_url_params = (
"https://uploads.github.com/repos/relekang/rmoq/releases/1/assets"
"?name=testupload.md&label=Dummy+file"
)
asset_no_extension_url_params = (
"https://uploads.github.com/repos/relekang/rmoq/releases/1/assets"
"?name=testupload-no-extension&label=Dummy+file+no+extension"
)
dist_asset_url_params = (
"https://uploads.github.com/repos/relekang/rmoq/releases/1/assets"
"?name=testupload.md"
)
@responses.activate
@mock.patch("semantic_release.hvcs.Github.token", return_value="super-token")
def test_should_post_changelog_using_github_token(self, mock_token):
with NamedTemporaryFile("w") as netrc_file:
netrc_file.write("machine api.github.com\n")
netrc_file.write("login username\n")
netrc_file.write("password password\n")
netrc_file.flush()
def request_callback(request):
payload = json.loads(request.body)
self.assertEqual(payload["tag_name"], "v1.0.0")
self.assertEqual(payload["body"], "text")
self.assertEqual(payload["draft"], False)
self.assertEqual(payload["prerelease"], False)
self.assertEqual(
"token super-token", request.headers.get("Authorization")
)
return 201, {}, json.dumps({})
responses.add_callback(
responses.POST,
self.url,
callback=request_callback,
content_type="application/json",
)
with mock.patch.dict(os.environ, {"NETRC": netrc_file.name}):
status = Github.post_release_changelog(
"relekang", "rmoq", "1.0.0", "text"
)
self.assertTrue(status)
@responses.activate
@mock.patch("semantic_release.hvcs.Github.token", return_value=None)
def test_should_post_changelog_using_netrc(self, mock_token):
with NamedTemporaryFile("w") as netrc_file:
netrc_file.write("machine api.github.com\n")
netrc_file.write("login username\n")
netrc_file.write("password password\n")
netrc_file.flush()
def request_callback(request):
payload = json.loads(request.body)
self.assertEqual(payload["tag_name"], "v1.0.0")
self.assertEqual(payload["body"], "text")
self.assertEqual(payload["draft"], False)
self.assertEqual(payload["prerelease"], False)
self.assertEqual(
"Basic "
+ base64.encodebytes(b"username:password").decode("ascii").strip(),
request.headers.get("Authorization"),
)
return 201, {}, json.dumps({})
responses.add_callback(
responses.POST,
self.url,
callback=request_callback,
content_type="application/json",
)
with mock.patch.dict(os.environ, {"NETRC": netrc_file.name}):
status = Github.post_release_changelog(
"relekang", "rmoq", "1.0.0", "text"
)
self.assertTrue(status)
@responses.activate
def test_should_return_false_status_if_it_failed(self):
responses.add(
responses.POST,
self.url,
status=400,
body="{}",
content_type="application/json",
)
responses.add(
responses.GET,
self.get_url,
status=200,
body='{"id": 1}',
content_type="application/json",
)
responses.add(
responses.POST,
self.edit_url,
status=400,
body="{}",
content_type="application/json",
)
self.assertFalse(
Github.post_release_changelog("relekang", "rmoq", "1.0.0", "text")
)
@responses.activate
@mock.patch("semantic_release.hvcs.Github.token", return_value="super-token")
def test_should_upload_asset(self, mock_token):
# Create temporary file to upload
dummy_file_path = os.path.join(temp_dir, "testupload.md")
os.makedirs(os.path.dirname(dummy_file_path), exist_ok=True)
dummy_content = "# Test File\n\n*Dummy asset for testing uploads.*"
with open(dummy_file_path, "w") as dummy_file:
dummy_file.write(dummy_content)
def request_callback(request):
self.assertEqual(request.body.decode().replace("\r\n", "\n"), dummy_content)
self.assertEqual(request.url, self.asset_url_params)
self.assertEqual(request.headers["Content-Type"], "text/markdown")
self.assertEqual("token super-token", request.headers.get("Authorization"))
return 201, {}, json.dumps({})
responses.add_callback(
responses.POST, self.asset_url, callback=request_callback
)
status = Github.upload_asset(
"relekang", "rmoq", 1, dummy_file_path, "Dummy file"
)
self.assertTrue(status)
# Remove test file
os.remove(dummy_file_path)
@responses.activate
@mock.patch("semantic_release.hvcs.Github.token", return_value="super-token")
def test_should_upload_asset_with_no_extension(self, mock_token):
# Create temporary file to upload
dummy_file_path = os.path.join(temp_dir, "testupload-no-extension")
os.makedirs(os.path.dirname(dummy_file_path), exist_ok=True)
dummy_content = (
"# Test File with no extension\n\n*Dummy asset for testing uploads.*"
)
with open(dummy_file_path, "w") as dummy_file:
dummy_file.write(dummy_content)
def request_callback(request):
self.assertEqual(request.body.decode().replace("\r\n", "\n"), dummy_content)
self.assertEqual(request.url, self.asset_no_extension_url_params)
self.assertEqual(
request.headers["Content-Type"], "application/octet-stream"
)
self.assertEqual("token super-token", request.headers["Authorization"])
return 201, {}, json.dumps({})
responses.add_callback(
responses.POST, self.asset_url, callback=request_callback
)
status = Github.upload_asset(
"relekang", "rmoq", 1, dummy_file_path, "Dummy file no extension"
)
self.assertTrue(status)
# Remove test file
os.remove(dummy_file_path)
@responses.activate
@mock.patch("semantic_release.hvcs.Github.token", return_value="super-token")
def test_should_upload_dists(self, mock_token):
# Create temporary file to upload
dummy_file_path = os.path.join(temp_dir, "dist", "testupload.md")
os.makedirs(os.path.dirname(dummy_file_path), exist_ok=True)
dummy_content = "# Test File\n\n*Dummy asset for testing uploads.*"
with open(dummy_file_path, "w") as dummy_file:
dummy_file.write(dummy_content)
def request_callback(request):
self.assertEqual(request.body.decode().replace("\r\n", "\n"), dummy_content)
self.assertEqual(request.url, self.dist_asset_url_params)
self.assertEqual(request.headers["Content-Type"], "text/markdown")
self.assertEqual("token super-token", request.headers.get("Authorization"))
return 201, {}, json.dumps({})
responses.add(
responses.GET,
self.get_url,
status=200,
body='{"id": 1}',
content_type="application/json",
)
responses.add_callback(
responses.POST, self.asset_url, callback=request_callback
)
status = Github.upload_dists(
"relekang", "rmoq", "1.0.0", os.path.dirname(dummy_file_path)
)
self.assertTrue(status)
# Remove test file
os.remove(dummy_file_path)
class GiteaReleaseTests(TestCase):
url = "https://gitea.com/api/v1/repos/gitea/tea/releases"
edit_url = "https://gitea.com/api/v1/repos/gitea/tea/releases/1"
get_url = "https://gitea.com/api/v1/repos/gitea/tea/releases/tags/v1.0.0"
asset_url = "https://gitea.com/api/v1/repos/gitea/tea/releases/1/assets"
asset_url_params = (
"https://gitea.com/api/v1/repos/gitea/tea/releases/1/assets"
"?name=testupload.md"
)
asset_no_extension_url_params = (
"https://gitea.com/api/v1/repos/gitea/tea/releases/1/assets"
"?name=testupload-no-extension"
)
dist_asset_url_params = (
"https://gitea.com/api/v1/repos/gitea/tea/releases/1/assets"
"?name=testupload.md"
)
@responses.activate
@mock.patch("semantic_release.hvcs.Gitea.token", return_value="super-token")
def test_should_post_changelog_using_gitea_token(self, mock_token):
with NamedTemporaryFile("w") as netrc_file:
netrc_file.write("machine gitea.com\n")
netrc_file.write("login username\n")
netrc_file.write("password password\n")
netrc_file.flush()
def request_callback(request):
payload = json.loads(request.body)
self.assertEqual(payload["tag_name"], "v1.0.0")
self.assertEqual(payload["body"], "text")
self.assertEqual(payload["draft"], False)
self.assertEqual(payload["prerelease"], False)
self.assertEqual(
"token super-token", request.headers.get("Authorization")
)
return 201, {}, json.dumps({})
responses.add_callback(
responses.POST,
self.url,
callback=request_callback,
content_type="application/json",
)
with mock.patch.dict(os.environ, {"NETRC": netrc_file.name}):
status = Gitea.post_release_changelog("gitea", "tea", "1.0.0", "text")
self.assertTrue(status)
@responses.activate
@mock.patch("semantic_release.hvcs.Gitea.token", return_value=None)
def test_should_post_changelog_using_netrc(self, mock_token):
with NamedTemporaryFile("w") as netrc_file:
netrc_file.write("machine gitea.com\n")
netrc_file.write("login username\n")
netrc_file.write("password password\n")
netrc_file.flush()
def request_callback(request):
payload = json.loads(request.body)
self.assertEqual(payload["tag_name"], "v1.0.0")
self.assertEqual(payload["body"], "text")
self.assertEqual(payload["draft"], False)
self.assertEqual(payload["prerelease"], False)
self.assertEqual(
"Basic "
+ base64.encodebytes(b"username:password").decode("ascii").strip(),
request.headers.get("Authorization"),
)
return 201, {}, json.dumps({})
responses.add_callback(
responses.POST,
self.url,
callback=request_callback,
content_type="application/json",
)
with mock.patch.dict(os.environ, {"NETRC": netrc_file.name}):
status = Gitea.post_release_changelog("gitea", "tea", "1.0.0", "text")
self.assertTrue(status)
@responses.activate
def test_should_return_false_status_if_it_failed(self):
responses.add(
responses.POST,
self.url,
status=400,
body="{}",
content_type="application/json",
)
responses.add(
responses.GET,
self.get_url,
status=200,
body='{"id": 1}',
content_type="application/json",
)
responses.add(
responses.POST,
self.edit_url,
status=400,
body="{}",
content_type="application/json",
)
self.assertFalse(Gitea.post_release_changelog("gitea", "tea", "1.0.0", "text"))
@responses.activate
@mock.patch("semantic_release.hvcs.Gitea.token", return_value="super-token")
def test_should_upload_asset(self, mock_token):
# Create temporary file to upload
dummy_file_path = os.path.join(temp_dir, "testupload.md")
os.makedirs(os.path.dirname(dummy_file_path), exist_ok=True)
dummy_content = "# Test File\n\n*Dummy asset for testing uploads.*"
with open(dummy_file_path, "w") as dummy_file:
dummy_file.write(dummy_content)
def request_callback(request):
self.assertTrue(dummy_content in request.body.decode())
self.assertEqual(request.url, self.asset_url_params)
self.assertTrue("multipart/form-data" in request.headers["Content-Type"])
self.assertEqual("token super-token", request.headers.get("Authorization"))
return 201, {}, json.dumps({})
responses.add_callback(
responses.POST, self.asset_url, callback=request_callback
)
status = Gitea.upload_asset("gitea", "tea", 1, dummy_file_path, "Dummy file")
self.assertTrue(status)
# Remove test file
os.remove(dummy_file_path)
@responses.activate
@mock.patch("semantic_release.hvcs.Gitea.token", return_value="super-token")
def test_should_upload_asset_with_no_extension(self, mock_token):
# Create temporary file to upload
dummy_file_path = os.path.join(temp_dir, "testupload-no-extension")
os.makedirs(os.path.dirname(dummy_file_path), exist_ok=True)
dummy_content = (
"# Test File with no extension\n\n*Dummy asset for testing uploads.*"
)
with open(dummy_file_path, "w") as dummy_file:
dummy_file.write(dummy_content)
def request_callback(request):
self.assertTrue(dummy_content in request.body.decode())
self.assertEqual(request.url, self.asset_no_extension_url_params)
self.assertTrue("multipart/form-data" in request.headers["Content-Type"])
self.assertEqual("token super-token", request.headers["Authorization"])
return 201, {}, json.dumps({})
responses.add_callback(
responses.POST, self.asset_url, callback=request_callback
)
status = Gitea.upload_asset(
"gitea", "tea", 1, dummy_file_path, "Dummy file no extension"
)
self.assertTrue(status)
# Remove test file
os.remove(dummy_file_path)
@responses.activate
@mock.patch("semantic_release.hvcs.Gitea.token", return_value="super-token")
def test_should_upload_dists(self, mock_token):
# Create temporary file to upload
dummy_file_path = os.path.join(temp_dir, "dist", "testupload.md")
os.makedirs(os.path.dirname(dummy_file_path), exist_ok=True)
dummy_content = "# Test File\n\n*Dummy asset for testing uploads.*"
with open(dummy_file_path, "w") as dummy_file:
dummy_file.write(dummy_content)
def request_callback(request):
self.assertTrue(dummy_content in request.body.decode())
self.assertEqual(request.url, self.dist_asset_url_params)
self.assertTrue("multipart/form-data" in request.headers["Content-Type"])
self.assertEqual("token super-token", request.headers.get("Authorization"))
return 201, {}, json.dumps({})
responses.add(
responses.GET,
self.get_url,
status=200,
body='{"id": 1}',
content_type="application/json",
)
responses.add_callback(
responses.POST, self.asset_url, callback=request_callback
)
status = Gitea.upload_dists(
"gitea", "tea", "1.0.0", os.path.dirname(dummy_file_path)
)
self.assertTrue(status)
# Remove test file
os.remove(dummy_file_path)
class GitlabReleaseTests(TestCase):
@mock_gitlab()
def test_should_return_true_if_success(self, mock_auth, mock_project):
self.assertTrue(post_changelog("owner", "repo", "my_good_tag", "changelog"))
@mock_gitlab()
def test_should_return_false_if_bad_tag(self, mock_auth, mock_project):
self.assertFalse(post_changelog("owner", "repo", "my_bad_tag", "changelog"))
@mock_gitlab()
def test_should_return_true_for_locked_tags(self, mock_auth, mock_project):
self.assertTrue(post_changelog("owner", "repo", "my_locked_tag", "changelog"))
def test_gitea_token():
with mock.patch("os.environ", {"GITEA_TOKEN": "token"}):
assert Gitea.token() == "token"
def test_github_token():
with mock.patch("os.environ", {"GH_TOKEN": "token"}):
assert Github.token() == "token"
|
|
"""
The MIT License (MIT)
Copyright (c) 2012-2014 Alexander Turkin
Copyright (c) 2014 William Hallatt
Copyright (c) 2015 Jacob Dawid
Copyright (c) 2016 Luca Weiss
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import math
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
class QtWaitingSpinner(QWidget):
def __init__(self, parent, centerOnParent=True, disableParentWhenSpinning=False, modality=Qt.NonModal):
super().__init__(parent)
self._centerOnParent = centerOnParent
self._disableParentWhenSpinning = disableParentWhenSpinning
# WAS IN initialize()
self._color = QColor(Qt.black)
self._roundness = 100.0
self._minimumTrailOpacity = 3.14159265358979323846
self._trailFadePercentage = 80.0
self._revolutionsPerSecond = 1.57079632679489661923
self._numberOfLines = 20
self._lineLength = 10
self._lineWidth = 2
self._innerRadius = 10
self._currentCounter = 0
self._isSpinning = False
self._timer = QTimer(self)
self._timer.timeout.connect(self.rotate)
self.updateSize()
self.updateTimer()
self.hide()
# END initialize()
self.setWindowModality(modality)
self.setAttribute(Qt.WA_TranslucentBackground)
def paintEvent(self, QPaintEvent):
self.updatePosition()
painter = QPainter(self)
painter.fillRect(self.rect(), Qt.transparent)
painter.setRenderHint(QPainter.Antialiasing, True)
if self._currentCounter >= self._numberOfLines:
self._currentCounter = 0
painter.setPen(Qt.NoPen)
for i in range(0, self._numberOfLines):
painter.save()
painter.translate(self._innerRadius + self._lineLength, self._innerRadius + self._lineLength)
rotateAngle = float(360 * i) / float(self._numberOfLines)
painter.rotate(rotateAngle)
painter.translate(self._innerRadius, 0)
distance = self.lineCountDistanceFromPrimary(i, self._currentCounter, self._numberOfLines)
color = self.currentLineColor(distance, self._numberOfLines, self._trailFadePercentage,
self._minimumTrailOpacity, self._color)
painter.setBrush(color)
rect = QRect(0, int(-self._lineWidth / 2), int(self._lineLength), int(self._lineWidth))
painter.drawRoundedRect(rect, self._roundness, self._roundness, Qt.RelativeSize)
painter.restore()
def start(self):
self.updatePosition()
self._isSpinning = True
self.show()
        if self.parentWidget() and self._disableParentWhenSpinning:
self.parentWidget().setEnabled(False)
if not self._timer.isActive():
self._timer.start()
self._currentCounter = 0
def stop(self):
self._isSpinning = False
self.hide()
if self.parentWidget() and self._disableParentWhenSpinning:
self.parentWidget().setEnabled(True)
if self._timer.isActive():
self._timer.stop()
self._currentCounter = 0
def setNumberOfLines(self, lines):
self._numberOfLines = lines
self._currentCounter = 0
self.updateTimer()
def setLineLength(self, length):
self._lineLength = length
self.updateSize()
def setLineWidth(self, width):
self._lineWidth = width
self.updateSize()
def setInnerRadius(self, radius):
self._innerRadius = radius
self.updateSize()
def color(self):
return self._color
def roundness(self):
return self._roundness
def minimumTrailOpacity(self):
return self._minimumTrailOpacity
def trailFadePercentage(self):
return self._trailFadePercentage
    def revolutionsPerSecond(self):
return self._revolutionsPerSecond
def numberOfLines(self):
return self._numberOfLines
def lineLength(self):
return self._lineLength
def lineWidth(self):
return self._lineWidth
def innerRadius(self):
return self._innerRadius
def isSpinning(self):
return self._isSpinning
def setRoundness(self, roundness):
self._roundness = max(0.0, min(100.0, roundness))
def setColor(self, color=Qt.black):
self._color = QColor(color)
def setRevolutionsPerSecond(self, revolutionsPerSecond):
self._revolutionsPerSecond = revolutionsPerSecond
self.updateTimer()
def setTrailFadePercentage(self, trail):
self._trailFadePercentage = trail
def setMinimumTrailOpacity(self, minimumTrailOpacity):
self._minimumTrailOpacity = minimumTrailOpacity
def rotate(self):
self._currentCounter += 1
if self._currentCounter >= self._numberOfLines:
self._currentCounter = 0
self.update()
def updateSize(self):
size = int((self._innerRadius + self._lineLength) * 2)
self.setFixedSize(size, size)
def updateTimer(self):
self._timer.setInterval(int(1000 / (self._numberOfLines * self._revolutionsPerSecond)))
def updatePosition(self):
if self.parentWidget() and self._centerOnParent:
self.move(int(self.parentWidget().width() / 2 - self.width() / 2),
int(self.parentWidget().height() / 2 - self.height() / 2))
def lineCountDistanceFromPrimary(self, current, primary, totalNrOfLines):
distance = primary - current
if distance < 0:
distance += totalNrOfLines
return distance
def currentLineColor(self, countDistance, totalNrOfLines, trailFadePerc, minOpacity, colorinput):
color = QColor(colorinput)
if countDistance == 0:
return color
minAlphaF = minOpacity / 100.0
distanceThreshold = int(math.ceil((totalNrOfLines - 1) * trailFadePerc / 100.0))
if countDistance > distanceThreshold:
color.setAlphaF(minAlphaF)
else:
alphaDiff = color.alphaF() - minAlphaF
gradient = alphaDiff / float(distanceThreshold + 1)
resultAlpha = color.alphaF() - gradient * countDistance
# If alpha is out of bounds, clip it.
resultAlpha = min(1.0, max(0.0, resultAlpha))
color.setAlphaF(resultAlpha)
return color
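# Minimal usage sketch (an illustrative assumption, not part of the original widget):
# show the spinner centred on a parent window for a few seconds, then quit.
if __name__ == "__main__":
    import sys
    app = QApplication(sys.argv)
    window = QWidget()
    window.resize(300, 300)
    spinner = QtWaitingSpinner(window, centerOnParent=True)
    spinner.setColor(QColor(60, 60, 60))
    window.show()
    spinner.start()
    # stop the spinner and close the demo after a short period
    QTimer.singleShot(3000, spinner.stop)
    QTimer.singleShot(3500, app.quit)
    sys.exit(app.exec_())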
|
|
# From https://github.com/magical/nlzss
# used http://code.google.com/p/u-lzss/source/browse/trunk/js/lib/ulzss.js as
# a guide
from sys import stderr
from collections import defaultdict
from operator import itemgetter
from struct import pack, unpack
class SlidingWindow:
# The size of the sliding window
size = 4096
# The minimum displacement.
disp_min = 2
# The hard minimum - a disp less than this can't be represented in the
# compressed stream.
disp_start = 1
# The minimum length for a successful match in the window
match_min = 1
# The maximum length of a successful match, inclusive.
match_max = None
def __init__(self, buf):
self.data = buf
self.hash = defaultdict(list)
self.full = False
self.start = 0
self.stop = 0
#self.index = self.disp_min - 1
self.index = 0
assert self.match_max is not None
def next(self):
if self.index < self.disp_start - 1:
self.index += 1
return
if self.full:
olditem = self.data[self.start]
assert self.hash[olditem][0] == self.start
self.hash[olditem].pop(0)
item = self.data[self.stop]
self.hash[item].append(self.stop)
self.stop += 1
self.index += 1
if self.full:
self.start += 1
else:
if self.size <= self.stop:
self.full = True
def advance(self, n=1):
"""Advance the window by n bytes"""
for _ in range(n):
self.next()
def search(self):
match_max = self.match_max
match_min = self.match_min
counts = []
indices = self.hash[self.data[self.index]]
for i in indices:
matchlen = self.match(i, self.index)
if matchlen >= match_min:
disp = self.index - i
#assert self.index - disp >= 0
#assert self.disp_min <= disp < self.size + self.disp_min
if self.disp_min <= disp:
counts.append((matchlen, -disp))
if matchlen >= match_max:
#assert matchlen == match_max
return counts[-1]
if counts:
match = max(counts, key=itemgetter(0))
return match
return None
def match(self, start, bufstart):
size = self.index - start
if size == 0:
return 0
matchlen = 0
it = range(min(len(self.data) - bufstart, self.match_max))
for i in it:
if self.data[start + (i % size)] == self.data[bufstart + i]:
matchlen += 1
else:
break
return matchlen
class NLZ10Window(SlidingWindow):
size = 4096
match_min = 3
match_max = 3 + 0xf
class NLZ11Window(SlidingWindow):
size = 4096
match_min = 3
match_max = 0x111 + 0xFFFF
class NOverlayWindow(NLZ10Window):
disp_min = 3
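# Descriptive note (not in the original source): the window subclasses above only choose
# which match lengths and displacements are allowed; the token layout is decided by the
# writers below.
#   NLZ10Window    - lengths 3..18 fit one 16-bit token: 4 bits of (length - 3) plus a
#                    12-bit displacement (see compress()).
#   NLZ11Window    - lengths up to 0x111 + 0xFFFF use 2-, 3- or 4-byte tokens selected by
#                    the top nibble (see compress_nlz11()).
#   NOverlayWindow - same token layout as LZ10, but with a larger minimum displacement
#                    (disp_min = 3).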
def _compress(input, windowclass=NLZ10Window):
"""Generates a stream of tokens. Either a byte (int) or a tuple of (count,
displacement)."""
window = windowclass(input)
i = 0
while True:
if len(input) <= i:
break
match = window.search()
if match:
yield match
#if match[1] == -283:
# raise Exception(match, i)
window.advance(match[0])
i += match[0]
else:
yield input[i]
window.next()
i += 1
def packflags(flags):
n = 0
for i in range(8):
n <<= 1
try:
if flags[i]:
n |= 1
except IndexError:
pass
return n
def chunkit(it, n):
buf = []
for x in it:
buf.append(x)
if n <= len(buf):
yield buf
buf = []
if buf:
yield buf
def compress(input, out):
# header
out.write(pack("<L", (len(input) << 8) + 0x10))
# body
length = 0
for tokens in chunkit(_compress(input), 8):
flags = [type(t) == tuple for t in tokens]
out.write(pack(">B", packflags(flags)))
for t in tokens:
if type(t) == tuple:
count, disp = t
count -= 3
disp = (-disp) - 1
assert 0 <= disp < 4096
sh = (count << 12) | disp
out.write(pack(">H", sh))
else:
if isinstance(t, str):
out.write(t)
else:
out.write(pack(">B", t))
length += 1
length += sum(2 if f else 1 for f in flags)
# padding
padding = 4 - (length % 4 or 4)
if padding:
out.write(b'\xff' * padding)
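# Illustrative usage sketch (not part of the original module): compressing an in-memory
# buffer with the LZ10 writer above. Wrapped in a helper so nothing runs on import.
def _example_compress_lz10(data=b"ABCABCABCABCABCABC"):
    from io import BytesIO
    out = BytesIO()
    compress(data, out)
    # 4-byte little-endian header ((len(data) << 8) | 0x10) followed by flag/token bytes
    return out.getvalue()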
def compress_nlz11(input, out):
# header
out.write(pack("<L", (len(input) << 8) + 0x11))
# body
length = 0
for tokens in chunkit(_compress(input, windowclass=NLZ11Window), 8):
flags = [type(t) == tuple for t in tokens]
out.write(pack(">B", packflags(flags)))
length += 1
for t in tokens:
if type(t) == tuple:
count, disp = t
disp = (-disp) - 1
#if disp == 282:
# raise Exception
assert 0 <= disp <= 0xFFF
if count <= 1 + 0xF:
count -= 1
assert 2 <= count <= 0xF
sh = (count << 12) | disp
out.write(pack(">H", sh))
length += 2
elif count <= 0x11 + 0xFF:
count -= 0x11
assert 0 <= count <= 0xFF
b = count >> 4
sh = ((count & 0xF) << 12) | disp
out.write(pack(">BH", b, sh))
length += 3
elif count <= 0x111 + 0xFFFF:
count -= 0x111
assert 0 <= count <= 0xFFFF
l = (1 << 28) | (count << 12) | disp
out.write(pack(">L", l))
length += 4
else:
raise ValueError(count)
else:
out.write(pack(">B", t))
length += 1
# padding
padding = 4 - (length % 4 or 4)
if padding:
out.write(b'\xff' * padding)
def dump_compress_nlz11(input, out):
# body
length = 0
def dump():
for t in _compress(input, windowclass=NLZ11Window):
if type(t) == tuple:
yield t
from pprint import pprint
pprint(list(dump()))
if __name__ == '__main__':
from sys import stdout, argv
data = open(argv[1], "rb").read()
stdout = stdout.detach()
#compress(data, stdout)
compress_nlz11(data, stdout)
#dump_compress_nlz11(data, stdout)
|
|
#gameheart.entities.forms
from django import forms
from django.contrib.admin import widgets
from django.contrib.admin.widgets import FilteredSelectMultiple
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from gameheart.entities.models import *
from gameheart.entities.helpers import *
class GHForm(forms.ModelForm):
class Meta:
abstract = True
def __init__(self, user=None, *args, **kwargs):
super(GHForm, self).__init__(*args, **kwargs)
userinfo = getuserinfo(user)
if not userinfo:
admin = True
else:
admin = userinfo['isadmin']
instance = kwargs.pop('instance',None)
owned = isowned(instance,user)
approver = isapprover(instance,user)
if hasattr(self,'readonlyfields'):
for field in self.readonlyfields:
self.fields[field].widget.attrs['readonly'] = True
self.fields[field].widget.attrs['disabled'] = True
self.fields[field].required = False
if hasattr(self,'adminonlyfields') and admin == False:
for field in self.adminonlyfields:
self.fields[field].widget = forms.HiddenInput()
self.fields[field].widget.attrs['disabled'] = True
if hasattr(self,'isprivate'):
if self.isprivate == True and owned == False and admin == False and instance:
for field in self.Meta.fields:
self.fields[field].widget.attrs['readonly'] = True
self.fields[field].widget.attrs['disabled'] = True
if hasattr(self,'ownedonlyfields') and owned == False:
for field in self.ownedonlyfields:
self.fields[field].widget = forms.HiddenInput()
self.fields[field].widget.attrs['disabled'] = True
if hasattr(self,'approveronlyfields') and approver == False:
for field in self.approveronlyfields:
self.fields[field].widget = forms.HiddenInput()
self.fields[field].widget.attrs['disabled'] = True
if hasattr(self,'hiddenfields'):
for field in self.hiddenfields:
self.fields[field].widget = forms.HiddenInput()
self.fields[field].widget.attrs['disabled'] = True
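# Illustrative sketch (not one of the real forms below): GHForm.__init__ looks for plain
# class attributes on its subclasses and adjusts the rendered fields accordingly, e.g.
#
#   class ExampleNoteForm(GHForm):            # hypothetical form, for illustration only
#       class Meta:
#           model = Note
#           fields = ['subject', 'body']
#       readonlyfields = ['subject']          # always rendered read-only and disabled
#       adminonlyfields = ['body']            # hidden unless the current user is an admin
#       ownedonlyfields = []                  # hidden unless the user owns the instance
#       approveronlyfields = []               # hidden unless the user is an approver
#       hiddenfields = []                     # always hidden
#       isprivate = True                      # non-owners/non-admins get a read-only form
#                                             # when editing an existing instance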
class UserForm(UserCreationForm):
class Meta:
model = User
fields = ['username','email',]
widgets = {
'password':forms.PasswordInput
}
isadmin = False
isprivate = True
mname = 'User'
class UserAccountForm(forms.ModelForm):
class Meta:
model = User
fields = ['username','first_name','last_name','email']
buttons = [
{'name':'xpspend','class':'button','id':'xpspend','value':'Spend XP','link':'spendxp/','isadmin':False,'isnew':False}]
isadmin = False
isprivate = False
mname = 'User'
class UserProfileForm(forms.ModelForm):
class Meta:
model = UserProfile
fields = ['name','description']
isadmin = False
isprivate = False
mname = 'User'
class UserDetailProfileForm(GHForm):
class Meta:
model = UserProfile
fields = ['name','isadmin','description']
adminonlyfields = ['isadmin']
isadmin = False
isprivate = False
mname = 'User'
class UserLoginForm(forms.ModelForm):
class Meta:
model = User
fields = ['username', 'password']
widgets = {
'password':forms.PasswordInput
}
isadmin = False
isprivate = False
mname = 'User'
class ChapterTypeForm(GHForm):
class Meta:
model = ChapterType
fields = ['name', 'description','dateactive','dateexpiry']
adminonlyfields = ['dateactive','dateexpiry']
ifields = ['name']
sname = Vocabulary.objects.get(name='ChapterType').displayname
surl = '/types/chapters/'
sheading = ''.join(['Add New ',sname])
isadmin = True
isprivate = False
mname = 'ChapterType'
class StaffTypeForm(GHForm):
class Meta:
model = StaffType
fields = ['name', 'isapprover', 'isdirector', 'description','dateactive','dateexpiry']
adminonlyfields = ['dateactive','dateexpiry','isapprover', 'isdirector']
ifields = ['name']
sname = Vocabulary.objects.get(name='StaffType').displayname
surl = '/types/staff/'
sheading = ''.join(['Add New ',sname])
isadmin = True
isprivate = False
mname = 'StaffType'
class StaffForm(GHForm):
class Meta:
model = Staff
fields = ['user', 'type','dateactive','dateexpiry']
adminonlyfields = ['dateactive','dateexpiry']
lfield = 'chapter'
sname = Vocabulary.objects.get(name='Staff').displayname
surl = '/staff/'
sheading = ''.join(['Add New ',sname])
isadmin = False
isprivate = False
mname = 'Staff'
def __init__(self, *args, **kwargs):
super(StaffForm,self).__init__(*args, **kwargs)
self.fields['user'].label = Vocabulary.objects.get(name='User').displayname
class ChapterForm(GHForm):
class Meta:
model = Chapter
fields = ['name', 'type', 'description', 'dateactive', 'dateexpiry']
exclude = []
adminonlyfields = ['dateactive','dateexpiry']
ifields = ['name', 'type']
buttons = []
lform = StaffForm
sname = Vocabulary.objects.get(name='Chapter').displayname
surl = '/chapters/'
sheading = ''.join(['Add New ',sname])
isadmin = True
isprivate = True
mname = 'Chapter'
class ChapterAddressForm(GHForm):
class Meta:
model = ChapterAddress
fields = ['name', 'chapter', 'address1', 'address2', 'city', 'state', 'zip', 'dateactive','dateexpiry']
adminonlyfields = ['dateactive','dateexpiry']
ifields = ['name', 'chapter', 'city', 'state']
sname = ''.join([Vocabulary.objects.get(name='ChapterAddress').displayname])
surl = '/chapters/addresses/'
sheading = ''.join(['Add New ',sname])
isadmin = False
isprivate = False
mname = 'ChapterAddress'
def __init__(self, *args, **kwargs):
super(ChapterAddressForm,self).__init__(*args, **kwargs)
self.fields['chapter'].label = Vocabulary.objects.get(name='Chapter').displayname
class CharacterTypeForm(GHForm):
class Meta:
model = CharacterType
fields = ['name', 'description','dateactive','dateexpiry']
adminonlyfields = ['dateactive','dateexpiry']
ifields = ['name']
sname = Vocabulary.objects.get(name='CharacterType').displayname
surl = '/types/characters/'
sheading = ''.join(['Add New ',sname])
isadmin = True
isprivate = False
mname = 'CharacterType'
class CharacterOwnerForm(GHForm):
class Meta:
model = CharacterOwner
fields = ['user','iscontroller','dateactive','dateexpiry']
adminonlyfields = ['iscontroller','dateactive','dateexpiry']
lfield = 'character'
sname = 'Character Owner'
surl = '/characters/owners/'
sheading = 'Assign Character to User'
isadmin = False
isprivate = False
mname = 'CharacterOwner'
class CharacterForm(GHForm):
class Meta:
model = Character
fields = ['name', 'type', 'chapter', 'public_description', 'private_description','dateactive','dateexpiry']
exclude = []
adminonlyfields = ['dateactive','dateexpiry']
ownedonlyfields = ['type','private_description']
ifields = ['name', 'type', 'chapter']
directorfields = ['name', 'type', 'chapter', 'isprimary', 'isnew', 'public_description', 'private_description']
lform = CharacterOwnerForm
sname = Vocabulary.objects.get(name='Character').displayname
surl = '/characters/'
sheading = ''.join(['Add New ',sname])
buttons = [
{'name':'sheet','class':'button','id':'sheet','value':'View Sheet','link':'grid/','isadmin':False,'isnew':False,'check':False,'newtab':False,'controlleronly':False,'directoronly':False},
{'name':'print','class':'button','id':'print','value':'Print','link':'print/','isadmin':False,'isnew':False,'check':False,'newtab':True,'controlleronly':False,'directoronly':False},
{'name':'xplog','class':'button','id':'xplog','value':'XP Log','link':'calcxp/','isadmin':False,'isnew':False,'check':False,'newtab':False,'controlleronly':False,'directoronly':False},
#{'name':'fix','class':'button','id':'fix','value':'Fix','link':'fix/','isadmin':False,'isnew':False,'check':False,'newtab':False,'controlleronly':False,'directoronly':True},
{'name':'labels','class':'button','id':'labels','value':'Labels','link':'traits/labels/','isadmin':False,'isnew':False,'check':False,'newtab':False,'controlleronly':False,'directoronly':True},
{'name':'remove','class':'button','id':'remove','value':'Remove','link':'hide/','isadmin':False,'isnew':True,'check':True,'newtab':False,'controlleronly':True,'directoronly':True}
]
isadmin = False
isprivate = True
mname = 'Character'
def __init__(self, *args, **kwargs):
super(CharacterForm,self).__init__(*args, **kwargs)
self.fields['chapter'].label = Vocabulary.objects.get(name='Chapter').displayname
class TraitTypeForm(GHForm):
charactertypes = forms.ModelMultipleChoiceField(queryset=CharacterType.objects.activeonly(),widget=forms.CheckboxSelectMultiple(),required=False)
chaptertypes = forms.ModelMultipleChoiceField(queryset=ChapterType.objects.activeonly(),widget=forms.CheckboxSelectMultiple(),required=False)
class Meta:
model = TraitType
fields = ['name', 'aggregate', 'onepercharacter', 'multiplyxp', 'labelable', 'xpcost1','xpcost2','xpcost3','xpcost4','xpcost5','cotrait','availtocontroller','availtoapprover','availtodirector','description', 'charactertypes', 'chaptertypes', 'dateactive','dateexpiry']
adminonlyfields = ['dateactive','dateexpiry']
ifields = ['name']
sname = Vocabulary.objects.get(name='TraitType').displayname
surl = '/types/traits/'
sheading = ''.join(['Add New ',sname])
isadmin = True
isprivate = False
mname = 'TraitType'
def __init__(self, *args, **kwargs):
super(TraitTypeForm,self).__init__(*args, **kwargs)
self.fields['labelable'].label = 'Can be Labeled'
class TraitForm(GHForm):
charactertypes = forms.ModelMultipleChoiceField(queryset=CharacterType.objects.activeonly(),widget=forms.CheckboxSelectMultiple(),required=False)
chaptertypes = forms.ModelMultipleChoiceField(queryset=ChapterType.objects.activeonly(),widget=forms.CheckboxSelectMultiple(),required=False)
cotraits = forms.ModelMultipleChoiceField(queryset=Trait.objects.cotraits(),widget=widgets.FilteredSelectMultiple(verbose_name='Required Traits',is_stacked=False),required=False)
bantraits = forms.ModelMultipleChoiceField(queryset=Trait.objects.cotraits(),widget=widgets.FilteredSelectMultiple(verbose_name='Banned Traits',is_stacked=False),required=False)
addtraits = forms.ModelMultipleChoiceField(queryset=Trait.objects.cotraits(),widget=widgets.FilteredSelectMultiple(verbose_name='Add Traits',is_stacked=False),required=False)
class Meta:
model = Trait
fields = ['name', 'type', 'level', 'isadmin', 'renamable', 'description', 'charactertypes', 'chaptertypes', 'cotraits','bantraits','addtraits','dateactive','dateexpiry']
adminonlyfields = ['isadmin', 'dateactive','dateexpiry']
ifields = ['type', 'name']
fieldlist = ['id', 'name', 'level', 'xpcost', 'bpcost', 'description']
sname = Vocabulary.objects.get(name='Trait').displayname
surl = '/traits/'
sheading = ''.join(['Add New ',sname])
isadmin = True
isprivate = False
mname = 'Trait'
def __init__(self, *args, **kwargs):
super(TraitForm,self).__init__(*args, **kwargs)
        self.fields['renamable'].label = 'Can be Renamed'
Trait.__unicode__ = Trait.cotrait_label
class CharacterTraitForm(GHForm):
class Meta:
model = CharacterTrait
fields = ['character', 'trait', 'iscreation', 'isfree', 'authorizedby', 'dateauthorized', 'dateremoved', 'dateactive','dateexpiry']
adminonlyfields = ['authorizedby','dateauthorized','dateactive','dateexpiry']
approveronlyfields = ['iscreation','isfree','authorizedby','dateauthorized','dateremoved','dateactive','dateexpiry']
readonlyfields = ['character','trait']
sname = 'Character Trait'
surl = '/characters/traits/'
sheading = 'Add New Trait to Character'
sredirect = 'user_index'
isadmin = False
isprivate = False
mname = 'CharacterTrait'
class AttendanceForm(GHForm):
class Meta:
model = Attendance
fields = ['user','character','event','xpawarded','authorizedby']
adminonlyfields = ['user','event','authorizedby']
hiddenfields = ['user','event','authorizedby']
fieldlabels = [Vocabulary.objects.get(name='User').displayname,Vocabulary.objects.get(name='Character').displayname,Vocabulary.objects.get(name='Event').displayname,'xpawarded','authorizedby']
lfield = 'event'
sname = 'Attendance'
    surl = '/chapters/events/attendance/'
sheading = ''.join(['Sign in to ',Vocabulary.objects.get(name='Event').displayname])
isadmin = False
isprivate = False
mname = 'Attendance'
def __init__(self, *args, **kwargs):
super(AttendanceForm,self).__init__(*args, **kwargs)
self.fields['user'].label = Vocabulary.objects.get(name='User').displayname
self.fields['event'].label = Vocabulary.objects.get(name='Event').displayname
class AttendanceGameForm(GHForm):
class Meta:
model = Attendance
fields = ['user','character','event','xpawarded','authorizedby']
adminonlyfields = ['user','event','authorizedby']
hiddenfields = ['user','event','authorizedby']
fieldlabels = [Vocabulary.objects.get(name='User').displayname,Vocabulary.objects.get(name='Character').displayname,Vocabulary.objects.get(name='Event').displayname,'xpawarded','authorizedby']
lfield = 'event'
sname = 'Attendance'
surl = '//'
sheading = ''.join(['Sign in to ',Vocabulary.objects.get(name='Event').displayname])
isadmin = False
isprivate = False
mname = 'Attendance'
def __init__(self, *args, **kwargs):
super(AttendanceGameForm,self).__init__(*args, **kwargs)
self.fields['user'].label = Vocabulary.objects.get(name='User').displayname
self.fields['event'].label = Vocabulary.objects.get(name='Event').displayname
class EventForm(GHForm):
class Meta:
model = Event
fields = ['name', 'chapter', 'chapteraddress', 'dateheld','dateactive','dateexpiry']
adminonlyfields = ['dateactive','dateexpiry']
ifields = ['name', 'chapter']
#approveronlyfields = ['name', 'chapter', 'chapteraddress', 'dateheld']
lform = AttendanceForm
sname = Vocabulary.objects.get(name='Event').displayname
surl = '/chapters/events/'
sheading = ''.join(['Add New ',sname])
isadmin = False
isprivate = False
mname = 'Event'
buttons = []
def __init__(self, *args, **kwargs):
super(EventForm,self).__init__(*args, **kwargs)
self.fields['chapter'].label = Vocabulary.objects.get(name='Chapter').displayname
self.fields['chapteraddress'].label = Vocabulary.objects.get(name='ChapterAddress').displayname
class NoteForm(GHForm):
class Meta:
model = Note
fields = ['subject', 'body','character','chapter','trait','traitlevel','stafftype','dateactive','dateexpiry']
adminonlyfields = ['dateactive','dateexpiry']
sname = Vocabulary.objects.get(name='Note').displayname
surl = '/notes/'
sheading = ''.join(['Add New ',sname])
isadmin = False
isprivate = False
mname = 'Note'
class NoteTagForm(GHForm):
class Meta:
model = NoteTag
fields = ['tag','dateactive','dateexpiry']
adminonlyfields = ['dateactive','dateexpiry']
sname = 'Note'
surl = '/notes/tags/'
sheading = 'Add New Note Tag'
isadmin = False
isprivate = False
mname = 'NoteTag'
class FavoriteUserForm(GHForm):
class Meta:
model = FavoriteUser
fields = ['favoriteuser']
adminonlyfields = []
fkmodel = UserProfile
fkfield = 'favoriteuser'
sname = ''.join([Vocabulary.objects.get(name='Favorite').displayname, ' ', Vocabulary.objects.get(name='User').displayname])
surl = '/account/favorites/users/'
sheading = ''.join([Vocabulary.objects.get(name='Favorite').displayname, ' ', Vocabulary.objects.get(name='User').displayplural])
isadmin = False
isprivate = False
lform = UserForm
mname = 'FavoriteUser'
def __init__(self, *args, **kwargs):
super(FavoriteUserForm,self).__init__(*args, **kwargs)
self.fields['favoriteuser'].label = ''.join([Vocabulary.objects.get(name='Favorite').displayname,' ',Vocabulary.objects.get(name='User').displayname])
class FavoriteChapterForm(GHForm):
class Meta:
model = FavoriteChapter
fields = ['favoritechapter']
adminonlyfields = []
fkmodel = Chapter
fkfield = 'favoritechapter'
sname = ''.join([Vocabulary.objects.get(name='Favorite').displayname, ' ', Vocabulary.objects.get(name='Chapter').displayname])
surl = '/account/favorites/chapters/'
sheading = ''.join([Vocabulary.objects.get(name='Favorite').displayname, ' ', Vocabulary.objects.get(name='Chapter').displayplural])
isadmin = False
isprivate = False
lform = ChapterForm
mname = 'FavoriteChapter'
def __init__(self, *args, **kwargs):
super(FavoriteChapterForm,self).__init__(*args, **kwargs)
self.fields['favoritechapter'].label = ''.join([Vocabulary.objects.get(name='Favorite').displayname,' ',Vocabulary.objects.get(name='Chapter').displayname])
class FavoriteCharacterForm(GHForm):
class Meta:
model = FavoriteCharacter
fields = ['favoritecharacter']
adminonlyfields = []
fkmodel = Character
fkfield = 'favoritecharacter'
sname = ''.join([Vocabulary.objects.get(name='Favorite').displayname, ' ', Vocabulary.objects.get(name='Character').displayname])
surl = '/account/favorites/characters/'
sheading = ''.join([Vocabulary.objects.get(name='Favorite').displayname, ' ', Vocabulary.objects.get(name='Character').displayplural])
isadmin = False
isprivate = False
lform = CharacterForm
mname = 'FavoriteCharacter'
def __init__(self, *args, **kwargs):
super(FavoriteCharacterForm,self).__init__(*args, **kwargs)
self.fields['favoritecharacter'].label = ''.join([Vocabulary.objects.get(name='Favorite').displayname,' ',Vocabulary.objects.get(name='Character').displayname])
class VocabularyForm(GHForm):
class Meta:
model = Vocabulary
fields = ['displayname','displayplural']
adminonlyfields = ['displayname', 'displayplural']
sname = 'Vocabulary'
surl = '/vocabulary/'
sheading = 'Vocabulary'
isadmin = False
isprivate = False
mname = 'Vocabulary'
class TransactionForm(GHForm):
class Meta:
model = Transaction
fields = ['user','txnid','amount','dateactive','dateexpiry']
adminonlyfields = ['dateactive','dateexpiry']
isadmin = False
isprivate = False
mname = 'Transaction'
class SubscriptionForm(GHForm):
class Meta:
model = Subscription
fields = ['user','name','pp_period3','pp_auth','pp_charset','pp_receiver_email','pp_amount3','pp_form_charset','pp_item_number','pp_payer_email','pp_recurring','pp_last_name','pp_payer_id','pp_mc_amount3','pp_subscr_id','pp_mc_currency','pp_txn_id','pp_txn_type','pp_btn_id','pp_item_name','pp_payer_status','pp_password','pp_reattempt','pp_residence_country','pp_business','pp_subscr_date','pp_first_name','notes','dateactive','dateexpiry']
adminonlyfields = ['dateactive','dateexpiry']
surl = '/subscriptions/'
isadmin = False
isprivate = False
mname = 'Subscription'
|
|
"""
This module contains code for detection of doors.
"""
import freenect
import cv2
import numpy as np
import time
import serial
import math
import matplotlib.mlab as mlab
from matplotlib import pyplot as plt
import sys
sys.path.append('/usr/lib/python2.7/dist-packages')
__name__ = "Kinect"
DOOR_FLAG = False
DOOR_COUNT = 0
AXIS_PLOT = 0
global SERIALDATA
SERIALDATA = serial.Serial('/dev/ttyUSB0')
global XAXIS
global YAXIS
global GLOBAL_DEPTH_MAP
global TEST_CASES
global DIRECTION
def filter_noise(depth_array, mask, masked_array, row, col):
"""
* Function Name:filter_noise
* Input: Original frame, noise mask, Original
frame with noise pixels being made to 255 value,
no. of row tiles, No. of column tiles.
* Output: Filters the noise from the original depth frame.
* Logic: The function divides rows and cols of the frame in
some number of pixels. It then finds the mean of the
tile and assigns the value to the noise pixels in that
tile.
* Example Call: filter_noise(depth_array, mask, ad, 3, 4)
"""
row_ratio = 480/row
column_ratio = 640/col
temp_y = 0
for i in xrange(col):
temp_x = 0
for j in xrange(row):
area = masked_array[temp_x:temp_x+row_ratio-1, \
temp_y:temp_y+column_ratio-1]
mask[temp_x:temp_x+row_ratio-1, temp_y:temp_y+column_ratio-1] \
*= area.mean()
depth_array[temp_x:temp_x+row_ratio-1, \
temp_y:temp_y+column_ratio-1] += \
mask[temp_x:temp_x+row_ratio-1, temp_y:temp_y+column_ratio-1]
temp_x = temp_x + row_ratio
temp_y = temp_y + column_ratio
return depth_array
def filter_smooth(depth_array):
"""
* Function Name: filter_smooth
* Input: Original Depth frame in mm.
* Output: Filters the noise from the depth frame
* Logic: It creates a mask for the noise. It makes
all the noise pixels to 255 to send to filter noise.
The output from filter noise is smoothened using
bilateral filter
* Example Call: filter_smooth(a)
"""
ret, mask = cv2.threshold(depth_array, 10, 255, cv2.THRESH_BINARY_INV)
mask_1 = mask/255
masked_array = depth_array + mask
blur = filter_noise(depth_array, mask_1, masked_array, 3, 4)
blur = cv2.bilateralFilter(blur, 5, 50, 100)
return blur
def get_depth():
"""
* Function Name: get_depth
* Input: None
* Output: Returns the depth information from pixel values of 0 to 255
    * Logic: It receives the depth information from the Kinect sensor in mm.
             The depth range is 40cm to 800cm. The values are scaled
             down to the 0-255 range and the data type is changed to
             8-bit unsigned integers. The frame is then smoothed and returned.
* Example Call: get_depth()
"""
depth_array = freenect.sync_get_depth(format=freenect.DEPTH_MM)[0]
depth_array = depth_array/30.0
depth_array = depth_array.astype(np.uint8)
depth_array = filter_smooth(depth_array)
depth_array[0:479, 630:639] = depth_array[0:479, 620:629]
return depth_array
def contours_return(depth_array, num):
"""
* Function Name: contours_return
* Input: Depth Frame and a number for shifting left or right the matrix.
* Output: Returns the left or right edges contours
    * Logic: It removes noise from the frame and shifts the
             frame matrix by num places so that changes in
             values are highlighted, then binary-thresholds
             the result.
* Example Call: contours_return(a,5)
"""
temp_b = np.roll(depth_array, num)
res = np.subtract(temp_b, depth_array)
result = cv2.medianBlur(res, 11)
mask = result > 200
result[mask] = 0
mask = result < 100
result[mask] = 0
ret, th3 = cv2.threshold(result, 50, 255, cv2.THRESH_BINARY)
contours, hierarchy = cv2.findContours(th3, cv2.RETR_TREE,
cv2.CHAIN_APPROX_SIMPLE)
return contours
def potential_rightedge(contours):
"""
* Function Name: potential_rightedge
* Input: A contour
* Output: Returns a potential rightedge which fulfills all the conditions
returns topmost, bottommost and centroid of the contour.
    * Logic: If the area of the contour crosses a threshold, the
             extreme points of the contour are calculated. If
             the difference between opposite extreme points lies
             within the given threshold and the width and height
             of the bounding rectangle lie within their thresholds,
             the contour is a potential edge and a line
             is drawn along it. The centroid is calculated for
             further use.
* Example Call: potential_rightedge(c)
"""
temp_area = 1000
thresh_y = 250
thresh_x = 60
if cv2.contourArea(contours) > temp_area:
leftmost = tuple(contours[contours[:, :, 0].argmin()][0])
rightmost = tuple(contours[contours[:, :, 0].argmax()][0])
topmost = tuple(contours[contours[:, :, 1].argmin()][0])
bottommost = tuple(contours[contours[:, :, 1].argmax()][0])
x_1 = leftmost[0]
x_2 = rightmost[0]
x_3 = topmost[0]
x_4 = bottommost[0]
y_1 = topmost[1]
y_2 = bottommost[1]
width = 50
if (y_2 - y_1 > thresh_y) and (abs(x_2 - x_1) < thresh_x) \
and x_3 < 620 and x_4 < 620:
pts1 = np.float32([[topmost[0]-width, y_1],
[topmost[0], y_1], [bottommost[0]-width, y_2],
[bottommost[0], y_2]])
pts2 = np.float32([[0, 0], [width, 0],
[0, y_2-y_1], [width, y_2-y_1]])
flat = cv2.getPerspectiveTransform(pts1, pts2)
dst = cv2.warpPerspective(GLOBAL_DEPTH_MAP, flat, (width, y_2-y_1))
meandst = dst.mean()
if meandst > 50:
cv2.line(GLOBAL_DEPTH_MAP, topmost, bottommost, (0, 255, 0), 5)
right_top = topmost
right_bottom = bottommost
flat = cv2.moments(contours)
cxr = int(flat['m10'] / flat['m00'])
return right_top, right_bottom, cxr
return 0, 0, 0
def potential_leftedge(contours):
"""
* Function Name: potential_leftedge
* Input: A contour
* Output: Returns a potential leftedge which fulfills all the conditions
returns topmost, bottommost and centroid of the contour.
    * Logic: If the area of the contour crosses a threshold, the
             extreme points of the contour are calculated. If
             the difference between opposite extreme points lies
             within the given threshold and the width and height
             of the bounding rectangle lie within their thresholds,
             the contour is a potential edge and a line is drawn
             along it. The centroid is calculated for further use.
* Example Call: potential_leftedge(c)
"""
temp_area = 1000
thresh_y = 250
thresh_x = 60
if cv2.contourArea(contours) > temp_area:
leftmost = tuple(contours[contours[:, :, 0].argmin()][0])
rightmost = tuple(contours[contours[:, :, 0].argmax()][0])
topmost = tuple(contours[contours[:, :, 1].argmin()][0])
bottommost = tuple(contours[contours[:, :, 1].argmax()][0])
x_1 = leftmost[0]
x_2 = rightmost[0]
x_3 = topmost[0]
x_4 = bottommost[0]
y_1 = topmost[1]
y_2 = bottommost[1]
width = 50
if (y_2 - y_1 > thresh_y) and (abs(x_2 - x_1) < thresh_x) \
and x_3 > 20 and x_4 > 20:
pts1 = np.float32([[topmost[0], y_1],
[topmost[0] + width, y_1], [bottommost[0], y_2],
[bottommost[0] + width, y_2]])
pts2 = np.float32([[0, 0], [width, 0], [0, y_2-y_1],
[width, y_2-y_1]])
flat = cv2.getPerspectiveTransform(pts1, pts2)
dst = cv2.warpPerspective(GLOBAL_DEPTH_MAP, flat, (width, y_2-y_1))
meandst = dst.mean()
if meandst > 50:
cv2.line(GLOBAL_DEPTH_MAP, topmost, bottommost, (0, 255, 0), 5)
left_top = topmost
left_bottom = bottommost
flat = cv2.moments(contours)
cxl = int(flat['m10'] / flat['m00'])
return left_top, left_bottom, cxl
return 0, 0, 0
def is_door(left_bottom, left_top, right_bottom, right_top, cxr, cxl):
"""
    * Function Name: is_door
    * Input: left_bottom, left_top, right_bottom,
             right_top, right_edge_centroid,
             left_edge_centroid
    * Output: Returns 1 if the pair of edges looks like a door, else 0.
    * Logic: The pixel heights of the two edges are calculated.
             If the difference between the pixel heights and the
             distance between the left and right edge centroids
             both lie within their thresholds, the edges are
             treated as door edges and highlighted on the depth map.
    * Example Call: is_door(lb, lt, rb, rt, cxr, cxl)
"""
diffl = left_bottom[1]-left_top[1]
diffr = right_bottom[1]-right_top[1]
if abs(diffl - diffr) < 150 and((cxr - cxl) > 50 and(cxr - cxl) < 400):
cv2.line(GLOBAL_DEPTH_MAP, left_top, left_bottom, (128, 255, 0), 10)
cv2.line(GLOBAL_DEPTH_MAP, right_top, right_bottom, (128, 255, 0), 10)
return 1
return 0
def left_right_lines(contours_right, contours_left):
"""
* Function Name: left_right_lines
    * Input: right contours, left contours
    * Output: Returns the parameters of all potential left and
              right edges, together with their counts.
    * Logic: Builds lists of the required parameters of every
             potential left and right edge so that door_detection
             can test them pairwise.
    * Example Call: left_right_lines(contoursright, contoursleft)
"""
templ = 0
tempr = 0
ltl = []
lbl = []
cxll = []
rtl = []
rbl = []
cxrl = []
for contours in contours_left:
left_top, left_bottom, cxl = potential_leftedge(contours)
if cxl != 0:
ltl.append(left_top)
lbl.append(left_bottom)
cxll.append(cxl)
templ += 1
for contours in contours_right:
right_top, right_bottom, cxr = potential_rightedge(contours)
if cxr != 0:
rtl.append(right_top)
rbl.append(right_bottom)
cxrl.append(cxr)
tempr += 1
return ltl, lbl, cxll, rtl, rbl, cxrl, templ, tempr
def horizontal_lines():
"""
* Function Name: horizontal_lines()
* Input: None
* Output: Returns information about the edges.
    * Logic: Analyzes the depth array for any drastic
             change in the vertical direction. Areas with
             sudden increase/decrease in depth are marked as edges.
    * Example Call: horizontal_lines()
"""
contour = contours_return(GLOBAL_DEPTH_MAP, 6400)
temph = 0
hll = []
hrl = []
cxhl = []
for contours in contour:
height_left, height_right, cxh = horizontal_edge(contours)
if cxh != 0:
hll.append(height_left)
hrl.append(height_right)
cxhl.append(cxh)
temph += 1
return hll, hrl, cxhl, temph
def actual_width_in_mm(left_bottom, left_top, right_bottom, right_top,
cxr, cxl):
"""
* Function Name:actual_width_in_mm()
* Input: left_bottom: bottom most co-ordinate of the left edge.
             left_top: top most co-ordinate of the left edge.
right_bottom: Bottom most co-ordinate of the right edge
right_top: Top most co-ordinate of the right edge
cxr: Centroid of the right edge.
cxl: Centroid of the left edge.
* Output: Returns the real width of the obstacle.
    * Logic: Calculates the real width in mm using the
             law of cosines.
    * Example Call: actual_width_in_mm(10,10,20,20,15,15)
"""
depth_map = freenect.sync_get_depth(format=freenect.DEPTH_MM)[0]
depth_map = depth_map / 30.0
depth_map = depth_map.astype(np.uint8)
ret, mask = cv2.threshold(depth_map, 1, 255, cv2.THRESH_BINARY_INV)
masked = depth_map + mask
pts1 = np.float32([[left_top[0]-30, left_top[1]],
[left_top[0], left_top[1]], [left_bottom[0]-30,
left_bottom[1]],
[left_bottom[0], left_bottom[1]]])
pts2 = np.float32([[0, 0], [30, 0], [0, left_bottom[1]-left_top[1]],
[30, left_bottom[1]-left_top[1]]])
flat = cv2.getPerspectiveTransform(pts1, pts2)
dst = cv2.warpPerspective(masked, flat, (30, left_bottom[1]-left_top[1]))
left_depth = np.amin(dst)*30
pts1 = np.float32([[right_top[0], right_top[1]],
[right_top[0]+30, right_top[1]],
[right_bottom[0], right_bottom[1]],
[right_bottom[0]+30, right_bottom[1]]])
pts2 = np.float32([[0, 0], [30, 0], [0, right_bottom[1]-right_top[1]],
[30, right_bottom[1]-right_top[1]]])
flat = cv2.getPerspectiveTransform(pts1, pts2)
dst = cv2.warpPerspective(masked, flat, (30, right_bottom[1]-right_top[1]))
right_depth = np.amin(dst)*30
pixel_width = cxr-cxl
angle = (pixel_width/640.0)*(57/180.0)*(math.pi)
width = (left_depth*left_depth) + (right_depth*right_depth) - \
(2*left_depth*right_depth*math.cos(angle))
width = math.sqrt(width)
return width
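# Descriptive note (not in the original source): the width computation above is the law of
# cosines. With left_depth and right_depth the measured ranges to the two edges (in mm) and
# angle the horizontal angle subtended by the pixel width (a 57 degree field of view spread
# over 640 columns):
#     width**2 = left_depth**2 + right_depth**2 - 2 * left_depth * right_depth * cos(angle)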
def actual_height_in_mm(left_bottom, left_top, right_bottom, right_top):
"""
* Function Name:actual_height_in_mm()
* Input: left_bottom: bottom most co-ordinate of the left edge.
             left_top: top most co-ordinate of the left edge.
right_bottom: Bottom most co-ordinate of the right edge
right_top: Top most co-ordinate of the right edge
    * Output: Returns the real heights of the left and right edges.
    * Logic: Calculates the real height in mm using the cosine rule.
    * Example Call: actual_height_in_mm(lb, lt, rb, rt)
"""
depth_map = freenect.sync_get_depth(format=freenect.DEPTH_MM)[0]
depth_map = depth_map / 30.0
depth_map = depth_map.astype(np.uint8)
ret, mask = cv2.threshold(depth_map, 1, 255, cv2.THRESH_BINARY_INV)
masked = depth_map + mask
lefttop = masked[left_top[1]:left_top[1]+10, left_top[0]-30:left_top[0]]
lefttop_depth = np.amin(lefttop)*30
leftbottom = masked[left_bottom[1]-10:left_bottom[1], \
left_bottom[0]-30:left_bottom[0]]
leftbottom_depth = \
np.amin(leftbottom)*30
righttop = masked[right_top[1]:right_top[1] + 10, \
right_top[0]:right_top[0] + 30]
righttop_depth = np.amin(righttop)*30
rightbottom = masked[right_bottom[1]-10:right_bottom[1], \
right_bottom[0]:right_bottom[0] + 30]
rightbottom_depth = np.amin(rightbottom)*30
left_pixel_height = left_bottom[1] - left_top[1]
right_pixel_height = right_bottom[1] - right_top[1]
left_angle = (left_pixel_height/480.0)*(47/180.0)*(math.pi)
right_angle = (right_pixel_height/480.0)*(47/180.0)*(math.pi)
left_height = lefttop_depth * lefttop_depth + \
leftbottom_depth * leftbottom_depth - 2 * leftbottom_depth * \
lefttop_depth * math.cos(left_angle)
right_height = righttop_depth * righttop_depth + \
rightbottom_depth * rightbottom_depth - \
2 * rightbottom_depth * righttop_depth * \
math.cos(right_angle)
left_height = math.sqrt(left_height)
right_height = math.sqrt(right_height)
return left_height, right_height
def return_height_in_mm(left_bottom, left_top, right_bottom, right_top):
"""
* Function Name:return_height_in_mm()
* Input: left_bottom: bottom most co-ordinate of the left edge.
             left_top: top most co-ordinate of the left edge.
right_bottom: Bottom most co-ordinate of the right edge
right_top: Top most co-ordinate of the right edge
    * Output: Returns the real heights of the left and right edges.
    * Logic: Calculates the real height in mm using the Pythagorean theorem.
    * Example Call: return_height_in_mm(lb, lt, rb, rt)
"""
depth_map = freenect.sync_get_depth(format=freenect.DEPTH_MM)[0]
left_bottom_y = left_bottom[1]
left_top_x, left_top_y = left_top[0], left_top[1]
right_bottom_y = right_bottom[1]
right_top_x, right_top_y = right_top[0], right_top[1]
left_top_area = depth_map[left_top_y:left_top_y+10, \
left_top_x-10:left_top_x]
mask = left_top_area == 0
left_top_area[mask] = 8000
top = np.amin(left_top_area)
bound_rect = depth_map[left_top_y:left_bottom_y, \
left_top_x - 20:left_top_x]
mask = bound_rect == 0
bound_rect[mask] = 8000
bottom = np.amin(bound_rect)
left_height = math.sqrt(top**2 - bottom **2)
right_top_area = depth_map[right_top_y:right_top_y+10, \
right_top_x:right_top_x+10]
mask = right_top_area == 0
right_top_area[mask] = 8000
top = np.amin(right_top_area)
bound_rect_right = depth_map[right_top_y:right_bottom_y \
, right_top_x:right_top_x + 20]
mask = bound_rect_right == 0
bound_rect_right[mask] = 8000
bottom = np.amin(bound_rect_right)
right_height = math.sqrt(top**2 - bottom **2)
cv2.line(GLOBAL_DEPTH_MAP, left_top, left_bottom, (128, 255, 0), 10)
cv2.line(GLOBAL_DEPTH_MAP, right_top, right_bottom, (128, 255, 0), 10)
return left_height, right_height
def rectangle_door_test(left_bottom, left_top, right_bottom,
right_top, cxl, cxr, height_left, height_right, cxh):
"""
    * Function Name: rectangle_door_test()
    * Input: left_bottom: bottom most co-ordinate of the left edge.
             left_top: top most co-ordinate of the left edge.
right_bottom: Bottom most co-ordinate of the right edge
right_top: Top most co-ordinate of the right edge
cxr: Centroid of the right edge.
cxl: Centroid of the left edge.
height_left: height in pixels of left edge
height_right: height in pixels of right edge.
* Output: Returns a probability value based on whether the obstacle
is a door or not.
* Logic: Calculates probability using different tests
* Example Call: rectangle_door_test(150,150,250,150,200,200,50,50,25)
"""
if cxh > cxl and cxh < cxr:
top_edge_pixel_length = height_right[0] - height_left[0]
top = right_top[0] - left_top[0]
middle = cxr - cxl
bottom = right_bottom[0] - left_bottom[0]
top_error = top - top_edge_pixel_length
middle_error = middle - top_edge_pixel_length
bottom_error = bottom - top_edge_pixel_length
probtop = probability(0, 200, top_error)
probmiddle = probability(0, 200, middle_error)
probbottom = probability(0, 200, bottom_error)
probavg = (probtop + probmiddle + probbottom) / 3
return probavg
def probability(std_value, sigma, data):
"""
* Function Name:probability()
    * Input: std_value: Centre (expected value) of the curve
             sigma: Spread of the Gaussian curve.
             data: Value for which the probability is calculated
* Output: Returns a probability value using Gaussian distribution function.
* Logic: Calculates probability using a Gaussian distribution curve
* Example Call: probability(1500,500,1750)
"""
param_x = int(round(data))
bounds = np.linspace(std_value-sigma, std_value+sigma, 2*sigma)
temp = mlab.normpdf(bounds, std_value, sigma)
temp = temp / (temp[len(temp) / 2]) * 100
newvalue = []
for i in xrange(2*sigma):
newvalue.append(((temp[i] - 60) * 100) / 40)
if param_x >= std_value-sigma and param_x <= std_value+sigma:
return newvalue[param_x - (std_value - sigma)]
else: return 0
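# Descriptive note (not in the original source): the scaling above normalises the Gaussian
# so that the centre of the curve maps to a score of 100, a point whose pdf is 60% of the
# peak maps to 0, and anything below that maps to a negative score; values outside
# [std_value - sigma, std_value + sigma] score 0.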
def actual_width_test(width):
"""
* Function Name:actual_width_test()
* Input: width: Width of door in mm.
* Output: Returns a probability value.
* Logic: Calculates probability using a Gaussian distribution curve
    * Example Call: actual_width_test(1000)
"""
prob = probability(1000, 1000, width)
return prob
def actual_height_test(left_height, right_height):
"""
* Function Name:actual_height_test()
    * Input: left_height: real height of the left edge in mm
             right_height: real height of the right edge in mm
    * Output: Returns the averaged probability that the two heights
              are consistent with a door.
    * Logic: Calculates each probability using a Gaussian distribution curve
    * Example Call: actual_height_test(1900, 1900)
"""
left_prob = probability(1500, 1500, left_height)
right_prob = probability(1000, 1000, right_height)
return (left_prob+right_prob) / 2.0
def door_detection(contours_right, contours_left):
"""
    * Function Name: door_detection()
    * Input: contours_right: contours of potential right edges
             contours_left: contours of potential left edges
    * Output: Detects doors, plots the confidence value and, when the
              weighted probability is high enough, commands the doorway
              movement and signals the Fire Bird V.
    * Logic: Pairs every potential left edge with every potential right
             edge, runs the rectangle, height and width tests on each
             pair and combines them into a weighted probability.
    * Example Call: door_detection(contoursright, contoursleft)
"""
global DOOR_COUNT
global DOOR_FLAG
global AXIS_PLOT
    prob_1 = 0
    prob_2 = 0
    prob_3 = 0
    weighted_probability = 0
ltl, lbl, cxll, rtl, rbl, cxrl, templ, tempr = \
left_right_lines(contours_right, contours_left)
test_1, test_2, test_3 = TEST_CASES
hll, hrl, cxhl, temph = horizontal_lines()
for i in xrange(templ):
for j in xrange(tempr):
if is_door(lbl[i], ltl[i], rbl[j], rtl[j], cxrl[j], cxll[i]):
left_height, right_height = \
actual_height_in_mm(lbl[i], ltl[i], rbl[j], rtl[j])
width = actual_width_in_mm(lbl[i], ltl[i],
rbl[j], rtl[j], cxrl[j], cxll[i])
if test_2:
prob_2 = actual_height_test(left_height, right_height)
if test_3:
prob_3 = actual_width_test(width)
for k in xrange(temph):
if test_1:
max_prob = \
rectangle_door_test(lbl[i],
ltl[i], rbl[j], rtl[j],
cxll[i], cxrl[j], hll[k], hrl[k], cxhl[k])
else:
max_prob = 0
if max_prob > prob_1:
prob_1 = max_prob
weighted_probability = 0.1 * prob_1 + \
0.5 * prob_2 + 0.4 * prob_3
print "Door Detected with confidence: " + \
str(weighted_probability)
if weighted_probability > 60:
mid_point = (cxrl[j]+cxll[i])/2.
doorway_movement(mid_point)
DOOR_COUNT += 1
for i in xrange(5):
SERIALDATA.write('\x37')
time.sleep(0.5)
for i in xrange(5):
SERIALDATA.write('\x39')
XAXIS.append(AXIS_PLOT)
YAXIS.append(weighted_probability)
AXIS_PLOT += 1
plt.plot(XAXIS, YAXIS)
plt.draw()
if AXIS_PLOT == 100:
AXIS_PLOT = 0
XAXIS[:] = []
YAXIS[:] = []
plt.clf()
if DOOR_COUNT == 1:
DOOR_FLAG = True
def take_right():
"""
* Function Name: take_right
* Input: None
* Output: Takes Right turn
* Logic: This function takes a right turn until
the mean of the middlearea crosses
the threshold value
* Example Call: take_right()
"""
while True:
GLOBAL_DEPTH_MAP = get_depth()
back_movement(GLOBAL_DEPTH_MAP)
middlearea = GLOBAL_DEPTH_MAP[200:479, 200:439]
middleval = middlearea.mean()
print middleval
SERIALDATA.write("\x44")
if middleval > 30:
return
def take_left():
"""
* Function Name: take_left
* Input: None
* Output: Takes Left turn
* Logic: This function takes a left turn until
the mean of the middlearea crosses the
threshold value
* Example Call: take_left()
"""
while True:
GLOBAL_DEPTH_MAP = get_depth()
back_movement(GLOBAL_DEPTH_MAP)
middlearea = GLOBAL_DEPTH_MAP[200:479, 200:439]
middleval = middlearea.mean()
SERIALDATA.write("\x45")
if middleval > 30:
return
def take_right_near():
"""
* Function Name: take_right_near
* Input: None
* Output: Takes Right turn
* Logic: This function takes a Right turn until the
obstacle is not detected i.e. If the
obstacle is in range it will turn
until it is out of its sight
* Example Call: take_right_near()
"""
    global GLOBAL_DEPTH_MAP
    while True:
GLOBAL_DEPTH_MAP = get_depth()
back_movement(GLOBAL_DEPTH_MAP)
middlearea = GLOBAL_DEPTH_MAP[0:479, 240:399]
contoursrightnear = contours_return(GLOBAL_DEPTH_MAP, -10)
contoursleftnear = contours_return(GLOBAL_DEPTH_MAP, 10)
door_detection(contoursrightnear, contoursleftnear)
SERIALDATA.write("\x44")
if count_near_pixels(middlearea, 900) < 1000:
return
def take_left_near():
"""
* Function Name: take_left_near
* Input: None
* Output: Takes Left turn
* Logic: This function takes a Left turn until
the obstacle is not detected i.e. If
the obstacle is in range it will turn
until it is out of its sight
* Example Call: take_left_near()
"""
    global GLOBAL_DEPTH_MAP
    while True:
GLOBAL_DEPTH_MAP = get_depth()
middlearea = GLOBAL_DEPTH_MAP[0:479, 240:399]
contoursrightnear = contours_return(GLOBAL_DEPTH_MAP, -10)
contoursleftnear = contours_return(GLOBAL_DEPTH_MAP, 10)
door_detection(contoursrightnear, contoursleftnear)
SERIALDATA.write("\x45")
if count_near_pixels(middlearea, 900) < 1000:
return
def stuck_pos_movement():
"""
* Function Name: stuck_pos_movement
* Input: None
* Output: Removes robot from a stuck position
    * Logic: When both the middle left and middle right
             detect an obstacle it takes the mean of the
             left and right areas; the side with the greater
             mean depth (more free space) is preferred.
* Example Call: stuck_pos_movement()
"""
GLOBAL_DEPTH_MAP = get_depth()
leftarea = GLOBAL_DEPTH_MAP[0:479, 0:200]
rightarea = GLOBAL_DEPTH_MAP[0:479, 439:639]
leftvals = leftarea.mean()
rightvals = rightarea.mean()
if leftvals > rightvals:
take_left_near()
else:
take_right_near()
def data_send(left_motor_value, right_motor_value):
"""
* Function Name: data_send
* Input: left and right speed mode
* Output: Sends speed mode of the robot wheels to the Fire bird V
for further analysis
    * Logic: There are 25 possible combinations of speed modes,
             depending on the vertical frames in which obstacles
             are detected; the matching mode byte is selected with
             if/else statements and sent.
* Example Call: data_send(speed_left,speed_right)
"""
if left_motor_value == 0:
if right_motor_value == 0:
SERIALDATA.write('\x00')
elif right_motor_value == 1:
SERIALDATA.write('\x01')
elif right_motor_value == 2:
SERIALDATA.write('\x02')
elif right_motor_value == 3:
SERIALDATA.write('\x03')
elif right_motor_value == 4:
SERIALDATA.write('\x04')
elif left_motor_value == 1:
if right_motor_value == 0:
SERIALDATA.write('\x10')
elif right_motor_value == 1:
SERIALDATA.write('\x11')
elif right_motor_value == 2:
SERIALDATA.write('\x12')
elif right_motor_value == 3:
SERIALDATA.write('\x13')
elif right_motor_value == 4:
SERIALDATA.write('\x14')
elif left_motor_value == 2:
if right_motor_value == 0:
SERIALDATA.write('\x20')
elif right_motor_value == 1:
SERIALDATA.write('\x21')
elif right_motor_value == 2:
SERIALDATA.write('\x22')
elif right_motor_value == 3:
SERIALDATA.write('\x23')
elif right_motor_value == 4:
SERIALDATA.write('\x24')
elif left_motor_value == 3:
if right_motor_value == 0:
SERIALDATA.write('\x30')
elif right_motor_value == 1:
SERIALDATA.write('\x31')
elif right_motor_value == 2:
SERIALDATA.write('\x32')
elif right_motor_value == 3:
SERIALDATA.write('\x33')
elif right_motor_value == 4:
SERIALDATA.write('\x34')
elif left_motor_value == 4:
if right_motor_value == 0:
SERIALDATA.write('\x40')
elif right_motor_value == 1:
SERIALDATA.write('\x41')
elif right_motor_value == 2:
SERIALDATA.write('\x42')
elif right_motor_value == 3:
SERIALDATA.write('\x43')
elif right_motor_value == 4:
stuck_pos_movement()
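# Equivalent compact form (an illustrative sketch, not part of the original code): every
# branch above writes the byte whose high nibble is the left speed mode and whose low
# nibble is the right speed mode, except (4, 4), which calls stuck_pos_movement() instead.
def data_send_compact(left_motor_value, right_motor_value):
    if left_motor_value == 4 and right_motor_value == 4:
        stuck_pos_movement()
    else:
        SERIALDATA.write(chr((left_motor_value << 4) | right_motor_value))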
def count_near_pixels(area, dist):
"""
* Function Name: count_near_pixels()
* Input: area and the distance upto which the obstacle should be detected.
* Output: Returns the number of obstacle
pixels that are in the distance range.
    * Logic: The depth data is binary-thresholded according to the
             obstacles detected in the range. The non-zero pixels are
             then counted, as they correspond to the obstacle.
    * Example Call: count_near_pixels(area, 900)
"""
ret, th3 = cv2.threshold(area, dist / 30, 255, cv2.THRESH_BINARY_INV)
count = cv2.countNonZero(th3)
return count
def door_movement():
"""
* Function Name:door_movement
* Input: GLOBAL_DEPTH_MAP
    * Output: Sends a serial code to the Fire Bird V to exit through the door.
    * Logic: The robot exits through the door by moving in a direction that
             appears to be free.
    * Example Call: door_movement()
"""
middlearea = GLOBAL_DEPTH_MAP[200:479, 200:439]
middleval = count_near_pixels(middlearea, 900)
leftarea = GLOBAL_DEPTH_MAP[0:479, 0:100]
rightarea = GLOBAL_DEPTH_MAP[0:479, 539:639]
leftval = leftarea.mean()
rightval = rightarea.mean()
if middleval < 1000:
SERIALDATA.write("\x00")
time.sleep(0.1)
else:
if leftval > rightval:
take_left()
else:
take_right()
def search_wall(directionwall):
"""
* Function Name: search_wall
* Input: left/right wall
* Output: follows left or right wall
    * Logic: If the left wall is selected, for instance, the
             robot moves along the wall. The robot keeps track of
             the objects on the left side of the frame, and if the
             frame does not have any object in range, the robot
             turns left until one is detected.
* Example Call: search_wall(0)
"""
    global GLOBAL_DEPTH_MAP
    if directionwall == 0:
while True:
GLOBAL_DEPTH_MAP = get_depth()
area = GLOBAL_DEPTH_MAP[0:479, 0:319]
contoursrightwall = contours_return(GLOBAL_DEPTH_MAP, -10)
contoursleftwall = contours_return(GLOBAL_DEPTH_MAP, 10)
door_detection(contoursrightwall, contoursleftwall)
SERIALDATA.write("\x03")
if count_near_pixels(area, 1800) > 1000:
break
elif directionwall == 1:
while True:
GLOBAL_DEPTH_MAP = get_depth()
area = GLOBAL_DEPTH_MAP[0:479, 320:639]
contoursrightwall = contours_return(GLOBAL_DEPTH_MAP, -10)
contoursleftwall = contours_return(GLOBAL_DEPTH_MAP, 10)
door_detection(contoursrightwall, contoursleftwall)
SERIALDATA.write("\x30")
if count_near_pixels(area, 1800) > 1000:
break
def regular_movement():
"""
* Function Name: regular_movement
    * Input: None (uses GLOBAL_DEPTH_MAP)
    * Output: The robot moves without bumping into any obstacle.
    * Logic: The frame is divided into 8 vertical sections:
             4 left frames and 4 right frames. Starting from the
             middle, each side is scanned outwards; the loop breaks
             at the first section with an obstacle and the
             corresponding speed mode is stored in speed_left or
             speed_right, which together select the mode to send.
    * Example Call: regular_movement()
"""
temp_x = 320
speed = 4
for i in xrange(4):
area = GLOBAL_DEPTH_MAP[0:479, temp_x:temp_x+79]
if count_near_pixels(area, 900) > 1000:
break
speed = speed - 1
temp_x = temp_x + 80
speed_right = speed
temp_x = 319
speed = 4
for i in xrange(4):
area = GLOBAL_DEPTH_MAP[0:479, temp_x-79:temp_x]
if count_near_pixels(area, 900) > 1000:
break
speed = speed - 1
temp_x = temp_x - 80
speed_left = speed
if speed_left != 0 or speed_right != 0:
data_send(speed_left, speed_right)
else:
search_wall(DIRECTION)
SERIALDATA.write("\x00")
def horizontal_edge(contours):
"""
* Function Name:horizontal_edge
* Input: Contours of edges
* Output: Detects actual edges of a door from given contours
* Logic: The coordinates of the topmost, bottommost and
centroid of edges are calculated using moments.
These values are compared with a threshold and returned
if they lie above a threshold
* Example Call: horizontal_edge(contours)
"""
area = 500
thresh_y = 50
thresh_x = 100
if cv2.contourArea(contours) > area:
leftmost = tuple(contours[contours[:, :, 0].argmin()][0])
rightmost = tuple(contours[contours[:, :, 0].argmax()][0])
topmost = tuple(contours[contours[:, :, 1].argmin()][0])
bottommost = tuple(contours[contours[:, :, 1].argmax()][0])
x_1 = leftmost[0]
x_2 = rightmost[0]
y_1 = topmost[1]
y_2 = bottommost[1]
if (y_2 - y_1 < thresh_y) and (abs(x_2 - x_1) > thresh_x):
cv2.line(GLOBAL_DEPTH_MAP, leftmost, rightmost, (0, 255, 0), 5)
left_height = leftmost
right_height = rightmost
moment = cv2.moments(contours)
cxh = int(moment['m10']/moment['m00'])
return left_height, right_height, cxh
return 0, 0, 0
def doorway_movement(mid_point):
"""
* Function Name:doorway_movement
* Input: midpoint of the door
* Output: orients itself so that door is in front
    * Logic: The midpoint is received. If the midpoint lies in the
             left part of the frame the robot turns left,
             and if it lies in the right part it turns right.
* Example Call: doorway_movement(300)
"""
if mid_point > 80 and mid_point < 200:
for i in xrange(5):
data_send(0, 4)
time.sleep(0.1)
if mid_point > 200 and mid_point < 320:
for i in xrange(5):
data_send(0, 2)
time.sleep(0.1)
if mid_point > 320 and mid_point < 440:
for i in xrange(5):
data_send(4, 0)
time.sleep(0.1)
if mid_point > 440 and mid_point < 560:
for i in xrange(5):
data_send(2, 0)
time.sleep(0.1)
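# Descriptive note (not in the original source): the midpoint bands above map to
# (left, right) speed-mode pairs as follows, each sent five times with 0.1 s pauses:
#   80-200   -> data_send(0, 4)
#   200-320  -> data_send(0, 2)
#   320-440  -> data_send(4, 0)
#   440-560  -> data_send(2, 0)
# Midpoints outside these bands leave the heading unchanged.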
def back_movement(GLOBAL_DEPTH_MAP):
"""
* Function Name:back_movement
* Input: depth map
    * Output: If an object is very near, the robot backs away.
    * Logic: If the middle frame is very noisy, or any part of the frame
             is noisy, the robot turns back.
* Example Call: back_movement(depth_map)
"""
if GLOBAL_DEPTH_MAP[0:479, 200:439].mean()\
> 200 or GLOBAL_DEPTH_MAP[0:479, 0:199].mean()\
> 200 or GLOBAL_DEPTH_MAP[0:479, 440:639].mean() > 200:
SERIALDATA.write('\x50')
time.sleep(3)
def start():
global XAXIS
global YAXIS
global TEST_CASES
global DIRECTION
global GLOBAL_DEPTH_MAP
plt.ion()
plt.figure()
XAXIS = []
YAXIS = []
ctx = freenect.init()
dev = freenect.open_device(ctx, freenect.num_devices(ctx) - 1)
freenect.set_tilt_degs(dev, 20)
freenect.close_device(dev)
TEST_CASES = [True, True, True]
DIRECTION = 0
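# Grab and discard a few frames, presumably so the depth stream settles
# after tilting the Kinect (initial_map itself is never used afterwards).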
for i in xrange(5):
initial_map = get_depth()
while True:
GLOBAL_DEPTH_MAP = get_depth() #returns the depth frame
back_movement(GLOBAL_DEPTH_MAP)
contoursright = contours_return(GLOBAL_DEPTH_MAP, -10)
contoursleft = contours_return(GLOBAL_DEPTH_MAP, 10)
door_detection(contoursright, contoursleft)
if DOOR_FLAG:
door_movement()
else: regular_movement()
cv2.imshow('final', GLOBAL_DEPTH_MAP)
if cv2.waitKey(1) != -1:
SERIALDATA.write('\x35')
SERIALDATA.close()
break
cv2.destroyAllWindows()
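# Minimal entry-point sketch (an assumption, not part of the original file):
# running the module directly would simply hand control to start(), which
# loops until a key is pressed in the OpenCV window.
if __name__ == '__main__':
    start()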
|
|
import turbogears
from ecrm.model import *
from turbogears import controllers, expose, flash, redirect, paginate, identity, widgets
import re
import logging, os, random, traceback, zipfile, shutil
from datetime import datetime
from ecrm.util.excel_helper import *
from cherrypy.lib.cptools import serveFile
import zlib
def export(kw):
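# kw is expected to carry "header_id" plus "|"-separated "podetail_ids" and
# "sln_ids"; builds the Kohls PO excel for the selected rows and serves it
# as a download.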
h = KsPoHeader.get(id=kw["header_id"])
######### update hangtag view in excel
conn = KsPoDetail._connection
sql = '''select h.hangtag from (select header_id,hangtag from kohls_po_detail
group by header_id,hangtag) h
where h.header_id = %s
''' % (kw["header_id"])
rs = conn.queryAll(sql)
h.hangtag = ",".join([item[0] for item in rs])
#########
podetail_ids = [id for id in kw["podetail_ids"].split("|") if id]
sln_ids = [id for id in kw["sln_ids"].split("|") if id]
po1Objs = [KsPoDetail.get(id=id) for id in podetail_ids ]
slnObjs = [SLNDetail.get(id=id) for id in sln_ids]
isAdmin = "Admin" in identity.current.groups
if not isAdmin:
# if any of the items has been exported before, it can't be exported again
if any(obj.hasExported != 0 for obj in po1Objs + slnObjs):
flash("Some items in the list have been exported before ,please contact the admin if you want to generate the excel again.")
raise redirect("/kohlspo/detail?id=%s" %kw["header_id"])
result = []
hangtag = None # used to fill in the item name in the excel header, not a real attr of the KsPoHeader
poUOM = None # used to generate the excel file name; just use the first measurementCode from the detail or the SLN
total_qty = 0
###########
for d in po1Objs:
if poUOM is None:
poUOM = d.measurementCode
if hangtag is None:
hangtag = d.hangtag
total_qty += int(d.poQty)
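# Split the size string into its numeric and alpha parts after dropping
# empty groups, e.g. (assumed typical data) "32 WAIST" -> ["32", "WAIST"]
# and "XL" -> ["XL"].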
tmp_list = re.split('^(\d*)\s*(\D*)$',(d.size))
_list = [n for n in tmp_list if n]
upcORean = d.upc if d.upc else d.eanCode
if len(_list) > 1:
result.append( (d.styleNo,d.colorCode,d.colorDesc,d.deptNo,d.classNo,d.subclassNo,
upcORean,d.retailPrice,_list[0],_list[1].upper(),"","","","","",d.poQty) )
else:
result.append( (d.styleNo,d.colorCode,d.colorDesc,d.deptNo,d.classNo,d.subclassNo,
upcORean,d.retailPrice,d.size.upper(),"","","","","","",d.poQty) )
if not isAdmin: d.set(hasExported=1)
for s in slnObjs:
logging.info(str(s))
if poUOM is None:
poUOM = s.poDetail.measurementCode
if hangtag is None:
hangtag = s.poDetail.hangtag
po1Qty = s.poDetail.poQty
total_qty += int(s.qty*po1Qty)
tmp_list = re.split('^(\d*)\s*(\D*)$',(s.size))
_list = [n for n in tmp_list if n]
upcORean = s.upc if s.upc else s.eanCode
if len(_list) > 1:
result.append( (s.styleNo,s.colorCode,s.colorDesc,s.deptNo,s.classNo,s.subclassNo,
upcORean,s.retailPrice,_list[0],_list[1].upper(),"","","","","",s.qty*po1Qty) )
else:
result.append( (s.styleNo,s.colorCode,s.colorDesc,s.deptNo,s.classNo,s.subclassNo,
upcORean,s.retailPrice,s.size.upper(),"","","","","","",s.qty*po1Qty) )
if not isAdmin: s.set(hasExported=1)
#h.hangtag = hangtag
get = turbogears.config.get
current = datetime.now()
dateStr = current.today().strftime("%Y%m%d")
fileDir = os.path.join(get("ecrm.downloads"),"kohlsPO_download",identity.current.user_name,dateStr)
#logging.info(fileDir)
if not os.path.exists(fileDir):
os.makedirs(fileDir)
timeStr = current.time().strftime("%H%M%S")
rn = random.randint(0,10000)
username = identity.current.user_name
templatePath = os.path.join(os.path.abspath(os.path.curdir),"report_download/TEMPLATE/Kohls_TEMPLATE.xls")
#The following is used to gen the excel file name in the special format.
if poUOM == 'AS':
_UOM = 'Assorted'
elif poUOM == 'EA':
_UOM = 'Each'
else:
_UOM = poUOM or '' # fallback so an unexpected UOM code does not raise NameError
if h.poType == 'BK' :
_poType = ''
else :
_poType = h.poType + '_'
if h.poPurposeCode == '07' :
_purposeCode = '_Rev'
else :
_purposeCode = ''
if isAdmin:
xlsFileName = "%s%s%s_%s.xls" % (_poType, h.poNo, _purposeCode, _UOM)
else:
h.set(exportVersion=h.exportVersion+1) #update the export version
xlsFileName = "%s%s%s_%s-%d.xls" % (_poType, h.poNo, _purposeCode, _UOM , h.exportVersion )
#******finish the excel file name gen process ***************
filename = os.path.join(fileDir,xlsFileName)
ke = KohlsPOExcel(templatePath = templatePath,destinationPath = filename)
try:
ke.inputData( POHeader=h,data=result,qty =total_qty)
ke.outputData()
if "Admin" not in identity.current.groups:
_updateExportFlag(h)
return serveFile(filename, "application/x-download", "attachment")
except:
traceback.print_exc()
if ke:
ke.clearData()
flash("Error occur in the Excel Exporting !")
raise redirect("index")
def productExport(kw):
h = KsPoHeader.get(id=kw["header_id"])
######### update hangtag view in excel
conn = KsPoDetail._connection
sql = '''select h.hangtag from (select header_id,hangtag from kohls_po_detail
group by header_id,hangtag) h
where h.header_id = %s
''' % (kw["header_id"])
rs = conn.queryAll(sql)
h.hangtag = ",".join([item[0] for item in rs])
#########
podetail_ids = [id for id in kw["podetail_ids"].split("|") if id]
sln_ids = [id for id in kw["sln_ids"].split("|") if id]
po1Objs = [KsPoDetail.get(id=id) for id in podetail_ids ]
slnObjs = [SLNDetail.get(id=id) for id in sln_ids]
isAdmin = "Admin" in identity.current.groups
if not isAdmin:
# if any of the items has been exported before, it can't be exported again
if any(obj.hasExported != 0 for obj in po1Objs + slnObjs):
flash("Some items in the list have been exported before ,please contact the admin if you want to generate the excel again.")
raise redirect("/kohlspo/detail?id=%s" %kw["header_id"])
result = []
hangtag = None # used to fill in the item name in the excel header, not a real attr of the KsPoHeader
poUOM = None # used to generate the excel file name; just use the first measurementCode from the detail or the SLN
eanCode = None
upcCode = None
total_qty = 0
###########
for d in po1Objs:
if poUOM is None:
poUOM = d.measurementCode
if hangtag is None:
hangtag = d.hangtag
if eanCode is None:
eanCode = d.eanCode if d.eanCode else None
if upcCode is None:
upcCode = d.upc if d.upc else None
total_qty += int(d.poQty)
tmp_list = re.split('^(\d*)\s*(\D*)$',(d.size))
_list = [n for n in tmp_list if n]
upcORean = d.upc if d.upc else d.eanCode
if len(_list) > 1:
result.append( (d.styleNo,d.colorCode,d.colorDesc,d.deptNo,d.classNo,d.subclassNo,
upcORean,d.retailPrice,_list[0],_list[1].upper(),d.productDesc.upper().split(":")[0],"","","","","",d.poQty) )
else:
result.append( (d.styleNo,d.colorCode,d.colorDesc,d.deptNo,d.classNo,d.subclassNo,
upcORean,d.retailPrice,d.size.upper(),"",d.productDesc.upper().split(":")[0],"","","","","",d.poQty) )
if not isAdmin: d.set(hasExported=1)
for s in slnObjs:
logging.info(str(s))
if poUOM is None:
poUOM = s.poDetail.measurementCode
if hangtag is None:
hangtag = s.poDetail.hangtag
if eanCode is None:
eanCode = s.eanCode if s.eanCode else None
if upcCode is None:
upcCode = s.upc if s.upc else None
po1Qty = s.poDetail.poQty
total_qty += int(s.qty*po1Qty)
tmp_list = re.split('^(\d*)\s*(\D*)$',(s.size))
_list = [n for n in tmp_list if n]
upcORean = s.upc if s.upc else s.eanCode
if len(_list) > 1:
result.append( (s.styleNo,s.colorCode,s.colorDesc,s.deptNo,s.classNo,s.subclassNo,
upcORean,s.retailPrice,_list[0],_list[1].upper(),s.productDesc.upper().split(":")[0],"","","","","",s.qty*po1Qty) )
else:
result.append( (s.styleNo,s.colorCode,s.colorDesc,s.deptNo,s.classNo,s.subclassNo,
upcORean,s.retailPrice,s.size.upper(),"",s.productDesc.upper().split(":")[0],"","","","","",s.qty*po1Qty) )
if not isAdmin: s.set(hasExported=1)
#h.hangtag = hangtag
h.upc = upcCode
h.eanCode = eanCode
get = turbogears.config.get
current = datetime.now()
dateStr = current.today().strftime("%Y%m%d")
fileDir = os.path.join(get("ecrm.downloads"),"kohlsPO_download",identity.current.user_name,dateStr)
#logging.info(fileDir)
if not os.path.exists(fileDir):
os.makedirs(fileDir)
timeStr = current.time().strftime("%H%M%S")
rn = random.randint(0,10000)
username = identity.current.user_name
templatePath = os.path.join(os.path.abspath(os.path.curdir),"report_download/TEMPLATE/Kohls_PRODUCT_TEMPLATE.xls")
#The following is used to gen the excel file name in the special format.
if poUOM == 'AS':
_UOM = 'Assorted'
elif poUOM == 'EA':
_UOM = 'Each'
else:
_UOM = poUOM or '' # fallback so an unexpected UOM code does not raise NameError
if h.poType == 'BK' :
_poType = ''
else :
_poType = h.poType + '_'
if h.poPurposeCode == '07' :
_purposeCode = '_Rev'
else :
_purposeCode = ''
if isAdmin:
xlsFileName = "%s%s%s_%s.xls" % (_poType, h.poNo, _purposeCode, _UOM)
else:
h.set(exportVersion=h.exportVersion+1) #update the export version
xlsFileName = "%s%s%s_%s-%d.xls" % (_poType, h.poNo, _purposeCode, _UOM , h.exportVersion )
#******finish the excel file name gen process ***************
filename = os.path.join(fileDir,xlsFileName)
ke = KohlsPOExcel(templatePath = templatePath,destinationPath = filename)
try:
ke.inputData( POHeader=h,data=result,qty =total_qty)
ke.outputData()
if "Admin" not in identity.current.groups:
_updateExportFlag(h)
return serveFile(filename, "application/x-download", "attachment")
except:
traceback.print_exc()
if ke:
ke.clearData()
flash("Error occur in the Excel Exporting !")
raise redirect("index")
def exportBatch(kw,exportType=None):
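# Builds one excel file per selected PO header (via _createExcel or
# product_createExcel depending on exportType), zips them together and
# serves the archive as a download.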
template_config = {"_export":"report_download/TEMPLATE/Kohls_TEMPLATE.xls",
"product_export":"report_download/TEMPLATE/Kohls_PRODUCT_TEMPLATE.xls",
}
header_ids = kw["header_ids"]
if type(header_ids) != list:
header_ids = [header_ids]
get = turbogears.config.get
current = datetime.now()
dateStr = current.today().strftime("%Y%m%d%H%M%S")
fileDir = os.path.join(get("ecrm.downloads"),"kohlsPO_download",identity.current.user_name,dateStr)
if not os.path.exists(fileDir):
os.makedirs(fileDir)
dlzipFile = os.path.join(fileDir,"report_%s.zip" % current.strftime("%Y%m%d%H%M%S"))
try:
templatePath = os.path.join(os.path.abspath(os.path.curdir),template_config[exportType])
rm = random.randint(1,1000)
copyTemplatePath = os.path.join(fileDir,"Kohls_TEMPLATE_%d.xls" %rm)
#copy the template to the dest folder to avoid the conflict.
shutil.copyfile(templatePath,copyTemplatePath)
isAdmin = "Admin" in identity.current.groups #add by ray on 29/5
fileList = []
for header_id in header_ids:
#print header_id,fileDir,copyTemplatePath,"=========================="
rs = KsPoHeader.get(id=header_id)
if rs.latestFlag < 1 and not isAdmin:# and turbogears.identity.user.user_name <> 'admin':
continue
if exportType == "_export":
(flag,fileName) = _createExcel(header_id,fileDir,copyTemplatePath)
else:
(flag,fileName) = product_createExcel(header_id,fileDir,copyTemplatePath)
if flag:
fileList.append( fileName )
dlzip = zipfile.ZipFile(dlzipFile, "w", zipfile.ZIP_DEFLATED)
for fl in fileList:
logging.info(os.path.abspath(fl))
dlzip.write(os.path.abspath(str(fl)),os.path.basename(str(fl)))
dlzip.close()
try:
for fl in fileList:
os.remove(fl)
os.remove(copyTemplatePath)
except:
pass
return serveFile(dlzipFile, "application/x-download", "attachment")
except:
traceback.print_exc()
flash("Error occur in the Excel Exporting !")
raise redirect("index")
def _createExcel(header_id,fileDir,templateFilePath):
h = KsPoHeader.get(id=header_id)
######### update hangtag view in excel 5-18
conn = KsPoDetail._connection
sql = '''select h.hangtag from (select header_id,hangtag from kohls_po_detail
group by header_id,hangtag) h
where h.header_id = %s
''' % (header_id)
rs_hang = conn.queryAll(sql)
h.hangtag = ",".join([item[0] for item in rs_hang])
#########
ds = KsPoDetail.selectBy(header=h)
result = []
hangtag = None # used to fill in the item name in the excel header, not a real attr of the KsPoHeader
poUOM = None # used to generate the excel file name; just use the first measurementCode from the detail or the SLN
isAdmin = "Admin" in identity.current.groups
for d in ds:
hangtag = d.hangtag
if poUOM is None:
poUOM = d.measurementCode
if d.measurementCode == "EA":
tmp_list = re.split('^(\d*)\s*(\D*)$',(d.size))
_list = [n for n in tmp_list if n]
upcORean = d.upc if d.upc else d.eanCode
if len(_list) > 1:
result.append( (d.styleNo,d.colorCode,d.colorDesc,d.deptNo,d.classNo,d.subclassNo,
upcORean,d.retailPrice,_list[0],_list[1].upper(),"","","","","",d.poQty) )
else:
result.append( (d.styleNo,d.colorCode,d.colorDesc,d.deptNo,d.classNo,d.subclassNo,
upcORean,d.retailPrice,d.size.upper(),"","","","","","",d.poQty) )
if not isAdmin : d.hasExported = 1
elif d.measurementCode == "AS":
po1Qty = d.poQty
ss = SLNDetail.selectBy(poDetail = d)
for s in ss :
tmp_list = re.split('^(\d*)\s*(\D*)$',(s.size))
_list = [n for n in tmp_list if n]
upcORean = s.upc if s.upc else s.eanCode
if len(_list) > 1:
result.append( (s.styleNo,s.colorCode,s.colorDesc,s.deptNo,s.classNo,s.subclassNo,
upcORean,s.retailPrice,_list[0],_list[1].upper(),"","","","","",s.qty*po1Qty) )
else:
result.append( (s.styleNo,s.colorCode,s.colorDesc,s.deptNo,s.classNo,s.subclassNo,
upcORean,s.retailPrice,s.size.upper(),"","","","","","",s.qty*po1Qty) )
if not isAdmin : s.hasExported = 1
#h.hangtag = hangtag
#The following is used to gen the excel file name in the special format.
#1:EA/AS 2:BK.. 3:00/07 3:po#
xlsFileName = _generateFileName( poUOM,h.poType,h.poPurposeCode,h.poNo)
#******finish the excel file name gen process ***************
filename = os.path.join(fileDir,xlsFileName)
# ke = KohlsPOExcel(templatePath = templatePath,destinationPath = filename)
ke = KohlsPOExcel(templatePath = templateFilePath,destinationPath = filename)
try:
ke.inputData( POHeader=h,data=result)
ke.outputData()
if "Admin" not in identity.current.groups:
_updateExportFlag(h)
return (1,filename)
# return serveFile(filename, "application/x-download", "attachment")
except:
traceback.print_exc()
if ke:
ke.clearData()
return (0,"Error occur in the Excel Exporting !")
def product_createExcel(header_id,fileDir,templateFilePath):
h = KsPoHeader.get(id=header_id)
######### update hangtag view in excel 5-18
conn = KsPoDetail._connection
sql = '''select h.hangtag from (select header_id,hangtag from kohls_po_detail
group by header_id,hangtag) h
where h.header_id = %s
''' % (header_id)
rs_hang = conn.queryAll(sql)
h.hangtag = ",".join([item[0] for item in rs_hang])
#########
ds = KsPoDetail.selectBy(header=h)
result = []
hangtag = None # used to fill in the item name in the excel header, not a real attr of the KsPoHeader
poUOM = None # used to generate the excel file name; just use the first measurementCode from the detail or the SLN
isAdmin = "Admin" in identity.current.groups
for d in ds:
hangtag = d.hangtag
if poUOM is None:
poUOM = d.measurementCode
if d.measurementCode == "EA":
tmp_list = re.split('^(\d*)\s*(\D*)$',(d.size))
_list = [n for n in tmp_list if n]
upcORean = d.upc if d.upc else d.eanCode
if len(_list) > 1:
result.append( (d.styleNo,d.colorCode,d.colorDesc,d.deptNo,d.classNo,d.subclassNo,
upcORean,d.retailPrice,_list[0],_list[1].upper(),d.productDesc.upper().split(":")[0],"","","","","",d.poQty) )
else:
result.append( (d.styleNo,d.colorCode,d.colorDesc,d.deptNo,d.classNo,d.subclassNo,
upcORean,d.retailPrice,d.size.upper(),"",d.productDesc.upper().split(":")[0],"","","","","",d.poQty) )
if not isAdmin : d.hasExported = 1
elif d.measurementCode == "AS":
po1Qty = d.poQty
ss = SLNDetail.selectBy(poDetail = d)
for s in ss :
tmp_list = re.split('^(\d*)\s*(\D*)$',(s.size))
_list = [n for n in tmp_list if n]
upcORean = s.upc if s.upc else s.eanCode
if len(_list) > 1:
result.append( (s.styleNo,s.colorCode,s.colorDesc,s.deptNo,s.classNo,s.subclassNo,
upcORean,s.retailPrice,_list[0],_list[1].upper(),s.productDesc.upper().split(":")[0],"","","","","",s.qty*po1Qty) )
else:
result.append( (s.styleNo,s.colorCode,s.colorDesc,s.deptNo,s.classNo,s.subclassNo,
upcORean,s.retailPrice,s.size.upper(),"",s.productDesc.upper().split(":")[0],"","","","","",s.qty*po1Qty) )
if not isAdmin : s.hasExported = 1
#h.hangtag = hangtag
#The following is used to gen the excel file name in the special format.
#1:EA/AS 2:BK.. 3:00/07 3:po#
xlsFileName = _generateFileName( poUOM,h.poType,h.poPurposeCode,h.poNo)
#******finish the excel file name gen process ***************
filename = os.path.join(fileDir,xlsFileName)
# ke = KohlsPOExcel(templatePath = templatePath,destinationPath = filename)
ke = KohlsPOExcel(templatePath = templateFilePath,destinationPath = filename)
try:
ke.inputData( POHeader=h,data=result)
ke.outputData()
if "Admin" not in identity.current.groups:
_updateExportFlag(h)
return (1,filename)
# return serveFile(filename, "application/x-download", "attachment")
except:
traceback.print_exc()
if ke:
ke.clearData()
return (0,"Error occur in the Excel Exporting !")
def _generateFileName(poUOM,poType,poPurposeCode,poNo):
if poUOM == 'AS':
_UOM = 'Assorted'
elif poUOM == 'EA':
_UOM = 'Each'
else:
_UOM = poUOM or '' # fallback so an unexpected UOM code does not raise NameError
if poType == 'BK' :
_poType = ''
else :
_poType = poType + '_'
if poPurposeCode == '07' :
_purposeCode = '_Rev'
else :
_purposeCode = ''
return "%s%s%s_%s.xls" % (_poType, poNo, _purposeCode, _UOM )
def _updateExportFlag(poHeader):
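# Marks every detail line (and its SLNs for assorted items) as exported and,
# for Taiwan users, stamps the header with region, owner and a remark.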
if not poHeader or not poHeader.items:
return
try:
userRegion = identity.current.user.getUserRegion()
if userRegion and userRegion.name == 'Taiwan':
poHeader.region = userRegion
poHeader.belong = identity.current.user_name
poHeader.soNo = "Taiwan Order"
poHeader.remark = str(identity.current.user_name) + str(datetime.now())
for d in poHeader.items:
if d.measurementCode == "EA":
d.hasExported = 1
elif d.measurementCode == "AS" and d.slns:
for s in d.slns:
s.hasExported = 1
except:
traceback.print_exc()
return
|
|
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponseRedirect, Http404, HttpResponse
from django.template import RequestContext
from django.views.decorators.csrf import csrf_exempt
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.core.exceptions import ObjectDoesNotExist
from django.conf import settings
# Create your views here.
from miniblog.models import Article, Author, Tag, Classification, RootClassification
from miniblog.forms import ArticleForm
from miniblog.forms import TagForm
def blog_note_show(request, article_id):
article = get_object_or_404(Article, pk=article_id)
#article.content = mistune.markdown(article.content)
if request.is_ajax():
return render(request, 'miniblog/blog_note_show_ajax.html', {"article": article})
else:
articles_list = Article.objects.all()
articles_list = pagination(request, articles_list)
classes_list = Classification.objects.all()
rootClass_list = RootClassification.objects.all()
return render(request, 'miniblog/blog_note_show.html', {
"classes":classes_list,
"articles": articles_list,
"article": article,
"current_class": "all",
"root_classes": rootClass_list
})
def blog_note_all(request):
articles_list = Article.objects.all()
articles_list = pagination(request, articles_list)
# if is ajax request
if request.is_ajax():
return render(request, 'miniblog/blog_note_article_list_ajax.html',
{"articles": articles_list, "current_class": "all"})
# not an ajax request should return the whole page
else:
classes_list = Classification.objects.all()
article = articles_list[0]
rootClass_list = RootClassification.objects.all()
return render(request, 'miniblog/blog_note_show.html', {
"classes":classes_list,
"articles": articles_list,
"article": article,
"current_class": "all",
"root_classes": rootClass_list
})
def blog_note_by_class(request, class_id):
try:
classobject = Classification.objects.get(pk = int(class_id))
rootClass_list = RootClassification.objects.all()
articles_list = classobject.article_set.all()
articles_list = pagination(request, articles_list)
if request.is_ajax():
return render(request, 'miniblog/blog_note_article_list_ajax.html',
{"articles": articles_list, "current_class": class_id})
else:
classes_list = Classification.objects.all()
article = articles_list[0]
return render(request, 'miniblog/blog_note_show.html', {
"classes":classes_list,
"articles": articles_list,
"current_class": class_id,
"article": article,
"root_classes": rootClass_list
})
# if no article or no class is found
except ObjectDoesNotExist:
return blog_note_all(request)
def pagination(request, queryset):
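# Returns the Paginator page for the requested ?page= number when
# settings.PAGINATION is enabled, otherwise returns the queryset unchanged.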
if settings.PAGINATION:
pageNO = request.GET.get('page', 1)
num_in_page = settings.NUM_IN_PAGE
paginator = Paginator(queryset, num_in_page)
articles_list = queryset
try:
articles_list = paginator.page(pageNO)
print '%r' %('pagevalid')
except PageNotAnInteger:
articles_list = paginator.page(1)
print '%r' %('PageNotAnInteger')
except EmptyPage:
print '%r' %('EmptyPage')
articles_list = paginator.page(paginator.num_pages)
finally:
return articles_list
else:
return queryset
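# The helper above assumes two project settings (illustrative values only,
# not taken from this repository's settings module):
#   PAGINATION = True    # switch pagination on or off globally
#   NUM_IN_PAGE = 10     # articles shown per page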
def blog_note(request):
class_id = request.GET.get('class', 'all')
# if no filter is specified
if class_id == 'all':
return blog_note_all(request)
# the classname is specified
else:
return blog_note_by_class(request, int(class_id))
def article_list(request):
articles_list = Article.objects.all()
paginator = Paginator(articles_list, 1)
page = request.GET.get('page')
tags = Tag.objects.all()
try:
articles = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
articles = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
articles = paginator.page(paginator.num_pages)
return render(request, 'miniblog/article_list.html', {"articles": articles, "tags": tags})
def article_show(request, article_id):
article = get_object_or_404(Article, pk=article_id)
#article.content = mistune.markdown(article.content)
return render(request, 'miniblog/article_show.html', {"article": article})
def article_filter(request, tag_id = ''):
tags = Tag.objects.all()
tag = Tag.objects.get(id = tag_id)
articles = tag.article_set.all()
return render(request, 'miniblog/article_filter.html', {
"articles": articles,
"tag": tag,
"tags": tags
})
@csrf_exempt
def article_add(request):
if request.method == 'POST':
# form = ArticleForm(request.POST)
# tag = TagForm(request.POST)
# if form.is_valid() and tag.is_valid():
# cd = form.cleaned_data
# cdtag = tag.cleaned_data
# tagname = cdtag['tag_name']
# for taglist in tagname.split():
# Tag.objects.get_or_create(tag_name=taglist.strip())
# title = cd['caption']
# author = Author.objects.get(id=1)
# content = cd['content']
# article = Article(caption=title, author=author, content=content)
# article.save()
# for taglist in tagname.split():
# article.tags.add(Tag.objects.get(tag_name=taglist.strip()))
# article.save()
# id = Article.objects.order_by('-publish_time')[0].id
# return HttpResponseRedirect('/miniblog/article/%s' % id)
try:
blogTitle = request.POST.get("blogTitle")
blogAuthor = Author.objects.get(name = request.POST.get("blogAuthor"))
blogContent = request.POST.get("blogContent")
blogClass = Classification.objects.get(pk = int(request.POST.get("blogClass")))
article = Article(caption=blogTitle, author=blogAuthor, content=blogContent, classification=blogClass)
article.save()
response = HttpResponse('success')
response['Access-Control-Allow-Origin'] = "http://localhost:3000"
print response
return response
except KeyError:
response = HttpResponse('failed')
response['Access-Control-Allow-Origin'] = "http://localhost:3000"
return response
else:
form = ArticleForm()
tag = TagForm(initial = {'tag_name': 'notages'})
return render(request, 'miniblog/article_add.html',{'form': form, 'tag': tag})
def article_update(request, article_id = ""):
id = article_id
# if the form is submitted
if request.method == 'POST':
form = ArticleForm(request.POST)
tag = TagForm(request.POST)
if form.is_valid() and tag.is_valid():
cd = form.cleaned_data
cdtag = tag.cleaned_data
tagname = cdtag['tag_name']
tagnamelist = tagname.split()
for taglist in tagnamelist:
Tag.objects.get_or_create(tag_name=taglist.strip())
title = cd['caption']
content = cd['content']
article = Article.objects.get(id=id)
if article:
article.caption = title
article.content = content
article.save()
# add the new tags
for taglist in tagnamelist:
article.tags.add(Tag.objects.get(tag_name=taglist.strip()))
article.save()
tags = article.tags.all()
# delete the old tags
for tagname in tags:
tagname = unicode(str(tagname), "utf-8")
if tagname not in tagnamelist:
notag = article.tags.get(tag_name=tagname)
article.tags.remove(notag)
else:
# create a new article from the submitted data
article = Article(caption=title, content=content)
article.save()
return HttpResponseRedirect('/miniblog/article/%s' % id)
# if the form is not submitted
else:
# look up the article; raise 404 if it cannot be found
try:
article = Article.objects.get(id=id)
except Exception:
raise Http404
# create the first form
form = ArticleForm(initial={'caption': article.caption, 'content': article.content}, auto_id=False)
tags = article.tags.all()
# if the article has tags then populate the form with the tags
if tags:
taginit = ''
for x in tags:
taginit += str(x) + ' '
tag = TagForm(initial={'tag_name': taginit})
# else create an empty form
else:
tag = TagForm()
return render(request, 'miniblog/article_add.html', {'article': article, 'form': form,
'id': id, 'tag': tag})
# this is equal to the render() method
# render_to_response('article_add.html',
# {'article': article, 'form': form, 'id': id, 'tag': tag},
# context_instance=RequestContext(request))
def article_delete(request, article_id=""):
try:
article = Article.objects.get(id = article_id)
except Exception:
raise Http404
# if the article was found, delete it
if article:
article.delete()
return HttpResponseRedirect("/miniblog/articlelist/")
# if the article could not be found, show the list
articles = Article.objects.all()
return render(request, 'miniblog/article_list.html', {"articles": articles})
|
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class filteraction(base_resource) :
""" Configuration for filter action resource. """
def __init__(self) :
self._name = ""
self._qual = ""
self._servicename = ""
self._value = ""
self._respcode = 0
self._page = ""
self._isdefault = False
self._builtin = []
self.___count = 0
@property
def name(self) :
"""Name for the filtering action. Must begin with a letter, number, or the underscore character (_). Other characters allowed, after the first character, are the hyphen (-), period (.) hash (#), space ( ), at sign (@), equals (=), and colon (:) characters. Choose a name that helps identify the type of action. The name of a filter action cannot be changed after it is created.
CLI Users: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my action" or 'my action').<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
"""Name for the filtering action. Must begin with a letter, number, or the underscore character (_). Other characters allowed, after the first character, are the hyphen (-), period (.) hash (#), space ( ), at sign (@), equals (=), and colon (:) characters. Choose a name that helps identify the type of action. The name of a filter action cannot be changed after it is created.
CLI Users: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my action" or 'my action').<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def qual(self) :
"""Qualifier, which is the action to be performed. The qualifier cannot be changed after it is set. The available options function as follows:
ADD - Adds the specified HTTP header.
RESET - Terminates the connection, sending the appropriate termination notice to the user's browser.
FORWARD - Redirects the request to the designated service. You must specify either a service name or a page, but not both.
DROP - Silently deletes the request, without sending a response to the user's browser.
CORRUPT - Modifies the designated HTTP header to prevent it from performing the function it was intended to perform, then sends the request/response to the server/browser.
ERRORCODE. Returns the designated HTTP error code to the user's browser (for example, 404, the standard HTTP code for a non-existent Web page).<br/>Possible values = reset, add, corrupt, forward, errorcode, drop.
"""
try :
return self._qual
except Exception as e:
raise e
@qual.setter
def qual(self, qual) :
"""Qualifier, which is the action to be performed. The qualifier cannot be changed after it is set. The available options function as follows:
ADD - Adds the specified HTTP header.
RESET - Terminates the connection, sending the appropriate termination notice to the user's browser.
FORWARD - Redirects the request to the designated service. You must specify either a service name or a page, but not both.
DROP - Silently deletes the request, without sending a response to the user's browser.
CORRUPT - Modifies the designated HTTP header to prevent it from performing the function it was intended to perform, then sends the request/response to the server/browser.
ERRORCODE. Returns the designated HTTP error code to the user's browser (for example, 404, the standard HTTP code for a non-existent Web page).<br/>Possible values = reset, add, corrupt, forward, errorcode, drop
"""
try :
self._qual = qual
except Exception as e:
raise e
@property
def servicename(self) :
"""Service to which to forward HTTP requests. Required if the qualifier is FORWARD.<br/>Minimum length = 1.
"""
try :
return self._servicename
except Exception as e:
raise e
@servicename.setter
def servicename(self, servicename) :
"""Service to which to forward HTTP requests. Required if the qualifier is FORWARD.<br/>Minimum length = 1
"""
try :
self._servicename = servicename
except Exception as e:
raise e
@property
def value(self) :
"""String containing the header_name and header_value. If the qualifier is ADD, specify <header_name>:<header_value>. If the qualifier is CORRUPT, specify only the header_name.<br/>Minimum length = 1.
"""
try :
return self._value
except Exception as e:
raise e
@value.setter
def value(self, value) :
"""String containing the header_name and header_value. If the qualifier is ADD, specify <header_name>:<header_value>. If the qualifier is CORRUPT, specify only the header_name.<br/>Minimum length = 1
"""
try :
self._value = value
except Exception as e:
raise e
@property
def respcode(self) :
"""Response code to be returned for HTTP requests (for use with the ERRORCODE qualifier).<br/>Minimum length = 1.
"""
try :
return self._respcode
except Exception as e:
raise e
@respcode.setter
def respcode(self, respcode) :
"""Response code to be returned for HTTP requests (for use with the ERRORCODE qualifier).<br/>Minimum length = 1
"""
try :
self._respcode = respcode
except Exception as e:
raise e
@property
def page(self) :
"""HTML page to return for HTTP requests (For use with the ERRORCODE qualifier).<br/>Minimum length = 1.
"""
try :
return self._page
except Exception as e:
raise e
@page.setter
def page(self, page) :
"""HTML page to return for HTTP requests (For use with the ERRORCODE qualifier).<br/>Minimum length = 1
"""
try :
self._page = page
except Exception as e:
raise e
@property
def isdefault(self) :
"""A value of true is returned if it is a default filteraction.
"""
try :
return self._isdefault
except Exception as e:
raise e
@property
def builtin(self) :
""".<br/>Possible values = MODIFIABLE, DELETABLE, IMMUTABLE, PARTITION_ALL.
"""
try :
return self._builtin
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(filteraction_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.filteraction
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
if (self.name) :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
""" Use this API to add filteraction.
"""
try :
if type(resource) is not list :
addresource = filteraction()
addresource.name = resource.name
addresource.qual = resource.qual
addresource.servicename = resource.servicename
addresource.value = resource.value
addresource.respcode = resource.respcode
addresource.page = resource.page
return addresource.add_resource(client)
else :
if (resource and len(resource) > 0) :
addresources = [ filteraction() for _ in range(len(resource))]
for i in range(len(resource)) :
addresources[i].name = resource[i].name
addresources[i].qual = resource[i].qual
addresources[i].servicename = resource[i].servicename
addresources[i].value = resource[i].value
addresources[i].respcode = resource[i].respcode
addresources[i].page = resource[i].page
result = cls.add_bulk_request(client, addresources)
return result
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
""" Use this API to delete filteraction.
"""
try :
if type(resource) is not list :
deleteresource = filteraction()
if type(resource) != type(deleteresource):
deleteresource.name = resource
else :
deleteresource.name = resource.name
return deleteresource.delete_resource(client)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
deleteresources = [ filteraction() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i]
else :
if (resource and len(resource) > 0) :
deleteresources = [ filteraction() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
result = cls.delete_bulk_request(client, deleteresources)
return result
except Exception as e :
raise e
@classmethod
def update(cls, client, resource) :
""" Use this API to update filteraction.
"""
try :
if type(resource) is not list :
updateresource = filteraction()
updateresource.name = resource.name
updateresource.servicename = resource.servicename
updateresource.value = resource.value
updateresource.respcode = resource.respcode
updateresource.page = resource.page
return updateresource.update_resource(client)
else :
if (resource and len(resource) > 0) :
updateresources = [ filteraction() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].name = resource[i].name
updateresources[i].servicename = resource[i].servicename
updateresources[i].value = resource[i].value
updateresources[i].respcode = resource[i].respcode
updateresources[i].page = resource[i].page
result = cls.update_bulk_request(client, updateresources)
return result
except Exception as e :
raise e
@classmethod
def unset(cls, client, resource, args) :
""" Use this API to unset the properties of filteraction resource.
Properties that need to be unset are specified in args array.
"""
try :
if type(resource) is not list :
unsetresource = filteraction()
if type(resource) != type(unsetresource):
unsetresource.name = resource
else :
unsetresource.name = resource.name
return unsetresource.unset_resource(client, args)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
unsetresources = [ filteraction() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].name = resource[i]
else :
if (resource and len(resource) > 0) :
unsetresources = [ filteraction() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].name = resource[i].name
result = cls.unset_bulk_request(client, unsetresources, args)
return result
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
""" Use this API to fetch all the filteraction resources that are configured on netscaler.
"""
try :
if not name :
obj = filteraction()
response = obj.get_resources(client, option_)
else :
if type(name) != cls :
if type(name) is not list :
obj = filteraction()
obj.name = name
response = obj.get_resource(client, option_)
else :
if name and len(name) > 0 :
response = [filteraction() for _ in range(len(name))]
obj = [filteraction() for _ in range(len(name))]
for i in range(len(name)) :
obj[i] = filteraction()
obj[i].name = name[i]
response[i] = obj[i].get_resource(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
""" Use this API to fetch filtered set of filteraction resources.
filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = filteraction()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
""" Use this API to count the filteraction resources configured on NetScaler.
"""
try :
obj = filteraction()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
""" Use this API to count filtered the set of filteraction resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = filteraction()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class Builtin:
MODIFIABLE = "MODIFIABLE"
DELETABLE = "DELETABLE"
IMMUTABLE = "IMMUTABLE"
PARTITION_ALL = "PARTITION_ALL"
class Qual:
reset = "reset"
Add = "add"
corrupt = "corrupt"
forward = "forward"
errorcode = "errorcode"
drop = "drop"
class filteraction_response(base_response) :
def __init__(self, length=1) :
self.filteraction = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.filteraction = [filteraction() for _ in range(length)]
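# Typical usage sketch (commented out; the client setup follows the usual
# nitro SDK pattern and is an assumption, not part of this module):
#
#   from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
#   client = nitro_service("192.0.2.10", "http")
#   client.login("nsroot", "password")
#   action = filteraction()
#   action.name = "drop_action"
#   action.qual = filteraction.Qual.drop
#   filteraction.add(client, action)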
|
|
# -*- coding: utf-8 -*-
"""
<DefineSource>
@Date : Fri Nov 14 13:20:38 2014 \n
@Author : Erwan Ledoux \n\n
</DefineSource>
A Neurongrouper
"""
#<DefineAugmentation>
import ShareYourSystem as SYS
BaseModuleStr="ShareYourSystem.Specials.Simulaters.Simulater"
DecorationModuleStr="ShareYourSystem.Standards.Classors.Classer"
SYS.setSubModule(globals())
#</DefineAugmentation>
#<ImportSpecificModules>
from ShareYourSystem.Specials.Simulaters import Synapser
#</ImportSpecificModules>
#<DefineLocals>
NeurongroupPreTeamKeyStr='Pres'
NeurongroupPostTeamKeyStr='Posts'
NeurongroupSpikeTeamKeyStr='Spikes'
NeurongroupStateTeamKeyStr='States'
#</DefineLocals>
#<DefineClass>
@DecorationClass()
class NeurongrouperClass(BaseClass):
def default_init(self,
_NeurongroupDeriveBrianerVariable=None,
_NeurongroupingBrianKwargDict=None,
_NeurongroupingVariableStrToGetStrDict=None,
_NeurongroupedPostModelInsertStrsList=None,
_NeurongroupedPostModelAddDict=None,
_NeurongroupedEquationStrsList=None,
_NeurongroupedBrianVariable=None,
_NeurongroupedSpikeMonitorsList=None,
_NeurongroupedStateMonitorsList=None,
**_KwargVariablesDict
):
#Call the parent __init__ method
BaseClass.__init__(self,**_KwargVariablesDict)
#team
map(
lambda __KeyStr:
self.team(__KeyStr),
[
NeurongroupPreTeamKeyStr,
NeurongroupPostTeamKeyStr,
NeurongroupSpikeTeamKeyStr,
NeurongroupStateTeamKeyStr
]
)
def do_neurongroup(
self
):
#debug
'''
self.debug(('self.',self,[
'NeurongroupingBrianKwargDict'
]))
'''
#maybe should import
from brian2 import NeuronGroup,SpikeMonitor,StateMonitor
#Check
if 'N' not in self.NeurongroupingBrianKwargDict:
self.NeurongroupingBrianKwargDict['N']=self.SimulatingUnitsInt
#add the synaptic model strs
'''
self.debug(('self.',self,['CollectionsOrderedDict']))
'''
#map
self.NeurongroupedPostModelInsertStrsList=list(
set(
SYS.flat(
map(
lambda __PreConnecter:
__PreConnecter.PostModelInsertStrsList,
self.TeamDict[NeurongroupPostTeamKeyStr].ManagementDict.values()
)
)
)
)
#map
'''
self.debug(
[
'self.PreConnectersCollectionOrderedDict.keys() is ',
self.PreConnectersCollectionOrderedDict.keys(),
'self.PostConnectersCollectionOrderedDict.keys() is ',
self.PostConnectersCollectionOrderedDict.keys(),
]
)
'''
#map
map(
lambda __PreConnecter:
map(
lambda __ItemTuple:
self.NeurongroupedPostModelAddDict.__setitem__(
__ItemTuple[0],
list(
set(
(self.NeurongroupedPostModelAddDict[__ItemTuple[0]]
if __ItemTuple[0] in self.NeurongroupedPostModelAddDict
else [])+__ItemTuple[1]
)
)
),
__PreConnecter.PostModelAddDict.items()
),
self.TeamDict[NeurongroupPreTeamKeyStr].ManagementDict.values()
)
#debug
'''
self.debug(('self.',self,[
'NeurongroupedPostModelInsertStrsList',
'NeurongroupedPostModelAddDict'
]))
'''
#Check
if 'model' not in self.NeurongroupingBrianKwargDict:
self.NeurongroupingBrianKwargDict['model']=''
#add synaptic model variables
map(
lambda __NeurongroupedPostModelInsertStr:
self.NeurongroupingBrianKwargDict.__setitem__(
'model',
self.NeurongroupingBrianKwargDict['model'
]+'\n'+__NeurongroupedPostModelInsertStr
),
self.NeurongroupedPostModelInsertStrsList
)
#map
self.NeurongroupedEquationStrsList=map(
lambda __KeyStr:
SYS.chunk(
['d'+__KeyStr+'/dt',')/'],
self.NeurongroupingBrianKwargDict['model'],
)[0],
self.NeurongroupedPostModelAddDict.keys()
)
#map
map(
lambda __NeurongroupedEquationStr,__AddStrsList:
self.NeurongroupingBrianKwargDict.__setitem__(
'model',
self.NeurongroupingBrianKwargDict['model'].replace(
__NeurongroupedEquationStr,
__NeurongroupedEquationStr+'+'+'+'.join(__AddStrsList)
)
),
self.NeurongroupedEquationStrsList,
self.NeurongroupedPostModelAddDict.values()
)
#debug
'''
self.debug(('self.',self,[
'NeurongroupedEquationStrsList',
'NeurongroupingBrianKwargDict'
]))
'''
#init
self.NeurongroupedBrianVariable=NeuronGroup(
**self.NeurongroupingBrianKwargDict
)
#debug
'''
self.debug(('self.',self,['NeurongroupedBrianVariable']))
'''
#update variables
map(
lambda __ItemTuple:
setattr(
self.NeurongroupedBrianVariable,
__ItemTuple[0],
self[__ItemTuple[1]]
),
self.NeurongroupingVariableStrToGetStrDict.items()
)
#debug
'''
self.debug(('self.',self,['NeurongroupedBrianVariable']))
'''
#map
self.NeurongroupedSpikeMonitorsList=map(
lambda __DeriveMoniter:
__DeriveMoniter.__setitem__(
'SpikeMonitor',
SpikeMonitor(
self.NeurongroupedBrianVariable
)
).SpikeMonitor,
self.TeamDict[NeurongroupSpikeTeamKeyStr].ManagementDict.values()
)
#debug
'''
self.debug(
[
('self.',self,[
'NeurongroupedSpikeMonitorsList'
])
]
)
'''
#map
self.NeurongroupedStateMonitorsList=map(
lambda __DeriveMoniter:
__DeriveMoniter.__setitem__(
'StateMonitor',
StateMonitor(
self.NeurongroupedBrianVariable,
__DeriveMoniter.MoniteringVariableStr,
__DeriveMoniter.MoniteringRecordTimeIndexIntsArray
)
).StateMonitor,
self.TeamDict[NeurongroupStateTeamKeyStr].ManagementDict.values()
)
#debug
'''
self.debug(('self.',self,['NeurongroupedStateMonitorsList']))
'''
"""
def propertize_setWatchAfterParentWithParenterBool(self,_SettingValueVariable):
#debug
self.debug(
[
'We have grand parents',
'map(type,self.ParentedDeriveTeamersList) is '+str(
map(type,self.ParentedDeriveTeamersList))
]
)
#Check
if type(self.ParentTopDeriveTeamerVariable)==SYS.BrianerClass:
#alias
self.NeurongroupDeriveBrianerVariable=self.ParentTopDeriveTeamerVariable
else:
#index
self.NeurongroupDeriveBrianerVariable=self.ParentedDeriveTeamersList[
map(
type,
self.ParentedDeriveTeamersList
).index(SYS.BrianerClass)
]
#manage self
self.NeurongroupDeriveBrianerVariable.TeamDict[
self.ParentTopDeriveTeamerVariable.Module.BrianPopulationTeamKeyStr
].manage(self)
#call the base method
BaseClass.propertize_setWatchAfterParentWithParenterBool(self,_SettingValueVariable)
"""
#</DefineClass>
#<DefinePrint>
NeurongrouperClass.PrintingClassSkipKeyStrsList.extend(
[
'NeurongroupingBrianKwargDict',
'NeurongroupingVariableStrToGetStrDict',
'NeurongroupedPostModelInsertStrsList',
'NeurongroupedPostModelAddDict',
'NeurongroupedEquationStrsList',
'NeurongroupedBrianVariable',
'NeurongroupedSpikeMonitorsList',
'NeurongroupedStateMonitorsList'
]
)
#</DefinePrint>
|
|
# Copyright (c) 2014 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from oslo_utils.fixture import uuidsentinel
from oslo_utils import uuidutils
import six
import webob
from nova.api.openstack import api_version_request as avr
from nova.api.openstack.compute import server_groups as sg_v21
from nova import context
from nova import exception
from nova import objects
from nova.policies import server_groups as sg_policies
from nova import test
from nova.tests import fixtures
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import policy_fixture
class AttrDict(dict):
def __getattr__(self, k):
return self[k]
def server_group_template(**kwargs):
sgroup = kwargs.copy()
sgroup.setdefault('name', 'test')
return sgroup
def server_group_resp_template(**kwargs):
sgroup = kwargs.copy()
sgroup.setdefault('name', 'test')
if 'policy' not in kwargs:
sgroup.setdefault('policies', [])
sgroup.setdefault('members', [])
return sgroup
def server_group_db(sg):
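# Turns an API-style server group dict into the attribute bag an
# objects.InstanceGroup expects: uuid instead of id, empty members/policies
# defaults, fake user/project ids and the usual bookkeeping columns.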
attrs = copy.deepcopy(sg)
if 'id' in attrs:
attrs['uuid'] = attrs.pop('id')
if 'policies' in attrs:
policies = attrs.pop('policies')
attrs['policies'] = policies
else:
attrs['policies'] = []
if 'policy' in attrs:
del attrs['policies']
if 'members' in attrs:
members = attrs.pop('members')
attrs['members'] = members
else:
attrs['members'] = []
attrs['deleted'] = 0
attrs['deleted_at'] = None
attrs['created_at'] = None
attrs['updated_at'] = None
if 'user_id' not in attrs:
attrs['user_id'] = fakes.FAKE_USER_ID
if 'project_id' not in attrs:
attrs['project_id'] = fakes.FAKE_PROJECT_ID
attrs['id'] = 7
return AttrDict(attrs)
class ServerGroupTestV21(test.NoDBTestCase):
USES_DB_SELF = True
validation_error = exception.ValidationError
wsgi_api_version = '2.1'
def setUp(self):
super(ServerGroupTestV21, self).setUp()
self._setup_controller()
self.req = fakes.HTTPRequest.blank('')
self.admin_req = fakes.HTTPRequest.blank('', use_admin_context=True)
self.foo_req = fakes.HTTPRequest.blank('', project_id='foo')
self.policy = self.useFixture(policy_fixture.RealPolicyFixture())
self.useFixture(fixtures.Database(database='api'))
cells = fixtures.CellDatabases()
cells.add_cell_database(uuidsentinel.cell1)
cells.add_cell_database(uuidsentinel.cell2)
self.useFixture(cells)
ctxt = context.get_admin_context()
self.cells = {}
for uuid in (uuidsentinel.cell1, uuidsentinel.cell2):
cm = objects.CellMapping(context=ctxt,
uuid=uuid,
database_connection=uuid,
transport_url=uuid)
cm.create()
self.cells[cm.uuid] = cm
def _setup_controller(self):
self.controller = sg_v21.ServerGroupController()
def test_create_server_group_with_no_policies(self):
sgroup = server_group_template()
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
def _create_server_group_normal(self, policies=None, policy=None,
rules=None):
sgroup = server_group_template()
sgroup['policies'] = policies
res_dict = self.controller.create(self.req,
body={'server_group': sgroup})
self.assertEqual(res_dict['server_group']['name'], 'test')
self.assertTrue(uuidutils.is_uuid_like(res_dict['server_group']['id']))
self.assertEqual(res_dict['server_group']['policies'], policies)
def test_create_server_group_with_new_policy_before_264(self):
req = fakes.HTTPRequest.blank('', version='2.63')
policy = 'anti-affinity'
rules = {'max_server_per_host': 3}
# 'policy' isn't an acceptable request key before 2.64
sgroup = server_group_template(policy=policy)
result = self.assertRaises(
self.validation_error, self.controller.create,
req, body={'server_group': sgroup})
self.assertIn(
"Invalid input for field/attribute server_group",
six.text_type(result)
)
# 'rules' isn't an acceptable request key before 2.64
sgroup = server_group_template(rules=rules)
result = self.assertRaises(
self.validation_error, self.controller.create,
req, body={'server_group': sgroup})
self.assertIn(
"Invalid input for field/attribute server_group",
six.text_type(result)
)
def test_create_server_group(self):
policies = ['affinity', 'anti-affinity']
for policy in policies:
self._create_server_group_normal(policies=[policy])
def test_create_server_group_rbac_default(self):
sgroup = server_group_template()
sgroup['policies'] = ['affinity']
# test as admin
self.controller.create(self.admin_req, body={'server_group': sgroup})
# test as non-admin
self.controller.create(self.req, body={'server_group': sgroup})
def test_create_server_group_rbac_admin_only(self):
sgroup = server_group_template()
sgroup['policies'] = ['affinity']
# override policy to restrict to admin
rule_name = sg_policies.POLICY_ROOT % 'create'
rules = {rule_name: 'is_admin:True'}
self.policy.set_rules(rules, overwrite=False)
# check for success as admin
self.controller.create(self.admin_req, body={'server_group': sgroup})
# check for failure as non-admin
exc = self.assertRaises(exception.PolicyNotAuthorized,
self.controller.create, self.req,
body={'server_group': sgroup})
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
def _create_instance(self, ctx, cell):
with context.target_cell(ctx, cell) as cctx:
instance = objects.Instance(context=cctx,
image_ref=uuidsentinel.fake_image_ref,
node='node1', reservation_id='a',
host='host1',
project_id=fakes.FAKE_PROJECT_ID,
vm_state='fake',
system_metadata={'key': 'value'})
instance.create()
im = objects.InstanceMapping(context=ctx,
project_id=ctx.project_id,
user_id=ctx.user_id,
cell_mapping=cell,
instance_uuid=instance.uuid)
im.create()
return instance
def _create_instance_group(self, context, members):
ig = objects.InstanceGroup(context=context, name='fake_name',
user_id='fake_user', project_id=fakes.FAKE_PROJECT_ID,
members=members)
ig.create()
return ig.uuid
def _create_groups_and_instances(self, ctx):
cell1 = self.cells[uuidsentinel.cell1]
cell2 = self.cells[uuidsentinel.cell2]
instances = [self._create_instance(ctx, cell=cell1),
self._create_instance(ctx, cell=cell2),
self._create_instance(ctx, cell=None)]
members = [instance.uuid for instance in instances]
ig_uuid = self._create_instance_group(ctx, members)
return (ig_uuid, instances, members)
def _test_list_server_group_all(self, api_version='2.1'):
self._test_list_server_group(api_version=api_version,
limited='',
path='/os-server-groups?all_projects=True')
def _test_list_server_group_offset_and_limit(self, api_version='2.1'):
self._test_list_server_group(api_version=api_version,
limited='&offset=1&limit=1',
path='/os-server-groups?all_projects=True')
@mock.patch('nova.objects.InstanceGroupList.get_by_project_id')
@mock.patch('nova.objects.InstanceGroupList.get_all')
def _test_list_server_group(self, mock_get_all, mock_get_by_project,
path, api_version='2.1', limited=None):
policies = ['anti-affinity']
policy = "anti-affinity"
members = []
metadata = {} # always empty
names = ['default-x', 'test']
p_id = fakes.FAKE_PROJECT_ID
u_id = fakes.FAKE_USER_ID
ver = avr.APIVersionRequest(api_version)
if ver >= avr.APIVersionRequest("2.64"):
sg1 = server_group_resp_template(id=uuidsentinel.sg1_id,
name=names[0],
policy=policy,
rules={},
members=members,
project_id=p_id,
user_id=u_id)
sg2 = server_group_resp_template(id=uuidsentinel.sg2_id,
name=names[1],
policy=policy,
rules={},
members=members,
project_id=p_id,
user_id=u_id)
elif ver >= avr.APIVersionRequest("2.13"):
sg1 = server_group_resp_template(id=uuidsentinel.sg1_id,
name=names[0],
policies=policies,
members=members,
metadata=metadata,
project_id=p_id,
user_id=u_id)
sg2 = server_group_resp_template(id=uuidsentinel.sg2_id,
name=names[1],
policies=policies,
members=members,
metadata=metadata,
project_id=p_id,
user_id=u_id)
else:
sg1 = server_group_resp_template(id=uuidsentinel.sg1_id,
name=names[0],
policies=policies,
members=members,
metadata=metadata)
sg2 = server_group_resp_template(id=uuidsentinel.sg2_id,
name=names[1],
policies=policies,
members=members,
metadata=metadata)
tenant_groups = [sg2]
all_groups = [sg1, sg2]
if limited:
all = {'server_groups': [sg2]}
tenant_specific = {'server_groups': []}
else:
all = {'server_groups': all_groups}
tenant_specific = {'server_groups': tenant_groups}
def return_all_server_groups():
return objects.InstanceGroupList(
objects=[objects.InstanceGroup(
**server_group_db(sg)) for sg in all_groups])
mock_get_all.return_value = return_all_server_groups()
def return_tenant_server_groups():
return objects.InstanceGroupList(
objects=[objects.InstanceGroup(
**server_group_db(sg)) for sg in tenant_groups])
mock_get_by_project.return_value = return_tenant_server_groups()
path = path or '/os-server-groups?all_projects=True'
if limited:
path += limited
req = fakes.HTTPRequest.blank(path, version=api_version)
admin_req = fakes.HTTPRequest.blank(path, use_admin_context=True,
version=api_version)
# test as admin
res_dict = self.controller.index(admin_req)
self.assertEqual(all, res_dict)
# test as non-admin
res_dict = self.controller.index(req)
self.assertEqual(tenant_specific, res_dict)
@mock.patch('nova.objects.InstanceGroupList.get_by_project_id')
def _test_list_server_group_by_tenant(self, mock_get_by_project,
api_version='2.1'):
policies = ['anti-affinity']
members = []
metadata = {} # always empty
names = ['default-x', 'test']
p_id = fakes.FAKE_PROJECT_ID
u_id = fakes.FAKE_USER_ID
if api_version >= '2.13':
sg1 = server_group_resp_template(id=uuidsentinel.sg1_id,
name=names[0],
policies=policies,
members=members,
metadata=metadata,
project_id=p_id,
user_id=u_id)
sg2 = server_group_resp_template(id=uuidsentinel.sg2_id,
name=names[1],
policies=policies,
members=members,
metadata=metadata,
project_id=p_id,
user_id=u_id)
else:
sg1 = server_group_resp_template(id=uuidsentinel.sg1_id,
name=names[0],
policies=policies,
members=members,
metadata=metadata)
sg2 = server_group_resp_template(id=uuidsentinel.sg2_id,
name=names[1],
policies=policies,
members=members,
metadata=metadata)
groups = [sg1, sg2]
expected = {'server_groups': groups}
def return_server_groups():
return objects.InstanceGroupList(
objects=[objects.InstanceGroup(
**server_group_db(sg)) for sg in groups])
return_get_by_project = return_server_groups()
mock_get_by_project.return_value = return_get_by_project
path = '/os-server-groups'
req = fakes.HTTPRequest.blank(path, version=api_version)
res_dict = self.controller.index(req)
self.assertEqual(expected, res_dict)
def test_display_members(self):
ctx = context.RequestContext('fake_user', fakes.FAKE_PROJECT_ID)
(ig_uuid, instances, members) = self._create_groups_and_instances(ctx)
res_dict = self.controller.show(self.req, ig_uuid)
result_members = res_dict['server_group']['members']
self.assertEqual(3, len(result_members))
for member in members:
self.assertIn(member, result_members)
def test_display_members_with_nonexistent_group(self):
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, self.req, uuidsentinel.group)
def test_display_active_members_only(self):
ctx = context.RequestContext('fake_user', fakes.FAKE_PROJECT_ID)
(ig_uuid, instances, members) = self._create_groups_and_instances(ctx)
# delete an instance
im = objects.InstanceMapping.get_by_instance_uuid(ctx,
instances[1].uuid)
with context.target_cell(ctx, im.cell_mapping) as cctxt:
instances[1]._context = cctxt
instances[1].destroy()
# check that the instance does not exist
self.assertRaises(exception.InstanceNotFound,
objects.Instance.get_by_uuid,
ctx, instances[1].uuid)
res_dict = self.controller.show(self.req, ig_uuid)
result_members = res_dict['server_group']['members']
# check that only the active instance is displayed
self.assertEqual(2, len(result_members))
self.assertIn(instances[0].uuid, result_members)
def test_display_members_rbac_default(self):
ctx = context.RequestContext('fake_user', fakes.FAKE_PROJECT_ID)
ig_uuid = self._create_groups_and_instances(ctx)[0]
# test as admin
self.controller.show(self.admin_req, ig_uuid)
# test as non-admin, same project
self.controller.show(self.req, ig_uuid)
# test as non-admin, different project
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, self.foo_req, ig_uuid)
def test_display_members_rbac_admin_only(self):
ctx = context.RequestContext('fake_user', fakes.FAKE_PROJECT_ID)
ig_uuid = self._create_groups_and_instances(ctx)[0]
# override policy to restrict to admin
rule_name = sg_policies.POLICY_ROOT % 'show'
rules = {rule_name: 'is_admin:True'}
self.policy.set_rules(rules, overwrite=False)
# check for success as admin
self.controller.show(self.admin_req, ig_uuid)
# check for failure as non-admin
exc = self.assertRaises(exception.PolicyNotAuthorized,
self.controller.show, self.req, ig_uuid)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
def test_create_server_group_with_non_alphanumeric_in_name(self):
# The fix for bug #1434335 expanded the allowable character set
# for server group names to include non-alphanumeric characters
# if they are printable.
sgroup = server_group_template(name='good* $%name',
policies=['affinity'])
res_dict = self.controller.create(self.req,
body={'server_group': sgroup})
self.assertEqual(res_dict['server_group']['name'], 'good* $%name')
def test_create_server_group_with_illegal_name(self):
# blank name
sgroup = server_group_template(name='', policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
# name with length 256
sgroup = server_group_template(name='1234567890' * 26,
policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
# non-string name
sgroup = server_group_template(name=12, policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
# name with leading spaces
sgroup = server_group_template(name=' leading spaces',
policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
# name with trailing spaces
sgroup = server_group_template(name='trailing space ',
policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
# name with all spaces
sgroup = server_group_template(name=' ',
policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
# name with unprintable character
sgroup = server_group_template(name='bad\x00name',
policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
# name with out of range char U0001F4A9
sgroup = server_group_template(name=u"\U0001F4A9",
policies=['affinity'])
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
def test_create_server_group_with_illegal_policies(self):
# blank policy
sgroup = server_group_template(name='fake-name', policies='')
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
# policy as integer
sgroup = server_group_template(name='fake-name', policies=7)
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
# policy as string
sgroup = server_group_template(name='fake-name', policies='invalid')
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
# policy as None
sgroup = server_group_template(name='fake-name', policies=None)
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
def test_create_server_group_conflicting_policies(self):
sgroup = server_group_template()
policies = ['anti-affinity', 'affinity']
sgroup['policies'] = policies
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
def test_create_server_group_with_duplicate_policies(self):
sgroup = server_group_template()
policies = ['affinity', 'affinity']
sgroup['policies'] = policies
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
def test_create_server_group_not_supported(self):
sgroup = server_group_template()
policies = ['storage-affinity', 'anti-affinity', 'rack-affinity']
sgroup['policies'] = policies
self.assertRaises(self.validation_error, self.controller.create,
self.req, body={'server_group': sgroup})
def test_create_server_group_with_no_body(self):
self.assertRaises(self.validation_error,
self.controller.create, self.req, body=None)
def test_create_server_group_with_no_server_group(self):
body = {'no-instanceGroup': None}
self.assertRaises(self.validation_error,
self.controller.create, self.req, body=body)
def test_list_server_group_by_tenant(self):
self._test_list_server_group_by_tenant(
api_version=self.wsgi_api_version)
def test_list_server_group_all_v20(self):
self._test_list_server_group_all(api_version='2.0')
def test_list_server_group_all(self):
self._test_list_server_group_all(
api_version=self.wsgi_api_version)
def test_list_server_group_offset_and_limit(self):
self._test_list_server_group_offset_and_limit(
api_version=self.wsgi_api_version)
def test_list_server_groups_rbac_default(self):
# test as admin
self.controller.index(self.admin_req)
# test as non-admin
self.controller.index(self.req)
def test_list_server_group_multiple_param(self):
self._test_list_server_group(api_version=self.wsgi_api_version,
limited='&offset=2&limit=2&limit=1&offset=1',
path='/os-server-groups?all_projects=False&all_projects=True')
def test_list_server_group_additional_param(self):
self._test_list_server_group(api_version=self.wsgi_api_version,
limited='&offset=1&limit=1',
path='/os-server-groups?dummy=False&all_projects=True')
def test_list_server_group_param_as_int(self):
self._test_list_server_group(api_version=self.wsgi_api_version,
limited='&offset=1&limit=1',
path='/os-server-groups?all_projects=1')
def test_list_server_group_negative_int_as_offset(self):
self.assertRaises(exception.ValidationError,
self._test_list_server_group,
api_version=self.wsgi_api_version,
limited='&offset=-1',
path='/os-server-groups?all_projects=1')
def test_list_server_group_string_int_as_offset(self):
self.assertRaises(exception.ValidationError,
self._test_list_server_group,
api_version=self.wsgi_api_version,
limited='&offset=dummy',
path='/os-server-groups?all_projects=1')
def test_list_server_group_multiparam_string_as_offset(self):
self.assertRaises(exception.ValidationError,
self._test_list_server_group,
api_version=self.wsgi_api_version,
limited='&offset=dummy&offset=1',
path='/os-server-groups?all_projects=1')
def test_list_server_group_negative_int_as_limit(self):
self.assertRaises(exception.ValidationError,
self._test_list_server_group,
api_version=self.wsgi_api_version,
limited='&limit=-1',
path='/os-server-groups?all_projects=1')
def test_list_server_group_string_int_as_limit(self):
self.assertRaises(exception.ValidationError,
self._test_list_server_group,
api_version=self.wsgi_api_version,
limited='&limit=dummy',
path='/os-server-groups?all_projects=1')
def test_list_server_group_multiparam_string_as_limit(self):
self.assertRaises(exception.ValidationError,
self._test_list_server_group,
api_version=self.wsgi_api_version,
limited='&limit=dummy&limit=1',
path='/os-server-groups?all_projects=1')
def test_list_server_groups_rbac_admin_only(self):
# override policy to restrict to admin
rule_name = sg_policies.POLICY_ROOT % 'index'
rules = {rule_name: 'is_admin:True'}
self.policy.set_rules(rules, overwrite=False)
# check for success as admin
self.controller.index(self.admin_req)
# check for failure as non-admin
exc = self.assertRaises(exception.PolicyNotAuthorized,
self.controller.index, self.req)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
@mock.patch('nova.objects.InstanceGroup.destroy')
def test_delete_server_group_by_id(self, mock_destroy):
sg = server_group_template(id=uuidsentinel.sg1_id)
def return_server_group(_cls, context, group_id):
self.assertEqual(sg['id'], group_id)
return objects.InstanceGroup(**server_group_db(sg))
self.stub_out('nova.objects.InstanceGroup.get_by_uuid',
return_server_group)
resp = self.controller.delete(self.req, uuidsentinel.sg1_id)
mock_destroy.assert_called_once_with()
# NOTE: with v2.1, the http status code is set as the wsgi_code of the
# API method instead of as status_int on a response object.
if isinstance(self.controller, sg_v21.ServerGroupController):
status_int = self.controller.delete.wsgi_code
else:
status_int = resp.status_int
self.assertEqual(204, status_int)
def test_delete_non_existing_server_group(self):
self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
self.req, 'invalid')
def test_delete_server_group_rbac_default(self):
ctx = context.RequestContext('fake_user', fakes.FAKE_PROJECT_ID)
# test as admin
ig_uuid = self._create_groups_and_instances(ctx)[0]
self.controller.delete(self.admin_req, ig_uuid)
# test as non-admin
ig_uuid = self._create_groups_and_instances(ctx)[0]
self.controller.delete(self.req, ig_uuid)
def test_delete_server_group_rbac_admin_only(self):
ctx = context.RequestContext('fake_user', fakes.FAKE_PROJECT_ID)
# override policy to restrict to admin
rule_name = sg_policies.POLICY_ROOT % 'delete'
rules = {rule_name: 'is_admin:True'}
self.policy.set_rules(rules, overwrite=False)
# check for success as admin
ig_uuid = self._create_groups_and_instances(ctx)[0]
self.controller.delete(self.admin_req, ig_uuid)
# check for failure as non-admin
ig_uuid = self._create_groups_and_instances(ctx)[0]
exc = self.assertRaises(exception.PolicyNotAuthorized,
self.controller.delete, self.req, ig_uuid)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
class ServerGroupTestV213(ServerGroupTestV21):
wsgi_api_version = '2.13'
def _setup_controller(self):
self.controller = sg_v21.ServerGroupController()
def test_list_server_group_all(self):
self._test_list_server_group_all(api_version='2.13')
def test_list_server_group_offset_and_limit(self):
self._test_list_server_group_offset_and_limit(api_version='2.13')
def test_list_server_group_by_tenant(self):
self._test_list_server_group_by_tenant(api_version='2.13')
class ServerGroupTestV264(ServerGroupTestV213):
wsgi_api_version = '2.64'
def _setup_controller(self):
self.controller = sg_v21.ServerGroupController()
def _create_server_group_normal(self, policies=None, policy=None,
rules=None):
req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
sgroup = server_group_template()
sgroup['rules'] = rules or {}
sgroup['policy'] = policy
res_dict = self.controller.create(req,
body={'server_group': sgroup})
self.assertEqual(res_dict['server_group']['name'], 'test')
self.assertTrue(uuidutils.is_uuid_like(res_dict['server_group']['id']))
self.assertEqual(res_dict['server_group']['policy'], policy)
self.assertEqual(res_dict['server_group']['rules'], rules or {})
return res_dict['server_group']['id']
def test_list_server_group_all(self):
self._test_list_server_group_all(api_version=self.wsgi_api_version)
def test_create_and_show_server_group(self):
policies = ['affinity', 'anti-affinity']
for policy in policies:
g_uuid = self._create_server_group_normal(
policy=policy)
res_dict = self._display_server_group(g_uuid)
self.assertEqual(res_dict['server_group']['policy'], policy)
self.assertEqual(res_dict['server_group']['rules'], {})
def _display_server_group(self, uuid):
req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
group = self.controller.show(req, uuid)
return group
@mock.patch('nova.objects.service.get_minimum_version_all_cells',
return_value=33)
def test_create_and_show_server_group_with_rules(self, mock_get_v):
policy = 'anti-affinity'
rules = {'max_server_per_host': 3}
g_uuid = self._create_server_group_normal(
policy=policy, rules=rules)
res_dict = self._display_server_group(g_uuid)
self.assertEqual(res_dict['server_group']['policy'], policy)
self.assertEqual(res_dict['server_group']['rules'], rules)
def test_create_affinity_server_group_with_invalid_policy(self):
req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
sgroup = server_group_template(policy='affinity',
rules={'max_server_per_host': 3})
result = self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, body={'server_group': sgroup})
self.assertIn("Only anti-affinity policy supports rules",
six.text_type(result))
def test_create_anti_affinity_server_group_with_invalid_rules(self):
req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
# Negative tests: the rule key is unknown, the value is not positive,
# or the value is not an integer
invalid_rules = [{'unknown_key': '3'},
{'max_server_per_host': 0},
{'max_server_per_host': 'foo'}]
for r in invalid_rules:
sgroup = server_group_template(policy='anti-affinity', rules=r)
result = self.assertRaises(
self.validation_error, self.controller.create,
req, body={'server_group': sgroup})
self.assertIn(
"Invalid input for field/attribute", six.text_type(result)
)
@mock.patch('nova.objects.service.get_minimum_version_all_cells',
return_value=32)
def test_create_server_group_with_low_version_compute_service(self,
mock_get_v):
req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
sgroup = server_group_template(policy='anti-affinity',
rules={'max_server_per_host': 3})
result = self.assertRaises(
webob.exc.HTTPConflict,
self.controller.create, req, body={'server_group': sgroup})
self.assertIn("Creating an anti-affinity group with rule "
"max_server_per_host > 1 is not yet supported.",
six.text_type(result))
def test_create_server_group(self):
policies = ['affinity', 'anti-affinity']
for policy in policies:
self._create_server_group_normal(policy=policy)
def test_policies_since_264(self):
req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
# 'policies' isn't allowed in request >= 2.64
sgroup = server_group_template(policies=['anti-affinity'])
self.assertRaises(
self.validation_error, self.controller.create,
req, body={'server_group': sgroup})
def test_create_server_group_without_policy(self):
req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
# 'policy' is required request key in request >= 2.64
sgroup = server_group_template()
self.assertRaises(self.validation_error, self.controller.create,
req, body={'server_group': sgroup})
def test_create_server_group_with_illegal_policies(self):
req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
# blank policy
sgroup = server_group_template(policy='')
self.assertRaises(self.validation_error, self.controller.create,
req, body={'server_group': sgroup})
# policy as integer
sgroup = server_group_template(policy=7)
self.assertRaises(self.validation_error, self.controller.create,
req, body={'server_group': sgroup})
# policy as string
sgroup = server_group_template(policy='invalid')
self.assertRaises(self.validation_error, self.controller.create,
req, body={'server_group': sgroup})
# policy as None
sgroup = server_group_template(policy=None)
self.assertRaises(self.validation_error, self.controller.create,
req, body={'server_group': sgroup})
def test_additional_params(self):
req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
sgroup = server_group_template(unknown='unknown')
self.assertRaises(self.validation_error, self.controller.create,
req, body={'server_group': sgroup})
class ServerGroupTestV275(ServerGroupTestV264):
wsgi_api_version = '2.75'
def test_list_server_group_additional_param_old_version(self):
self._test_list_server_group(api_version='2.74',
limited='&offset=1&limit=1',
path='/os-server-groups?dummy=False&all_projects=True')
def test_list_server_group_additional_param(self):
req = fakes.HTTPRequest.blank('/os-server-groups?dummy=False',
version=self.wsgi_api_version)
self.assertRaises(self.validation_error, self.controller.index,
req)
|
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for L{twisted.logger._legacy}.
"""
from time import time
import logging as py_logging
from zope.interface.verify import verifyObject, BrokenMethodImplementation
from twisted.trial import unittest
from twisted.python import context
from twisted.python import log as legacyLog
from twisted.python.failure import Failure
from .._levels import LogLevel
from .._observer import ILogObserver
from .._format import formatEvent
from .._legacy import LegacyLogObserverWrapper
from .._legacy import publishToNewObserver
class LegacyLogObserverWrapperTests(unittest.TestCase):
"""
Tests for L{LegacyLogObserverWrapper}.
"""
def test_interface(self):
"""
L{LegacyLogObserverWrapper} is an L{ILogObserver}.
"""
legacyObserver = lambda e: None
observer = LegacyLogObserverWrapper(legacyObserver)
try:
verifyObject(ILogObserver, observer)
except BrokenMethodImplementation as e:
self.fail(e)
def test_repr(self):
"""
L{LegacyLogObserverWrapper} returns the expected string.
"""
class LegacyObserver(object):
def __repr__(self):
return "<Legacy Observer>"
def __call__(self):
return
observer = LegacyLogObserverWrapper(LegacyObserver())
self.assertEqual(
repr(observer),
"LegacyLogObserverWrapper(<Legacy Observer>)"
)
def observe(self, event):
"""
Send an event to a wrapped legacy observer and capture the event as
seen by that observer.
@param event: an event
@type event: L{dict}
@return: the event as observed by the legacy wrapper
"""
events = []
legacyObserver = lambda e: events.append(e)
observer = LegacyLogObserverWrapper(legacyObserver)
observer(event)
self.assertEqual(len(events), 1)
return events[0]
def forwardAndVerify(self, event):
"""
Send an event to a wrapped legacy observer and verify that its data is
preserved.
@param event: an event
@type event: L{dict}
@return: the event as observed by the legacy wrapper
"""
# Make sure keys that are expected by the logging system are present
event.setdefault("log_time", time())
event.setdefault("log_system", "-")
event.setdefault("log_level", LogLevel.info)
# Send a copy: don't mutate me, bro
observed = self.observe(dict(event))
# Don't expect modifications
for key, value in event.items():
self.assertIn(key, observed)
return observed
def test_forward(self):
"""
Basic forwarding: event keys as observed by a legacy observer are the
same.
"""
self.forwardAndVerify(dict(foo=1, bar=2))
def test_time(self):
"""
The new-style C{"log_time"} key is copied to the old-style C{"time"}
key.
"""
stamp = time()
event = self.forwardAndVerify(dict(log_time=stamp))
self.assertEqual(event["time"], stamp)
def test_timeAlreadySet(self):
"""
The new-style C{"log_time"} key does not step on a pre-existing
old-style C{"time"} key.
"""
stamp = time()
event = self.forwardAndVerify(dict(log_time=stamp + 1, time=stamp))
self.assertEqual(event["time"], stamp)
def test_system(self):
"""
The new-style C{"log_system"} key is copied to the old-style
C{"system"} key.
"""
event = self.forwardAndVerify(dict(log_system="foo"))
self.assertEqual(event["system"], "foo")
def test_systemAlreadySet(self):
"""
The new-style C{"log_system"} key does not step on a pre-existing
old-style C{"system"} key.
"""
event = self.forwardAndVerify(dict(log_system="foo", system="bar"))
self.assertEqual(event["system"], "bar")
def test_noSystem(self):
"""
If the new-style C{"log_system"} key is absent, the old-style
C{"system"} key is set to C{"-"}.
"""
# Don't use forwardAndVerify(), since that sets log_system.
event = dict(log_time=time(), log_level=LogLevel.info)
observed = self.observe(dict(event))
self.assertEqual(observed["system"], "-")
def test_levelNotChange(self):
"""
If explicitly set, the C{isError} key will be preserved when forwarding
from a new-style logging emitter to a legacy logging observer,
regardless of log level.
"""
self.forwardAndVerify(dict(log_level=LogLevel.info, isError=1))
self.forwardAndVerify(dict(log_level=LogLevel.warn, isError=1))
self.forwardAndVerify(dict(log_level=LogLevel.error, isError=0))
self.forwardAndVerify(dict(log_level=LogLevel.critical, isError=0))
def test_pythonLogLevelNotSet(self):
"""
The new-style C{"log_level"} key is not translated to the old-style
C{"logLevel"} key.
Events are forwarded from the old module to the new module and are
then seen by old-style observers.
We don't want to add unexpected keys to old-style events.
"""
event = self.forwardAndVerify(dict(log_level=LogLevel.info))
self.assertNotIn("logLevel", event)
def test_stringPythonLogLevel(self):
"""
If a stdlib log level was provided as a string (e.g. C{"WARNING"}) in
the legacy C{"logLevel"} key, it does not get converted to a number.
The documentation suggested that numerical values should be used but
this was not a requirement.
"""
event = self.forwardAndVerify(dict(
logLevel="WARNING", # py_logging.WARNING is 30
))
self.assertEqual(event["logLevel"], "WARNING")
def test_message(self):
"""
The old-style C{"message"} key is added, even if no new-style
C{"log_format"} is given, as it is required, but may be empty.
"""
event = self.forwardAndVerify(dict())
self.assertEqual(event["message"], ()) # "message" is a tuple
def test_messageAlreadySet(self):
"""
The old-style C{"message"} key is not modified if it already exists.
"""
event = self.forwardAndVerify(dict(message=("foo", "bar")))
self.assertEqual(event["message"], ("foo", "bar"))
def test_format(self):
"""
Formatting is translated such that text is rendered correctly, even
though old-style logging doesn't use PEP 3101 formatting.
"""
event = self.forwardAndVerify(
dict(log_format="Hello, {who}!", who="world")
)
self.assertEqual(
legacyLog.textFromEventDict(event),
"Hello, world!"
)
def test_formatMessage(self):
"""
Using the message key, which is special in old-style, works for
new-style formatting.
"""
event = self.forwardAndVerify(
dict(log_format="Hello, {message}!", message="world")
)
self.assertEqual(
legacyLog.textFromEventDict(event),
"Hello, world!"
)
def test_formatAlreadySet(self):
"""
Formatting is not altered if the old-style C{"format"} key already
exists.
"""
event = self.forwardAndVerify(
dict(log_format="Hello!", format="Howdy!")
)
self.assertEqual(legacyLog.textFromEventDict(event), "Howdy!")
def eventWithFailure(self, **values):
"""
Create a new-style event with a captured failure.
@param values: Additional values to include in the event.
@type values: L{dict}
@return: the new event
@rtype: L{dict}
"""
failure = Failure(RuntimeError("nyargh!"))
return self.forwardAndVerify(dict(
log_failure=failure,
log_format="oopsie...",
**values
))
def test_failure(self):
"""
Captured failures in the new style set the old-style C{"failure"},
C{"isError"}, and C{"why"} keys.
"""
event = self.eventWithFailure()
self.assertIs(event["failure"], event["log_failure"])
self.assertTrue(event["isError"])
self.assertEqual(event["why"], "oopsie...")
def test_failureAlreadySet(self):
"""
Captured failures in the new style do not step on a pre-existing
old-style C{"failure"} key.
"""
failure = Failure(RuntimeError("Weak salsa!"))
event = self.eventWithFailure(failure=failure)
self.assertIs(event["failure"], failure)
def test_isErrorAlreadySet(self):
"""
Captured failures in the new style do not step on a pre-existing
old-style C{"isError"} key.
"""
event = self.eventWithFailure(isError=0)
self.assertEqual(event["isError"], 0)
def test_whyAlreadySet(self):
"""
Captured failures in the new style do not step on a pre-existing
old-style C{"failure"} key.
"""
event = self.eventWithFailure(why="blah")
self.assertEqual(event["why"], "blah")
class PublishToNewObserverTests(unittest.TestCase):
"""
Tests for L{publishToNewObserver}.
"""
def setUp(self):
self.events = []
self.observer = self.events.append
def legacyEvent(self, *message, **values):
"""
Return a basic old-style event as would be created by L{legacyLog.msg}.
@param message: a message event value in the legacy event format
@type message: L{tuple} of L{bytes}
@param values: additional event values in the legacy event format
@type values: L{dict}
@return: a legacy event
"""
event = (context.get(legacyLog.ILogContext) or {}).copy()
event.update(values)
event["message"] = message
event["time"] = time()
if "isError" not in event:
event["isError"] = 0
return event
def test_observed(self):
"""
The observer is called exactly once.
"""
publishToNewObserver(
self.observer, self.legacyEvent(), legacyLog.textFromEventDict
)
self.assertEqual(len(self.events), 1)
def test_time(self):
"""
The old-style C{"time"} key is copied to the new-style C{"log_time"}
key.
"""
publishToNewObserver(
self.observer, self.legacyEvent(), legacyLog.textFromEventDict
)
self.assertEqual(
self.events[0]["log_time"], self.events[0]["time"]
)
def test_message(self):
"""
A published old-style event should format as text in the same way as
the given C{textFromEventDict} callable would format it.
"""
def textFromEventDict(event):
return "".join(reversed(" ".join(event["message"])))
event = self.legacyEvent("Hello,", "world!")
text = textFromEventDict(event)
publishToNewObserver(self.observer, event, textFromEventDict)
self.assertEqual(formatEvent(self.events[0]), text)
def test_defaultLogLevel(self):
"""
Published event should have log level of L{LogLevel.info}.
"""
publishToNewObserver(
self.observer, self.legacyEvent(), legacyLog.textFromEventDict
)
self.assertEqual(self.events[0]["log_level"], LogLevel.info)
def test_isError(self):
"""
If C{"isError"} is set to C{1} (true) on the legacy event, the
C{"log_level"} key should get set to L{LogLevel.critical}.
"""
publishToNewObserver(
self.observer,
self.legacyEvent(isError=1),
legacyLog.textFromEventDict
)
self.assertEqual(self.events[0]["log_level"], LogLevel.critical)
def test_stdlibLogLevel(self):
"""
If the old-style C{"logLevel"} key is set to a standard library logging
level, using a predefined (L{int}) constant, the new-style
C{"log_level"} key should get set to the corresponding log level.
"""
publishToNewObserver(
self.observer,
self.legacyEvent(logLevel=py_logging.WARNING),
legacyLog.textFromEventDict
)
self.assertEqual(self.events[0]["log_level"], LogLevel.warn)
def test_stdlibLogLevelWithString(self):
"""
If the old-style C{"logLevel"} key is set to a standard library logging
level, using a string value, the new-style C{"log_level"} key should
get set to the corresponding log level.
"""
publishToNewObserver(
self.observer,
self.legacyEvent(logLevel="WARNING"),
legacyLog.textFromEventDict
)
self.assertEqual(self.events[0]["log_level"], LogLevel.warn)
def test_stdlibLogLevelWithGarbage(self):
"""
If the old-style C{"logLevel"} key is set to a standard library logging
level, using an unknown value, the new-style C{"log_level"} key should
not get set.
"""
publishToNewObserver(
self.observer,
self.legacyEvent(logLevel="Foo!!!!!"),
legacyLog.textFromEventDict
)
self.assertNotIn("log_level", self.events[0])
def test_defaultNamespace(self):
"""
Published event should have a namespace of C{"log_legacy"} to indicate
that it was forwarded from legacy logging.
"""
publishToNewObserver(
self.observer, self.legacyEvent(), legacyLog.textFromEventDict
)
self.assertEqual(self.events[0]["log_namespace"], "log_legacy")
def test_system(self):
"""
The old-style C{"system"} key is copied to the new-style
C{"log_system"} key.
"""
publishToNewObserver(
self.observer, self.legacyEvent(), legacyLog.textFromEventDict
)
self.assertEqual(
self.events[0]["log_system"], self.events[0]["system"]
)
|
|
"""
Module containing analysis functions for raster datasets.
"""
import itertools, operator
from .data import *
from . import manager
from .. import vector
import PIL.Image, PIL.ImageMath, PIL.ImageStat, PIL.ImageMorph
import math
# Zonal aggregation
def zonal_statistics(zonaldata, valuedata, zonalband=0, valueband=0, outstat="mean", nodataval=-999):
"""
Summarizes values of a raster dataset in groups or regions defined by a zonal dataset, which
can be either vector data or a categorical raster.
For each unique zone in "zonaldata" (each feature in the case of vector data), summarizes the "valuedata"
cells that overlap that zone.
Which band to use must be specified for each dataset with "zonalband" and "valueband".
The "outstat" statistic can be one of: mean (default), median, max, min, stdev, var, count, or sum.
NOTE: For now, both datasets must have the same crs; no auto conversion is done under the hood.
"""
# handle zonaldata being vector type
if not isinstance(zonaldata, RasterData):
zonaldata = manager.rasterize(zonaldata, **valuedata.rasterdef)
zonaldata = zonaldata.conditional("val > 0") # necessary bc rasterize returns 8bit instead of binary
# resample value grid into zonal grid
if zonaldata.affine != valuedata.affine:
valuedata = manager.resample(valuedata, **zonaldata.rasterdef)
# pick one band for each
zonalband = zonaldata.bands[zonalband]
valueband = valuedata.bands[valueband]
# create output image, using nullzone as nullvalue
georef = dict(width=valuedata.width, height=valuedata.height,
affine=valuedata.affine)
outrast = RasterData(mode="float32", **georef)
outrast.add_band(nodataval=nodataval)
# get stats for each unique value in zonal data
zonevalues = (val for count,val in zonalband.img.getcolors(zonaldata.width*zonaldata.height))
zonesdict = {}
#print zonalband, zonalband.summarystats()
#zonalband.view()
#valueband.view()
for zoneval in zonevalues:
# exclude nullzone
if zoneval == zonalband.nodataval: continue
#print "zone",zoneval
# mask valueband to only the current zone
curzone = valueband.copy()
#print "copy"
#print curzone.summarystats()
#curzone.view() #.img.show()
curzone.mask = zonalband.conditional("val != %s" % zoneval).img # returns true everywhere, which is not correct..., maybe due to nodataval???
#print "cond",zoneval
#print zonalband.conditional("val != %s" % zoneval).summarystats()
#zonalband.conditional("val != %s" % zoneval).view() #img.show()
#print "mask"
#print curzone.summarystats()
#curzone.view() #img.show()
# also exclude null values from calculations
curzone.mask = valueband.mask # pastes additional nullvalues
curzone._cached_mask = None # force having to recreate the mask using the combined old and pasted nullvals
#print "mask2", curzone
#print curzone.summarystats()
#curzone.view() #img.show()
# retrieve stats
stats = curzone.summarystats(outstat)
zonesdict[zoneval] = stats
# write chosen stat to outimg
if stats[outstat] is None:
stats[outstat] = nodataval
outrast.bands[0].img.paste(stats[outstat], mask=curzone.mask)
return zonesdict, outrast
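# Example usage (a hypothetical sketch only; a "districts" vector layer and a
# "population" RasterData sharing the same crs are assumed to exist):
#
#     zonestats, statraster = zonal_statistics(districts, population, outstat="sum")
#     for zoneval, stats in zonestats.items():
#         print(zoneval, stats["sum"])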
# Raster math
def mathexpr(mathexpr, rasters):
"""Performs math operations on one or more raster datasets.
The math is given in "mathexpr" as a string expression, where each input raster is
referred to as "rast1", "rast2", etc, according to their order in the input raster list.
Supports all of Python's math expressions. Logical operations like == or > are also supported
and will return binary rasters.
TODO: For now just uses band 0 for each raster, should add support for specifying bands.
TODO: Check that all math works correctly, such as divide and floats vs ints.
Alias: Raster algebra.
"""
#print rasters
# align all to same affine
rasters = (rast for rast in rasters)
reference = next(rasters)
def _aligned():
yield reference
for rast in rasters:
if rast.affine != reference.affine:
rast = manager.resample(rast, width=reference.width, height=reference.height, affine=reference.affine)
yield rast
# convert all nullvalues to zero before doing any math
def _nulled():
for rast in _aligned():
for band in rast:
# TODO: recode here somehow blanks out everything...
#band.recode("val == %s"%band.nodataval, 0.0)
pass
yield rast
# calculate math
# basic math + - * / ** %
# note: logical ops ~ & | ^ makes binary mask and return the pixel value where mask is valid
# note: relational ops < > == != return only binary mask
# note: other useful is min() and max(), equiv to (r1 < r2) | r2
rastersdict = dict([("rast%i"%(i+1),rast.bands[0].img)#.convert("F"))
for i,rast in enumerate(_nulled())])
img = PIL.ImageMath.eval(mathexpr, **rastersdict)
# should maybe create a combined mask of nullvalues for all rasters
# and filter away those nullcells from math result
# ...
# return result
outraster = RasterData(image=img, **reference.meta)
return outraster
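# Example usage (a hypothetical sketch only; "nir" and "red" are assumed to be
# RasterData instances; note the TODO above about integer vs float division):
#
#     ndvi = mathexpr("(rast1 - rast2) / (rast1 + rast2)", [nir, red])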
# Interpolation
def interpolate(pointdata, rasterdef, valuefield=None, algorithm="idw", **kwargs):
"""Exact interpolation between point data values. Original values are kept intact.
The raster extent and cell size on which to interpolate must be defined in "rasterdef".
First all points are burnt onto the output raster. By default, each point counts as a value of 1,
but "valuefield" can also be set to a field name that determines the relative weight of each
point feature.
When multiple points land in the same output cell, the point values are aggregated according
to the "aggfunc" keyword argument (defaults to mean) to determine the cell's final value.
Once the points are converted to cell values, the remaining cells without any point features are
interpolated.
NOTE: The interpolation algorithm is set with "algorithm", but currently only "idw"
(inverse distance weighting) is supported.
TODO: Add spline, kdtree, and kriging methods.
"""
# some links
#http://docs.scipy.org/doc/scipy-0.16.0/reference/generated/scipy.interpolate.RegularGridInterpolator.html
#https://github.com/JohannesBuchner/regulargrid
#http://stackoverflow.com/questions/24978052/interpolation-over-regular-grid-in-python
#http://www.qgistutorials.com/en/docs/creating_heatmaps.html
#see especially: http://resources.arcgis.com/en/help/main/10.1/index.html#//009z0000000v000000
# TODO: require aggfunc with exception...
if not pointdata.type == "Point":
raise Exception("Pointdata must be of type point")
if rasterdef["mode"] == "1bit":
raise Exception("Cannot do interpolation to a 1bit raster")
algorithm = algorithm.lower()
if algorithm == "idw":
# create output raster
raster = RasterData(**rasterdef)
newband = raster.add_band() # add empty band
# default options
neighbours = kwargs.get("neighbours")
sensitivity = kwargs.get("sensitivity")
aggfunc = kwargs.get("aggfunc", "mean")
# collect counts or sum field values
from ..vector import sql
def key(feat):
x,y = feat.geometry["coordinates"]
px,py = raster.geo_to_cell(x,y)
return px,py
def valfunc(feat):
val = feat[valuefield] if valuefield else 1
return val
fieldmapping = [("aggval",valfunc,aggfunc)]
points = dict()
for (px,py),feats in itertools.groupby(pointdata, key=key):
aggval = sql.aggreg(feats, fieldmapping)[0]
if isinstance(aggval,(int,float)): # only consider numeric values, ignore missing etc
points[(px,py)] = aggval
# retrieve input options
if neighbours == None:
# TODO: not yet implemented
neighbours = int(len(points)*0.10) #default neighbours is 10 percent of known points
if sensitivity == None:
sensitivity = 3 # same as the IDW power; higher sensitivity means far-away points contribute much less
# some precalcs
senspow = (-sensitivity/2.0)
# some defs
def _calcvalue(gridx, gridy, points):
weighted_values_sum = 0.0
sum_of_weights = 0.0
for (px,py),pval in points.items():
weight = ((gridx-px)**2 + (gridy-py)**2)**senspow
sum_of_weights += weight
weighted_values_sum += weight * pval
return weighted_values_sum / sum_of_weights
# calculate values
for gridy in range(raster.height):
for gridx in range(raster.width):
newval = points.get((gridx,gridy))
if newval != None:
# the grid cell coincides exactly with one of the input points, so just keep that point's value
pass
else:
# main calc
newval = _calcvalue(gridx, gridy, points)
newband.set(gridx,gridy,newval)
elif algorithm == "spline":
# see C scripts at http://davis.wpi.edu/~matt/courses/morph/2d.htm
# looks simple enough
# ...
raise Exception("Not yet implemented")
elif algorithm == "kdtree":
# https://github.com/stefankoegl/kdtree
# http://rosettacode.org/wiki/K-d_tree
raise Exception("Not yet implemented")
elif algorithm == "kriging":
# ...?
raise Exception("Not yet implemented")
else:
raise Exception("Not a valid interpolation algorithm")
return raster
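# The IDW branch above is the standard Shepard weighting: each known point (xi, yi, vi)
# contributes with weight wi = ((x - xi)**2 + (y - yi)**2) ** (-sensitivity / 2),
# i.e. wi = 1 / dist**sensitivity, and the cell value is sum(wi * vi) / sum(wi),
# which is exactly what _calcvalue() computes via the precalculated "senspow" exponent.
#
# Example usage (a hypothetical sketch only; "stations" point data with a
# "temperature" field and a raster definition dict are assumed):
#
#     rasterdef = dict(mode="float32", width=500, height=250,
#                      affine=some_affine)  # some_affine is a placeholder transform
#     surface = interpolate(stations, rasterdef, valuefield="temperature", sensitivity=2)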
def smooth(pointdata, rasterdef, valuefield=None, algorithm="radial", **kwargs):
"""
Bins and aggregates point data values, followed by simple value smearing to produce a smooth surface raster.
Different from interpolation in that the new values do not exactly pass through the original values.
The raster extent and cell size on which to smooth must be defined in "rasterdef".
Smoothing works by considering a region around each pixel, specified by "algorithm".
Supported binning regions include:
- "radial" (default): a circle of size "radius";
- "gauss": a Gaussian statistical function applied to the distance-weighted average of pixels
within "radius" distance of the output pixel.
The points considered to be part of that region
are then summarized with a statistic as determined by "aggfunc" (defaults to sum) and used as the pixel
value. For the Gaussian method, this is the function used to aggregate points to pixels before
blurring.
By default, each point counts as a value of 1,
but "valuefield" can also be set to a field name that determies the relative weight of each
point feature.
TODO: Add more methods such as box convolving.
Alias: convolve, blur, heatmap (though "heatmap" is strictly an incorrect usage).
"""
# TODO: this assumes points, but isn't smoothing generally understood to apply to existing rasters?
# ...or are these the same maybe?
if not pointdata.type == "Point":
raise Exception("Pointdata must be of type point")
if rasterdef["mode"] == "1bit":
raise Exception("Cannot do interpolation to a 1bit raster")
algorithm = algorithm.lower()
if algorithm == "radial":
# create output raster
raster = RasterData(**rasterdef)
raster.add_band() # add empty band
band = raster.bands[0]
# calculate for each cell
if not hasattr(pointdata, "spindex"):
pointdata.create_spatial_index()
raster.convert("float32") # output will be floats
if not "radius" in kwargs:
raise Exception("Radius must be set for 'radial' method")
rad = float(kwargs["radius"])
c = None
for cell in band:
#if c != cell.row:
# print cell.row
# c = cell.row
px,py = cell.col,cell.row
x,y = raster.cell_to_geo(px,py)
def weights():
for feat in pointdata.quick_overlap([x-rad,y-rad,
x+rad,y+rad]):
fx,fy = feat.geometry["coordinates"] # assumes single point
dist = math.sqrt((fx-x)**2 + (fy-y)**2)
if dist <= rad:
weight = feat[valuefield] if valuefield else 1
yield weight * (1 - (dist / rad))
from ..vector import sql
valfunc = lambda v: v
aggfunc = kwargs.get("aggfunc", "sum")
fieldmapping = [("aggval",valfunc,aggfunc)]
aggval = sql.aggreg(weights(), fieldmapping)[0]
if aggval or aggval == 0:
cell.value = aggval
elif algorithm == "gauss":
# create output raster
raster = RasterData(**rasterdef)
raster.add_band() # add empty band
newband = raster.bands[0]
# collect counts or sum field values
from ..vector import sql
def key(feat):
x,y = feat.geometry["coordinates"]
px,py = raster.geo_to_cell(x,y)
return px,py
def valfunc(feat):
val = feat[valuefield] if valuefield else 1
return val
aggfunc = kwargs.get("aggfunc", "sum")
fieldmapping = [("aggval",valfunc,aggfunc)]
for (px,py),feats in itertools.groupby(pointdata, key=key):
aggval = sql.aggreg(feats, fieldmapping)[0]
newband.set(px,py, aggval)
# apply gaussian filter
if raster.mode.endswith("8"):
# PIL gauss filter only work on L mode images
import PIL, PIL.ImageOps, PIL.ImageFilter
rad = kwargs.get("radius", 3)
filt = PIL.ImageFilter.GaussianBlur(radius=rad)
newband.img = newband.img.filter(filt)
else:
# Gauss calculation in pure Python
# algorithm 1 from http://blog.ivank.net/fastest-gaussian-blur.html
# TODO: implement much faster algorithm 4
# TODO: output seems to consider a square around each feat, shouldn't it be a circle?
# TODO: output values are very low decimals, is that correct? maybe it's just a
# ...probability weight that has to be applied to the orig value?
# check out: https://homepages.inf.ed.ac.uk/rbf/HIPR2/gsmooth.htm
origband = newband.copy()
raster.convert("float32") # output values will be floats
rad = kwargs.get("radius", 3)
rs = int(rad*2.57+1) # significant radius
# some precalcs
rr2 = 2*rad*rad
prr2 = float(math.pi*2*rad*rad)
exp = math.exp
for i in range(raster.height):
#print i
for j in range(raster.width):
val = 0.0
wsum = 0.0
for iy in range(i-rs, i+rs+1):
for ix in range(j-rs, j+rs+1):
x = min([raster.width-1, max([0,ix])])
y = min([raster.height-1, max([0,iy])])
dsq = (ix-j)*(ix-j)+(iy-i)*(iy-i)
weight = exp(-dsq/rr2) / prr2
val += origband.get(x,y).value * weight
wsum += weight
newval = val/wsum
#print j,i,newval
newband.set(j,i, newval)
elif algorithm == "box":
# http://stackoverflow.com/questions/6652671/efficient-method-of-calculating-density-of-irregularly-spaced-points
# ...
pass
else:
raise Exception("Not a valid smoothing algorithm")
return raster
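# Note on the pure-Python "gauss" branch above: the weight applied to each neighbour
# is the 2D Gaussian kernel G(d) = exp(-d**2 / (2 * rad**2)) / (2 * pi * rad**2),
# with the blur radius "rad" acting as the standard deviation; "rr2" and "prr2" are
# simply those two denominators precalculated outside the loops.
#
# Example usage (a hypothetical sketch only; "incidents" point data and a rasterdef
# dict like the one sketched for interpolate() above are assumed; radius is in
# geographic units):
#
#     heat = smooth(incidents, rasterdef, algorithm="radial", radius=5000)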
def density(pointdata, rasterdef, algorithm="radial", **kwargs):
"""Creates a raster of the density of points, ie the frequency of their occurance
without thinking about the values of each point. Same as using the smooth function
without setting the valuefield."""
# only difference being no value field contributes to heat
# TODO: allow density of linear and polygon features too,
# maybe by counting nearby features
return smooth(pointdata, rasterdef, valuefield=None, algorithm=algorithm, **kwargs)
def disperse(vectordata, valuekey, weight=None, **rasterdef):
"""Disperses values in a vector dataset based on a raster dataset containing weights.
If the raster weight is not given, then a raster geotransform must be given and the
value is divided into equal portions for all the cells.
After each feature disperses its values into cells, the sum of those cells should always equal
the original feature value. However, in the case of features that overlap each other, cell values will
be added on top of each other, and there will be no way of reconstructing how much of a cell's value
belonged to one feature or the other.
Returns a raster dataset of the dispersed values.
"""
if weight:
outrast = RasterData(mode="float32", **weight.rasterdef)
else:
outrast = RasterData(mode="float32", **rasterdef)
outband = outrast.add_band()
outband.nodataval = None
for feat in vectordata:
if not feat.geometry:
continue
featdata = vector.data.VectorData(features=[feat])
if weight:
featweight = manager.clip(weight, featdata)
else:
featweight = manager.rasterize(featdata, **outrast.rasterdef)
# TODO: Does clip and rasterize write nodataval to nonvalid areas? Is this correct?
# Unless nodataval is reset, those then prevent correct math operations somehow...
featweight.bands[0].nodataval = None
weightsum = featweight.bands[0].summarystats("sum")["sum"]
if weightsum is None:
continue
weightprop = featweight.bands[0] / float(weightsum) / 255.0 # / 255 is a hack, have to decide if binary rasters should be 1 or 255.
total = valuekey(feat)
weightvalue = weightprop * total
weightvalue.nodataval = None
outband = outband + weightvalue
outrast.bands[0] = outband
return outrast
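# Example usage (a hypothetical sketch only; "countries" vector data with a "gdp"
# field and a "population" weight raster are assumed):
#
#     gdp_surface = disperse(countries, valuekey=lambda f: f["gdp"], weight=population)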
# Distance Analysis
def distance(data, **rasterdef):
"""Calculates raster of distances to nearest feature in vector data.
Output raster extent and cell size must be set with keyword arguments.
Uses fast approach that rasterizes the edge of the vector data and only compares
distances to each edge pixel, significantly reducing time for complex geometries.
TODO: Distances are measured using eucledian distance, should also allow option for geodetic.
"""
# TODO: allow max dist limit
if isinstance(data, RasterData):
raise NotImplementedError("Distance tool requires vector data")
from shapely.geometry import Point, MultiPoint, LineString, asShape
outrast = RasterData(mode="float32", **rasterdef)
outband = outrast.add_band() # make sure all values are set to 0 dist at outset
fillband = manager.rasterize(data, **rasterdef).bands[0]
# ALT1: each pixel to each feat
# TODO: this approach is super slow...
## geoms = [feat.get_shapely() for feat in data]
## for cell in fillband:
## if cell.value == 0:
## # only calculate where vector is absent
## #print "calc..."
## point = Point(cell.x,cell.y) #asShape(cell.point)
## dist = point.distance(geoms[0]) #min((point.distance(g) for g in geoms))
## #print cell.col,cell.row,dist
## outband.set(cell.col, cell.row, dist)
## else:
## pass #print "already set", cell.value
# ALT2: each pixel to union
## # TODO: this approach gets stuck...
##
## import shapely
## outline = shapely.ops.cascaded_union([feat.get_shapely() for feat in data])
## for cell in fillband:
## if cell.value == 0:
## # only calculate where vector is absent
## #print "calc..."
## point = Point(cell.x,cell.y)
## dist = point.distance(outline)
## print cell.col,cell.row,dist
## outband.set(cell.col, cell.row, dist)
## else:
## pass #print "already set", cell.value
# ALT3: each pixel to each rasterized edge pixel
# Pixel to pixel inspiration from: https://trac.osgeo.org/postgis/wiki/PostGIS_Raster_SoC_Idea_2012/Distance_Analysis_Tools/document
# TODO: maybe shouldnt be outline points but outline line, to calc dist between points too?
# TODO: current morphology approach gets crazy for really large rasters
# maybe optimize by simplifying multiple points on straight line, and make into linestring
#outlineband = manager.rasterize(data.convert.to_lines(), **rasterdef).bands[0]
## outlinepixels = PIL.ImageMorph.MorphOp(op_name="edge").match(fillband.img)
## print "outlinepixels",len(outlinepixels)
##
## outlinepoints = MultiPoint([outrast.cell_to_geo(*px) for px in outlinepixels])
##
## for cell in fillband:
## if cell.value == 0:
## # only calculate where vector is absent
## point = Point(cell.x,cell.y)
## dist = point.distance(outlinepoints)
## outband.set(cell.col, cell.row, dist)
# ALT4: each pixel to each rasterized edge pixel, with spindex
#outlineband = manager.rasterize(data.convert.to_lines(), **rasterdef).bands[0]
outlinepixels = PIL.ImageMorph.MorphOp(op_name="edge").match(fillband.img)
print("outlinepixels",len(outlinepixels))
import rtree
spindex = rtree.index.Index()
outlinepoints = [outrast.cell_to_geo(*px) for px in outlinepixels]
for i,p in enumerate(outlinepoints):
bbox = list(p) + list(p)
spindex.insert(i, bbox)
for cell in fillband:
if cell.value == 0:
# only calculate where vector is absent
bbox = [cell.x, cell.y, cell.x, cell.y]
nearestid = next(spindex.nearest(bbox, num_results=1))
point = cell.x,cell.y
otherpoint = outlinepoints[nearestid]
dist = math.hypot(point[0]-otherpoint[0], point[1]-otherpoint[1])
outband.set(cell.col, cell.row, dist)
# ALT5: each pixel to reconstructed linestring of rasterized edge pixels, superfast if can reconstruct
## outlinepixels = PIL.ImageMorph.MorphOp(op_name="edge").match(fillband.img)
##
## # TODO: reconstruct linestring from outlinepixels...
## outline = LineString([outrast.cell_to_geo(*px) for px in outlinepixels])
##
## # TODO: simplify linestring...
#### print "outlinepixels",len(outlinepixels)
#### simplified = PIL.ImagePath.Path(outlinepixels)
#### simplified.compact(2) # 2 px
#### outlinepixels = simplified.tolist()
#### print "simplified",len(outlinepixels)
##
## for cell in fillband:
## if cell.value == 0:
## # only calculate where vector is absent
## point = Point(cell.x,cell.y)
## dist = point.distance(outline)
## outband.set(cell.col, cell.row, dist)
# ALT6: incremental neighbour growth check overlap
# ie
#im = fillband.img
#for _ in range(32):
# count,im = PIL.ImageMorph.MorphOp(op_name="erosion4").apply(im)
#im.show()
# ...
return outrast
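# Example usage (a hypothetical sketch only; "roads" vector data and a raster
# definition like those used elsewhere in this module are assumed; "mode" is
# already fixed to float32 inside the function, so it is omitted here):
#
#     rasterdef = dict(width=1000, height=500,
#                      affine=some_affine)  # some_affine is a placeholder transform
#     distances = distance(roads, **rasterdef)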
# Morphology
def morphology(raster, selection, pattern, bandnum=0):
"""General purpose morphology pattern operations, returning binary raster.
First, "selection" is a conditional expression converting the raster to binary,
defining which vales to interpret as on-values.
Then, an algorithm analyzes the on-values and looks for the pattern set in "pattern",
which includes "edge", "dilation", "erosion", or a manual input input string as
expected by PIL.ImageMorph.
"""
premask = raster.mask
cond = raster.bands[bandnum].conditional(selection)
count,im = PIL.ImageMorph.MorphOp(op_name=pattern).apply(cond.img)
out = RasterData(image=im, **raster.rasterdef)
out.mask = premask
return out
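# Example usage (a hypothetical sketch only; "landcover" is assumed to be a RasterData
# where class 1 marks forest; this would extract the forest edge pixels as a binary raster):
#
#     edges = morphology(landcover, "val == 1", "edge")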
# Path Analysis
def least_cost_path(point1, point2, **options):
# use https://github.com/elemel/python-astar
# maybe also: https://www.codeproject.com/articles/9040/maze-solver-shortest-path-finder
pass
# Terrain Analysis
def viewshed(point, direction, height, raster, **kwargs):
pass
def slope(raster):
pass
|
|
#!/usr/bin/env python
# -*- coding: ascii -*-
u"""
==============
CSS Minifier
==============
CSS Minifier.
The minifier is based on the semantics of the `YUI compressor`_\\, which
itself is based on `the rule list by Isaac Schlueter`_\\.
:Copyright:
Copyright 2011 - 2021
Andr\xe9 Malo or his licensors, as applicable
:License:
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
This module is a re-implementation aiming for speed instead of maximum
compression, so it can be used at runtime (rather than during a preprocessing
step). RCSSmin does syntactical compression only (removing spaces, comments
and possibly semicolons). It does not provide semantic compression (like
removing empty blocks, collapsing redundant properties etc). It does, however,
support various CSS hacks (by keeping them working as intended).
Here's a feature list:
- Strings are kept, except that escaped newlines are stripped
- Space/Comments before the very end or before various characters are
stripped: ``:{});=>],!`` (The colon (``:``) is a special case, a single
space is kept if it's outside a ruleset.)
- Space/Comments at the very beginning or after various characters are
stripped: ``{}(=:>[,!``
- Optional space after unicode escapes is kept, resp. replaced by a simple
space
- whitespaces inside ``url()`` definitions are stripped, except if it's a
quoted non-base64 data url
- Comments starting with an exclamation mark (``!``) can be kept optionally.
- All other comments and/or whitespace characters are replaced by a single
space.
- Multiple consecutive semicolons are reduced to one
- The last semicolon within a ruleset is stripped
- CSS Hacks supported:
- IE7 hack (``>/**/``)
- Mac-IE5 hack (``/*\\*/.../**/``)
- The boxmodelhack is supported naturally because it relies on valid CSS2
strings
- Between ``:first-line`` and the following comma or curly brace a space is
inserted. (apparently it's needed for IE6)
- Same for ``:first-letter``
rcssmin.c is a reimplementation of rcssmin.py in C and improves runtime up to
factor 100 or so (depending on the input). docs/BENCHMARKS in the source
distribution contains the details.
Supported python versions are 2.7 and 3.4+.
.. _YUI compressor: https://github.com/yui/yuicompressor/
.. _the rule list by Isaac Schlueter: https://github.com/isaacs/cssmin/
"""
__author__ = u"Andr\xe9 Malo"
__license__ = "Apache License, Version 2.0"
__version__ = '1.1.0'
__all__ = ['cssmin']
import re as _re
def _make_cssmin(python_only=False):
"""
Generate CSS minifier.
Parameters:
python_only (bool):
Use only the python variant. If true, no attempt is made to load
the C extension.
Returns:
callable: Minifier
"""
# pylint: disable = too-many-locals
if not python_only:
try:
import _rcssmin
except ImportError:
pass
else:
# Ensure that the C version is in sync
if getattr(_rcssmin, '__version__', None) == __version__:
return _rcssmin.cssmin
nl = r'(?:[\n\f]|\r\n?)' # pylint: disable = invalid-name
spacechar = r'[\r\n\f\040\t]'
unicoded = r'[0-9a-fA-F]{1,6}(?:[\040\n\t\f]|\r\n?)?'
escaped = r'[^\n\r\f0-9a-fA-F]'
escape = r'(?:\\(?:%(unicoded)s|%(escaped)s))' % locals()
nmchar = r'[^\000-\054\056\057\072-\100\133-\136\140\173-\177]'
# nmstart = r'[^\000-\100\133-\136\140\173-\177]'
# ident = (r'(?:'
# r'-?(?:%(nmstart)s|%(escape)s)%(nmchar)s*(?:%(escape)s%(nmchar)s*)*'
# r')') % locals()
comment = r'(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)'
# only for specific purposes. The bang is grouped:
_bang_comment = r'(?:/\*(!?)[^*]*\*+(?:[^/*][^*]*\*+)*/)'
string1 = \
r'(?:\047[^\047\\\r\n\f]*(?:\\[^\r\n\f][^\047\\\r\n\f]*)*\047)'
string2 = r'(?:"[^"\\\r\n\f]*(?:\\[^\r\n\f][^"\\\r\n\f]*)*")'
strings = r'(?:%s|%s)' % (string1, string2)
nl_string1 = \
r'(?:\047[^\047\\\r\n\f]*(?:\\(?:[^\r]|\r\n?)[^\047\\\r\n\f]*)*\047)'
nl_string2 = r'(?:"[^"\\\r\n\f]*(?:\\(?:[^\r]|\r\n?)[^"\\\r\n\f]*)*")'
nl_strings = r'(?:%s|%s)' % (nl_string1, nl_string2)
uri_nl_string1 = r'(?:\047[^\047\\]*(?:\\(?:[^\r]|\r\n?)[^\047\\]*)*\047)'
uri_nl_string2 = r'(?:"[^"\\]*(?:\\(?:[^\r]|\r\n?)[^"\\]*)*")'
uri_nl_strings = r'(?:%s|%s)' % (uri_nl_string1, uri_nl_string2)
nl_escaped = r'(?:\\%(nl)s)' % locals()
space = r'(?:%(spacechar)s|%(comment)s)' % locals()
ie7hack = r'(?:>/\*\*/)'
uri = (
# noqa pylint: disable = bad-option-value, bad-continuation
r'(?:'
r'(?:[^\000-\040"\047()\\\177]*'
r'(?:%(escape)s[^\000-\040"\047()\\\177]*)*)'
r'(?:'
r'(?:%(spacechar)s+|%(nl_escaped)s+)'
r'(?:'
r'(?:[^\000-\040"\047()\\\177]|%(escape)s|%(nl_escaped)s)'
r'[^\000-\040"\047()\\\177]*'
r'(?:%(escape)s[^\000-\040"\047()\\\177]*)*'
r')+'
r')*'
r')'
) % locals()
nl_unesc_sub = _re.compile(nl_escaped).sub
uri_data_plain = _re.compile((
r'[\047"][dD][aA][tT][aA]:([^\000-\040\\"\047,]*),'
)).match
uri_space_sub = _re.compile((
r'(%(escape)s+)|%(spacechar)s+|%(nl_escaped)s+'
) % locals()).sub
uri_space_subber = lambda m: m.groups()[0] or ''
space_sub_simple = _re.compile((
r'[\r\n\f\040\t;]+|(%(comment)s+)'
) % locals()).sub
space_sub_banged = _re.compile((
r'[\r\n\f\040\t;]+|(%(_bang_comment)s+)'
) % locals()).sub
post_esc_sub = _re.compile(r'[\r\n\f\t]+').sub
main_sub = _re.compile((
# noqa pylint: disable = bad-option-value, bad-continuation
r'([^\\"\047u>@\r\n\f\040\t/;:{}+]+)' # 1
r'|(?<=[{}(=:>[,!])(%(space)s+)' # 2
r'|^(%(space)s+)' # 3
r'|(%(space)s+)(?=(([:{});=>\],!])|$)?)' # 4, 5, 6
r'|;(%(space)s*(?:;%(space)s*)*)(?=(\})?)' # 7, 8
r'|(\{)' # 9
r'|(\})' # 10
r'|(%(strings)s)' # 11
r'|(?<!%(nmchar)s)url\(%(spacechar)s*(' # 12
r'%(uri_nl_strings)s'
r'|%(uri)s'
r')%(spacechar)s*\)'
r'|(@(?:' # 13
r'[mM][eE][dD][iI][aA]'
r'|[sS][uU][pP][pP][oO][rR][tT][sS]'
r'|[dD][oO][cC][uU][mM][eE][nN][tT]'
r'|(?:-(?:'
r'[wW][eE][bB][kK][iI][tT]|[mM][oO][zZ]|[oO]|[mM][sS]'
r')-)?'
r'[kK][eE][yY][fF][rR][aA][mM][eE][sS]'
r'))(?!%(nmchar)s)'
r'|(%(ie7hack)s)(%(space)s*)' # 14, 15
r'|(:[fF][iI][rR][sS][tT]-[lL]' # 16
r'(?:[iI][nN][eE]|[eE][tT][tT][eE][rR]))'
r'(%(space)s*)(?=[{,])' # 17
r'|(%(nl_strings)s)' # 18
r'|(%(escape)s[^\\"\047u>@\r\n\f\040\t/;:{}+]*)' # 19
) % locals()).sub
# print(main_sub.__self__.pattern)
def main_subber(keep_bang_comments):
""" Make main subber """
in_macie5, in_rule, at_group = [0], [0], [0]
if keep_bang_comments:
space_sub = space_sub_banged
def space_subber(match):
""" Space|Comment subber """
if match.lastindex:
group1, group2 = match.group(1, 2)
if group2:
if group1.endswith(r'\*/'):
in_macie5[0] = 1
else:
in_macie5[0] = 0
return group1
if group1.endswith(r'\*/'):
if in_macie5[0]:
return ''
in_macie5[0] = 1
return r'/*\*/'
elif in_macie5[0]:
in_macie5[0] = 0
return '/**/'
return ''
else:
space_sub = space_sub_simple
def space_subber(match):
""" Space|Comment subber """
if match.lastindex:
if match.group(1).endswith(r'\*/'):
if in_macie5[0]:
return ''
in_macie5[0] = 1
return r'/*\*/'
elif in_macie5[0]:
in_macie5[0] = 0
return '/**/'
return ''
def fn_space_post(group):
""" space with token after """
if group(5) is None or (
group(6) == ':' and not in_rule[0] and not at_group[0]):
return ' ' + space_sub(space_subber, group(4))
return space_sub(space_subber, group(4))
def fn_semicolon(group):
""" ; handler """
return ';' + space_sub(space_subber, group(7))
def fn_semicolon2(group):
""" ; handler """
if in_rule[0]:
return space_sub(space_subber, group(7))
return ';' + space_sub(space_subber, group(7))
def fn_open(_):
""" { handler """
if at_group[0]:
at_group[0] -= 1
else:
in_rule[0] = 1
return '{'
def fn_close(_):
""" } handler """
in_rule[0] = 0
return '}'
def fn_url(group):
""" url() handler """
uri = group(12)
data = uri_data_plain(uri)
if not data or data.group(1).lower().endswith(';base64'):
uri = uri_space_sub(uri_space_subber, uri)
return 'url(%s)' % (uri,)
def fn_at_group(group):
""" @xxx group handler """
at_group[0] += 1
return group(13)
def fn_ie7hack(group):
""" IE7 Hack handler """
if not in_rule[0] and not at_group[0]:
in_macie5[0] = 0
return group(14) + space_sub(space_subber, group(15))
return '>' + space_sub(space_subber, group(15))
table = (
# noqa pylint: disable = bad-option-value, bad-continuation
None,
None,
None,
None,
fn_space_post, # space with token after
fn_space_post, # space with token after
fn_space_post, # space with token after
fn_semicolon, # semicolon
fn_semicolon2, # semicolon
fn_open, # {
fn_close, # }
lambda g: g(11), # string
fn_url, # url(...)
fn_at_group, # @xxx expecting {...}
None,
fn_ie7hack, # ie7hack
None,
lambda g: g(16) + ' ' + space_sub(space_subber, g(17)),
# :first-line|letter followed
# by [{,] (apparently space
# needed for IE6)
lambda g: nl_unesc_sub('', g(18)), # nl_string
lambda g: post_esc_sub(' ', g(19)), # escape
)
def func(match):
""" Main subber """
idx, group = match.lastindex, match.group
if idx > 3:
return table[idx](group)
# shortcuts for frequent operations below:
elif idx == 1: # not interesting
return group(1)
# else: # space with token before or at the beginning
return space_sub(space_subber, group(idx))
return func
def cssmin(style, keep_bang_comments=False):
"""
Minify CSS.
Parameters:
style (str):
CSS to minify
keep_bang_comments (bool):
Keep comments starting with an exclamation mark? (``/*!...*/``)
Returns:
str: Minified style
"""
# pylint: disable = redefined-outer-name
is_bytes, style = _as_str(style)
style = main_sub(main_subber(keep_bang_comments), style)
if is_bytes:
style = style.encode('latin-1')
if is_bytes == 2:
style = bytearray(style)
return style
return cssmin
cssmin = _make_cssmin()
def _as_str(script):
""" Make sure the style is a text string """
is_bytes = False
if str is bytes:
if not isinstance(script, basestring): # noqa pylint: disable = undefined-variable
raise TypeError("Unexpected type")
elif isinstance(script, bytes):
is_bytes = True
script = script.decode('latin-1')
elif isinstance(script, bytearray):
is_bytes = 2
script = script.decode('latin-1')
elif not isinstance(script, str):
raise TypeError("Unexpected type")
return is_bytes, script
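# Illustrative usage sketch (not part of the original module).  The expected
# outputs in the comments follow from the rules above and are approximate.
def _example_cssmin_usage():
    """ Minify a tiny stylesheet with and without bang comments """
    plain = cssmin("a {  color: red;  }  /* note */")
    # expected roughly: 'a{color:red}'
    banged = cssmin("/*! keep */ p { margin: 0 ; }", keep_bang_comments=True)
    # expected roughly: '/*! keep */p{margin:0}'
    return plain, banged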
if __name__ == '__main__':
def main():
""" Main """
import sys as _sys
keep_bang_comments = (
'-b' in _sys.argv[1:]
or '-bp' in _sys.argv[1:]
or '-pb' in _sys.argv[1:]
)
if '-p' in _sys.argv[1:] or '-bp' in _sys.argv[1:] \
or '-pb' in _sys.argv[1:]:
xcssmin = _make_cssmin(python_only=True)
else:
xcssmin = cssmin
_sys.stdout.write(xcssmin(
_sys.stdin.read(), keep_bang_comments=keep_bang_comments
))
main()
|
|
#!/usr/bin/env python
#=============================================================================#
# #
# NAME: util_DB.py #
# #
# PURPOSE: Functions to interface with a sqlite3 database. #
# #
# MODIFIED: 06-December-2015 by C. Purcell #
# #
# CONTENTS: #
# #
# register_sqlite3_numpy_dtypes ... setup sqlite3 to read numpy arrays #
# create_DB ... create an empty DB using a dict of CREATE SQL #
# insert_arr_db ... insert recarray entries into the database #
# update_arr_db ... update DB entries using a recarray #
# select_into_arr ... run a SQL query and return a numpy recarray #
# trtype ... translate SQL types to numpy dtypes #
# schema_to_tabledef ... parse the SQL table definitions #
# sql_create_to_numpy_dtype ... create statement to recarray dtype            #
# mk_primary_key ... add a PRIMARY KEY to a column of a create statement      #
# get_tables_description ... get the description of all tables in a DB #
# #
#=============================================================================#
# #
# The MIT License (MIT) #
# #
# Copyright (c) 2015 Cormac R. Purcell #
# #
# Permission is hereby granted, free of charge, to any person obtaining a #
# copy of this software and associated documentation files (the "Software"), #
# to deal in the Software without restriction, including without limitation #
# the rights to use, copy, modify, merge, publish, distribute, sublicense, #
# and/or sell copies of the Software, and to permit persons to whom the #
# Software is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in #
# all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #
# DEALINGS IN THE SOFTWARE. #
# #
#=============================================================================#
import os
import sys
import shutil
import re
import numpy as np
import sqlite3
import traceback
from util_rec import fields_view
#-----------------------------------------------------------------------------#
def register_sqlite3_numpy_dtypes():
"""
Map numpy data-types to the limited sqlite data-types. This must be called
before using the sqlite3 database or INSERT statements will fail.
"""
for t in (np.int8, np.int16, np.int32, np.int64,
np.uint8, np.uint16, np.uint32, np.uint64):
sqlite3.register_adapter(t, long)
for t in (np.float16, np.float32, np.float64,
np.float128, np.double):
sqlite3.register_adapter(t, float)
#-----------------------------------------------------------------------------#
def create_db(dbFile, createSQLdict, LF=None):
"""
Create an empty SQLite3 database on disk.
"""
if os.path.exists(dbFile):
os.remove(dbFile)
try:
conn = sqlite3.connect(dbFile)
cursor = conn.cursor()
for sql in createSQLdict.values():
cursor.execute(sql)
conn.commit()
cursor.close()
conn.close()
except Exception:
return False
return True
#-----------------------------------------------------------------------------#
def insert_arr_db(cursor, recArr, tabName, fieldNameLst=None,
insertSQL="INSERT OR REPLACE"):
"""
Insert a numpy recarray into a database via a cursor object. It is assumed
that the fields in the recarray and database have the same names and
compatable datatypes. If the recarray contains fields NOT in the database
the user must provide a list of field names to be to be inserted. These
must be a subset of the field names in the table.
"""
# Default to all fields
if not fieldNameLst:
fieldNameLst = recArr.dtype.names
sql = '%s INTO %s (%s) ' % (insertSQL, tabName, ', '.join(fieldNameLst))
sql += 'VALUES(%s) ' % (', '.join(['?']*len(fieldNameLst)))
cursor.executemany(sql, fields_view(recArr, fieldNameLst))
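# Illustrative usage sketch (hypothetical table and column names):
#   recArr = np.array([(1, 12.5), (2, 7.1)], dtype=[('id', 'i8'), ('flux', 'f8')])
#   register_sqlite3_numpy_dtypes()   # so numpy scalars adapt to sqlite types
#   insert_arr_db(cursor, recArr, 'sources')
# builds "INSERT OR REPLACE INTO sources (id, flux) VALUES(?, ?)" and runs it
# through cursor.executemany() with a view of the recarray rows.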
#-----------------------------------------------------------------------------#
def update_arr_db(cursor, recArr, tabName, keyName, fieldNameLst=None):
"""
Do an UPDATE on existing rows
"""
# Default to all fields
if not fieldNameLst:
fieldNameLst = list(recArr.dtype.names)
# key must exist in list of field names
    if keyName not in fieldNameLst:
print "ERR: Key '%s' not in column list" % keyName
return
# Remove the key from the list and format the SQL
fieldNameLst.remove(keyName)
sql = 'UPDATE %s SET ' % tabName
sql += '=?, '.join(fieldNameLst) + '=? '
sql += 'WHERE %s = ?' % keyName
# Attach the key to the end of the field list and fetch a view
# Use fancy indexing to force the key to the last column
fieldNameLst.append(keyName)
a = fields_view(recArr, fieldNameLst)[fieldNameLst]
cursor.executemany(sql, a)
#-----------------------------------------------------------------------------#
def select_into_arr(cursor, sql, args=[]):
"""Run a SQL query and return a numpy record array."""
if args == []:
cursor.execute(sql)
else:
cursor.execute(sql, tuple(args))
try:
rows = cursor.fetchall()
if len(rows) == 0:
rows = np.array([], dtype='i4')
else:
columnNameLst = zip(*cursor.description)[0]
rows = np.rec.fromrecords(rows, names=columnNameLst)
return rows
except Exception:
print "WARNING: failed to convert SQL result to a recarray!"
print traceback.format_exc()
return None
#-----------------------------------------------------------------------------#
def trtype(dtype):
"""
Translation function for data types: SQL to recarray
RecArray dtypes:
bytes = b<n>, e.g. b1
ints = i<n>, e.g. i2, i4, i8,
unsigned ints = u<n>, e.g. u1, u2, u4, u8
floats = f<n>, e.g. f2, f4, f8
complex = c<n>, e.g. c8, c16
fixed length strings = a<n>, e.g. a10, a100
where <n> is the number of bytes / chars, so float32=f4, float64=f8
"""
floatRe = re.compile('^float')
doubleRe = re.compile('^double')
intRe = re.compile('^int')
charRe = re.compile('^varchar\((\d+)\)')
if floatRe.match(dtype):
return 'f4'
if doubleRe.match(dtype):
return 'f8'
if intRe.match(dtype):
return 'i8'
mch = charRe.match(dtype)
if mch:
return 'a' + mch.group(1)
return 'f8' # default to float64
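# For example (sketch): trtype('varchar(20)') -> 'a20', trtype('int(11)') -> 'i8',
# trtype('double') -> 'f8', trtype('float') -> 'f4'; unrecognised types fall
# back to 'f8'.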
#-----------------------------------------------------------------------------#
def schema_to_tabledef(schemaFile, addColDict={}):
"""
Parse the SQL file containing the CREATE TABLE definitions and convert to
a format that python can understand. This is then used to initialise the
numpy record arrays which store the catalogue in memory. Assumes the
create statement has the form:
'CREATE TABLE myTableName (entry1 double [args+], entry2 int(2), ... );'
    The 'CREATE TABLE' keywords must be either all upper-case or all
    lower-case. Currently recognises only 'float', 'double', 'int(n)' and
    'varchar(n)' types in the SQL.
"""
# Return these
tableDtypeDict = {}
tableSQLdict = {}
# Compile a few useful regular expressions
comment = re.compile('#.*')
# Loop through the SQL statements
fileStr=''
FH = open(schemaFile)
for line in FH:
line = line.replace('\r', ' ') # kill carriage-return
line = line.replace('\n', ' ') # kill newlines
if not comment.match(line):
fileStr += line
sqlLst = fileStr.split(';')
FH.close()
for sql in sqlLst:
dtypeDict, sqlDict = sql_create_to_numpy_dtype(sql, addColDict)
tableDtypeDict.update(dtypeDict)
tableSQLdict.update(sqlDict)
return tableDtypeDict, tableSQLdict
#-----------------------------------------------------------------------------#
def sql_create_to_numpy_dtype(sql, addColDict={}):
# Compile a few useful regular expressions
spaces = re.compile('\s+')
commaAndSpaces = re.compile(',\s+')
spacesAndComma = re.compile('\s+,')
createRe = re.compile('^(CREATE TABLE|create table) (\w+)\s*\((.+)\)\s*$')
# Simplify the SQL statement
sql = sql.replace('\r', ' ') # kill carriage-return
sql = sql.replace('\n', ' ') # kill newlines
sql = sql.strip() # kill external whitespace
sql = spaces.sub(' ', sql) # shrink internal whitespaces
sql = commaAndSpaces.sub(',', sql) # kill ambiguous spaces
sql = spacesAndComma.sub(',', sql) # kill ambiguous spaces
tableSQLdict = {}
tableDtypeDict = {}
mch = createRe.match(sql)
if mch is not None:
tableName = mch.group(2).strip()
colDefStr = mch.group(3).strip()
# Add in columns if required
if tableName in addColDict:
colDefStr += ",%s" % addColDict[tableName]
tableSQLdict[tableName] = "CREATE TABLE %s (%s)" % \
(tableName, colDefStr)
colDefLst = colDefStr.strip().split(',')
colDefLst = [x.split(' ')[:2] for x in colDefLst]
# Translate the data types into a python recarray dtype list
for i in range(len(colDefLst)):
colDefLst[i][1] = trtype(colDefLst[i][1])
colDefLst[i] = tuple(colDefLst[i])
tableDtypeDict[tableName] = colDefLst
return tableDtypeDict, tableSQLdict
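# Illustrative sketch (hypothetical table): for the statement
#   "CREATE TABLE sources (name varchar(20), ra double, nvotes int(5))"
# the function returns roughly
#   ({'sources': [('name', 'a20'), ('ra', 'f8'), ('nvotes', 'i8')]},
#    {'sources': 'CREATE TABLE sources (name varchar(20),ra double,nvotes int(5))'})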
#-----------------------------------------------------------------------------#
def mk_primary_key(sqlCreateStr, colName):
"""Add a PRIMARY KEY statement to a column of a SQL create statement"""
# Regular expressions to pick apart the create statement
createRe = re.compile('^(CREATE TABLE|create table) (\w+)\s*\((.+)\)\s*$')
prikeyRe = re.compile(".*(PRIMARY KEY|primary key)$")
sqlCreateNewStr = ""
mch = createRe.match(sqlCreateStr)
if not mch:
return sqlCreateStr
else:
createStr = mch.group(1).strip()
tableName = mch.group(2).strip()
colsDefStr = mch.group(3).strip()
colDefLst = []
for colDefStr in colsDefStr.split(','):
name, dataType = colDefStr.split(" ", 1)
if name==colName:
mch = prikeyRe.match(dataType)
if not mch:
dataType += " PRIMARY KEY"
colDefLst.append("%s %s" % (name, dataType))
sqlCreateNewStr += createStr + " " + tableName + " ("
sqlCreateNewStr += ",".join(colDefLst)
sqlCreateNewStr += ")"
return sqlCreateNewStr
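# For example (sketch):
#   mk_primary_key("CREATE TABLE sources (id int(5),ra double)", "id")
# returns roughly
#   "CREATE TABLE sources (id int(5) PRIMARY KEY,ra double)"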
#-----------------------------------------------------------------------------#
def get_tables_description(cursor):
"""Return a dictionary contain PRAGMA table_info(tabName) results for all
tables in a SQLite database. The dictionary is indexed by table name."""
sql = "SELECT name FROM sqlite_master WHERE type='table';"
tabNameArr = select_into_arr(cursor, sql)['name']
descDict = {}
for tabName in tabNameArr:
sql = "PRAGMA table_info(%s);" % tabName
descDict[tabName] = select_into_arr(cursor, sql)
return descDict
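# Sketch of the returned structure (hypothetical table name): descDict['sources']
# is a recarray of the PRAGMA table_info columns
# (cid, name, type, notnull, dflt_value, pk), one row per column in the table.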
|
|
# Copyright 2017 Sunkari Preetham Paul. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#########################################################################################
"""
//TRAINING HOG DESCRIPTOR :
//Extract the downloaded data, into the folder - 'new_train_data' in current folder
< OR >
//If the data is not of logos as downloaded, make the data folders classwise as needed
//First run the other code - 'prepare_data.py'; for this, create the data as needed
in the folder - 'raw_train_data', in current folder
//The main 'raw_data_folder', must contain sub-folders of classes (ex: BRANDS),
then each of these folders must contain sub-folders of labels (ex: MODELS),
and then the relevant data must be in the respective folders
//The data for training will be saved in the 'new_train_data'
//The default paths can be changed in the beginning of each code
//But, this is not recommended
//Run the train.py code as follows:
1) For training data :
run 'train.py train'
When prompted,
For Logistic Classification: enter 'LOGIST'
For Linear SVM MultiClass classification : enter 'SVM'
2) For classifying based on folder names in the 'new_train_data' :
run 'train.py classify'
When prompted,
For Logistic Classification: enter 'LOGIST'
For Linear SVM MultiClass classification : enter 'SVM'
The relevant mode must be selected based on the mode used in training data
########################################################################################
The code works as follows:
1) The read_train_data_paths() function reads the data from the default data path
2) Cache is created only once, so that hog features are not created each time, code is run
3) Then the training starts..
4) The weights are saved in the folder 'hog_saved_weights', once the training is complete.
5) For classification, weights are retrieved from this folder
6) 'logo.jpg' file is used for testing the saved weights. Make sure this file
is present in the current folder.
Change the training steps below, if required.
"""
###########################################################################
#num_classes will be length of class_list
num_classes = 0
training_steps = 1500
###########################################################################
import os, sys
import matplotlib.pyplot as plt
import matplotlib.image as iread
import tensorflow as tf
from PIL import Image
import numpy as np
from random import shuffle
from random import randint
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
cwd = os.getcwd()
if cwd == os.path.dirname(os.path.abspath(__file__)):
import hog
else:
folder = os.path.dirname(os.path.realpath(__file__))
from HOG import hog
image_path = os.path.join(cwd,'logo.jpg')
train_path = os.path.join(cwd,'new_train_data')
save_path = os.path.join(cwd,'hog_saved_weights')
hog_file_path = os.path.join(cwd,'hog_files')
class_list = []
train_list = []
hog_list = []
total_data = 0
batch_size = 100
class_data_count = []
##########################################################################################
#reads train data images and makes lists of paths
def read_train_data_paths(num_classes,total_data):
if not os.path.exists(train_path):
os.makedirs(train_path)
if not os.path.exists(hog_file_path):
os.makedirs(hog_file_path)
class_list.extend(os.listdir(train_path))
num_classes += len(class_list)
for folder,val in enumerate(class_list):
class_path = os.path.join(train_path,val)
hog_path = os.path.join(hog_file_path,val)
if not os.path.exists(hog_path):
os.makedirs(hog_path)
image_list = os.listdir(class_path)
class_data_count.append(len(image_list))
for i in image_list:
img_path = os.path.join(class_path,i)
train_list.append([img_path,folder])
#makes paths for cache
i = i.replace('.jpg','.txt')
i = i.replace('.JPG','.txt')
i = i.replace('.jpeg','.txt')
i = i.replace('.JPEG','.txt')
i = os.path.join(hog_path,i)
hog_list.append([i,folder])
total_data += len(hog_list)
return num_classes,total_data
#creates cache in the form of .txt files
def create_cache():
for index,image in enumerate(train_list):
if not os.path.exists(hog_list[index][0]):
#the following function is imported from hog file
hog.create_hog_file(image[0],hog_list[index][0])
else:
print('Found cache... '+hog_list[index][0])
#Creates the variables for weights and biases
def create_variables(num_classes):
W = tf.Variable(tf.truncated_normal([288, num_classes]),name='weights')
b = tf.Variable(tf.truncated_normal([1, num_classes]), name='biases')
return W,b
#creates labels; uses hog descriptors
def create_labels(count, hog_list, total_data, batch_size):
    #labels are one-hot row vectors (create_svm_labels below uses -1/+1 instead)
point = count
path = hog_list[count][0]
lab = hog_list[count][1]
y = np.zeros([1,num_classes])
y[0][lab] = 1
x = hog.read_hog_file(path)
x = np.expand_dims(x,axis=0)
count += 1
extra = np.min([batch_size,total_data-point])
while count<point+extra and count<total_data:
path = hog_list[count][0]
lab = hog_list[count][1]
y_new = np.zeros([1,num_classes])
y_new[0][lab] = 1
y = np.concatenate((y,y_new), axis=0)
x_new = hog.read_hog_file(path)
x_new = np.expand_dims(x_new,axis=0)
x = np.concatenate((x,x_new), axis=0)
count+=1
return x,y
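#Sketch of the expected shapes (hypothetical numbers): with 288-dimensional HOG
#vectors, batch_size=100 and enough remaining data, create_labels(0, hog_list,
#total_data, 100) returns x of shape (100, 288) and one-hot y of shape
#(100, num_classes)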
#evaluates accuracy
def evaluate_accuracy(final,labels):
prediction = tf.argmax(final,axis=1)
ground_truth = tf.argmax(labels,axis=1)
evaluate = tf.equal(prediction,ground_truth)
accuracy = tf.reduce_mean(tf.cast(evaluate,dtype=tf.float32), axis=0)
return accuracy*100
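#For example (sketch): if the predicted argmax per row is [2, 0, 1] and the
#ground-truth argmax is [2, 1, 1], two of the three rows agree, so
#evaluate_accuracy returns a tensor evaluating to roughly 66.7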
#Creates a model for SOFTMAX
def model(W,b,num_classes):
x = tf.placeholder(tf.float32,[None, 288])
y = tf.placeholder(tf.float32,[None, num_classes])
logits = tf.add(tf.matmul(x,W),b)
prediction = tf.nn.softmax(logits)
loss = tf.nn.softmax_cross_entropy_with_logits(labels=y,logits=logits)
loss = tf.reduce_mean(loss)
optimizer = tf.train.AdamOptimizer()
train_step = optimizer.minimize(loss)
accuracy = evaluate_accuracy(prediction,y)
return train_step,accuracy,x,y
#training in SOFTMAX Logistic mode
def train_values():
W,b = create_variables(num_classes)
train_step, accuracy,x,y = model(W,b,num_classes)
print('\n--------------------------------------------------------------------')
print('ONE v/s ALL training - SOFTMAX LOGISTIC MULTICLASSIFIER')
print('--------------------------------------------------------------------')
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for epoch in range(training_steps):
print('\nTraining step : '+str(epoch+1)+' .....................')
count = 0
while count<total_data:
X,Y = create_labels(count, hog_list, total_data, batch_size)
_,accu = sess.run([train_step,accuracy],feed_dict={x:X,y:Y})
print('Batch training Accuracy : '+str(accu)+' ...')
extra = np.min([batch_size,total_data-count])
count += extra
#saving weights
write_ckpt(W,sess,'weights','LOGIST')
write_ckpt(b,sess,'biases','LOGIST')
weight = sess.run(W)
bias = sess.run(b)
#Here, test data is randomly selected from the main data set
k = int(0.1*(len(hog_list)))
test = generate_random_test(k)
X,Y = create_labels(0, test, k, k)
_,pred = classify(X, weight.astype(dtype=np.float32), bias.astype(dtype=np.float32))
accu = evaluate_accuracy(pred, Y)
#Accuracy for test
with tf.Session() as sess:
print('\nTest Accuracy : '+str(sess.run(accu))+' % ....')
return weight,bias
#Classifying using Logistic function
def classify(X,W,b):
batch = X.shape[0]
X = tf.convert_to_tensor(X,dtype=tf.float32)
logits = tf.add(tf.matmul(X,W),b)
y = tf.nn.softmax(logits)
#score is the maximum probability obtained by the classifier
score = tf.reduce_max(y, axis=1)
with tf.Session() as sess:
num = sess.run(tf.argmax(y,axis=1))
score = sess.run(score)
#creating label for calculating accuracy
prediction = np.zeros([batch,num_classes])
for i in range(batch):
prediction[i][num[i]] = 1
return score,prediction
#Saves weights to file
def write_ckpt(tensor, sess, name, mode):
if not os.path.exists(save_path):
os.makedirs(save_path)
#saves weights in the respective mode folder
mode_path = os.path.join(save_path,mode)
if not os.path.exists(mode_path):
os.makedirs(mode_path)
folder_path = os.path.join(mode_path,name)
if not os.path.exists(folder_path):
os.makedirs(folder_path)
#saves as a .ckpt file
saver = tf.train.Saver({name:tensor})
filename = name+'.ckpt'
path = os.path.join(folder_path,filename)
tensor_path = saver.save(sess, path)
print("\nHog tensor saved at %s", tensor_path)
#reads .ckpt file and restores variables
#Variables must be created before calling this
def read_ckpt(ckpt_path,name,tensor,sess):
saver = tf.train.Saver({name:tensor})
saver.restore(sess, ckpt_path)
#Creating SVM labels
#key for SVM is taken -1 here
def create_svm_labels(count, hog_list, total_data, batch_size, class_num, key):
point = count
path = hog_list[count][0]
lab = hog_list[count][1]
y = np.array([[key]])
if lab==class_num:
y[0][0] = 1
x = hog.read_hog_file(path)
x = np.expand_dims(x,axis=0)
count += 1
extra = np.min([batch_size,total_data-point])
while count<point+extra and count<total_data:
path = hog_list[count][0]
lab = hog_list[count][1]
y_new = np.array([[key]])
if lab==class_num:
y_new[0][0] = 1
y = np.concatenate((y,y_new), axis=0)
x_new = hog.read_hog_file(path)
x_new = np.expand_dims(x_new,axis=0)
x = np.concatenate((x,x_new), axis=0)
count+=1
return x,y
#Creates Linear SVM Model
def Linear_SVM_model(W,b):
#W must be of shape [288,1]
x = tf.placeholder(tf.float32,[None, 288])
y = tf.placeholder(tf.float32,[None, 1])
# Regularisation constant
C = 1
# Model is as follows:
# hyperplane : hplane = W*x + b
# cost = (1/n)*sum( max( 0, (1-y*hplane) ) ) + C*||W||^2
h_plane = tf.add(tf.matmul(x,W),b)
h_plane = 1.-tf.multiply(y,h_plane)
cost = tf.maximum(0.,h_plane)
cost = tf.reduce_mean(cost,axis=0)
cost += C*tf.reduce_sum(tf.square(W), axis=1)
optimizer = tf.train.AdamOptimizer()
train_step = optimizer.minimize(cost)
return train_step,x,y
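#Numeric sketch of the cost above (not part of the training graph), written in
#plain numpy so it can be checked by hand.  Shapes follow the model: W is
#[288, 1], b is [1, 1], and Y holds +1/-1 labels in a column vector.
def _example_hinge_cost(X, Y, W, b, C=1):
    h_plane = np.dot(X, W) + b                   #hyperplane: W*x + b
    hinge = np.maximum(0., 1. - Y * h_plane)     #max(0, 1 - y*(W*x + b))
    return np.mean(hinge) + C * np.sum(W ** 2)   #+ C*||W||^2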
#Generates random test data from the main data list
#num is the number of data
def generate_random_test(num):
test = []
for i in range(num):
        s = randint(0, total_data - 1)  #valid indices are 0..total_data-1
test.append(hog_list[s])
return test
#Trains SVM model
#Training each class separately
#One vs All classification
def train_SVM():
print('\n--------------------------------------------------------------------')
print('ONE v/s ALL training - SVM MULTICLASSIFIER')
print('--------------------------------------------------------------------')
W_main = np.zeros([288,num_classes])
b_main = np.zeros([1,num_classes])
for i in range(num_classes):
W = tf.Variable(tf.truncated_normal([288,1]))
b = tf.Variable(tf.truncated_normal([1,1]))
print('\nTraining SVM for Class '+str(i+1)+'/'+str(num_classes)+' : ' + class_list[i]+' .......................................\n')
train_step,x,y = Linear_SVM_model(W,b)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for epoch in range(training_steps):
print('................ '+str(i+1)+'/'+str(num_classes)+' Training step : '+str(epoch+1)+' ................')
count = 0
while count<total_data:
print('Image: '+str(count+1)+'/'+str(total_data)+' ...')
X,Y = create_svm_labels(count, hog_list, total_data, batch_size, i, -1)
sess.run(train_step,feed_dict={x:X,y:Y})
extra = np.min([batch_size,total_data-count])
count += extra
#Weights for each class are added to the main matrix as columns
W_main[:,i] = (sess.run(W))[:,0]
b_main[:,i] = (sess.run(b))[:,0]
#Generates Test data and tests the trained model
k = int(0.1*(len(hog_list)))
test = generate_random_test(k)
X,Y = create_labels(0, test, k, k)
_,_,pred = SVM_classify(X, W_main.astype(dtype=np.float32), b_main.astype(dtype=np.float32))
accu = evaluate_accuracy(pred, Y)
with tf.Session() as sess:
print('\nTest Accuracy : '+str(sess.run(accu))+' % ....')
#Creates weights and biases for saving
W_final = tf.Variable(W_main.astype(dtype=np.float32),name='weights')
b_final = tf.Variable(b_main.astype(dtype=np.float32),name='biases')
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
write_ckpt(W_final,sess,'weights','SVM')
write_ckpt(b_final,sess,'biases','SVM')
return W_main,b_main
#Classifier for SVM Model
def SVM_classify(X,W,b):
batch = X.shape[0]
X = tf.convert_to_tensor(X,dtype=tf.float32)
h_plane = tf.add(tf.matmul(X,W),b)
#score is the maximum positive distance from the hyperplane
score = tf.reduce_max(h_plane, axis=1)
with tf.Session() as sess:
num = sess.run(tf.argmax(h_plane,axis=1))
score = sess.run(score)
plane = sess.run(h_plane)
#Creating label vector for validating accuracy
prediction = np.zeros([batch,num_classes])
for i in range(batch):
prediction[i][num[i]] = 1
return score,plane, prediction
##################################################################################################
if __name__ == '__main__':
if sys.argv[1]=='train':
line = input('Enter the mode of training ( SVM / LOGIST ) : ?\n')
#in case of logos, num_classes is the no. of Brands
a,b = read_train_data_paths(num_classes,total_data)
num_classes += a
total_data += b
#Saving cache in the form of txt file
#Hog features are saved as cache
create_cache()
print('\nTotal '+str(total_data)+ ' images are found.....')
print('\nTraining Steps : '+str(training_steps)+'\n')
#checking the mode input
if line=='SVM':
weights,biases = train_SVM()
W_tensor = tf.convert_to_tensor(weights, dtype=tf.float32)
b_tensor = tf.convert_to_tensor(biases, dtype=tf.float32)
#hog module is called here again
X = hog.hog_from_path(image_path)
_,_,prediction = SVM_classify(X,W_tensor,b_tensor)
elif line=='LOGIST':
shuffle(hog_list)
W_new,b_new = train_values()
W_tensor = tf.convert_to_tensor(W_new, dtype=tf.float32)
b_tensor = tf.convert_to_tensor(b_new, dtype=tf.float32)
#hog module is called here again
X = hog.hog_from_path(image_path)
_,prediction = classify(X,W_tensor,b_tensor)
print('\nThe logo belongs to : '+str(class_list[np.argmax(prediction)]))
elif sys.argv[1]=='classify':
line = input('Enter mode of training used ( SVM / LOGIST ) : ?\n')
#in case of logos, num_classes is the no. of brands
a,b = read_train_data_paths(num_classes,total_data)
num_classes += a
total_data += b
W,b = create_variables(num_classes)
mode_path = os.path.join(save_path, line)
#reading weights from the saved checkpoints
with tf.Session() as sess:
read_ckpt(os.path.join(mode_path,'weights/weights.ckpt'),'weights',W,sess)
read_ckpt(os.path.join(mode_path,'biases/biases.ckpt'),'biases',b,sess)
W_array = sess.run(W)
b_array = sess.run(b)
W_array = tf.convert_to_tensor(W_array, dtype=tf.float32)
b_array = tf.convert_to_tensor(b_array, dtype=tf.float32)
#Extracting Hog features
X = hog.hog_from_path(image_path)
#Classifying using mode
if line=='SVM':
_,_,prediction = SVM_classify(X,W_array,b_array)
elif line=='LOGIST':
_,prediction = classify(X,W_array,b_array)
print('\nThe logo belongs to : '+str(class_list[np.argmax(prediction)]))
else:
print('ERROR :: Enter either train or classify after file name......!!')
|
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
"""
Copyright (c) 2011 Tyler Kenendy <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import re
import sys
import traceback
import six
import six.moves
from types import LambdaType
from jawa.util.descriptor import method_descriptor, parse_descriptor
from jawa.constants import *
from jawa.transforms import simple_swap
from .topping import Topping
from burger.util import InvokeDynamicInfo, REF_invokeStatic
SUB_INS_EPSILON = .01
PACKETBUF_NAME = "packetbuffer" # Used to specially identify the PacketBuffer we care about
class PacketInstructionsTopping(Topping):
"""Provides the instructions used to construct network packets."""
PROVIDES = [
"packets.instructions"
]
DEPENDS = [
"packets.classes",
"identify.packet.packetbuffer",
"identify.nbtcompound",
"identify.itemstack",
"identify.chatcomponent",
"identify.metadata",
"identify.resourcelocation"
]
TYPES = {
"writeBoolean": "boolean",
"writeByte": "byte",
"writeBytes": "byte[]",
"writeChar": "char",
"writeDouble": "double",
"writeFloat": "float",
"writeInt": "int",
"writeLong": "long",
"writeShort": "short"
}
CACHE = {}
# Simple instructions are registered below
OPCODES = {}
@classmethod
def register_ins(cls, opcodes, stack_count, template, extra_method=None, category=1):
"""
        Registers an instruction handler. This should be used for instructions
        that pop one or more values from the stack and then push a new
        value onto it.
opcodes: A single opcode or a list of opcodes for that handler
stack_count: The number of things to pop from the stack
template: A format string; uses stack and operands (and extra if given)
        extra_method: Used to get a bit of additional information; it is called
                      with the handler's ctx dict and its result is exposed as {extra}
category: JVM category for the resulting StackOperand
"""
if isinstance(opcodes, six.string_types):
opcodes = [opcodes]
data = {
"stack_count": stack_count,
"template": template,
"extra_method": extra_method,
"category": category
}
for opcode in opcodes:
cls.OPCODES[opcode] = data
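    # Illustrative (hypothetical) registration, in the spirit of the handlers
    # added below: an arithmetic instruction that pops two values and pushes
    # a single combined expression, e.g.
    #   register_ins("iadd", 2, "({stack[0]} + {stack[1]})")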
# Prefix types used in instructions
INSTRUCTION_TYPES = {
'a': 'Object',
'b': 'boolean',
'c': 'char',
'd': 'double',
'f': 'float',
'i': 'int',
'l': 'long',
's': 'short'
}
CLEANUP_PATTERN = [
(re.compile("^\((.*[^(])\)$"), "\\1"),
(re.compile("(^|[() ])this\."), "\\1")
]
@staticmethod
def act(aggregate, classloader, verbose=False):
"""Finds all packets and decompiles them"""
for key, packet in six.iteritems(aggregate["packets"]["packet"]):
operations = None
try:
classname = packet["class"][:-len(".class")]
operations = _PIT.class_operations(classloader, classname, aggregate["classes"], verbose)
packet.update(_PIT.format(operations))
except Exception as e:
if verbose:
print("Error: Failed to parse instructions of packet %s (%s): %s" % (key, packet["class"], e))
traceback.print_exc()
if operations:
import json
print(json.dumps(operations, default=lambda o:o.__dict__, indent=4))
print()
@staticmethod
def class_operations(classloader, classname, classes, verbose):
"""Decompiles the instructions for a specific packet."""
# Find the writing method
cf = classloader[classname]
methods = list(cf.methods.find(returns="V", args="L" + classes["packet.packetbuffer"] + ";"))
if len(methods) == 2:
# Assume the second method is the one that writes
method = methods[1]
elif len(methods) == 1:
# 21w08a+: A constructor or static method now handles reading.
# The constructor still returns void, so the above case is still
# usually hit, but the static method returns the packet. When
# the static method exists, there only is one matching method,
# so just assume that that method handles writing.
method = methods[0]
else:
assert len(methods) == 0 # There shouldn't be more than 2 packetbuffer-related methods
if cf.super_.name.value != "java/lang/Object":
# Try the superclass
return _PIT.class_operations(classloader, cf.super_.name.value, classes, verbose)
else:
raise Exception("Failed to find method in class or superclass")
assert not method.access_flags.acc_static
assert not method.access_flags.acc_abstract
return _PIT.operations(classloader, cf, classes, verbose, method, ("this", PACKETBUF_NAME))
@staticmethod
def operations(classloader, cf, classes, verbose, method, arg_names):
"""Decompiles the specified method."""
if method.access_flags.acc_static:
assert len(arg_names) == len(method.args)
else:
# `this` is a local variable and thus needs to be counted.
assert len(arg_names) == len(method.args) + 1
# Decode the instructions
operations = []
stack = []
skip_until = -1
shortif_pos = None
shortif_cond = None
# NOTE: we only use the simple_swap transform here due to the
# expand_constants transform making it hard to use InstructionField
# InstructionField should probably be cleaned up first
for instruction in method.code.disassemble(transforms=[simple_swap]):
if skip_until != -1:
if instruction.pos == skip_until:
skip_until = -1
else:
continue
mnemonic = instruction.mnemonic
operands = [InstructionField(operand, instruction, cf.constants)
for operand in instruction.operands]
# Shortcut if
if instruction.pos == shortif_pos:
# Check to make sure that this actually is a ternary if
assert len(operations) >= 3
assert operations[-1].operation == "endif"
assert operations[-2].operation == "else"
assert operations[-3].operation == "if"
# Now get rid of the unneeded if's
operations.pop()
operations.pop()
operations.pop()
category = stack[-1].category
stack.append(StackOperand("((%(cond)s) ? %(sec)s : %(first)s)" % {
"cond": shortif_cond,
"first": stack.pop(),
"sec": stack.pop()
}, category))
shortif_cond = None
shortif_pos = None
# Method calls
if mnemonic in ("invokevirtual", "invokespecial", "invokestatic", "invokeinterface"):
operations.extend(_PIT._handle_invoke(
classloader, classes, instruction, verbose, operands[0].c,
operands[0].name, method_descriptor(operands[0].descriptor), stack
))
elif mnemonic == "invokedynamic":
InvokeDynamicInfo.create(instruction, cf).apply_to_stack(stack)
# Conditional statements and loops
elif mnemonic.startswith("if"):
if "icmp" in mnemonic or "acmp" in mnemonic:
value2 = stack.pop()
value1 = stack.pop()
elif "null" in mnemonic:
value1 = stack.pop()
value2 = "null"
else:
value1 = stack.pop()
value2 = 0
# All conditions are reversed: if the condition in the mnemonic
# passes, then we'd jump; thus, to execute the following code,
# the condition must _not_ pass
if mnemonic in ("ifeq", "if_icmpeq", "if_acmpeq", "ifnull"):
comparison = "!="
elif mnemonic in ("ifne", "if_icmpne", "if_acmpne", "ifnonnull"):
comparison = "=="
elif mnemonic in ("iflt", "if_icmplt"):
comparison = ">="
elif mnemonic in ("ifge", "if_icmpge"):
comparison = "<"
elif mnemonic in ("ifgt", "if_icmpgt"):
comparison = "<="
elif mnemonic in ("ifle", "if_icmple"):
comparison = ">"
else:
raise Exception("Unknown if mnemonic %s (0x%x)" % (mnemonic, instruction.opcode))
if comparison == "!=" and value2 == 0:
# if (something != 0) -> if (something)
condition = value1
else:
condition = "%s %s %s" % (value1, comparison, value2)
operations.append(Operation(instruction.pos, "if",
condition=condition))
operations.append(Operation(operands[0].target, "endif"))
if shortif_pos is not None:
# Clearly not a ternary-if if we have another nested if
# (assuming that it's not a nested ternary, which we
# already don't handle for other reasons)
# If we don't do this, then the following code can have
# problems:
# if (a) {
# if (b) {
# // ...
# }
# } else if (c) {
# // ...
# }
# as there would be a goto instruction to skip the
# `else if (c)` portion that would be parsed as a shortif
shortif_pos = None
shortif_cond = condition
elif mnemonic == "tableswitch":
operations.append(Operation(instruction.pos, "switch",
field=stack.pop()))
default = operands[0].target
low = operands[1].value
high = operands[2].value
for opr in six.moves.range(3, len(operands)):
target = operands[opr].target
operations.append(Operation(target, "case",
value=low + opr - 3))
# TODO: Default might not be the right place for endswitch,
# though it seems like default isn't used in any other way
# in the normal code.
operations.append(Operation(default, "endswitch"))
elif mnemonic == "lookupswitch":
raise Exception("lookupswitch is not supported")
# operations.append(Operation(instruction.pos, "switch",
# field=stack.pop()))
# for opr in six.moves.range(1, len(operands)):
# target = operands[opr].find_target(1)
# operations.append(Operation(target, "case",
# value=operands[opr].value[0]))
# operations.append(Operation(operands[0].target, "endswitch"))
elif mnemonic == "goto":
target = operands[0].target
endif = _PIT.find_next(operations, instruction.pos, "endif")
case = _PIT.find_next(operations, instruction.pos, "case")
if case is not None and target > case.position:
operations.append(Operation(instruction.pos, "break"))
elif endif is not None:
if target > instruction.pos:
endif.operation = "else"
operations.append(Operation(target, "endif"))
if len(stack) != 0:
shortif_pos = target
else:
endif.operation = "endloop"
_PIT.find_next(
operations, target, "if"
).operation = "loop"
elif target > instruction.pos:
skip_until = target
elif mnemonic == "iinc":
operations.append(Operation(instruction.pos, "increment",
field="var%s" % operands[0],
amount=operands[1]))
# Other manually handled instructions
elif mnemonic == "multianewarray":
operand = ""
for i in six.moves.range(operands[1].value):
operand = "[%s]%s" % (stack.pop(), operand)
stack.append(StackOperand(
"new %s%s" % (operands[0].type, operand)))
elif mnemonic == "pop":
stack.pop()
elif mnemonic == "pop2":
if stack.pop().category != 2:
stack.pop()
elif mnemonic == "swap":
stack[-2], stack[-1] = stack[-1], stack[-2]
elif mnemonic == "dup":
stack.append(stack[-1])
elif mnemonic == "dup_x1":
stack.insert(-2, stack[-1])
elif mnemonic == "dup_x2":
stack.insert(-2 if stack[-2].category == 2 else -3, stack[-1])
elif mnemonic == "dup2":
if stack[-1].category == 2:
stack.append(stack[-1])
else:
stack += stack[-2:]
elif mnemonic == "dup2_x1":
if stack[-1].category == 2:
stack.insert(-2, stack[-1])
else:
stack.insert(-3, stack[-2])
stack.insert(-3, stack[-1])
elif mnemonic == "dup2_x2":
if stack[-1].category == 2:
stack.insert(
-2 if stack[-2].category == 2 else -3, stack[-1]
)
else:
stack.insert(
-3 if stack[-3].category == 2 else -4, stack[-2]
)
stack.insert(
-3 if stack[-3].category == 2 else -4, stack[-1]
)
elif mnemonic == "return":
# Don't attempt to lookup the instruction in the handler
pass
elif instruction in ("istore", "lstore", "fstore", "dstore", "astore"):
# Keep track of what is being stored, for clarity
type = _PIT.INSTRUCTION_TYPES[instruction.mnemonic[0]]
arg = operands.pop().value
var = arg_names[arg] if arg < len(arg_names) else "var%s" % arg
operations.append(Operation(instruction.pos, "store",
type=type,
var=var,
value=stack.pop()))
elif instruction in ("iastore", "lastore", "fastore", "dastore", "aastore", "bastore", "castore", "sastore"):
type = _PIT.INSTRUCTION_TYPES[instruction.mnemonic[0]]
# Array store
value = stack.pop()
index = stack.pop()
array = stack.pop()
operations.append(Operation(instruction.pos, "arraystore",
type=type,
index=index,
var=array,
value=value))
# Default handlers
else:
if mnemonic not in _PIT.OPCODES:
raise Exception("Unhandled instruction opcode %s (0x%x)" % (mnemonic, instruction.opcode))
handler = _PIT.OPCODES[mnemonic]
ins_stack = []
assert len(stack) >= handler["stack_count"]
for _ in six.moves.range(handler["stack_count"]):
ins_stack.insert(0, stack.pop())
ctx = {
"operands": operands,
"stack": ins_stack,
"ins": instruction,
"arg_names": arg_names
}
if handler["extra_method"]:
ctx["extra"] = handler["extra_method"](ctx)
category = handler["category"]
try:
formatted = handler["template"].format(**ctx)
except Exception as ex:
raise Exception("Failed to format info for %s (0x%x) with template %s and ctx %s: %s" %
(mnemonic, instruction.opcode, handler["template"], ctx, ex))
stack.append(StackOperand(formatted, handler["category"]))
return operations
@staticmethod
def _handle_invoke(classloader, classes, instruction, verbose,
cls, name, desc, stack):
"""
Handles invocation of a method, returning the operations for it and also
updating the stack.
"""
num_arguments = len(desc.args)
assert len(stack) >= num_arguments
if num_arguments > 0:
arguments = stack[-num_arguments:]
else:
arguments = []
for i in six.moves.range(num_arguments):
stack.pop()
is_static = (instruction.mnemonic == "invokestatic")
obj = cls if is_static else stack.pop()
if name in _PIT.TYPES:
# Builtin netty buffer methods
assert num_arguments == 1
assert not is_static
# These methods always return the same buffer.
stack.append(obj)
return [Operation(instruction.pos, "write", type=_PIT.TYPES[name],
field=arguments[0])]
elif len(name) == 1 and isinstance(obj, StackOperand) and obj.value == PACKETBUF_NAME:
# Checking len(name) == 1 is used to see if it's a Minecraft method
# (due to obfuscation). Netty methods have real (and thus longer) names.
if num_arguments == 1:
result = _PIT._handle_1_arg_buffer_call(classloader, classes,
instruction, verbose,
cls, name, desc, obj,
arguments[0])
elif num_arguments == 2:
result = _PIT._handle_2_arg_buffer_call(classloader, classes,
instruction, verbose,
cls, name, desc, obj,
arguments)
elif num_arguments == 3:
result = _PIT._handle_3_arg_buffer_call(classloader, classes,
instruction, verbose,
cls, name, desc, obj,
arguments)
else:
raise Exception("Unexpected num_arguments: " + str(num_arguments) + " - desc " + desc)
if desc.returns.name == classes["packet.packetbuffer"]:
# Return the packetbuffer back to the stack.
stack.append(obj)
elif desc.returns.name != "void":
if verbose:
print("PacketBuffer method that returns something other than PacketBuffer used!")
stack.append(object())
return result
elif name == "<init>":
# Constructor call. Should have the instance right on the stack
# as well (due to constructors returning void).
# Add the arguments to that object.
assert stack[-1] is obj
assert isinstance(obj, StackOperand)
obj.value += "(" + _PIT.join(arguments) + ")";
return []
elif name == "forEach":
assert num_arguments == 1
assert not is_static
return _PIT._handle_foreach(classloader, classes, instruction, verbose,
cls, name, desc, obj, arguments[0])
else:
if desc.returns.name != "void":
# Assume that any function that returns something does not write
# to the buffer.
stack.append(StackOperand(
"%s.%s(%s)" % (
obj, name, _PIT.join(arguments)
),
2 if desc.returns.name in ("long", "double") else 1)
)
return []
else:
for arg in desc.args:
if arg.name == classes["packet.packetbuffer"]:
if cls == classes["metadata"]:
# Special case - metadata is a complex type but
# well documented; we don't want to include its
# exact writing but just want to instead say
# 'metadata'.
# There are two cases - one is calling an
# instance method of metadata that writes
# out the instance, and the other is a
# static method that takes a list and then
# writes that list.
return [Operation(instruction.pos, "write", type="metadata",
field=obj if not is_static else arguments[0])]
# If calling a sub-method that takes a packetbuffer as a
# parameter, it's possible that it's a sub-method that
# writes to the buffer, so we need to check it.
# Note that we do this even if the method is abstract
# or part of an interface; _sub_operations checks that.
return _PIT._sub_operations(
classloader, classes, instruction, verbose,
cls, name, desc,
[obj] + arguments if not is_static else arguments
)
else:
# Call to a method that does not take a packetbuffer.
# It might have side-effects, but we don't know what they
# would be and can't do anything with them.
if verbose:
print("Call to %s.%s%s does not use buffer; ignoring" % (cls, name, desc.descriptor))
return []
@staticmethod
def _handle_1_arg_buffer_call(classloader, classes, instruction, verbose,
cls, name, desc, instance, arg):
arg_type = desc.args[0].name
if desc.args[0].dimensions == 1:
# Array methods, which prefix a length
operations = [Operation(instruction.pos, "write",
type="varint", field="%s.length" % arg)]
if arg_type == "byte":
operations.append(Operation(instruction.pos, "write",
type="byte[]", field=arg))
elif arg_type == "int":
operations.append(Operation(instruction.pos, "write",
type="varint[]", field=arg))
elif arg_type == "long":
operations.append(Operation(instruction.pos, "write",
type="long[]", field=arg))
else:
raise Exception("Unexpected array type: " + arg_type)
return operations
assert desc.args[0].dimensions == 0
if arg_type == "java/lang/String":
            max_length = 32767  # not used at this time
return [Operation(instruction.pos, "write", type="string", field=arg)]
elif arg_type == "java/util/UUID":
return [Operation(instruction.pos, "write", type="uuid", field=arg)]
elif arg_type == "java/util/Date":
return [Operation(instruction.pos, "write", type="long", field="%s.getTime()" % arg)]
elif arg_type == "int":
# We know that the obfuscated function that takes an int or long is
# the VarInt/VarLong version, and the non-obfuscated one with a netty
# name is the regular version.
return [Operation(instruction.pos, "write", type="varint", field=arg)]
elif arg_type == "long":
return [Operation(instruction.pos, "write", type="varlong", field=arg)]
elif arg_type == "java/lang/Enum":
# If we were using the read method instead of the write method, then we could get the class for this enum...
return [Operation(instruction.pos, "write", type="enum", field=arg)]
elif arg_type == classes["nbtcompound"]:
return [Operation(instruction.pos, "write", type="nbtcompound", field=arg)]
elif arg_type == classes["itemstack"]:
return [Operation(instruction.pos, "write", type="itemstack", field=arg)]
elif arg_type == classes["chatcomponent"]:
return [Operation(instruction.pos, "write", type="chatcomponent", field=arg)]
elif arg_type == classes["identifier"]:
return [Operation(instruction.pos, "write", type="identifier", field=arg)]
elif "position" not in classes or arg_type == classes["position"]:
if "position" not in classes:
classes["position"] = arg_type
if verbose:
print("Assuming", arg_type, "is the position class")
return [Operation(instruction.pos, "write", type="position", field=arg)]
# Unknown type in packetbuffer; try inlining it as well
# (on the assumption that it's something made of a few calls,
# and not e.g. writeVarInt)
if verbose:
print("Inlining PacketBuffer.%s(%s)" % (name, arg_type))
return _PIT._sub_operations(classloader, classes, instruction, verbose,
cls, name, desc, [instance, arg])
@staticmethod
def _handle_2_arg_buffer_call(classloader, classes, instruction, verbose,
cls, name, desc, instance, args):
if desc.args[0].name == "java/lang/String" and desc.args[1].name == "int":
max_length = args[1] # not using this at this time
return [Operation(instruction.pos, "write", type="string", field=args[0])]
elif desc.args[0].name == "com/mojang/serialization/Codec":
codec = args[0]
value = args[1]
# This isn't the exact syntax used by DataFixerUpper,
# but it's close enough for our purposes
field = "%s.encode(%s)" % (codec, value)
return [Operation(instruction.pos, "write", type="nbtcompound", field=field)]
elif desc.args[0].name == "java/util/Collection" and \
desc.args[1].name == "java/util/function/BiConsumer":
# Loop that calls the consumer with the packetbuffer
# and value for each value in collection
# TODO: Disambiguate names it and itv if there are multiple loops
operations = []
field = args[0]
assert isinstance(field, StackOperand)
operations.append(Operation(instruction.pos, "write", type="varint",
field=field.value + ".size()"))
operations.append(Operation(instruction.pos, "store",
type="Iterator", var="it",
value=field.value + ".iterator()"))
operations.append(Operation(instruction.pos, "loop",
condition="it.hasNext()"))
info = args[1]
assert isinstance(info, InvokeDynamicInfo)
operations.append(Operation(instruction.pos, "store",
type=info.method_desc.args[-1].name.replace("/", "."),
var="itv", value="it.next()"))
operations += _PIT._lambda_operations(
classloader, classes, instruction, verbose,
info, [instance, "itv"]
)
# Jank: the part of the program that converts loop+endloop
# to a nested setup sorts the operations.
# Thus, if instruction.pos is used, _sub_operations
# adds epsilon to each sub-instruction, making them
# come after the endloop.
# Assume that 1 - SUB_INS_EPSILON (e.g. .99) will put
# the endloop past everything.
operations.append(Operation(instruction.pos + 1 - SUB_INS_EPSILON, "endloop"))
return operations
elif desc.args[0].name == "java/util/Optional" and \
desc.args[1].name == "java/util/function/BiConsumer":
# Write a boolean indicating whether the optional is present.
# Call the consumer with the packetbuffer and value if the optional is present.
operations = []
field = args[0]
assert isinstance(field, StackOperand)
operations.append(Operation(instruction.pos, "write", type="boolean",
field=field.value + ".isPresent()"))
operations.append(Operation(instruction.pos, "if",
condition=field.value + ".isPresent()"))
info = args[1]
assert isinstance(info, InvokeDynamicInfo)
operations += _PIT._lambda_operations(
classloader, classes, instruction, verbose,
info, [instance, field.value + ".get()"]
)
# Jank: the part of the program that converts loop+endloop
# to a nested setup sorts the operations.
# Thus, if instruction.pos is used, _sub_operations
# adds epsilon to each sub-instruction, making them
# come after the endloop.
# Assume that 1 - SUB_INS_EPSILON (e.g. .99) will put
# the endloop past everything.
operations.append(Operation(instruction.pos + 1 - SUB_INS_EPSILON, "endif"))
return operations
else:
raise Exception("Unexpected descriptor " + desc.descriptor)
@staticmethod
def _handle_3_arg_buffer_call(classloader, classes, instruction, verbose,
cls, name, desc, instance, args):
if desc.args[0].name == "java/util/Map" and \
desc.args[1].name == "java/util/function/BiConsumer" and \
desc.args[1].name == "java/util/function/BiConsumer":
# Loop that calls the consumers with the packetbuffer
# and key, and then packetbuffer and value, for each
# (key, value) pair in the map.
# TODO: Disambiguate names it and itv if there are multiple loops
operations = []
field = args[0]
assert isinstance(field, StackOperand)
operations.append(Operation(instruction.pos, "write", type="varint",
field=field.value + ".size()"))
operations.append(Operation(instruction.pos, "store",
type="Iterator", var="it",
value=field.value + ".iterator()"))
operations.append(Operation(instruction.pos, "loop",
condition="it.hasNext()"))
key_info = args[1]
val_info = args[2]
assert isinstance(key_info, InvokeDynamicInfo)
assert isinstance(val_info, InvokeDynamicInfo)
# TODO: these are violated
key_type = key_info.method_desc.args[-1].name.replace("/", ".")
val_type = val_info.method_desc.args[-1].name.replace("/", ".")
operations.append(Operation(instruction.pos, "store",
type="Map.Entry<" + key_type + ", " + val_type + ">",
var="itv", value="it.next()"))
operations += _PIT._lambda_operations(
classloader, classes, instruction, verbose,
key_info, [instance, "itv.getKey()"]
)
# TODO: Does the SUB_INS_EPSILON work correctly here?
# I think this will lead to [1.01, 1.02, 1.03, 1.01, 1.02, 1.03]
# which would get sorted wrongly, but I'm not sure
operations += _PIT._lambda_operations(
classloader, classes, instruction, verbose,
val_info, [instance, "itv.getValue()"]
)
# Same jank as with the one in _handle_2_arg_buffer_call
operations.append(Operation(instruction.pos + 1 - SUB_INS_EPSILON, "endloop"))
return operations
else:
raise Exception("Unexpected descriptor " + desc.descriptor)
@staticmethod
def _handle_foreach(classloader, classes, instruction, verbose,
cls, name, desc, instance, consumer):
assert isinstance(instance, StackOperand)
assert isinstance(consumer, InvokeDynamicInfo)
assert "Consumer" in desc.args[0].name
operations = []
operations.append(Operation(instruction.pos, "store",
type="Iterator", var="it",
value=instance.value + ".iterator()"))
operations.append(Operation(instruction.pos, "loop",
condition="it.hasNext()"))
operations.append(Operation(instruction.pos, "store",
type=consumer.method_desc.args[-1].name.replace("/", "."),
var="itv", value="it.next()"))
operations += _PIT._lambda_operations(
classloader, classes, instruction, verbose, consumer, ["itv"]
)
# See comment in _handle_1_arg_buffer_call
operations.append(Operation(instruction.pos + 1 - SUB_INS_EPSILON, "endloop"))
return operations
@staticmethod
def join(arguments, separator=", "):
"""Converts a list of object into a comma separated list"""
return separator.join(str(arg) for arg in arguments)
@staticmethod
def find_next(operations, position, operation_search):
"""Finds an operation"""
for operation in _PIT.ordered_operations(operations):
if (operation.position > position and
operation.operation == operation_search):
return operation
@staticmethod
def ordered_operations(operations):
"""Orders the operation by their actual position"""
return sorted(operations, key=lambda op: op.position)
@staticmethod
def _sub_operations(classloader, classes, instruction, verbose, invoked_class,
name, desc, args):
"""
Gets the instructions for a call to a different function.
Usually that function is in a different class.
Note that for instance methods, `this` is included in args.
"""
cache_key = "%s/%s/%s/%s" % (invoked_class, name, desc, _PIT.join(args, ","))
if cache_key in _PIT.CACHE:
cache = _PIT.CACHE[cache_key]
operations = [op.clone() for op in cache]
else:
cf = classloader[invoked_class]
method = cf.methods.find_one(name=name, args=desc.args_descriptor)
            assert method is not None
if method.access_flags.acc_abstract:
assert not method.access_flags.acc_static
call_type = "interface" if cf.access_flags.acc_interface else "abstract"
operations = [Operation(0, "interfacecall", type=call_type,
target=invoked_class, name=name,
method=name + desc.descriptor, field=args[0],
args=_PIT.join(args[1:]))]
else:
operations = _PIT.operations(classloader, cf, classes, verbose,
method, args)
# Sort operations by position, and try to ensure all of them fit between
# two normal instructions. Note that since operations are renumbered
# on each use of _sub_operations, this is safe (recursive calls to
# _sub_operations will produce [1.01, 1.02, 1.03, 1.04], not
# [1.01, 1.0101, 1.0102, 1.02] or [1.01, 1.02, 1.03, 1.02]).
position = 0
for operation in _PIT.ordered_operations(operations):
position += SUB_INS_EPSILON
# However, it will break if the position gets too large, as then
# something like [1.01, 1.02, ..., 1.99, 2.00, 2.01, 2] could occur.
# If this happens, just shrink SUB_INS_EPSILON.
assert(position < 1)
operation.position = instruction.pos + (position)
_PIT.CACHE[cache_key] = operations
return operations
@staticmethod
def _lambda_operations(classloader, classes, instruction, verbose, info, args):
assert isinstance(info, InvokeDynamicInfo)
assert len(args) == len(info.instantiated_desc.args)
effective_args = info.stored_args + args
if info.ref_kind == REF_invokeStatic:
assert len(effective_args) == len(info.method_desc.args)
else:
# The `this` parameter must be supplied. Usually this will be in
# effective_args, but it's also valid to use Class::function
# and then provide an instance of class along with the parameters
# to function.
assert len(effective_args) == len(info.method_desc.args) + 1
# I don't think Java allows pre-supplying other arguments in this
# case (as that'd require reordering them to make `this` first).
assert len(info.stored_args) == 0 or len(info.stored_args) == 1
# Now just call the (generated) method.
# Note that info is included because this is
cf, method = info.create_method()
operations = _PIT.operations(classloader, cf, classes, verbose,
method, effective_args)
position = 0
# See note in _sub_operations
for operation in _PIT.ordered_operations(operations):
position += SUB_INS_EPSILON
assert position < 1
operation.position = instruction.pos + position
return operations
@staticmethod
def format(operations):
"""Constructs output structure"""
head = []
stack = [head]
aggregate = {"instructions": head}
block_start = ("if", "loop", "switch", "else")
block_end = ("endif", "endloop", "endswitch", "else")
for operation in _PIT.ordered_operations(operations):
obj = operation.__dict__.copy()
obj.pop("position")
for field in ("field", "condition"):
if field in obj:
obj[field] = _PIT.clean_field(obj[field])
if operation.operation in block_end + block_start:
if operation.operation in block_end:
if len(head) == 0:
stack[-2].pop()
stack.pop()
head = stack[-1]
if operation.operation in block_start:
new_head = []
stack.append(new_head)
obj["instructions"] = new_head
head.append(obj)
head = new_head
else:
head.append(obj)
return aggregate
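# Illustrative example of the structure produced by format() (the operation
# names and fields here are made up for clarity, not taken from the topping):
# format([Operation(1, "write", field="this.id"),
#         Operation(2, "loop", condition="it.hasNext()"),
#         Operation(2.01, "write", field="itv"),
#         Operation(3, "endloop")])
# would yield
# {"instructions": [
#     {"operation": "write", "field": "this.id"},
#     {"operation": "loop", "condition": "it.hasNext()",
#      "instructions": [{"operation": "write", "field": "itv"}]}
# ]}
# i.e. block openers carry a nested "instructions" list, while block closers
# only pop back out to the enclosing list.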
@staticmethod
def clean_field(field):
for pattern in _PIT.CLEANUP_PATTERN:
field = re.sub(pattern[0], pattern[1], field)
return field
class Operation:
"""Represents a performed operation"""
def __init__(self, position, operation, **args):
self.position = position
self.operation = operation
for arg in args:
self.set(arg, args[arg])
def __repr__(self):
return str(self.__dict__)
def set(self, key, value):
self.__dict__[key] = str(value)
return self
def clone(self):
clone = Operation(self.position, self.operation)
for name in self.__dict__:
clone.set(name, self.__dict__[name])
return clone
class InstructionField:
"""Represents a operand in a instruction"""
def __init__(self, operand, instruction, constants):
assert instruction.mnemonic != "lookupswitch"
# Note: this will fail if operand is not actually an instance of
# Operand, which is the case for lookupswitch, hence the earlier assert
self.value = operand.value
assert isinstance(operand.value, int)
self.constants = constants
self.instruction = instruction
self.handlers = {
"name": self.find_name,
"c": self.find_class,
"classname": self.find_classname,
"descriptor": self.find_descriptor,
"target": self.find_target,
"atype": self.find_atype,
"type": self.find_type
}
def __str__(self):
return str(self.value)
def __repr__(self):
return self.__str__()
def __getattr__(self, name):
if name in self.handlers:
return self.handlers[name]()
else:
raise AttributeError
def find_class(self):
"""Finds the internal name of a class, uses slashes for packages."""
const = self.constants[self.value]
if isinstance(const, ConstantClass):
return const.name.value
else:
return const.class_.name.value
def find_name(self):
"""Finds the name of a method called in the suplied instruction"""
# At least, allegedly. In practice this seems to actually be used for
# a zillion other things, and usually not the name, for the ldc instruction
const = self.constants[self.value]
if isinstance(const, ConstantClass):
return const.name.value
elif isinstance(const, String):
return '"' + const.string.value + '"'
elif isinstance(const, (Integer, Float, Long, Double, UTF8)):
return str(const.value)
else:
return self.constants[self.value].name_and_type.name.value
def find_classname(self):
"""Finds the name of a class as intended for display"""
name = self.find_class().replace("/", ".")
if name.startswith("["):
# Fix arrays, which might be in the form of [Lcom/example/Foo;
desc = parse_descriptor(name)[0]
name = desc.name + "[]" * desc.dimensions
if name.startswith("java.lang.") or name.startswith("java.util."):
name = name[10:]
return name
def find_descriptor(self):
"""Finds types used in an instruction"""
return self.constants[self.value].name_and_type.descriptor.value
def find_target(self):
"""Finds the target of a goto or if instruction"""
return self.value + self.instruction.pos
def find_type(self):
"""Finds a type used by an instruction"""
# This may be broken, as current code does not use it
descriptor = self.constants[self.value].name_and_type.descriptor.value
descriptor = field_descriptor(descriptor)
return descriptor[:descriptor.find("[")]
def find_atype(self):
"""Finds the type used by the `newarray` instruction"""
return [
"boolean",
"char",
"float",
"double",
"byte",
"short",
"int",
"long"
][self.value - 4]
class StackOperand:
"""
Represents an operand on the runtime operand stack
value is the actual value
category is the JVM category/type, see
https://docs.oracle.com/javase/specs/jvms/se8/html/jvms-2.html#jvms-2.11.1-320
"""
def __init__(self, value, category=1):
self.value = value
self.category = category
def __str__(self):
return str(self.value)
def __repr__(self):
return "%s [%s]" % (self.value, self.category)
_PIT = PacketInstructionsTopping
# Register instructions now
def arg_name(arg_index=lambda ctx: ctx["operands"][0].value):
"""
Returns a lambda that gets the name of the argument at the given index.
The index defaults to the first operand's value.
"""
return lambda ctx: (ctx["arg_names"][arg_index(ctx)]
if arg_index(ctx) < len(ctx["arg_names"])
else "var%s" % arg_index(ctx))
_PIT.register_ins("aconst_null", 0, "null")
_PIT.register_ins("iconst_m1", 0, "-1")
_PIT.register_ins(["lconst_0", "lconst_1"], 0, "{extra}", lambda ctx: int(ctx["ins"].mnemonic[-1], 2))
_PIT.register_ins(["fconst_0", "fconst_1", "fconst_2"], 0, "{extra}.0f", lambda ctx: int(ctx["ins"].mnemonic[-1]))
_PIT.register_ins(["dconst_0", "dconst_1"], 0, "{extra}.0", lambda ctx: int(ctx["ins"].mnemonic[-1], 2))
_PIT.register_ins(["bipush", "sipush"], 0, "{extra}", lambda ctx: ("0x{0:x}" if ctx["operands"][0].value > 5 else "{0}").format(ctx["operands"][0].value))
_PIT.register_ins(["ldc", "ldc_w"], 0, "{operands[0].name}")
_PIT.register_ins("ldc2_w", 0, "{operands[0].name}", category=2)
_PIT.register_ins("iload", 0, "{extra}", arg_name())
_PIT.register_ins("lload", 0, "{extra}", arg_name(), 2)
_PIT.register_ins("fload", 0, "{extra}", arg_name())
_PIT.register_ins("dload", 0, "{extra}", arg_name(), 2)
_PIT.register_ins("aload", 0, "{extra}", arg_name())
_PIT.register_ins("iaload", 2, "{stack[0]}[{stack[1]}]")
_PIT.register_ins("laload", 2, "{stack[0]}[{stack[1]}]", category=2)
_PIT.register_ins("faload", 2, "{stack[0]}[{stack[1]}]")
_PIT.register_ins("daload", 2, "{stack[0]}[{stack[1]}]", category=2)
_PIT.register_ins("aaload", 2, "{stack[0]}[{stack[1]}]")
_PIT.register_ins("baload", 2, "{stack[0]}[{stack[1]}]")
_PIT.register_ins("caload", 2, "{stack[0]}[{stack[1]}]")
_PIT.register_ins("saload", 2, "{stack[0]}[{stack[1]}]")
_PIT.register_ins("iadd", 2, "({stack[0]} + {stack[1]})")
_PIT.register_ins("ladd", 2, "({stack[0]} + {stack[1]})", category=2)
_PIT.register_ins("fadd", 2, "({stack[0]} + {stack[1]})")
_PIT.register_ins("dadd", 2, "({stack[0]} + {stack[1]})", category=2)
_PIT.register_ins("isub", 2, "({stack[0]} - {stack[1]})")
_PIT.register_ins("lsub", 2, "({stack[0]} - {stack[1]})", category=2)
_PIT.register_ins("fsub", 2, "({stack[0]} - {stack[1]})")
_PIT.register_ins("dsub", 2, "({stack[0]} - {stack[1]})", category=2)
_PIT.register_ins("imul", 2, "({stack[0]} * {stack[1]})")
_PIT.register_ins("lmul", 2, "({stack[0]} * {stack[1]})", category=2)
_PIT.register_ins("fmul", 2, "({stack[0]} * {stack[1]})")
_PIT.register_ins("dmul", 2, "({stack[0]} * {stack[1]})", category=2)
_PIT.register_ins("idiv", 2, "({stack[0]} / {stack[1]})")
_PIT.register_ins("ldiv", 2, "({stack[0]} / {stack[1]})", category=2)
_PIT.register_ins("fdiv", 2, "({stack[0]} / {stack[1]})")
_PIT.register_ins("ddiv", 2, "({stack[0]} / {stack[1]})", category=2)
_PIT.register_ins("irem", 2, "({stack[0]} % {stack[1]})")
_PIT.register_ins("lrem", 2, "({stack[0]} % {stack[1]})", category=2)
_PIT.register_ins("frem", 2, "({stack[0]} % {stack[1]})")
_PIT.register_ins("drem", 2, "({stack[0]} % {stack[1]})", category=2)
_PIT.register_ins("ineg", 1, "(-{stack[0]})")
_PIT.register_ins("lneg", 1, "(-{stack[0]})", category=2)
_PIT.register_ins("fneg", 1, "(-{stack[0]})")
_PIT.register_ins("dneg", 1, "(-{stack[0]})", category=2)
_PIT.register_ins("ishl", 2, "({stack[0]} << {stack[1]})")
_PIT.register_ins("lshl", 2, "({stack[0]} << {stack[1]})", category=2)
_PIT.register_ins("ishr", 2, "({stack[0]} >>> {stack[1]})")
_PIT.register_ins("lshr", 2, "({stack[0]} >>> {stack[1]})", category=2)
_PIT.register_ins("iushr", 2, "({stack[0]} >> {stack[1]})")
_PIT.register_ins("lushr", 2, "({stack[0]} >> {stack[1]})", category=2)
_PIT.register_ins("iand", 2, "({stack[0]} & {stack[1]})")
_PIT.register_ins("land", 2, "({stack[0]} & {stack[1]})", category=2)
_PIT.register_ins("ior", 2, "({stack[0]} | {stack[1]})")
_PIT.register_ins("lor", 2, "({stack[0]} | {stack[1]})", category=2)
_PIT.register_ins("ixor", 2, "({stack[0]} ^ {stack[1]})")
_PIT.register_ins("lxor", 2, "({stack[0]} ^ {stack[1]})", category=2)
_PIT.register_ins(["i2l", "f2l", "d2l"], 1, "((long){stack[0]})", category=2)
_PIT.register_ins(["i2f", "l2f", "d2f"], 1, "((float){stack[0]})")
_PIT.register_ins(["i2d", "l2d", "f2d"], 1, "((double){stack[0]})", category=2)
_PIT.register_ins(["l2i", "f2i", "d2i"], 1, "((int){stack[0]})")
_PIT.register_ins("i2b", 1, "((byte){stack[0]})")
_PIT.register_ins("i2c", 1, "((char){stack[0]})")
_PIT.register_ins("i2s", 1, "((short){stack[0]})")
_PIT.register_ins("lcmp", 2, "compare({stack[0]}, {stack[1]})", category=2)
_PIT.register_ins("fcmpg", 2, "compare({stack[0]}, {stack[1]} /*, NaN -> 1 */)")
_PIT.register_ins("fcmpl", 2, "compare({stack[0]}, {stack[1]} /*, NaN -> -1 */)")
_PIT.register_ins("dcmpg", 2, "compare({stack[0]}, {stack[1]} /*, NaN -> 1 */)", category=2)
_PIT.register_ins("dcmpl", 2, "compare({stack[0]}, {stack[1]} /*, NaN -> -1 */)", category=2)
_PIT.register_ins("getstatic", 0, "{operands[0].classname}.{operands[0].name}") # Doesn't handle category
_PIT.register_ins("getfield", 1, "{stack[0]}.{operands[0].name}") # Doesn't handle category
_PIT.register_ins("new", 0, "new {operands[0].classname}")
_PIT.register_ins("newarray", 1, "new {operands[0].atype}[{stack[0]}]")
_PIT.register_ins("anewarray", 1, "new {operands[0].classname}[{stack[0]}]")
_PIT.register_ins("arraylength", 1, "{stack[0]}.length")
_PIT.register_ins("athrow", 1, "throw {stack[0]}") # this is a bit weird, but throw does put the exception back on the stack, kinda
_PIT.register_ins("checkcast", 1, "(({operands[0].classname}){stack[0]})")
_PIT.register_ins("instanceof", 1, "({stack[0]} instanceof {operands[0].classname})")
|
|
'''
Created on May 3, 2017
@author: jesper
'''
import yaml
class AuditModule():
@staticmethod
def read(file):
pass
@staticmethod
def evaluate(info, yaml_path):
pass
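# Illustrative usage of the modules below (the driver code is assumed, not
# part of this file): each module's read() parses the raw output collected
# for its area, and evaluate() checks the parsed values against a YAML policy.
#
# with open("collected/sshd.txt") as output:  # hypothetical file names
#     info = sshd.read(output)
# print sshd.evaluate(info, "policies/sshd.yml")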
class cron_at(AuditModule):
@staticmethod
def read(file):
info_dict = dict()
next_line = file.readline()
while next_line:
inner_values = next_line.split()
if "No such file or directory" in next_line:
info_dict[inner_values[3][1:-2]] = ["No such file or directory"] # [1:-2] trims the quotation marks from the filename
else:
# [permissions][?][owner][group][size][month][day][hour:min][filename]
info_dict[inner_values[8]] = inner_values[0]
next_line = file.readline()
return info_dict
@staticmethod
def evaluate(info_dict, yaml_path):
return_string = ""
with open(yaml_path, "r") as stream:
yaml_dict = yaml.load(stream)
for file_name in yaml_dict:
if info_dict.has_key(file_name):
info_value = info_dict[file_name]
for comparison in yaml_dict[file_name]:
yaml_values = yaml_dict[file_name][comparison]
message = compare(info_value, yaml_values, comparison)
if message is not None: return_string += message + "\n"
return return_string
class crontab(AuditModule):
@staticmethod
def read(file):
values = dict()
notSetupString = "No crontab has been set up for the following: \n"
next_line = file.readline()[:-1]
while (next_line):
crontab = next_line.replace("no crontab for ", "")
values[crontab] = "no"
next_line = file.readline()[:-1]
return values
@staticmethod
def evaluate(info, yaml_path):
return_string = ""
with open(yaml_path, "r") as stream:
yaml_dict = yaml.load(stream)
blacklist = yaml_dict.pop("blacklist")
expected = yaml_dict.pop("expected")
for cronjob in blacklist:
if info.has_key(cronjob):
message = blacklist[cronjob]["msg"]
return_string += message + "\n"
for cronjob in expected:
if not info.has_key(cronjob):
message = expected[cronjob]["msg"]
return_string += message + "\n"
# for key in yaml_dict:
# if info.has_key(key):
# customer_value = info[key]
#
# for comparison in yaml_dict[key]:
# values = yaml_dict[key][comparison]
# print customer_value
# print values
# print comparison
# message = compare(customer_value, values, comparison)
# if message is not None: return_string += message + "\n"
return return_string
class diskvolume(AuditModule):
@staticmethod
def read(file):
values = dict()
file.readline() # Skip first line
next_line = file.readline()
column = ["filesystem", "size", "used", "avail", "use%", "mount"]
while next_line:
inner_dict = dict()
# [Filesystem][Size][Used][Avail][Use%][Mounted on]
inner_values = next_line.split()
for index in range(0, 6):
inner_dict[column[index]] = inner_values[index]
inner_dict["use%"] = inner_dict["use%"][:-1] # Removes the % sign
values[inner_values[5]] = inner_dict
next_line = file.readline()
return values
@staticmethod
def evaluate(info, yaml_path):
return_string = ""
info_copy = dict(info)
with open(yaml_path, "r") as stream:
loaded_data = yaml.load(stream)
for key in loaded_data:
if info.has_key(key):
customer_map = info[key]
for column in customer_map:
customer_value = customer_map[column]
if not loaded_data[key].has_key(column): continue
for comparison in loaded_data[key][column]:
values = loaded_data[key][column][comparison]
message = compare(customer_value, values, comparison)
if message is not None: return_string += message + "\n"
info_copy.pop(key)
for key in info_copy:
customer_map = info[key]
for column in customer_map:
customer_value = customer_map[column]
if not loaded_data["default"].has_key(column): continue
for comparison in loaded_data["default"][column]:
values = loaded_data["default"][column][comparison]
message = compare(customer_value, values, comparison)
if message is not None:
message = message.replace("/fs/", key)
return_string += message + "\n"
return return_string
class encrypted_disk(AuditModule):
@staticmethod
def read(file):
values = dict()
next_line = file.readline()
while next_line:
inner_values = dict()
n_line_split = next_line.split()
for i in range(1, len(n_line_split)):
n_line_ssplit = n_line_split[i].split("=")
inner_values[n_line_ssplit[0]] = n_line_ssplit[1]
values[n_line_split[0]] = inner_values
next_line = file.readline()
return values
@staticmethod
def evaluate(info, yaml_path):
return_string = ""
uuid_dict = {}
for key in info:
for key_key in info[key]:
if ("UUID" in key_key):
if uuid_dict.has_key(info[key][key_key]):
uuid_dict[info[key][key_key]].append(key)
else:
uuid_dict[info[key][key_key]] = [key]
for uuid in uuid_dict:
duplicate_warning_msg = open("duplicate_uuid_warning_msg.txt", "r").read()
if len(uuid_dict[uuid]) > 1:
duplicate_warning_msg = duplicate_warning_msg.replace("/uuid/", uuid)
duplicate_warning_msg = duplicate_warning_msg.replace("/key_set/", str(set(uuid_dict[uuid])))
return_string += duplicate_warning_msg + "\n"
return return_string
class environment(AuditModule):
@staticmethod
def read(file):
values = dict()
while True:
nextLine = file.readline()
if (nextLine == ""):
break
innerValues = nextLine.split("=")
if (innerValues[0] == "LS_COLORS"): # Hard to parse and don't think it has anythign to do with security risks
continue
values[innerValues[0]] = innerValues[1][:-1]
return values
@staticmethod
def evaluate(info, yaml_path):
return_string = ""
with open(yaml_path, "r") as stream:
yaml_dict = yaml.load(stream)
for key in yaml_dict:
# check if key exists in customer file
if info.has_key(key):
customer_value = info[key]
values = yaml_dict[key]
for comparison in values:
message = compare(customer_value, values[comparison], comparison)
if message is not None: return_string += message + "\n"
return return_string
class firewall(AuditModule):
@staticmethod
def read(file):
values = dict()
next_line = file.readline()
while next_line:
inner_values = next_line.split()
if (inner_values and inner_values[0] == "Chain"):
chain = inner_values[1]
policy = inner_values[3].split(")")[0]
values[chain] = policy
next_line = file.readline()
return values
@staticmethod
def evaluate(info, yaml_path):
return_string = ""
with open(yaml_path, "r") as stream:
yaml_dict = yaml.load(stream)
for trafic in yaml_dict:
columns = yaml_dict[trafic]
if yaml_dict[trafic].has_key("policy"):
for comparison in yaml_dict[trafic]["policy"]:
customer_value = info[trafic]
values = yaml_dict[trafic]["policy"][comparison]
message = compare(customer_value, values, comparison)
if message is not None: return_string += message + "\n"
return return_string
class groups(AuditModule):
@staticmethod
def read(file):
info_dict = dict()
next_line = file.readline()[:-1]
while next_line:
inner_dict = dict()
inner_values = next_line.split(":")
inner_dict["group"] = inner_values[0]
inner_dict["password"] = inner_values[1]
inner_dict["id"] = inner_values[2]
inner_dict["users"] = inner_values[3]
info_dict[inner_dict["group"]] = inner_dict
next_line = file.readline()[:-1]
return info_dict
@staticmethod
def evaluate(info_dict, yaml_path):
return_string = ""
with open(yaml_path, "r") as stream:
yaml_dict = yaml.load(stream)
default_dict = yaml_dict.pop("default")
for key in yaml_dict:
if info_dict.has_key(key):
for column in yaml_dict[key]:
info_value = info_dict[key][column]
for comparison in yaml_dict[key][column]:
yaml_values = yaml_dict[key][column][comparison]
message = compare(info_value, yaml_values, comparison)
if message is not None:
message = message.replace("/users/", info_dict[key]["users"])
message = message.replace("/group/", info_dict[key]["group"])
return_string += message + "\n"
for key in info_dict:
for column in default_dict:
info_value = info_dict[key][column]
for comparison in default_dict[column]:
yaml_values = default_dict[column][comparison]
message = compare(info_value, yaml_values, comparison)
if message is not None:
message = message.replace("/users/", info_dict[key]["users"])
message = message.replace("/group/", info_dict[key]["group"])
return_string += message + "\n"
return return_string
class lastlog(AuditModule):
# Unsure how to parse...
@staticmethod
def read(file):
value = dict()
last_dict = dict()
lastlog_dict = dict()
next_line = file.readline()
while next_line and not "wtmp begins " in next_line:
next_values = next_line.split()
if len(next_values) > 1:
last_dict[next_values[0]] = "yes"
next_line = file.readline()
next_line = file.readline() # Skip line
while next_line:
next_values = next_line[:-1].split(None, 1)
if len(next_values) > 1:
lastlog_dict[next_values[0]] = next_values[1]
next_line = file.readline()
value["last"] = last_dict
value["lastlog"] = lastlog_dict
return value
@staticmethod
def evaluate(info, yaml_path):
# Not sure how to evaluate...
return_string = ""
with open(yaml_path, "r") as stream:
yaml_dict = yaml.load(stream)
last = yaml_dict.pop("last")
lastlog = yaml_dict.pop("lastlog")
info_last = info.pop("last")
info_lastlog = info.pop("lastlog")
for key in lastlog:
if info_lastlog.has_key(key):
for comparison in lastlog[key]:
customer_value = info_lastlog[key]
values = lastlog[key][comparison]
message = compare(customer_value, values, comparison)
if message is not None:
return_string += message + "\n"
for key in last:
if info_last.has_key(key):
message = last[key]["msg"]
if message is not None:
return_string += message + "\n"
return return_string
class modprobe(AuditModule):
@staticmethod
def read(file):
values = dict()
modprobes = []
while True:
nextLine = file.readline()
if ("Module" in nextLine): break
modprobes.append(nextLine[:-1])
values["modprobe.d"] = modprobes
while True:
nextLine = file.readline()
if (nextLine == ""): break
innerValues = nextLine.split()
values[innerValues[0]] = innerValues
return values
@staticmethod
def evaluate(info, yaml_path):
return_string = ""
with open(yaml_path, "r") as stream:
yaml_dict = yaml.load(stream)
# Important configs
for config in yaml_dict["important_configs"]:
if config == "default":
important_configs = yaml_dict["important_configs"]["default"]["config"]
for i_config in important_configs:
if i_config not in info["modprobe.d"]:
message = yaml_dict["important_configs"]["default"]["message"]
message = message.replace("/conf/", i_config)
return_string += message + "\n"
elif config not in info["modprobe.d"]:
message = yaml_dict["important_configs"][config]["message"]
return_string += message + "\n"
# Important modules
for module in yaml_dict["important_modules"]:
if module == "default":
important_modules = yaml_dict["important_modules"]["default"]["module"]
for i_module in important_modules:
if i_module not in info.keys():
message = yaml_dict["important_modules"]["default"]["message"]
message = message.replace("/module/", i_module)
return_string += message + "\n"
elif module not in info.keys():
message = yaml_dict["important_modules"][module]["message"]
return_string += message + "\n"
# Blacklisted configs
for config in yaml_dict["blacklisted_configs"]:
if config == "default":
important_configs = yaml_dict["blacklisted_configs"]["default"]["config"]
for i_config in important_configs:
if i_config in info["modprobe.d"]:
message = yaml_dict["blacklisted_configs"]["default"]["message"]
message = message.replace("/conf/", i_config)
return_string += message + "\n"
elif config in info["modprobe.d"]:
message = yaml_dict["blacklisted_configs"][config]["message"]
return_string += message + "\n"
# Blacklisted modules
for module in yaml_dict["blacklisted_modules"]:
if module == "default":
important_modules = yaml_dict["blacklisted_modules"]["default"]["module"]
for i_module in important_modules:
if i_module in info.keys():
message = yaml_dict["blacklisted_modules"]["default"]["message"]
message = message.replace("/module/", i_module)
return_string += message + "\n"
elif module in info.keys():
message = yaml_dict["blacklisted_modules"][module]["message"]
return_string += message + "\n"
# modprobe_file = open("modprobe_folders", "r")
#
# config_list = []
# blacklist = []
# important_list = []
#
# customer_modules = []
#
#
# next_line = modprobe_file.readline() #Skip line
# next_line = modprobe_file.readline()
#
# while next_line and not next_line.startswith("#"):
# config_list.append(next_line[:-1])
# next_line = modprobe_file.readline()
#
# next_line = modprobe_file.readline() # Skip line
#
# while next_line and not next_line.startswith("#"):
# blacklist.append(next_line[:-1])
# next_line = modprobe_file.readline()
#
# next_line = modprobe_file.readline() # Skip line
#
# while next_line and not next_line.startswith("#"):
# important_list.append(next_line[:-1])
# next_line = modprobe_file.readline()
#
# customer_config_list = dict["modprobe.d"].split("%")
#
# dict.pop("modprobe.d", None)
# dict.pop("", None)
#
# for key in dict:
# customer_modules.append(key)
#
# for config in config_list:
# if config not in customer_config_list:
# return_string += "The expected file " + config + " is not in your system.\n"
#
# for module in customer_modules:
# if module in blacklist:
# return_string += "The system contains the blacklisted module " + module + "\n"
#
# for module in important_list:
# if module not in customer_modules:
# return_string += "The system does not contain the important module " + module + "\n"
return return_string
class networkvolume(AuditModule):
@staticmethod
def read(file):
values = dict()
mount_dict = dict()
fstab_dict = dict()
next_line = file.readline()
while next_line and "#" not in next_line and not next_line.isspace():
innerValues = next_line.split()
mount_dict[innerValues[2]] = innerValues
next_line = file.readline()
while next_line and "#" not in next_line and not next_line.isspace():
inner_dict = dict()
if ("#" in next_line):
next_line = file.readline()
continue
inner_values = next_line.split()
inner_dict["file_system"] = inner_values[0]
inner_dict["mount_point"] = inner_values[1]
inner_dict["type"] = inner_values[2]
options = inner_values[3].split(",")
inner_dict["options"] = options
inner_dict["dump"] = inner_values[4]
inner_dict["pass"] = inner_values[5]
fstab_dict[inner_dict["mount_point"]] = inner_dict
next_line = file.readline()
values["mount"] = mount_dict
values["fstab"] = fstab_dict
return values
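# Illustrative example (an assumed /etc/fstab line, for clarity only):
#   "UUID=1234-ABCD / ext4 errors=remount-ro 0 1"
# is stored by read() under values["fstab"]["/"] as
#   {"file_system": "UUID=1234-ABCD", "mount_point": "/", "type": "ext4",
#    "options": ["errors=remount-ro"], "dump": "0", "pass": "1"}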
@staticmethod
def evaluate(info, yaml_path):
return_string = ""
uuid_dict = dict()
info_mount = info["mount"]
info_fstab = info["fstab"]
with open(yaml_path, "r") as stream:
warnings = yaml.load(stream)
# check duplicates
for key in info_fstab:
uuid = info_fstab[key]["file_system"].split("=")[1]
if uuid_dict.has_key(uuid):
uuid_dict[uuid].append(info_fstab[key]["mount_point"])
else:
uuid_dict[uuid] = [info_fstab[key]["mount_point"]]
for key in uuid_dict:
if len(uuid_dict[key]) > 1:
message = warnings["duplicates"]
message = message.replace("/uuid/", key).replace("/key_set/", str(uuid_dict[key]))
return_string += message + "\n"
# #
# check for username/password and backup, pass
for key in info_fstab:
# check for username/password
options = info_fstab[key]["options"]
for option in options:
if "password" in option or "username" in option:
message = warnings["username_password"]
return_string += message + "\n"
# checks for backup
backup = info_fstab[key]["dump"]
if backup != "1": # the dump field is parsed as a string
message = warnings["backup"]
return_string += message + "\n"
# checks for pass
pass_flag = info_fstab[key]["pass"]
if key != "/" and pass_flag == "1":
message = warnings["pass_non_root"]
return_string += message + "\n"
elif key == "/" and pass_flag != "1":
message = warnings["pass_root"]
return_string += message + "\n"
return return_string
class open_connections(AuditModule):
@staticmethod
def read(file):
values = dict()
file.readline() # Skip first line
next_line = file.readline()
while (next_line and not "COMMAND" in next_line):
innerValues = next_line.split()
values[innerValues[4]] = innerValues
next_line = file.readline()
while (next_line):
innerValues = next_line.split()
# Unsure what should be the key..
values[innerValues[0] + "#" + innerValues[3]] = innerValues
next_line = file.readline()
return values
@staticmethod
def evaluate(info, yaml_path):
return_string = ""
"""Lists of listen ports, estab ports etc
make sure that the ports are not bad according to open_connections file
"""
return return_string
class passwdpolicy(AuditModule):
@staticmethod
def read(file):
values = dict()
next_line = file.readline()
while(next_line):
if "#" not in next_line and not next_line.isspace():
key_value = next_line.split()
values[key_value[0]] = key_value[1]
next_line = file.readline()
return values
@staticmethod
def evaluate(info, yaml_path):
return_string = ""
with open(yaml_path, "r") as stream:
yaml_dict = yaml.load(stream)
for key in yaml_dict:
if info.has_key(key):
for comparison in yaml_dict[key]:
customer_value = info[key]
values = yaml_dict[key][comparison]
message = compare(customer_value, values, comparison)
if message is not None:
return_string += message + "\n"
# passwd_file = open("passwdpolicy", "r")
#
# next_line = passwd_file.readline()
#
# important_keys = []
# passwd_dict = dict()
#
# while next_line:
#
# if (next_line.isspace() or next_line.startswith("%")):
# next_line = passwd_file.readline()
# continue
#
# passwd_key = next_line.split("=")[0]
#
# passwd_values = next_line.split("=")[1][:-1]
#
# passwd_dict[passwd_key] = passwd_values
# next_line = passwd_file.readline()
#
# print passwd_dict
# print info
#
# for key in passwd_dict:
# #If key is in customer
# if info.has_key(key[1:]):
# #If key is dangerous
# if (key.startswith("^")):
# return_string += "The key " + key + " is considered dangerous.\n"
#
# else:
# customer_value = info[key[1:]]
# values = passwd_dict[key]
# print key
# print "customer: " + customer_value
# print "values: " + str(values)
# #If value is dangerous
# if "^" + customer_value in values:
# return_string += "The value " + customer_value + " is considered dangerous. Consider switching to " + str([x for x in values if not x.startswith("^")] + ". prefeably one of " + str([x for x in values if x.startswith("*")])) + "\n"
#
# #If value is not prefered
# if "<" + customer_value in values:
# return_string += "The value " + customer_value + " is not considered preferable. Consider switching to one of " + str([x for x in values if x.startswith("*")]) + "\n"
#
# #If not found in customer
# else:
# #If key is important
# if (key.startswith("#")):
# important_keys.append(key[1:])
# #Add recomended value?
#
# if len(important_keys) > 0:
# return_string += "The following important keys were not found: " + str(important_keys) + "\n"
#
"""if info["ENCRYPT_METHOD"] == "MD5":
return_string = (return_string + "Your currently password encrypting method is MD5. " +
"\nYou should consider changing the encrypting method to SHA256 or SHA516.")
if info["PASS_MIN_DAYS"] > '0':
return_string = (return_string + "Warning: You have to wait " + dict["PASS_MIN_DAYS"] +
" days to change password, this can be a security risk in case of accidental password change.")
"""
return return_string
class processes(AuditModule):
@staticmethod
def read(file):
values = dict()
next_line = file.readline()
next_line = file.readline() # Skip first line
while (next_line):
inner_dict = dict()
next_line = next_line[:-1]
inner_values = next_line.split(None, 10)
inner_dict["USER"] = inner_values[0]
inner_dict["PID"] = inner_values[1]
inner_dict["%CPU"] = inner_values[2]
inner_dict["%MEM"] = inner_values[3]
inner_dict["VSZ"] = inner_values[4]
inner_dict["RSS"] = inner_values[5]
inner_dict["TTY"] = inner_values[6]
inner_dict["STAT"] = inner_values[7]
inner_dict["START"] = inner_values[8]
inner_dict["TIME"] = inner_values[9]
inner_dict["COMMAND"] = inner_values[10]
values[inner_dict["COMMAND"]] = inner_dict
next_line = file.readline()
# next_line = file.readline()
#
# while (next_line):
# splitted_line = next_line.split()
# innerValues = ["" for i in range(11)] # Init the list with empty strings
# for i in range (0, 10):
# innerValues[i] = splitted_line[i]
# for i in range (10, len(splitted_line)):
# innerValues[10] = str(innerValues[10]) + splitted_line[i] + " "
#
# innerValues[10] = innerValues[:-1]
# next_line = file.readline()
#
#
# values[innerValues[1]] = innerValues
return values
@staticmethod
def evaluate(info, yaml_path): # change to dict if using commented code?
return_string = ""
info_copy = dict(info)
with open(yaml_path, 'r') as stream:
yaml_dict = yaml.load(stream)
default = yaml_dict.pop("default")
important_processes = yaml_dict.pop("important_processes")
blacklisted_processes = yaml_dict.pop("blacklisted_processes")
# important processes
for key in important_processes:
if key == "default":
for process in important_processes["default"]["process"]:
if not info.has_key(process):
message = important_processes["default"]["message"]
message = message.replace("/process/", process)
return_string += message + "\n"
elif not info_copy.has_key(key):
return_string += important_processes[key]["message"] + "\n"
# blacklisted processes
for key in blacklisted_processes:
if key == "default":
for process in blacklisted_processes["default"]["process"]:
if info.has_key(process):
message = blacklisted_processes["default"]["message"]
message = message.replace("/process/", process)
return_string += message + "\n"
elif info_copy.has_key(key):
return_string += blacklisted_processes[key]["message"] + "\n"
# default value check (CPU & MEM usage)
# print info_copy
for key in info_copy:
for column in default:
customer_value = info_copy[key][column]
for comparison in default[column]:
values = default[column][comparison]
message = compare(customer_value, values, comparison)
if message is not None:
message = message.replace("/process/", key)
return_string += message + "\n"
# other keys
for key in yaml_dict:
if info_copy.has_key(key):
for column in yaml_dict[key]:
for comparison in yaml_dict[key][column]:
if info_copy[key].has_key(column):
customer_value = info_copy[key][column]
values = yaml_dict[key][column][comparison]
message = compare(customer_value, values, comparison)
if message is not None:
return_string += message
# processes_file = open("processes", "r")
#
# next_line = processes_file.readline() #Skip first line
# next_line = processes_file.readline()
#
# expected_processes = []
# non_root_blacklist = []
# blacklist = []
#
#
# while next_line and "#" not in next_line and not next_line.isspace():
# expected_processes.append(next_line[:-1])
# next_line = processes_file.readline()
#
# next_line = processes_file.readline()
#
# while next_line and "#" not in next_line and not next_line.isspace():
# non_root_blacklist.append(next_line[:-1])
# next_line = processes_file.readline()
#
# next_line = processes_file.readline()
#
#
# while next_line and "#" not in next_line and not next_line.isspace():
# blacklist.append(next_line[:-1])
# next_line = processes_file.readline()
#
#
#
# for key in dict.iterkeys():
# customer_process = dict[key][10][:-1]
#
# #if process is blacklist
# if customer_process in blacklist:
# return_string += "The process " + customer_process + " currently running on your service is in our blacklist\n"
#
# #if process is non root
# elif customer_process in non_root_blacklist and dict[key][0 != "root"]:
# return_string += "The process " + customer_process + " currently running on your service as a non-root. This is considered a security risk\n"
#
# #if expected process is found, it removes it from the exepcted processes list
# if customer_process in expected_processes:
# expected_processes = [x for x in expected_processes if x != customer_process]
#
# #if expected_processes is NOT empty
# if expected_processes:
# return_string += "The following processes were expected but could not be found on your system: " + str(expected_processes) + "\n"
return return_string
class samba(AuditModule):
@staticmethod
def read(file):
values = dict()
next_line = file.readline()
while (next_line):
if "No such file or directory" in next_line:
values["/etc/samba/smb.conf"] = "No such file or directory"
return values
if "#" in next_line or next_line.isspace():
next_line = file.readline()
continue
if "[" in next_line:
level = next_line[1:-2]
next_line = file.readline()
continue
next_values = next_line.split(" = ")
next_dict = dict()
next_dict['value'] = next_values[1][:-1]
next_dict['level'] = level
values[next_values[0].lstrip()] = next_dict
next_line = file.readline()
return values
@staticmethod
def evaluate(info, yaml_path):
return_string = ""
with open(yaml_path, 'r') as stream:
yaml_dict = yaml.load(stream)
for key in yaml_dict:
if info.has_key(key):
# pull the value and level out before looping over the comparisons
customer_value = info[key]['value']
customer_level = info[key]['level']
customer_level_value = customer_level + ("#%") + customer_value
for comparison in yaml_dict[key]:
yaml_values = yaml_dict[key][comparison]
msg = compare(customer_level_value, yaml_values, comparison)
if msg is not None:
msg = msg.replace("/key/", key)
msg = msg.replace("/level/", customer_level)
msg = msg.replace("/value/", customer_value)
# print "samba/eval"
#
# print info
# return_string = ""
#
#
# samba_file = open(yaml_path, "r")
#
# samba_dict = dict()
#
# samba_lists = [[]]
#
#
# samba_important_keys = []
#
# samba_lists[0] = ([1, 2, 3])
# samba_lists.append([17, 6, 5])
#
# next_line = samba_file.readline()
#
# while next_line:
# if next_line.startswith("%") or next_line.isspace():
# next_line = samba_file.readline()
# continue
# samba_k_v_l = next_line[:-1].split("=")
# samba_key = samba_k_v_l[0]
# samba_v_l = samba_k_v_l[1].split(",")
#
#
# next_line = samba_file.readline()
# samba_values = samba_v_l[0].split("|")
# samba_levels = samba_v_l[1].split("|")
#
# if samba_key.startswith("#"): samba_important_keys.append(samba_key[1:])
#
# samba_dict[samba_key] = [samba_values, samba_levels]
#
#
# for key in samba_dict:
# if key[1:] in info.keys():
#
# # if Dangerous key
# if key.startswith("^"):
# return_string += "The key " + key + " is considered dangerous.\n"
#
# else:
# customer_value = info[key[1:]][0]
# customer_level = info[key[1:]][1]
# samba_values = samba_dict[key][0]
# samba_levels = samba_dict[key][1]
# # if Dangerous level
# if "^" + customer_level in samba_levels:
# return_string += "The level for the key " + key[1:] + " is considered dangerous. Consider changing to one of " + str([x[1:] for x in samba_levels if not x.startswith("^")]) + " preferably one of " + str([x[1:] for x in samba_levels if x.startswith("*")]) + "\n"
#
# # if not preferable level
# elif "<" + customer_level in samba_levels:
# if len([x for x in samba_levels if x.startswith("*")]) > 0:
# return_string += "The level for the environment key " + key[1:] + " is not considered preferable. Consider changing to one of " + str([x[1:] for x in samba_levels if x.startswith("*")]) + "\n"
#
# # cant find level in samba txt
# elif "*" + customer_level not in samba_levels:
# return_string += "The level " + customer_value + " for the key " + key[1:] + " was not found in our list of \"predetermined\" levels. \n\tRecommended levels: " + str([x[1:] for x in samba_levels if x.startswith("*")]) + "\n\tOkay levels: " + str([x[1:] for x in samba_levels if x.startswith("<")]) + "\n"
#
#
# # if Dangerous value
# if "^" + customer_value in samba_values:
# return_string += "The value for the key " + key[1:] + " is considered dangerous. Consider changing to one of " + str([x[1:] for x in samba_values if not x.startswith("^")]) + " preferably one of " + str([x[1:] for x in samba_values if x.startswith("*")]) + "\n"
#
# # if not preferable value
# elif "<" + customer_value in samba_values:
# if len([x for x in samba_levels if x.startswith("*")]) > 0:
# return_string += "The value for the environment key " + key[1:] + " is not considered preferable. Consider changing to one of " + str([x[1:] for x in samba_values if x.startswith("*")]) + "\n"
#
# # cant find value in samba txt
# elif "*" + customer_level not in samba_values:
# return_string += "The value " + customer_value + " for the key " + key[1:] + " was not found in our list of \"predetermined\" values. \n\tRecommended values: " + str([x[1:] for x in samba_values if x.startswith("*")]) + "\n\tOkay levels: " + str([x[1:] for x in samba_values if x.startswith("<")]) + "\n"
#
# samba_important_keys = [x for x in samba_important_keys if x != key[1:]]
# # cant find key in samba
#
# if len(samba_important_keys) > 0:
# return_string += "The following keys were not found in your system: " + str(samba_important_keys) + ". They are considered important."
#
# return return_string
#
class sshd(AuditModule):
@staticmethod
def read(file):
info_dict = dict()
next_line = file.readline()
while (next_line):
if "No such file or directory" in next_line:
info_dict["/etc/ssh/sshd_config"] = "No such file or directory"
if "#" in next_line or next_line.isspace():
next_line = file.readline()
continue
next_values = next_line.split()
info_dict[next_values[0]] = next_values[1]
next_line = file.readline()
return info_dict
@staticmethod
def evaluate(info_dict, yaml_path):
return_string = ""
with open(yaml_path, "r") as stream:
yaml_dict = yaml.load(stream)
for key in yaml_dict:
if info_dict.has_key(key):
info_value = info_dict[key]
yaml_values = yaml_dict[key]
for comparison in yaml_values:
yaml_value = yaml_values[comparison]
message = compare(info_value, yaml_value, comparison)
if message is not None:
return_string += message + "\n"
return return_string
class startup(AuditModule):
@staticmethod
def read(file):
values = dict()
file.readline() # Skip first line (/etc/init.d)
file.readline() # Skip second line (total 216) //maybe use?
next_line = file.readline()
while (next_line):
next_values = next_line.split()
values[next_values[8]] = next_values
next_line = file.readline()
return values
@staticmethod
def evaluate(info, yaml_path):
return_string = ""
blacklist = []
expected = []
with open(yaml_path, "r") as stream:
yaml_dict = yaml.load(stream)
expected = yaml_dict.pop("expected")
blacklist = yaml_dict.pop("blacklisted")
permission = yaml_dict.pop("permission")
# expected scripts
for script in expected["scripts"]:
if script not in info:
message = expected["msg"]
message = message.replace("/script/", script)
return_string += message + "\n"
# blacklisted scripts
for script in blacklist["scripts"]:
if script in info:
message = blacklist["msg"]
message = message.replace("/script/", script)
return_string += message + "\n"
# check permissions
for key, value in info.iteritems():
permissions = value[0]
permissions = list(permissions)
if permissions[5] == "w" or permissions[8] == "w":
message = permission["msg"]
message = message.replace("/script/", key)
return_string += message + "\n"
return return_string
class sudoers(AuditModule):
@staticmethod
def read(file):
values = dict()
username = ""
hosts = ""
run_as_users = ""
run_as_groups = ""
command = ""
next_line = file.readline()
while (next_line):
group = False
if "#" in next_line or next_line.isspace():
next_line = file.readline()
continue
if "Defaults" in next_line:
inner_values = next_line.split()
tmp = inner_values[1].split("=")
username = tmp[0]
values[username] = ['', '', '', command]
next_line = file.readline()
continue
inner_values = next_line.split()
username = inner_values[0]
command = inner_values[2]
inner_values = inner_values[1].split("=")
hosts = inner_values[0]
inner_values = inner_values[1].split(":")
if (len(inner_values) > 1):
run_as_users = inner_values[0][1:]
run_as_groups = inner_values[1][:-1]
else:
run_as_users = inner_values[0][-1:-1]
values[username] = [hosts, run_as_users, run_as_groups, command]
next_line = file.readline()
return values
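# Illustrative example (an assumed /etc/sudoers line, for clarity only):
#   "root ALL=(ALL:ALL) ALL"
# is parsed by read() into
#   values["root"] == ["ALL", "ALL", "ALL", "ALL"]
# i.e. [hosts, run_as_users, run_as_groups, command].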
@staticmethod
def evaluate(info, yaml_path):
return_string = ""
if info.has_key("env_reset") == True:
return_string += "env_reset is available. The system will make sure the terminal environment remove any user variables and clear potentially harmful environmental variables from the sudo sessions \n \n"
else:
return_string += "env_reset variable has not been set. You should add it the variable in /etc/sudoers"
for key, value in info.iteritems():
if key == "secure_path":
if value[3] != "[\'\"/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin\"\']":
continue
if (value[0] and value[1] and value[2] and value[3]) == "ALL" and ("root" not in key) and ("%" not in key):
return_string += "User: " + "\"" + key + "\"" + " has super user rights.\n\n"
continue
if (value[0] and value[2] and value[3] == "ALL") and (value[1] == '') and ("root" not in key) and ("%admin" not in key) and ("%sudo" not in key):
return_string += "Members of group: " + "\"" + key + "\"" + " may gain root privileges.\n\n"
continue
if (value[0] and value[1] and value[2] and value[3] == "ALL") and ("root" not in key) and ("%admin" not in key) and ("%sudo" not in key):
return_string += "Members of sudo group: " + "\"" + key + "\"" + " can execute any command\n\n"
continue
return return_string
class suid_files(AuditModule):
@staticmethod
def read(file):
values = dict()
next_line = file.readline()
while (next_line):
values[next_line] = next_line
next_line = file.readline()
return values
@staticmethod
def evaluate(info, yaml_path):
return_string = ""
return return_string
class system(AuditModule):
@staticmethod
def read(file):
values = dict()
next_line = file.readline()
while (next_line):
values[next_line] = next_line
next_line = file.readline()
return values
@staticmethod
def evaluate(info, yaml_path):
return_string = ""
return return_string
class users(AuditModule):
@staticmethod
def read(file):
values = dict()
next_line = file.readline()
while (next_line):
inner_dict = dict()
inner_values = next_line[:-1].split(":", 6)
inner_dict["username"] = inner_values[0]
inner_dict["password"] = inner_values[1]
inner_dict["user_id"] = inner_values[2]
inner_dict["group_id"] = inner_values[3]
inner_dict["user_info"] = inner_values[4]
inner_dict["home_dir"] = inner_values[5]
inner_dict["shell"] = inner_values[6]
values[inner_dict["username"]] = inner_dict
next_line = file.readline()
return values
@staticmethod
def evaluate(info, yaml_path):
return_string = ""
with open(yaml_path, 'r') as stream:
yaml_dict = yaml.load(stream)
for key in yaml_dict:
if info.has_key(key):
for column in yaml_dict[key]:
for comparison in yaml_dict[key][column]:
values = yaml_dict[key][column][comparison]
customer_value = info[key][column]
message = compare(customer_value, values, comparison)
if message is not None:
return_string += message
# for key in dict:
#
# risks = [False, False, False]
#
# value = dict[key]
# if value[2] == "0" and not key == "root":
# return_string = return_string + "User " + "'" + key + "'" + " has super user rights\n"
# risks[0] = True
#
# if value[1] == "!":
# return_string = return_string = "User " + "'" + key + "'" + " is stored in /etc/security/passwd and is not encrypted\n"
# risks[1] = True
#
# elif value[1] == "*":
# return_string = return_string + "User " + "'" + key + "'" + " has an invalid password\n"
# risks[2] = True
#
#
# if risks[0]:
# return_string += "\nYou should change the users' priviliges"
#
# if risks[1]:
# return_string += "\nYou should encrypt the users' password"
#
# if risks[2]:
# return_string += "\nYou should change users' password to a valid one"
#
return return_string
def compare(customer_value, values, comparison):
# Equal
if comparison == "eq":
value = values.keys()[0]
if customer_value != value:
message = values[value]["msg"]
severity = values[value]["severity"]
return message
# Not equal
if comparison == "neq":
values = values["values"]
if customer_value in values.keys():
message = values[customer_value]["msg"]
severity = values[customer_value]["severity"]
return message
if comparison == "nlt":
value = values["value"]
if int(customer_value) < int(value):
message = values["msg"]
severity = values["severity"]
return message
if comparison == "ngr":
value = values["value"]
if float(customer_value) > float(value):
message = values["msg"]
return message
if comparison == "nbtwn":
values = values["values"]
for message in values:
for ranges in values[message]["ranges"]:
range_max = max(ranges)
range_min = min(ranges)
if int(customer_value) < range_max and int(customer_value) > range_min:
severity = values[message]["severity"]
return message
if comparison == "in":
if customer_value not in values["values"]:
severity = values["severity"]
message = values["msg"]
return message
if comparison == "permissions":
for permission_group in values:
if permission_group == "other":
other_rwx = customer_value[7:]
for permission in values[permission_group]:
if permission in other_rwx:
message = values[permission_group][permission]["msg"]
return message
if permission_group == "user":
user_rwx = customer_value[1:4]
for permission in values[permission_group]:
if permission in user_rwx:
message = values[permission_group][permission]["msg"]
return message
if permission_group == "group":
group_rwx = customer_value[4:7]
for permission in values[permission_group]:
if permission in group_rwx:
message = values[permission_group][permission]["msg"]
return message
pass
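# Illustrative policy snippet (an assumed YAML layout, matching the lookups
# performed by compare() above, e.g. for the sshd module):
#
#   PermitRootLogin:
#     eq:
#       "no":
#         msg: "PermitRootLogin should be set to 'no'."
#         severity: high
#
# With that snippet, evaluate() ends up calling
#   compare(info_value, {"no": {"msg": ..., "severity": ...}}, "eq")
# which returns the msg whenever the customer's value differs from "no".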
|
|
import time
from collections import OrderedDict
from cassandra.util import sortedset
from cassandra.query import SimpleStatement
from cassandra import ConsistencyLevel
from dtest import Tester
from tools import since
from assertions import (
assert_all,
assert_none,
assert_row_count,
assert_almost_equal,
assert_unavailable
)
@since('2.0')
class TestTTL(Tester):
""" Test Time To Live Feature """
def setUp(self):
super(TestTTL, self).setUp()
self.cluster.populate(1).start()
[node1] = self.cluster.nodelist()
self.cursor1 = self.patient_cql_connection(node1)
self.create_ks(self.cursor1, 'ks', 1)
def prepare(self, default_time_to_live=None):
self.cursor1.execute("DROP TABLE IF EXISTS ttl_table;")
query = """
CREATE TABLE ttl_table (
key int primary key,
col1 int,
col2 int,
col3 int,
)
"""
if default_time_to_live:
query += " WITH default_time_to_live = {};".format(default_time_to_live)
self.cursor1.execute(query)
def smart_sleep(self, start_time, time_to_wait):
""" Function that sleep smartly based on the start_time.
Useful when tests are slower than expected.
start_time: The start time of the timed operations
time_to_wait: The time to wait in seconds from the start_time
"""
now = time.time()
real_time_to_wait = time_to_wait - (now - start_time)
if real_time_to_wait > 0:
time.sleep(real_time_to_wait)
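# Example: if the statements executed since `start` already took 1.2 seconds,
# smart_sleep(start, 3) only sleeps for the remaining ~1.8 seconds, so the
# following assertion still runs roughly 3 seconds after `start` even on a
# slow test host.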
def default_ttl_test(self):
""" Test default_time_to_live specified on a table """
self.prepare(default_time_to_live=1)
start = time.time()
self.cursor1.execute("INSERT INTO ttl_table (key, col1) VALUES (%d, %d)" % (1, 1))
self.cursor1.execute("INSERT INTO ttl_table (key, col1) VALUES (%d, %d)" % (2, 2))
self.cursor1.execute("INSERT INTO ttl_table (key, col1) VALUES (%d, %d)" % (3, 3))
self.smart_sleep(start, 3)
assert_row_count(self.cursor1, 'ttl_table', 0)
def insert_ttl_has_priority_on_defaut_ttl_test(self):
""" Test that a ttl specified during an insert has priority on the default table ttl """
self.prepare(default_time_to_live=1)
start = time.time()
self.cursor1.execute("""
INSERT INTO ttl_table (key, col1) VALUES (%d, %d) USING TTL 5;
""" % (1, 1))
self.smart_sleep(start, 2)
assert_row_count(self.cursor1, 'ttl_table', 1) # should still exist
self.smart_sleep(start, 7)
assert_row_count(self.cursor1, 'ttl_table', 0)
def insert_ttl_works_without_default_ttl_test(self):
""" Test that a ttl specified during an insert works even if a table has no default ttl """
self.prepare()
start = time.time()
self.cursor1.execute("""
INSERT INTO ttl_table (key, col1) VALUES (%d, %d) USING TTL 1;
""" % (1, 1))
self.smart_sleep(start, 3)
assert_row_count(self.cursor1, 'ttl_table', 0)
def default_ttl_can_be_removed_test(self):
""" Test that default_time_to_live can be removed """
self.prepare(default_time_to_live=1)
start = time.time()
self.cursor1.execute("ALTER TABLE ttl_table WITH default_time_to_live = 0;")
self.cursor1.execute("""
INSERT INTO ttl_table (key, col1) VALUES (%d, %d);
""" % (1, 1))
self.smart_sleep(start, 1.5)
assert_row_count(self.cursor1, 'ttl_table', 1)
def removing_default_ttl_does_not_affect_existing_rows_test(self):
""" Test that removing a default_time_to_live doesn't affect the existings rows """
self.prepare(default_time_to_live=1)
self.cursor1.execute("ALTER TABLE ttl_table WITH default_time_to_live = 10;")
start = time.time()
self.cursor1.execute("""
INSERT INTO ttl_table (key, col1) VALUES (%d, %d);
""" % (1, 1))
self.cursor1.execute("""
INSERT INTO ttl_table (key, col1) VALUES (%d, %d) USING TTL 15;
""" % (2, 1))
self.cursor1.execute("ALTER TABLE ttl_table WITH default_time_to_live = 0;")
self.cursor1.execute("INSERT INTO ttl_table (key, col1) VALUES (%d, %d);" % (3, 1))
self.smart_sleep(start, 5)
assert_row_count(self.cursor1, 'ttl_table', 3)
self.smart_sleep(start, 12)
assert_row_count(self.cursor1, 'ttl_table', 2)
self.smart_sleep(start, 20)
assert_row_count(self.cursor1, 'ttl_table', 1)
def update_single_column_ttl_test(self):
""" Test that specifying a TTL on a single column works """
self.prepare()
self.cursor1.execute("""
INSERT INTO ttl_table (key, col1, col2, col3) VALUES (%d, %d, %d, %d);
""" % (1, 1, 1, 1))
start = time.time()
self.cursor1.execute("UPDATE ttl_table USING TTL 3 set col1=42 where key=%s;" % (1,))
assert_all(self.cursor1, "SELECT * FROM ttl_table;", [[1, 42, 1, 1]])
self.smart_sleep(start, 5)
assert_all(self.cursor1, "SELECT * FROM ttl_table;", [[1, None, 1, 1]])
def update_multiple_columns_ttl_test(self):
""" Test that specifying a TTL on multiple columns works """
self.prepare()
self.cursor1.execute("""
INSERT INTO ttl_table (key, col1, col2, col3) VALUES (%d, %d, %d, %d);
""" % (1, 1, 1, 1))
start = time.time()
self.cursor1.execute("""
UPDATE ttl_table USING TTL 2 set col1=42, col2=42, col3=42 where key=%s;
""" % (1,))
assert_all(self.cursor1, "SELECT * FROM ttl_table;", [[1, 42, 42, 42]])
self.smart_sleep(start, 4)
assert_all(self.cursor1, "SELECT * FROM ttl_table;", [[1, None, None, None]])
def update_column_ttl_with_default_ttl_test(self):
"""
Test that specifying a column ttl works when a default ttl is set.
This test specifies a lower ttl for the column than the default ttl.
"""
self.prepare(default_time_to_live=8)
start = time.time()
self.cursor1.execute("""
INSERT INTO ttl_table (key, col1, col2, col3) VALUES (%d, %d, %d, %d);
""" % (1, 1, 1, 1))
self.cursor1.execute("UPDATE ttl_table USING TTL 3 set col1=42 where key=%s;" % (1,))
assert_all(self.cursor1, "SELECT * FROM ttl_table;", [[1, 42, 1, 1]])
self.smart_sleep(start, 5)
assert_all(self.cursor1, "SELECT * FROM ttl_table;", [[1, None, 1, 1]])
self.smart_sleep(start, 10)
assert_row_count(self.cursor1, 'ttl_table', 0)
def update_column_ttl_with_default_ttl_test2(self):
"""
Test that specifying a column ttl works when a default ttl is set.
This test specifies a higher column ttl than the default ttl.
"""
self.prepare(default_time_to_live=2)
start = time.time()
self.cursor1.execute("""
INSERT INTO ttl_table (key, col1, col2, col3) VALUES (%d, %d, %d, %d);
""" % (1, 1, 1, 1))
self.cursor1.execute("UPDATE ttl_table USING TTL 6 set col1=42 where key=%s;" % (1,))
self.smart_sleep(start, 4)
assert_all(self.cursor1, "SELECT * FROM ttl_table;", [[1, 42, None, None]])
self.smart_sleep(start, 8)
assert_row_count(self.cursor1, 'ttl_table', 0)
def remove_column_ttl_test(self):
"""
Test that removing a column ttl works.
"""
self.prepare()
start = time.time()
self.cursor1.execute("""
INSERT INTO ttl_table (key, col1, col2, col3) VALUES (%d, %d, %d, %d) USING TTL 2;
""" % (1, 1, 1, 1))
self.cursor1.execute("UPDATE ttl_table set col1=42 where key=%s;" % (1,))
self.smart_sleep(start, 4)
assert_all(self.cursor1, "SELECT * FROM ttl_table;", [[1, 42, None, None]])
def remove_column_ttl_with_default_ttl_test(self):
"""
Test that we cannot remove a column ttl when a default ttl is set.
"""
self.prepare(default_time_to_live=2)
start = time.time()
self.cursor1.execute("""
INSERT INTO ttl_table (key, col1, col2, col3) VALUES (%d, %d, %d, %d);
""" % (1, 1, 1, 1))
self.cursor1.execute("""
INSERT INTO ttl_table (key, col1, col2, col3) VALUES (%d, %d, %d, %d);
""" % (2, 1, 1, 1))
self.cursor1.execute("UPDATE ttl_table using ttl 0 set col1=42 where key=%s;" % (1,))
self.cursor1.execute("UPDATE ttl_table using ttl 8 set col1=42 where key=%s;" % (2,))
self.smart_sleep(start, 5)
# The first row should be deleted, using ttl 0 should fallback to default_time_to_live
assert_all(self.cursor1, "SELECT * FROM ttl_table;", [[2, 42, None, None]])
self.smart_sleep(start, 10)
assert_row_count(self.cursor1, 'ttl_table', 0)
def collection_list_ttl_test(self):
"""
Test that ttl has a granularity of elements using a list collection.
"""
self.prepare(default_time_to_live=10)
self.cursor1.execute("ALTER TABLE ttl_table ADD mylist list<int>;""")
start = time.time()
self.cursor1.execute("""
INSERT INTO ttl_table (key, col1, mylist) VALUES (%d, %d, %s);
""" % (1, 1, [1, 2, 3, 4, 5]))
self.cursor1.execute("""
UPDATE ttl_table USING TTL 5 SET mylist[0] = 42, mylist[4] = 42 WHERE key=1;
""")
assert_all(self.cursor1, "SELECT * FROM ttl_table;", [[1, 1, None, None, [42, 2, 3, 4, 42]]])
self.smart_sleep(start, 7)
assert_all(self.cursor1, "SELECT * FROM ttl_table;", [[1, 1, None, None, [2, 3, 4]]])
self.smart_sleep(start, 12)
assert_row_count(self.cursor1, 'ttl_table', 0)
def collection_set_ttl_test(self):
"""
Test that ttl has a granularity of elements using a set collection.
"""
self.prepare(default_time_to_live=10)
self.cursor1.execute("ALTER TABLE ttl_table ADD myset set<int>;""")
start = time.time()
self.cursor1.execute("""
INSERT INTO ttl_table (key, col1, myset) VALUES (%d, %d, %s);
""" % (1, 1, '{1,2,3,4,5}'))
self.cursor1.execute("""
UPDATE ttl_table USING TTL 3 SET myset = myset + {42} WHERE key=1;
""")
assert_all(
self.cursor1,
"SELECT * FROM ttl_table;",
[[1, 1, None, None, sortedset([1, 2, 3, 4, 5, 42])]]
)
self.smart_sleep(start, 5)
assert_all(
self.cursor1,
"SELECT * FROM ttl_table;",
[[1, 1, None, None, sortedset([1, 2, 3, 4, 5])]]
)
self.smart_sleep(start, 12)
assert_row_count(self.cursor1, 'ttl_table', 0)
def collection_map_ttl_test(self):
"""
Test that ttl is applied at element granularity in a map collection.
"""
self.prepare(default_time_to_live=6)
self.cursor1.execute("ALTER TABLE ttl_table ADD mymap map<int, int>;""")
start = time.time()
self.cursor1.execute("""
INSERT INTO ttl_table (key, col1, mymap) VALUES (%d, %d, %s);
""" % (1, 1, '{1:1,2:2,3:3,4:4,5:5}'))
self.cursor1.execute("""
UPDATE ttl_table USING TTL 2 SET mymap[1] = 42, mymap[5] = 42 WHERE key=1;
""")
assert_all(
self.cursor1,
"SELECT * FROM ttl_table;",
[[1, 1, None, None, OrderedDict([(1, 42), (2, 2), (3, 3), (4, 4), (5, 42)])]]
)
self.smart_sleep(start, 4)
assert_all(
self.cursor1,
"SELECT * FROM ttl_table;",
[[1, 1, None, None, OrderedDict([(2, 2), (3, 3), (4, 4)])]]
)
self.smart_sleep(start, 8)
assert_row_count(self.cursor1, 'ttl_table', 0)
def delete_with_ttl_expired_test(self):
"""
Updating a row with a ttl does not prevent deletion, test for CASSANDRA-6363
"""
self.cursor1.execute("DROP TABLE IF EXISTS session")
self.cursor1.execute("CREATE TABLE session (id text, usr text, valid int, PRIMARY KEY (id))")
self.cursor1.execute("insert into session (id, usr) values ('abc', 'abc')")
self.cursor1.execute("update session using ttl 1 set valid = 1 where id = 'abc'")
self.smart_sleep(time.time(), 2)
self.cursor1.execute("delete from session where id = 'abc' if usr ='abc'")
assert_row_count(self.cursor1, 'session', 0)
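# Illustrative sketch (not part of the original dtest suite): the core TTL rules
# the tests above exercise, condensed into one hypothetical helper. The `session`
# argument stands in for any object with an execute() method (like the cursors
# used above), and the table name is made up for the example.
def demo_column_vs_default_ttl(session):
    # A table-level default_time_to_live applies to every write on the table...
    session.execute("""
        CREATE TABLE IF NOT EXISTS demo_ttl (key int PRIMARY KEY, col1 int)
        WITH default_time_to_live = 10;
    """)
    session.execute("INSERT INTO demo_ttl (key, col1) VALUES (1, 1);")  # expires after ~10s
    # ...but USING TTL on a statement overrides the default for that write only.
    session.execute("UPDATE demo_ttl USING TTL 2 SET col1 = 42 WHERE key = 1;")  # col1 expires after ~2s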
class TestDistributedTTL(Tester):
""" Test Time To Live Feature in a distributed environment """
def setUp(self):
super(TestDistributedTTL, self).setUp()
self.cluster.populate(2).start()
[self.node1, self.node2] = self.cluster.nodelist()
self.cursor1 = self.patient_cql_connection(self.node1)
self.create_ks(self.cursor1, 'ks', 2)
def prepare(self, default_time_to_live=None):
self.cursor1.execute("DROP TABLE IF EXISTS ttl_table;")
query = """
CREATE TABLE ttl_table (
key int primary key,
col1 int,
col2 int,
col3 int,
)
"""
if default_time_to_live:
query += " WITH default_time_to_live = {};".format(default_time_to_live)
self.cursor1.execute(query)
def ttl_is_replicated_test(self):
"""
Test that the ttl setting is replicated properly on all nodes
"""
self.prepare(default_time_to_live=5)
cursor1 = self.patient_exclusive_cql_connection(self.node1)
cursor2 = self.patient_exclusive_cql_connection(self.node2)
cursor1.execute("USE ks;")
cursor2.execute("USE ks;")
query = SimpleStatement(
"INSERT INTO ttl_table (key, col1) VALUES (1, 1);",
consistency_level=ConsistencyLevel.ALL
)
cursor1.execute(query)
assert_all(
cursor1,
"SELECT * FROM ttl_table;",
[[1, 1, None, None]],
cl=ConsistencyLevel.ALL
)
ttl_cursor1 = cursor1.execute('SELECT ttl(col1) FROM ttl_table;')
ttl_cursor2 = cursor2.execute('SELECT ttl(col1) FROM ttl_table;')
assert_almost_equal(ttl_cursor1[0][0], ttl_cursor2[0][0], error=0.05)
time.sleep(7)
assert_none(cursor1, "SELECT * FROM ttl_table;", cl=ConsistencyLevel.ALL)
def ttl_is_respected_on_delayed_replication_test(self):
""" Test that ttl is respected on delayed replication """
self.prepare()
self.node2.stop()
self.cursor1.execute("""
INSERT INTO ttl_table (key, col1) VALUES (1, 1) USING TTL 5;
""")
self.cursor1.execute("""
INSERT INTO ttl_table (key, col1) VALUES (2, 2) USING TTL 60;
""")
assert_all(
self.cursor1,
"SELECT * FROM ttl_table;",
[[1, 1, None, None], [2, 2, None, None]]
)
time.sleep(7)
self.node1.stop()
self.node2.start(wait_for_binary_proto=True)
cursor2 = self.patient_exclusive_cql_connection(self.node2)
cursor2.execute("USE ks;")
assert_row_count(cursor2, 'ttl_table', 0)  # 0 rows: node2 was down during the writes and node1 is now stopped, so nothing has been replicated yet
self.node1.start(wait_for_binary_proto=True)
self.cursor1 = self.patient_exclusive_cql_connection(self.node1)
self.cursor1.execute("USE ks;")
self.node1.cleanup()
# Check that the expired data has not been replicated
assert_row_count(cursor2, 'ttl_table', 1)
assert_all(
cursor2,
"SELECT * FROM ttl_table;",
[[2, 2, None, None]],
cl=ConsistencyLevel.ALL
)
# Check that the TTL on both servers is the same
ttl_cursor1 = self.cursor1.execute('SELECT ttl(col1) FROM ttl_table;')
ttl_cursor2 = cursor2.execute('SELECT ttl(col1) FROM ttl_table;')
assert_almost_equal(ttl_cursor1[0][0], ttl_cursor2[0][0], error=0.1)
def ttl_is_respected_on_repair_test(self):
""" Test that ttl is respected on repair """
self.prepare()
self.cursor1.execute("""
ALTER KEYSPACE ks WITH REPLICATION =
{'class' : 'SimpleStrategy', 'replication_factor' : 1};
""")
self.cursor1.execute("""
INSERT INTO ttl_table (key, col1) VALUES (1, 1) USING TTL 5;
""")
self.cursor1.execute("""
INSERT INTO ttl_table (key, col1) VALUES (2, 2) USING TTL 1000;
""")
assert_all(
self.cursor1,
"SELECT * FROM ttl_table;",
[[1, 1, None, None], [2, 2, None, None]]
)
time.sleep(7)
self.node1.stop()
cursor2 = self.patient_exclusive_cql_connection(self.node2)
cursor2.execute("USE ks;")
assert_unavailable(cursor2.execute, "SELECT * FROM ttl_table;")
self.node1.start(wait_for_binary_proto=True)
self.cursor1 = self.patient_exclusive_cql_connection(self.node1)
self.cursor1.execute("USE ks;")
self.cursor1.execute("""
ALTER KEYSPACE ks WITH REPLICATION =
{'class' : 'SimpleStrategy', 'replication_factor' : 2};
""")
self.node1.repair(['ks'])
ttl_start = time.time()
ttl_cursor1 = self.cursor1.execute('SELECT ttl(col1) FROM ttl_table;')
self.node1.stop()
assert_row_count(cursor2, 'ttl_table', 1)
assert_all(
cursor2,
"SELECT * FROM ttl_table;",
[[2, 2, None, None]]
)
# Check that the TTL on both servers is the same
ttl_cursor2 = cursor2.execute('SELECT ttl(col1) FROM ttl_table;')
ttl_cursor1 = ttl_cursor1[0][0] - (time.time() - ttl_start)
assert_almost_equal(ttl_cursor1, ttl_cursor2[0][0], error=0.005)
|
|
import mock
from django.db import transaction
from django.test import RequestFactory
from django.http import Http404
from nose import tools as nt
from datetime import datetime, timedelta
from website.project.model import Comment
from admin.common_auth.logs import OSFLogEntry
from admin.spam.forms import ConfirmForm, EmailForm
from tests.base import AdminTestCase
from tests.factories import CommentFactory, AuthUserFactory, ProjectFactory
from admin_tests.utilities import setup_view, setup_form_view
from admin_tests.factories import UserFactory
from admin.spam.views import (
SpamList,
UserSpamList,
SpamDetail,
EmailFormView,
)
from website.project.spam.model import SpamStatus
class TestSpamListView(AdminTestCase):
def setUp(self):
super(TestSpamListView, self).setUp()
Comment.remove()
self.project = ProjectFactory(is_public=True)
self.user_1 = AuthUserFactory()
self.user_2 = AuthUserFactory()
self.project.add_contributor(self.user_1)
self.project.add_contributor(self.user_2)
self.project.save()
self.user_1.save()
self.user_2.save()
date = datetime.utcnow()
self.comment_1 = CommentFactory(node=self.project, user=self.user_1)
self.comment_2 = CommentFactory(node=self.project, user=self.user_1)
self.comment_3 = CommentFactory(node=self.project, user=self.user_1)
self.comment_4 = CommentFactory(node=self.project, user=self.user_1)
self.comment_5 = CommentFactory(node=self.project, user=self.user_2)
self.comment_6 = CommentFactory(node=self.project, user=self.user_2)
self.comment_1.report_abuse(
user=self.user_2,
save=True,
category='spam',
date=date - timedelta(seconds=5)
)
self.comment_2.report_abuse(
user=self.user_2,
save=True,
category='spam',
date=date - timedelta(seconds=4)
)
self.comment_3.report_abuse(
user=self.user_2,
save=True,
category='spam',
date=date - timedelta(seconds=3)
)
self.comment_4.report_abuse(
user=self.user_2,
save=True,
category='spam',
date=date - timedelta(seconds=2)
)
self.comment_5.report_abuse(
user=self.user_1,
save=True,
category='spam',
date=date - timedelta(seconds=1)
)
self.comment_6.report_abuse(user=self.user_1, save=True,
category='spam')
self.request = RequestFactory().get('/fake_path')
self.view = SpamList()
self.view = setup_view(self.view, self.request, user_id=self.user_1._id)
def test_get_spam(self):
res = list(self.view.get_queryset())
nt.assert_equal(len(res), 6)
response_list = [r._id for r in res]
should_be = [
self.comment_6._id,
self.comment_5._id,
self.comment_4._id,
self.comment_3._id,
self.comment_2._id,
self.comment_1._id
]
nt.assert_list_equal(should_be, response_list)
def test_get_context_data(self):
self.view.object_list = self.view.get_queryset()
res = self.view.get_context_data()
nt.assert_is_instance(res['spam'], list)
nt.assert_is_instance(res['spam'][0], dict)
nt.assert_equal(res['status'], '1')
nt.assert_equal(res['page_number'], 1)
class TestSpamDetail(AdminTestCase):
def setUp(self):
super(TestSpamDetail, self).setUp()
self.comment = CommentFactory()
self.comment.report_abuse(user=AuthUserFactory(), save=True,
category='spam')
self.request = RequestFactory().post('/fake_path')
self.request.user = UserFactory()
def test_confirm_spam(self):
form_data = {'confirm': str(SpamStatus.SPAM)}
form = ConfirmForm(data=form_data)
nt.assert_true(form.is_valid())
view = SpamDetail()
view = setup_form_view(
view, self.request, form, spam_id=self.comment._id)
with transaction.atomic():
view.form_valid(form)
obj = OSFLogEntry.objects.latest(field_name='action_time')
nt.assert_equal(obj.object_id, self.comment._id)
nt.assert_in('Confirmed SPAM:', obj.message())
def test_confirm_ham(self):
form_data = {'confirm': str(SpamStatus.HAM)}
form = ConfirmForm(data=form_data)
nt.assert_true(form.is_valid())
view = SpamDetail()
view = setup_form_view(
view, self.request, form, spam_id=self.comment._id)
with transaction.atomic():
view.form_valid(form)
obj = OSFLogEntry.objects.latest(field_name='action_time')
nt.assert_equal(obj.object_id, self.comment._id)
nt.assert_in('Confirmed HAM:', obj.message())
def test_form_valid_bad_id(self):
form = ConfirmForm()
view = SpamDetail()
view = setup_form_view(view, self.request, form, spam_id='a1')
with nt.assert_raises(Http404):
view.form_valid(form)
def test_get_context_data(self):
view = SpamDetail()
view = setup_view(view, self.request, spam_id=self.comment._id)
res = view.get_context_data()
nt.assert_equal(res['status'], '1')
nt.assert_equal(res['page_number'], '1')
nt.assert_is_instance(res['comment'], dict)
nt.assert_equal(res['SPAM_STATUS'].UNKNOWN, SpamStatus.UNKNOWN)
nt.assert_equal(res['SPAM_STATUS'].SPAM, SpamStatus.SPAM)
nt.assert_equal(res['SPAM_STATUS'].HAM, SpamStatus.HAM)
nt.assert_equal(res['SPAM_STATUS'].FLAGGED, SpamStatus.FLAGGED)
def test_get_context_data_bad_id(self):
view = setup_view(SpamDetail(), self.request, spam_id='a1')
with nt.assert_raises(Http404):
view.get_context_data()
class TestEmailFormView(AdminTestCase):
def setUp(self):
super(TestEmailFormView, self).setUp()
self.comment = CommentFactory()
self.comment.report_abuse(user=AuthUserFactory(), save=True,
category='spam')
self.request = RequestFactory().post('/fake_path')
self.request.user = UserFactory()
self.view = EmailFormView()
self.form = EmailForm(data={
'author': 'Nemo',
'message': 'A message for spammers.',
'subject': 'stop spamming',
'email': ('[email protected]', '[email protected]')
})
self.view = setup_form_view(self.view, self.request, self.form,
spam_id=self.comment._id)
@mock.patch('admin.spam.views.render')
def test_get_context_data(self, mock_render):
res = self.view.get_context_data()
nt.assert_equal(res['status'], '1')
nt.assert_equal(res['page_number'], '1')
nt.assert_is_instance(res['comment'], dict)
def test_get_context_data_bad_id(self):
view = setup_view(EmailFormView(), self.request, spam_id='a1')
with nt.assert_raises(Http404):
view.get_context_data()
@mock.patch('admin.spam.views.render')
def test_get_initial(self, mock_render):
self.view.get_initial()
res = self.view.initial
nt.assert_is_instance(res, dict)
nt.assert_is_instance(res['email'], list)
nt.assert_is_instance(res['email'][0], tuple)
def test_get_initial_bad_id(self):
view = setup_view(EmailFormView(), self.request, spam_id='a1')
with nt.assert_raises(Http404):
view.get_initial()
class TestUserSpamListView(AdminTestCase):
def setUp(self):
super(TestUserSpamListView, self).setUp()
self.project = ProjectFactory(is_public=True)
self.user_1 = AuthUserFactory()
self.user_2 = AuthUserFactory()
self.project.add_contributor(self.user_1)
self.project.add_contributor(self.user_2)
self.project.save()
self.user_1.save()
self.user_2.save()
self.comment_1 = CommentFactory(node=self.project, user=self.user_1)
self.comment_2 = CommentFactory(node=self.project, user=self.user_1)
self.comment_3 = CommentFactory(node=self.project, user=self.user_1)
self.comment_4 = CommentFactory(node=self.project, user=self.user_1)
self.comment_5 = CommentFactory(node=self.project, user=self.user_2)
self.comment_6 = CommentFactory(node=self.project, user=self.user_2)
self.comment_1.report_abuse(user=self.user_2, save=True,
category='spam')
self.comment_2.report_abuse(user=self.user_2, save=True,
category='spam')
self.comment_3.report_abuse(user=self.user_2, save=True,
category='spam')
self.comment_4.report_abuse(user=self.user_2, save=True,
category='spam')
self.comment_5.report_abuse(user=self.user_1, save=True,
category='spam')
self.comment_6.report_abuse(user=self.user_1, save=True,
category='spam')
self.request = RequestFactory().get('/fake_path')
self.view = UserSpamList()
self.view = setup_view(self.view, self.request, user_id=self.user_1._id)
def test_get_user_spam(self):
res = list(self.view.get_queryset())
nt.assert_equal(len(res), 4)
def test_get_context_data(self):
self.view.object_list = self.view.get_queryset()
res = self.view.get_context_data()
nt.assert_is_instance(res['spam'], list)
nt.assert_is_instance(res['spam'][0], dict)
nt.assert_equal(res['status'], '1')
nt.assert_equal(res['page_number'], 1)
nt.assert_equal(res['user_id'], self.user_1._id)
|
|
# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import random
import time
from oslo.config import cfg
from oslo.db import exception as db_exc
from oslo import messaging
from oslo.utils import timeutils
import sqlalchemy as sa
from sqlalchemy import func
from sqlalchemy import orm
from sqlalchemy.orm import exc
from sqlalchemy.orm import joinedload
from sqlalchemy import sql
from neutron.common import constants
from neutron.common import utils as n_utils
from neutron import context as n_ctx
from neutron.db import agents_db
from neutron.db import agentschedulers_db
from neutron.db import l3_attrs_db
from neutron.db import model_base
from neutron.extensions import l3agentscheduler
from neutron.i18n import _LE, _LI, _LW
from neutron import manager
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
LOG = logging.getLogger(__name__)
L3_AGENTS_SCHEDULER_OPTS = [
cfg.StrOpt('router_scheduler_driver',
default='neutron.scheduler.l3_agent_scheduler.ChanceScheduler',
help=_('Driver to use for scheduling '
'router to a default L3 agent')),
cfg.BoolOpt('router_auto_schedule', default=True,
help=_('Allow auto scheduling of routers to L3 agent.')),
cfg.BoolOpt('allow_automatic_l3agent_failover', default=False,
help=_('Automatically reschedule routers from offline L3 '
'agents to online L3 agents.')),
]
cfg.CONF.register_opts(L3_AGENTS_SCHEDULER_OPTS)
class RouterL3AgentBinding(model_base.BASEV2):
"""Represents binding between neutron routers and L3 agents."""
router_id = sa.Column(sa.String(36),
sa.ForeignKey("routers.id", ondelete='CASCADE'),
primary_key=True)
l3_agent = orm.relation(agents_db.Agent)
l3_agent_id = sa.Column(sa.String(36),
sa.ForeignKey("agents.id", ondelete='CASCADE'),
primary_key=True)
class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase,
agentschedulers_db.AgentSchedulerDbMixin):
"""Mixin class to add l3 agent scheduler extension to plugins
using the l3 agent for routing.
"""
router_scheduler = None
def start_periodic_agent_status_check(self):
if not cfg.CONF.allow_automatic_l3agent_failover:
LOG.info(_LI("Skipping period L3 agent status check because "
"automatic router rescheduling is disabled."))
return
self.periodic_agent_loop = loopingcall.FixedIntervalLoopingCall(
self.reschedule_routers_from_down_agents)
interval = max(cfg.CONF.agent_down_time / 2, 1)
# add random initial delay to allow agents to check in after the
# neutron server first starts. random to offset multiple servers
self.periodic_agent_loop.start(interval=interval,
initial_delay=random.randint(interval, interval * 2))
def reschedule_routers_from_down_agents(self):
"""Reschedule routers from down l3 agents if admin state is up."""
# give agents extra time to handle transient failures
agent_dead_limit = cfg.CONF.agent_down_time * 2
# check for an abrupt clock change since last check. if a change is
# detected, sleep for a while to let the agents check in.
tdelta = timeutils.utcnow() - getattr(self, '_clock_jump_canary',
timeutils.utcnow())
if timeutils.total_seconds(tdelta) > cfg.CONF.agent_down_time:
LOG.warn(_LW("Time since last L3 agent reschedule check has "
"exceeded the interval between checks. Waiting "
"before check to allow agents to send a heartbeat "
"in case there was a clock adjustment."))
time.sleep(agent_dead_limit)
self._clock_jump_canary = timeutils.utcnow()
context = n_ctx.get_admin_context()
cutoff = timeutils.utcnow() - datetime.timedelta(
seconds=agent_dead_limit)
down_bindings = (
context.session.query(RouterL3AgentBinding).
join(agents_db.Agent).
filter(agents_db.Agent.heartbeat_timestamp < cutoff,
agents_db.Agent.admin_state_up).
outerjoin(l3_attrs_db.RouterExtraAttributes,
l3_attrs_db.RouterExtraAttributes.router_id ==
RouterL3AgentBinding.router_id).
filter(sa.or_(l3_attrs_db.RouterExtraAttributes.ha == sql.false(),
l3_attrs_db.RouterExtraAttributes.ha == sql.null())))
try:
for binding in down_bindings:
LOG.warn(_LW(
"Rescheduling router %(router)s from agent %(agent)s "
"because the agent did not report to the server in "
"the last %(dead_time)s seconds."),
{'router': binding.router_id,
'agent': binding.l3_agent_id,
'dead_time': agent_dead_limit})
try:
self.reschedule_router(context, binding.router_id)
except (l3agentscheduler.RouterReschedulingFailed,
messaging.RemoteError):
# Catch individual router rescheduling errors here
# so one broken one doesn't stop the iteration.
LOG.exception(_LE("Failed to reschedule router %s"),
binding.router_id)
except db_exc.DBError:
# Catch DB errors here so a transient DB connectivity issue
# doesn't stop the loopingcall.
LOG.exception(_LE("Exception encountered during router "
"rescheduling."))
def validate_agent_router_combination(self, context, agent, router):
"""Validate if the router can be correctly assigned to the agent.
:raises: RouterL3AgentMismatch if attempting to assign DVR router
to legacy agent, or centralized router to compute's L3 agents.
:raises: InvalidL3Agent if attempting to assign router to an
unsuitable agent (disabled, type != L3, incompatible configuration)
"""
is_distributed = router.get('distributed')
agent_conf = self.get_configuration_dict(agent)
agent_mode = agent_conf.get('agent_mode', 'legacy')
is_agent_router_types_incompatible = (
agent_mode == 'dvr' and not is_distributed
or agent_mode == 'legacy' and is_distributed
)
if is_agent_router_types_incompatible:
router_type = ('distributed' if is_distributed else 'centralized')
raise l3agentscheduler.RouterL3AgentMismatch(
router_type=router_type, router_id=router['id'],
agent_mode=agent_mode, agent_id=agent['id'])
is_wrong_type_or_unsuitable_agent = (
agent['agent_type'] != constants.AGENT_TYPE_L3 or
not agent['admin_state_up'] or
not self.get_l3_agent_candidates(context, router, [agent])
)
if is_wrong_type_or_unsuitable_agent:
raise l3agentscheduler.InvalidL3Agent(id=agent['id'])
def check_agent_router_scheduling_needed(self, context, agent, router):
"""Check if the router scheduling is needed.
:raises: RouterHostedByL3Agent if router is already assigned
to a different agent.
:returns: True if scheduling is needed, otherwise False
"""
router_id = router['id']
agent_id = agent['id']
query = context.session.query(RouterL3AgentBinding)
bindings = query.filter_by(router_id=router_id).all()
if not bindings:
return True
for binding in bindings:
if binding.l3_agent_id == agent_id:
# router already bound to the agent we need
return False
if router.get('distributed'):
return False
# non-dvr case: centralized router is already bound to some agent
raise l3agentscheduler.RouterHostedByL3Agent(
router_id=router_id,
agent_id=bindings[0].l3_agent_id)
def create_router_to_agent_binding(self, context, agent, router):
"""Create router to agent binding."""
router_id = router['id']
agent_id = agent['id']
if self.router_scheduler:
try:
self.router_scheduler.bind_router(context, router_id, agent)
except db_exc.DBError:
raise l3agentscheduler.RouterSchedulingFailed(
router_id=router_id, agent_id=agent_id)
def add_router_to_l3_agent(self, context, agent_id, router_id):
"""Add a l3 agent to host a router."""
with context.session.begin(subtransactions=True):
router = self.get_router(context, router_id)
agent = self._get_agent(context, agent_id)
self.validate_agent_router_combination(context, agent, router)
if self.check_agent_router_scheduling_needed(
context, agent, router):
self.create_router_to_agent_binding(context, agent, router)
else:
return
l3_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_L3)
if l3_notifier:
l3_notifier.router_added_to_agent(
context, [router_id], agent.host)
def remove_router_from_l3_agent(self, context, agent_id, router_id):
"""Remove the router from l3 agent.
After removal, the router will be non-hosted until there is update
which leads to re-schedule or be added to another agent manually.
"""
agent = self._get_agent(context, agent_id)
self._unbind_router(context, router_id, agent_id)
l3_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_L3)
if l3_notifier:
l3_notifier.router_removed_from_agent(
context, router_id, agent.host)
def _unbind_router(self, context, router_id, agent_id):
with context.session.begin(subtransactions=True):
query = context.session.query(RouterL3AgentBinding)
query = query.filter(
RouterL3AgentBinding.router_id == router_id,
RouterL3AgentBinding.l3_agent_id == agent_id)
try:
binding = query.one()
except exc.NoResultFound:
raise l3agentscheduler.RouterNotHostedByL3Agent(
router_id=router_id, agent_id=agent_id)
context.session.delete(binding)
def reschedule_router(self, context, router_id, candidates=None):
"""Reschedule router to a new l3 agent
Remove the router from the agent(s) currently hosting it and
schedule it again
"""
cur_agents = self.list_l3_agents_hosting_router(
context, router_id)['agents']
with context.session.begin(subtransactions=True):
for agent in cur_agents:
self._unbind_router(context, router_id, agent['id'])
new_agent = self.schedule_router(context, router_id,
candidates=candidates)
if not new_agent:
raise l3agentscheduler.RouterReschedulingFailed(
router_id=router_id)
l3_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_L3)
if l3_notifier:
for agent in cur_agents:
l3_notifier.router_removed_from_agent(
context, router_id, agent['host'])
l3_notifier.router_added_to_agent(
context, [router_id], new_agent.host)
def list_routers_on_l3_agent(self, context, agent_id):
query = context.session.query(RouterL3AgentBinding.router_id)
query = query.filter(RouterL3AgentBinding.l3_agent_id == agent_id)
router_ids = [item[0] for item in query]
if router_ids:
return {'routers':
self.get_routers(context, filters={'id': router_ids})}
else:
return {'routers': []}
def list_active_sync_routers_on_active_l3_agent(
self, context, host, router_ids):
agent = self._get_agent_by_type_and_host(
context, constants.AGENT_TYPE_L3, host)
if not agent.admin_state_up:
return []
query = context.session.query(RouterL3AgentBinding.router_id)
query = query.filter(
RouterL3AgentBinding.l3_agent_id == agent.id)
if router_ids:
query = query.filter(
RouterL3AgentBinding.router_id.in_(router_ids))
router_ids = [item[0] for item in query]
if router_ids:
if n_utils.is_extension_supported(self,
constants.L3_HA_MODE_EXT_ALIAS):
return self.get_ha_sync_data_for_host(context, host,
router_ids=router_ids,
active=True)
else:
return self.get_sync_data(context, router_ids=router_ids,
active=True)
else:
return []
def get_l3_agents_hosting_routers(self, context, router_ids,
admin_state_up=None,
active=None):
if not router_ids:
return []
query = context.session.query(RouterL3AgentBinding)
if len(router_ids) > 1:
query = query.options(joinedload('l3_agent')).filter(
RouterL3AgentBinding.router_id.in_(router_ids))
else:
query = query.options(joinedload('l3_agent')).filter(
RouterL3AgentBinding.router_id == router_ids[0])
if admin_state_up is not None:
query = (query.filter(agents_db.Agent.admin_state_up ==
admin_state_up))
l3_agents = [binding.l3_agent for binding in query]
if active is not None:
l3_agents = [l3_agent for l3_agent in
l3_agents if not
agents_db.AgentDbMixin.is_agent_down(
l3_agent['heartbeat_timestamp'])]
return l3_agents
def _get_l3_bindings_hosting_routers(self, context, router_ids):
if not router_ids:
return []
query = context.session.query(RouterL3AgentBinding)
if len(router_ids) > 1:
query = query.options(joinedload('l3_agent')).filter(
RouterL3AgentBinding.router_id.in_(router_ids))
else:
query = query.options(joinedload('l3_agent')).filter(
RouterL3AgentBinding.router_id == router_ids[0])
return query.all()
def list_l3_agents_hosting_router(self, context, router_id):
with context.session.begin(subtransactions=True):
bindings = self._get_l3_bindings_hosting_routers(
context, [router_id])
results = []
for binding in bindings:
l3_agent_dict = self._make_agent_dict(binding.l3_agent)
results.append(l3_agent_dict)
if results:
return {'agents': results}
else:
return {'agents': []}
def get_l3_agents(self, context, active=None, filters=None):
query = context.session.query(agents_db.Agent)
query = query.filter(
agents_db.Agent.agent_type == constants.AGENT_TYPE_L3)
if active is not None:
query = (query.filter(agents_db.Agent.admin_state_up == active))
if filters:
for key, value in filters.iteritems():
column = getattr(agents_db.Agent, key, None)
if column:
query = query.filter(column.in_(value))
return [l3_agent
for l3_agent in query
if agentschedulers_db.AgentSchedulerDbMixin.is_eligible_agent(
active, l3_agent)]
def check_ports_exist_on_l3agent(self, context, l3_agent, router_id):
"""
This function checks for the existence of dvr serviceable
ports on the host running the given l3 agent.
"""
subnet_ids = self.get_subnet_ids_on_router(context, router_id)
core_plugin = manager.NeutronManager.get_plugin()
filter = {'fixed_ips': {'subnet_id': subnet_ids}}
ports = core_plugin.get_ports(context, filters=filter)
for port in ports:
if (n_utils.is_dvr_serviced(port['device_owner']) and
l3_agent['host'] == port['binding:host_id']):
return True
return False
def get_snat_candidates(self, sync_router, l3_agents):
"""Get the valid snat enabled l3 agents for the distributed router."""
candidates = []
is_router_distributed = sync_router.get('distributed', False)
if not is_router_distributed:
return candidates
for l3_agent in l3_agents:
if not l3_agent.admin_state_up:
continue
agent_conf = self.get_configuration_dict(l3_agent)
agent_mode = agent_conf.get('agent_mode', 'legacy')
if agent_mode != 'dvr_snat':
continue
router_id = agent_conf.get('router_id', None)
use_namespaces = agent_conf.get('use_namespaces', True)
if not use_namespaces and router_id != sync_router['id']:
continue
handle_internal_only_routers = agent_conf.get(
'handle_internal_only_routers', True)
gateway_external_network_id = agent_conf.get(
'gateway_external_network_id', None)
ex_net_id = (sync_router['external_gateway_info'] or {}).get(
'network_id')
if ((not ex_net_id and not handle_internal_only_routers) or
(ex_net_id and gateway_external_network_id and
ex_net_id != gateway_external_network_id)):
continue
candidates.append(l3_agent)
return candidates
def get_l3_agent_candidates(self, context, sync_router, l3_agents):
"""Get the valid l3 agents for the router from a list of l3_agents."""
candidates = []
for l3_agent in l3_agents:
if not l3_agent.admin_state_up:
continue
agent_conf = self.get_configuration_dict(l3_agent)
router_id = agent_conf.get('router_id', None)
use_namespaces = agent_conf.get('use_namespaces', True)
handle_internal_only_routers = agent_conf.get(
'handle_internal_only_routers', True)
gateway_external_network_id = agent_conf.get(
'gateway_external_network_id', None)
agent_mode = agent_conf.get('agent_mode', 'legacy')
if not use_namespaces and router_id != sync_router['id']:
continue
ex_net_id = (sync_router['external_gateway_info'] or {}).get(
'network_id')
if ((not ex_net_id and not handle_internal_only_routers) or
(ex_net_id and gateway_external_network_id and
ex_net_id != gateway_external_network_id)):
continue
is_router_distributed = sync_router.get('distributed', False)
if agent_mode in ('legacy', 'dvr_snat') and (
not is_router_distributed):
candidates.append(l3_agent)
elif is_router_distributed and agent_mode.startswith('dvr') and (
self.check_ports_exist_on_l3agent(
context, l3_agent, sync_router['id'])):
candidates.append(l3_agent)
return candidates
def auto_schedule_routers(self, context, host, router_ids):
if self.router_scheduler:
return self.router_scheduler.auto_schedule_routers(
self, context, host, router_ids)
def schedule_router(self, context, router, candidates=None):
if self.router_scheduler:
return self.router_scheduler.schedule(
self, context, router, candidates=candidates)
def schedule_routers(self, context, routers):
"""Schedule the routers to l3 agents."""
for router in routers:
self.schedule_router(context, router, candidates=None)
def get_l3_agent_with_min_routers(self, context, agent_ids):
"""Return l3 agent with the least number of routers."""
query = context.session.query(
agents_db.Agent,
func.count(
RouterL3AgentBinding.router_id
).label('count')).outerjoin(RouterL3AgentBinding).group_by(
RouterL3AgentBinding.l3_agent_id).order_by('count')
res = query.filter(agents_db.Agent.id.in_(agent_ids)).first()
return res[0]
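# Illustrative sketch (assumption, not part of Neutron): roughly how a plugin
# composes this mixin. Real plugins inherit several other db mixins as well and
# load the scheduler driver from cfg.CONF.router_scheduler_driver; the class
# name below is hypothetical and the default driver is instantiated directly
# to keep the sketch self-contained.
class _ExampleL3Plugin(L3AgentSchedulerDbMixin):
    def __init__(self):
        super(_ExampleL3Plugin, self).__init__()
        from neutron.scheduler.l3_agent_scheduler import ChanceScheduler
        self.router_scheduler = ChanceScheduler()
        # Enables rescheduling of routers away from dead agents when
        # allow_automatic_l3agent_failover is turned on.
        self.start_periodic_agent_status_check()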
|
|
from unittest import TestCase
import threading
import time
import mock
from twitter_monitor import DynamicTwitterStream
class TestDynamicTwitterStream(TestCase):
def setUp(self):
# Mock tweepy.Stream
self.stream_patcher = mock.patch('tweepy.Stream')
self.MockTweepyStream = self.stream_patcher.start()
self.tweepy_stream_instance = mock.Mock()
self.MockTweepyStream.return_value = self.tweepy_stream_instance
# Mock the supporting objects
self.auth = mock.Mock()
self.listener = mock.Mock()
self.checker = mock.Mock()
# For controlling the tracking terms
self.term_list = []
self.checker.tracking_terms.return_value = self.term_list
self.stop_timeout = DynamicTwitterStream.STOP_TIMEOUT
DynamicTwitterStream.STOP_TIMEOUT = 0
self.retry_count = 7
# Create a dynamic twitter stream
self.stream = DynamicTwitterStream(auth=self.auth,
listener=self.listener,
term_checker=self.checker,
retry_count=self.retry_count)
def tearDown(self):
self.stream_patcher.stop()
# Restore the normal stop timeout
DynamicTwitterStream.STOP_TIMEOUT = self.stop_timeout
def test_start_stream_no_terms(self):
# Start the stream without a term
self.stream.start_stream()
# Should check the list of terms
self.checker.tracking_terms.assert_called_once_with()
# A stream should NOT have been created, because no terms yet
self.assertEqual(self.MockTweepyStream.call_count, 0)
def test_start_unfiltered_no_terms(self):
# Create an unfiltered stream instead
self.stream = DynamicTwitterStream(auth=self.auth,
listener=self.listener,
term_checker=self.checker,
retry_count=self.retry_count,
unfiltered=True)
# Start the stream without a term
self.stream.start_stream()
# Should check the list of terms
self.checker.tracking_terms.assert_called_once_with()
# But it should create a stream even without any terms
self.MockTweepyStream.assert_called_once_with(self.auth, self.listener,
stall_warnings=True,
timeout=90,
retry_count=self.retry_count)
# It should be using the sample endpoint
self.tweepy_stream_instance.sample.assert_called_once_with(is_async=True, languages=None)
def test_start_with_languages(self):
# Create a stream with languages
languages = ['en', 'fr']
self.stream = DynamicTwitterStream(auth=self.auth,
listener=self.listener,
term_checker=self.checker,
retry_count=self.retry_count,
unfiltered=True,
languages=languages)
# Start the stream without a term
self.stream.start_stream()
# It should be using the sample endpoint with languages
self.tweepy_stream_instance.sample.assert_called_once_with(is_async=True, languages=languages)
def test_start_stream_with_terms(self):
# Start the stream with a term
self.term_list.append("hello")
self.stream.start_stream()
# Should check the list of terms
self.checker.tracking_terms.assert_called_once_with()
# Should create a Stream instance
self.MockTweepyStream.assert_called_once_with(self.auth, self.listener,
stall_warnings=True,
timeout=90,
retry_count=self.retry_count)
# Should start the filter with the terms
self.tweepy_stream_instance.filter.assert_called_once_with(track=self.term_list, is_async=True, languages=None)
def test_stop_stream_not_started(self):
self.stream.stop_stream()
# No attempt to disconnect a stream that isn't started
self.assertEqual(self.tweepy_stream_instance.disconnect.call_count, 0)
def test_stop_stream_started(self):
# Start the stream with a term
self.term_list.append("hello")
self.stream.start_stream()
self.stream.stop_stream()
# Should try to disconnect tweepy stream
self.tweepy_stream_instance.disconnect.assert_called_once_with()
def test_update_stream_terms_unchanged(self):
self.checker.check.return_value = False
self.stream.start_stream = mock.Mock()
self.stream.stop_stream = mock.Mock()
self.stream.update_stream()
# Should have checked if terms changed
self.checker.check.assert_called_once_with()
# Should NOT have stopped the old stream
self.assertEqual(self.stream.stop_stream.call_count, 0)
# Should NOT have started a new stream
self.assertEqual(self.stream.start_stream.call_count, 0)
def test_update_stream_terms_changed(self):
self.checker.check.return_value = True
self.stream.start_stream = mock.Mock()
self.stream.stop_stream = mock.Mock()
self.stream.update_stream()
# Should have checked if terms changed
self.checker.check.assert_called_once_with()
# Should have stopped the old stream
self.stream.stop_stream.assert_called_once_with()
# Should have started a new stream
self.stream.start_stream.assert_called_once_with()
def test_update_stream_after_error(self):
# Start the stream with a term
self.term_list.append("hello")
self.stream.start_stream()
self.stream.start_stream = mock.Mock()
self.stream.stop_stream = mock.Mock()
# Simulate a dead stream
self.tweepy_stream_instance.running = False
self.listener.error = 500
self.stream.update_stream()
# Should have stopped the old stream
self.stream.stop_stream.assert_called_once_with()
# Should have started a new stream
self.stream.start_stream.assert_called_once_with()
# Should have turned off the error
self.assertFalse(self.listener.error)
def test_update_stream_after_exception(self):
# Start the stream with a term
self.term_list.append("hello")
self.stream.start_stream()
self.stream.start_stream = mock.Mock()
self.stream.stop_stream = mock.Mock()
# Simulate an exception inside Tweepy
self.tweepy_stream_instance.running = False
self.listener.streaming_exception = Exception("testing")
self.stream.update_stream()
# Should have stopped the old stream
self.stream.stop_stream.assert_called_once_with()
# Should have started a new stream
self.stream.start_stream.assert_called_once_with()
# Should have turned off the exception
self.assertFalse(self.listener.streaming_exception)
def test_handle_exceptions(self):
self.listener.streaming_exception = None
try:
self.stream.handle_exceptions()
except Exception:
self.fail("Raised exception when no streaming exception set")
self.listener.streaming_exception = Exception("testing")
self.assertRaises(Exception, self.stream.handle_exceptions)
def test_clears_exceptions(self):
self.listener.streaming_exception = Exception("testing")
try:
self.stream.handle_exceptions()
except:
pass
self.assertEqual(self.listener.streaming_exception, None)
def test_stop_polling(self):
# Mock some supporting methods we test separately
self.stream.update_stream = mock.Mock()
self.stream.handle_exceptions = mock.Mock()
# This should poll every 1 second
thread = threading.Thread(target=self.stream.start_polling, args=[1])
thread.start()
waits = 0
# Wait for a maximum of 3 seconds (ish), or until the loop has run at least twice
while self.stream.update_stream.call_count < 2 and waits < 12:
print("Waiting...")
time.sleep(0.25)
waits += 1
self.assertTrue(self.stream.polling)
# Try to stop the thread
self.stream.stop_polling()
self.assertFalse(self.stream.polling)
# Wait for a maximum of 2 seconds
thread.join(timeout=2)
self.assertTrue(self.stream.update_stream.call_count >= 1, "Checked for stream/term updates")
self.assertTrue(self.stream.handle_exceptions.call_count >= 1, "Checked for stream exceptions")
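# Illustrative sketch (assumption, not part of the original test module): how the
# API exercised above is typically wired up outside of tests. The auth, listener
# and term_checker arguments are placeholders for real tweepy/twitter_monitor objects.
def run_stream_example(auth, listener, term_checker):
    stream = DynamicTwitterStream(auth=auth,
                                  listener=listener,
                                  term_checker=term_checker,
                                  retry_count=5)
    # start_polling() blocks, so run it on a worker thread as the test above does.
    poller = threading.Thread(target=stream.start_polling, args=[60])
    poller.start()
    try:
        time.sleep(300)  # stream for five minutes
    finally:
        stream.stop_polling()
        poller.join(timeout=5)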
|
|
# Copyright (c) 2010 Chris Moyer http://coredumped.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.exception import SDBResponseError
class SequenceGenerator(object):
"""Generic Sequence Generator object, this takes a single
string as the "sequence" and uses that to figure out
what the next value in a string is. For example
if you give "ABC" and pass in "A" it will give you "B",
and if you give it "C" it will give you "AA".
If you set "rollover" to True in the above example, passing
in "C" would give you "A" again.
The Sequence string can be a string or any iterable
that has the "index" function and is indexable.
"""
__name__ = "SequenceGenerator"
def __init__(self, sequence_string, rollover=False):
"""Create a new SequenceGenerator using the sequence_string
as how to generate the next item.
:param sequence_string: The string or list that explains
how to generate the next item in the sequence
:type sequence_string: str,iterable
:param rollover: Rollover instead of incrementing when
we hit the end of the sequence
:type rollover: bool
"""
self.sequence_string = sequence_string
self.sequence_length = len(sequence_string[0])
self.rollover = rollover
self.last_item = sequence_string[-1]
self.__name__ = "%s('%s')" % (self.__class__.__name__, sequence_string)
def __call__(self, val, last=None):
"""Get the next value in the sequence"""
# If they pass us in a string that's not at least
# the length of our sequence, then return the
# first element in our sequence
if val == None or len(val) < self.sequence_length:
return self.sequence_string[0]
last_value = val[-self.sequence_length:]
if (not self.rollover) and (last_value == self.last_item):
val = "%s%s" % (self(val[:-self.sequence_length]), self._inc(last_value))
else:
val = "%s%s" % (val[:-self.sequence_length], self._inc(last_value))
return val
def _inc(self, val):
"""Increment a single value"""
assert(len(val) == self.sequence_length)
return self.sequence_string[(self.sequence_string.index(val)+1) % len(self.sequence_string)]
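# Illustrative sketch (not part of the original module): how SequenceGenerator
# advances values, using the "ABC" example from the class docstring.
def _sequence_generator_example():
    gen = SequenceGenerator("ABC")
    assert gen("A") == "B"       # simple increment within the sequence
    assert gen("C") == "AA"      # end of sequence: grow by one position
    wrapping = SequenceGenerator("ABC", rollover=True)
    assert wrapping("C") == "A"  # rollover: wrap back to the start instead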
#
# Simple Sequence Functions
#
def increment_by_one(cv=None, lv=None):
if cv == None:
return 0
return cv + 1
def double(cv=None, lv=None):
if cv == None:
return 1
return cv * 2
def fib(cv=1, lv=0):
"""The fibonacci sequence, this incrementer uses the
last value"""
if cv == None:
cv = 1
if lv == None:
lv = 0
return cv + lv
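# Illustrative sketch (not part of the original module): fib() expects its own
# previous output as "cv" and the value before that as "lv", which is exactly
# what Sequence.next() below passes via (self.val, self.last_value).
def _fib_example():
    cv, lv = 1, 0
    series = []
    for _ in range(5):
        cv, lv = fib(cv, lv), cv
        series.append(cv)
    assert series == [1, 2, 3, 5, 8]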
increment_string = SequenceGenerator("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
class Sequence(object):
"""A simple Sequence using the new SDB "Consistent" features
Based largly off of the "Counter" example from mitch garnaat:
http://bitbucket.org/mitch/stupidbototricks/src/tip/counter.py"""
def __init__(self, id=None, domain_name=None, fnc=increment_by_one, init_val=None):
"""Create a new Sequence, using an optional function to
increment to the next number, by default we just increment by one.
Every parameter here is optional, if you don't specify any options
then you'll get a new SequenceGenerator with a random ID stored in the
default domain that increments by one and uses the default botoweb
environment
:param id: Optional ID (name) for this counter
:type id: str
:param domain_name: Optional domain name to use, by default we get this out of the
environment configuration
:type domain_name: str
:param fnc: Optional function to use for the incrementation; by default we just increment by one.
There are several functions defined in this module.
Your function must accept "None" to get the initial value
:type fnc: function, str
:param init_val: Initial value, by default this is the first element in your sequence,
but you can pass in any value, even a string if you pass in a function that uses
strings instead of ints to increment
"""
self._db = None
self._value = None
self.last_value = None
self.domain_name = domain_name
self.id = id
if self.id == None:
import uuid
self.id = str(uuid.uuid4())
if init_val == None:
init_val = fnc(init_val)
self.val = init_val
self.item_type = type(fnc(None))
self.timestamp = None
# Allow us to pass in a full name to a function
if type(fnc) == str:
from boto.utils import find_class
fnc = find_class(fnc)
self.fnc = fnc
def set(self, val):
"""Set the value"""
import time
now = time.time()
expected_value = []
new_val = {}
new_val['timestamp'] = now
if self._value != None:
new_val['last_value'] = self._value
expected_value = ['current_value', str(self._value)]
new_val['current_value'] = val
try:
self.db.put_attributes(self.id, new_val, expected_value=expected_value)
self.timestamp = new_val['timestamp']
except SDBResponseError, e:
if e.status == 409:
raise ValueError, "Sequence out of sync"
else:
raise
def get(self):
"""Get the value"""
val = self.db.get_attributes(self.id, consistent_read=True)
if val and val.has_key('timestamp'):
self.timestamp = val['timestamp']
if val and val.has_key('current_value'):
self._value = self.item_type(val['current_value'])
if val.has_key("last_value") and val['last_value'] != None:
self.last_value = self.item_type(val['last_value'])
return self._value
val = property(get, set)
def __repr__(self):
return "%s('%s', '%s', '%s.%s', '%s')" % (
self.__class__.__name__,
self.id,
self.domain_name,
self.fnc.__module__, self.fnc.__name__,
self.val)
def _connect(self):
"""Connect to our domain"""
if not self._db:
import boto
sdb = boto.connect_sdb()
if not self.domain_name:
self.domain_name = boto.config.get("DB", "sequence_db", boto.config.get("DB", "db_name", "default"))
try:
self._db = sdb.get_domain(self.domain_name)
except SDBResponseError, e:
if e.status == 400:
self._db = sdb.create_domain(self.domain_name)
else:
raise
return self._db
db = property(_connect)
def next(self):
self.val = self.fnc(self.val, self.last_value)
return self.val
def delete(self):
"""Remove this sequence"""
self.db.delete_attributes(self.id)
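# Illustrative sketch (assumption, not part of the original module): typical use
# of Sequence against SimpleDB. Running this requires boto credentials and SDB
# access; the id and domain_name values below are made up for the example.
def _sequence_usage_example():
    seq = Sequence(id="invoice-counter", domain_name="my_sequences")
    first = seq.val       # increment_by_one(None) == 0, persisted on creation
    second = seq.next()   # increments and stores the new value (conditional put on the previous value)
    fib_seq = Sequence(id="fib-counter", domain_name="my_sequences", fnc=fib)
    fib_seq.next()        # advances using both the current and last value
    return first, second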
|
|
from __future__ import absolute_import
from django.contrib.auth import authenticate
from django.contrib.sites.models import Site
from django.contrib.sites.shortcuts import get_current_site
from django.core.exceptions import PermissionDenied
from django.db import models
from django.utils.crypto import get_random_string
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
import allauth.app_settings
from allauth.account.models import EmailAddress
from allauth.account.utils import get_next_redirect_url, setup_user_email
from allauth.utils import get_user_model
from . import app_settings, providers
from ..utils import get_request_param
from .adapter import get_adapter
from .fields import JSONField
class SocialAppManager(models.Manager):
def get_current(self, provider, request=None):
cache = {}
if request:
cache = getattr(request, '_socialapp_cache', {})
request._socialapp_cache = cache
app = cache.get(provider)
if not app:
site = get_current_site(request)
app = self.get(
sites__id=site.id,
provider=provider)
cache[provider] = app
return app
@python_2_unicode_compatible
class SocialApp(models.Model):
objects = SocialAppManager()
provider = models.CharField(verbose_name=_('provider'),
max_length=30,
choices=providers.registry.as_choices())
name = models.CharField(verbose_name=_('name'),
max_length=40)
client_id = models.CharField(verbose_name=_('client id'),
max_length=191,
help_text=_('App ID, or consumer key'))
secret = models.CharField(verbose_name=_('secret key'),
max_length=191,
help_text=_('API secret, client secret, or'
' consumer secret'))
key = models.CharField(verbose_name=_('key'),
max_length=191,
blank=True,
help_text=_('Key'))
# Most apps can be used across multiple domains, therefore we use
# a ManyToManyField. Note that Facebook requires an app per domain
# (unless the domains share a common base name).
# blank=True allows for disabling apps without removing them
sites = models.ManyToManyField(Site, blank=True)
class Meta:
verbose_name = _('social application')
verbose_name_plural = _('social applications')
def __str__(self):
return self.name
@python_2_unicode_compatible
class SocialAccount(models.Model):
user = models.ForeignKey(allauth.app_settings.USER_MODEL,
on_delete=models.CASCADE)
provider = models.CharField(verbose_name=_('provider'),
max_length=30,
choices=providers.registry.as_choices())
# Just in case you're wondering if an OpenID identity URL is going
# to fit in a 'uid':
#
# Ideally, URLField(max_length=1024, unique=True) would be used
# for identity. However, MySQL has a max_length limitation of 191
# for URLField (in case of utf8mb4). How about
# models.TextField(unique=True) then? Well, that won't work
# either for MySQL due to another bug[1]. So the only way out
# would be to drop the unique constraint, or switch to shorter
# identity URLs. Opted for the latter, as [2] suggests that
# identity URLs are supposed to be short anyway, at least for the
# old spec.
#
# [1] http://code.djangoproject.com/ticket/2495.
# [2] http://openid.net/specs/openid-authentication-1_1.html#limits
uid = models.CharField(verbose_name=_('uid'),
max_length=app_settings.UID_MAX_LENGTH)
last_login = models.DateTimeField(verbose_name=_('last login'),
auto_now=True)
date_joined = models.DateTimeField(verbose_name=_('date joined'),
auto_now_add=True)
extra_data = JSONField(verbose_name=_('extra data'), default=dict)
class Meta:
unique_together = ('provider', 'uid')
verbose_name = _('social account')
verbose_name_plural = _('social accounts')
def authenticate(self):
return authenticate(account=self)
def __str__(self):
return force_text(self.user)
def get_profile_url(self):
return self.get_provider_account().get_profile_url()
def get_avatar_url(self):
return self.get_provider_account().get_avatar_url()
def get_provider(self):
return providers.registry.by_id(self.provider)
def get_provider_account(self):
return self.get_provider().wrap_account(self)
@python_2_unicode_compatible
class SocialToken(models.Model):
app = models.ForeignKey(SocialApp, on_delete=models.CASCADE)
account = models.ForeignKey(SocialAccount, on_delete=models.CASCADE)
token = models.TextField(
verbose_name=_('token'),
help_text=_(
'"oauth_token" (OAuth1) or access token (OAuth2)'))
token_secret = models.TextField(
blank=True,
verbose_name=_('token secret'),
help_text=_(
'"oauth_token_secret" (OAuth1) or refresh token (OAuth2)'))
expires_at = models.DateTimeField(blank=True, null=True,
verbose_name=_('expires at'))
class Meta:
unique_together = ('app', 'account')
verbose_name = _('social application token')
verbose_name_plural = _('social application tokens')
def __str__(self):
return self.token
class SocialLogin(object):
"""
Represents a social user that is in the process of being logged
in. This consists of the following information:
`account` (`SocialAccount` instance): The social account being
logged in. Providers are not responsible for checking whether
an account already exists. Therefore, a provider
typically creates a new (unsaved) `SocialAccount` instance. The
`User` instance pointed to by the account (`account.user`) may be
prefilled by the provider for use as a starting point later on
during the signup process.
`token` (`SocialToken` instance): An optional access token
that results from performing a successful authentication
handshake.
`state` (`dict`): The state to be preserved during the
authentication handshake. Note that this state may end up in the
url -- do not put any secrets in here. It currently only contains
the url to redirect to after login.
`email_addresses` (list of `EmailAddress`): Optional list of
e-mail addresses retrieved from the provider.
"""
def __init__(self, user=None, account=None, token=None,
email_addresses=None):
if token:
assert token.account is None or token.account == account
self.token = token
self.user = user
self.account = account
self.email_addresses = email_addresses if email_addresses is not None else []
self.state = {}
def connect(self, request, user):
self.user = user
self.save(request, connect=True)
def serialize(self):
serialize_instance = get_adapter().serialize_instance
ret = dict(account=serialize_instance(self.account),
user=serialize_instance(self.user),
state=self.state,
email_addresses=[serialize_instance(ea)
for ea in self.email_addresses])
if self.token:
ret['token'] = serialize_instance(self.token)
return ret
@classmethod
def deserialize(cls, data):
deserialize_instance = get_adapter().deserialize_instance
account = deserialize_instance(SocialAccount, data['account'])
user = deserialize_instance(get_user_model(), data['user'])
if 'token' in data:
token = deserialize_instance(SocialToken, data['token'])
else:
token = None
email_addresses = []
for ea in data['email_addresses']:
email_address = deserialize_instance(EmailAddress, ea)
email_addresses.append(email_address)
ret = SocialLogin()
ret.token = token
ret.account = account
ret.user = user
ret.email_addresses = email_addresses
ret.state = data['state']
return ret
def save(self, request, connect=False):
"""
Saves a new account. Note that while the account is new,
the user may be an existing one (when connecting accounts)
"""
assert not self.is_existing
user = self.user
user.save()
self.account.user = user
self.account.save()
if app_settings.STORE_TOKENS and self.token:
self.token.account = self.account
self.token.save()
if connect:
# TODO: Add any new email addresses automatically?
pass
else:
setup_user_email(request, user, self.email_addresses)
@property
def is_existing(self):
"""
Whether or not the account is already backed by a database record.
"""
return self.account.pk
def lookup(self):
"""
Lookup existing account, if any.
"""
assert not self.is_existing
try:
a = SocialAccount.objects.get(provider=self.account.provider,
uid=self.account.uid)
# Update account
a.extra_data = self.account.extra_data
self.account = a
self.user = self.account.user
a.save()
# Update token
if app_settings.STORE_TOKENS and self.token:
assert not self.token.pk
try:
t = SocialToken.objects.get(account=self.account,
app=self.token.app)
t.token = self.token.token
if self.token.token_secret:
# only update the refresh token if we got one
# many oauth2 providers do not resend the refresh token
t.token_secret = self.token.token_secret
t.expires_at = self.token.expires_at
t.save()
self.token = t
except SocialToken.DoesNotExist:
self.token.account = a
self.token.save()
except SocialAccount.DoesNotExist:
pass
def get_redirect_url(self, request):
url = self.state.get('next')
return url
@classmethod
def state_from_request(cls, request):
state = {}
next_url = get_next_redirect_url(request)
if next_url:
state['next'] = next_url
state['process'] = get_request_param(request, 'process', 'login')
state['scope'] = get_request_param(request, 'scope', '')
state['auth_params'] = get_request_param(request, 'auth_params', '')
return state
@classmethod
def stash_state(cls, request):
state = cls.state_from_request(request)
verifier = get_random_string()
request.session['socialaccount_state'] = (state, verifier)
return verifier
@classmethod
def unstash_state(cls, request):
if 'socialaccount_state' not in request.session:
raise PermissionDenied()
state, verifier = request.session.pop('socialaccount_state')
return state
@classmethod
def verify_and_unstash_state(cls, request, verifier):
if 'socialaccount_state' not in request.session:
raise PermissionDenied()
state, verifier2 = request.session.pop('socialaccount_state')
if verifier != verifier2:
raise PermissionDenied()
return state
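# Illustrative sketch (assumption, not part of allauth): a SocialLogin built by a
# provider can be round-tripped through serialize()/deserialize(), which is how it
# can be stashed between requests (e.g. in the session) and restored later.
def _sociallogin_roundtrip(sociallogin):
    data = sociallogin.serialize()           # plain dict of serialized instances
    restored = SocialLogin.deserialize(data)
    assert restored.account.uid == sociallogin.account.uid
    assert restored.state == sociallogin.state
    return restored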
|
|
# coding=utf-8
# Copyright 2022 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Transformer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensor2tensor.data_generators import librispeech
from tensor2tensor.data_generators import problem_hparams
from tensor2tensor.models import transformer
import tensorflow.compat.v1 as tf
BATCH_SIZE = 3
INPUT_LENGTH = 5
TARGET_LENGTH = 7
VOCAB_SIZE = 10
def get_model(hparams=None, mode=tf.estimator.ModeKeys.TRAIN,
has_input=True, model_cls=transformer.Transformer):
if hparams is None:
hparams = transformer.transformer_tiny()
hparams.hidden_size = 8
hparams.filter_size = 32
hparams.num_heads = 1
hparams.layer_prepostprocess_dropout = 0.0
if hparams.get("problem_hparams", None) is None:
p_hparams = problem_hparams.test_problem_hparams(VOCAB_SIZE,
VOCAB_SIZE,
hparams)
if not has_input:
del p_hparams.modality["inputs"]
hparams.problem_hparams = p_hparams
inputs = np.random.randint(
VOCAB_SIZE, size=(BATCH_SIZE, INPUT_LENGTH, 1, 1))
targets = np.random.randint(
VOCAB_SIZE, size=(BATCH_SIZE, TARGET_LENGTH, 1, 1))
features = {
"targets": tf.constant(targets, dtype=tf.int32, name="targets"),
"target_space_id": tf.constant(1, dtype=tf.int32)
}
if has_input:
features["inputs"] = tf.constant(inputs, dtype=tf.int32, name="inputs")
return model_cls(hparams, mode, p_hparams), features
def small_librispeech_model(param_overrides=None):
hparams = transformer.transformer_small()
hparams.hidden_size = 8
hparams.filter_size = 32
hparams.num_heads = 1
hparams.layer_prepostprocess_dropout = 0.0
p_hparams = librispeech.Librispeech().get_hparams(hparams)
p_hparams.vocab_size["targets"] = VOCAB_SIZE
hparams.problem_hparams = p_hparams
model = transformer.Transformer(hparams, problem_hparams=p_hparams)
if param_overrides is not None: # Add or Set any provided HParams
assert isinstance(param_overrides, dict)
for param_name in param_overrides:
if hasattr(hparams, param_name):
hparams.set_hparam(param_name, param_overrides[param_name])
else:
hparams.add_hparam(param_name, param_overrides[param_name])
inputs = np.random.rand(
BATCH_SIZE, INPUT_LENGTH, 80, 3).astype("float32") # modify for speech
targets = np.random.randint(
VOCAB_SIZE, size=(BATCH_SIZE, TARGET_LENGTH, 1, 1))
features = {
"inputs": tf.constant(inputs, dtype=tf.float32, name="inputs"),
"targets": tf.constant(targets, dtype=tf.int32, name="targets"),
"target_space_id": tf.constant(1, dtype=tf.int32)
}
return model, features
class TransformerTest(tf.test.TestCase):
def testTransformer(self, get_model_fn=None, p=None):
if get_model_fn:
model, features = get_model_fn(param_overrides=p)
else:
model, features = get_model(transformer.transformer_small())
logits, _ = model(features)
with self.test_session() as session:
session.run(tf.global_variables_initializer())
res = session.run(logits)
self.assertEqual(res.shape, (BATCH_SIZE, TARGET_LENGTH, 1, 1, VOCAB_SIZE))
def testTransformerLibrispeech(self, params=None):
self.testTransformer(get_model_fn=small_librispeech_model, p=params)
def testLibrispeechSlowVsFast(self, params=None):
self.testSlowVsFast(get_model_fn=small_librispeech_model, p=params)
def testLibrispeechMultihead(self, params=None):
self.testTransformerLibrispeech({"num_heads": 2})
def testLibrispeechWithAreaAttention(self):
self.testTransformerLibrispeech({"max_area_width": 2,
"num_area_layers": 1,
"area_key_mode": "mean",
"area_value_mode": "sum"})
def testTransformerRelative(self):
model, features = get_model(transformer.transformer_relative_tiny())
logits, _ = model(features)
with self.test_session() as session:
session.run(tf.global_variables_initializer())
res = session.run(logits)
self.assertEqual(res.shape, (BATCH_SIZE, TARGET_LENGTH, 1, 1, VOCAB_SIZE))
def testSlowVsFast(self, get_model_fn=None, p=None):
if get_model_fn:
model, features = get_model_fn(param_overrides=p)
else:
model, features = get_model(transformer.transformer_small())
decode_length = 3
out_logits, _ = model(features)
out_logits = tf.squeeze(out_logits, axis=[2, 3])
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=tf.reshape(out_logits, [-1, VOCAB_SIZE]),
labels=tf.reshape(features["targets"], [-1]))
loss = tf.reduce_mean(loss)
apply_grad = tf.train.AdamOptimizer(0.001).minimize(loss)
with self.test_session():
tf.global_variables_initializer().run()
for _ in range(100):
apply_grad.run()
model.set_mode(tf.estimator.ModeKeys.PREDICT)
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
greedy_result = model._slow_greedy_infer(
features, decode_length)["outputs"]
greedy_result = tf.squeeze(greedy_result, axis=[2, 3])
fast_result = model._greedy_infer(features, decode_length)["outputs"]
with self.test_session():
greedy_res = greedy_result.eval()
fast_res = fast_result.eval()
self.assertEqual(fast_res.shape, (BATCH_SIZE, INPUT_LENGTH + decode_length))
self.assertAllClose(greedy_res, fast_res)
def testSlowVsFastNoInput(self):
model, features = get_model(
transformer.transformer_small(), has_input=False)
decode_length = 3
out_logits, _ = model(features)
out_logits = tf.squeeze(out_logits, axis=[2, 3])
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=tf.reshape(out_logits, [-1, VOCAB_SIZE]),
labels=tf.reshape(features["targets"], [-1]))
loss = tf.reduce_mean(loss)
apply_grad = tf.train.AdamOptimizer(0.001).minimize(loss)
with self.test_session():
tf.global_variables_initializer().run()
for _ in range(100):
apply_grad.run()
model.set_mode(tf.estimator.ModeKeys.PREDICT)
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
slow_result = model._slow_greedy_infer(
features, decode_length)["outputs"]
slow_result = tf.squeeze(slow_result, axis=[2, 3])
fast_result = model._greedy_infer(features, decode_length)["outputs"]
with self.test_session():
slow_res = slow_result.eval()
fast_res = fast_result.eval()
self.assertEqual(slow_res.shape, (BATCH_SIZE, decode_length))
self.assertAllClose(slow_res, fast_res)
def testBeamDecodeWithRelativeAttention(self):
decode_length = 2
model, features = get_model(transformer.transformer_relative_tiny())
model.set_mode(tf.estimator.ModeKeys.PREDICT)
beam_result = model._beam_decode(
features, decode_length, beam_size=4, top_beams=1,
alpha=1.0)["outputs"]
with self.test_session():
tf.global_variables_initializer().run()
beam_result.eval()
# TODO(petershaw): This test is flaky because the decode may hit EOS before
# getting to the expected length.
# self.assertEqual(beam_res.shape,
# (BATCH_SIZE, INPUT_LENGTH + decode_length))
def testBeamVsFast(self):
model, features = get_model(transformer.transformer_small())
decode_length = 2
out_logits, _ = model(features)
out_logits = tf.squeeze(out_logits, axis=[2, 3])
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=tf.reshape(out_logits, [-1, VOCAB_SIZE]),
labels=tf.reshape(features["targets"], [-1]))
loss = tf.reduce_mean(loss)
apply_grad = tf.train.AdamOptimizer(0.001).minimize(loss)
with self.test_session():
tf.global_variables_initializer().run()
for _ in range(100):
apply_grad.run()
model.set_mode(tf.estimator.ModeKeys.PREDICT)
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
beam_result = model._beam_decode_slow(
features,
decode_length,
beam_size=4,
top_beams=1,
alpha=1.0)["outputs"]
fast_result = model._beam_decode(
features,
decode_length,
beam_size=4,
top_beams=1,
alpha=1.0)["outputs"]
with self.test_session():
beam_res = beam_result.eval()
fast_res = fast_result.eval()
self.assertAllClose(beam_res, fast_res)
def testTransformerWithoutProblem(self):
hparams = transformer.transformer_test()
embedded_inputs = np.random.random_sample(
(BATCH_SIZE, INPUT_LENGTH, 1, hparams.hidden_size))
embedded_targets = np.random.random_sample(
(BATCH_SIZE, TARGET_LENGTH, 1, hparams.hidden_size))
transformed_features = {
"inputs": tf.constant(embedded_inputs, dtype=tf.float32),
"targets": tf.constant(embedded_targets, dtype=tf.float32)
}
model = transformer.Transformer(hparams)
body_out, _ = model(transformed_features)
self.assertAllEqual(
body_out.get_shape().as_list(),
[BATCH_SIZE, TARGET_LENGTH, 1, hparams.hidden_size])
def testTransformerWithEncoderDecoderAttentionLoss(self):
model, features = get_model(
transformer.transformer_supervised_attention())
expected_attention_weights = np.random.random_sample(
size=(BATCH_SIZE, TARGET_LENGTH, INPUT_LENGTH))
features["expected_attentions"] = tf.constant(
expected_attention_weights, dtype=tf.float32)
_, extra_loss = model(features)
with self.test_session() as session:
session.run(tf.global_variables_initializer())
res = session.run(extra_loss["attention_loss"])
self.assertEqual(res.shape, ())
def _create_greedy_infer_model(self):
"""Creates model for greedy inference testing.
Returns:
model: A t2t model.
      features: A map of string to tensor.
"""
model, features = get_model(transformer.transformer_small())
out_logits, _ = model(features)
out_logits = tf.squeeze(out_logits, axis=[2, 3])
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=tf.reshape(out_logits, [-1, VOCAB_SIZE]),
labels=tf.reshape(features["targets"], [-1]))
loss = tf.reduce_mean(loss)
apply_grad = tf.train.AdamOptimizer(0.001).minimize(loss)
with self.test_session():
tf.global_variables_initializer().run()
for _ in range(100):
apply_grad.run()
model.set_mode(tf.estimator.ModeKeys.PREDICT)
return model, features
def testGreedySlowTPUVsNonTPU(self):
decode_length = 3
model, features = self._create_greedy_infer_model()
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
slow_result_non_tpu = model._slow_greedy_infer(
features, decode_length)["outputs"]
slow_result_non_tpu = tf.squeeze(slow_result_non_tpu, axis=[2, 3])
slow_result_tpu = model._slow_greedy_infer_tpu(
features, decode_length)["outputs"]
slow_result_tpu = tf.squeeze(slow_result_tpu, axis=[2, 3])
with self.test_session():
slow_non_tpu_res = slow_result_non_tpu.eval()
slow_tpu_res = slow_result_tpu.eval()
self.assertEqual(slow_tpu_res.shape,
(BATCH_SIZE, INPUT_LENGTH + decode_length))
self.assertAllClose(slow_tpu_res, slow_non_tpu_res)
def testGreedyFastTPUVsNonTPU(self):
decode_length = 3
model, features = self._create_greedy_infer_model()
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
fast_result_non_tpu = model._greedy_infer(
features, decode_length, use_tpu=False)["outputs"]
fast_result_tpu = model._greedy_infer(
features, decode_length, use_tpu=True)["outputs"]
with self.test_session():
fast_non_tpu_res = fast_result_non_tpu.eval()
fast_tpu_res = fast_result_tpu.eval()
self.assertEqual(fast_tpu_res.shape,
(BATCH_SIZE, INPUT_LENGTH + decode_length))
self.assertAllClose(fast_tpu_res, fast_non_tpu_res)
def testGreedyTPUSlowVsFast(self):
decode_length = 3
model, features = self._create_greedy_infer_model()
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
slow_result = model._slow_greedy_infer_tpu(
features, decode_length)["outputs"]
slow_result = tf.squeeze(slow_result, axis=[2, 3])
fast_result = model._greedy_infer(
features, decode_length, use_tpu=True)["outputs"]
with self.test_session():
slow_res = slow_result.eval()
fast_res = fast_result.eval()
self.assertEqual(fast_res.shape,
(BATCH_SIZE, INPUT_LENGTH + decode_length))
self.assertAllClose(fast_res, slow_res)
class TransformerScorerTest(tf.test.TestCase):
def testReturnsScores(self):
model, features = get_model(
mode=tf.estimator.ModeKeys.PREDICT,
model_cls=transformer.TransformerScorer)
infer_out = model.infer(features)
self.assertTrue("outputs" in infer_out)
self.assertTrue("scores" in infer_out)
with self.test_session() as session:
session.run(tf.global_variables_initializer())
infer_out = session.run(infer_out)
self.assertEqual((BATCH_SIZE,), infer_out["scores"].shape)
self.assertEqual((BATCH_SIZE, TARGET_LENGTH), infer_out["outputs"].shape)
def testVarNames(self):
with tf.Graph().as_default():
model, features = get_model(
mode=tf.estimator.ModeKeys.PREDICT,
model_cls=transformer.TransformerScorer)
_ = model.infer(features)
scorer_vars = [v.name for v in tf.global_variables()]
with tf.Graph().as_default():
model, features = get_model(
mode=tf.estimator.ModeKeys.EVAL,
model_cls=transformer.TransformerScorer)
_ = model(features)
scorer_eval_vars = [v.name for v in tf.global_variables()]
with tf.Graph().as_default():
model, features = get_model(
mode=tf.estimator.ModeKeys.EVAL,
model_cls=transformer.Transformer)
_ = model(features)
transformer_vars = [v.name for v in tf.global_variables()]
self.assertEqual(sorted(scorer_vars), sorted(transformer_vars))
self.assertEqual(sorted(scorer_eval_vars), sorted(transformer_vars))
if __name__ == "__main__":
tf.test.main()
|
|
"""
MIT License
Copyright (c) 2018 Chad Rosenquist
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Tests CaptureLogs.
Created on April 4, 2018
@author: Chad Rosenquist
"""
import unittest
import logging
from io import StringIO
import loggingtestcase
class CaptureLogsTestCase(unittest.TestCase):
"""Tests for capturing log files."""
@loggingtestcase.capturelogs('foo', level='INFO')
def test_capture_logs(self, logs):
"""Verify logs using @capturelogs decorator."""
logging.getLogger('foo').info('first message')
logging.getLogger('foo.bar').error('second message')
self.assertEqual(logs.output, ['INFO:foo:first message',
'ERROR:foo.bar:second message'])
self.assertEqual(logs.records[0].message, 'first message')
self.assertEqual(logs.records[1].message, 'second message')
@loggingtestcase.capturelogs('foo', level='ERROR')
def test_capture_logs_2(self, logs):
"""Verify logs using @capturelogs decorator, using a different level."""
logging.getLogger('foo').info('first message')
logging.getLogger('foo.bar').error('second message')
self.assertEqual(logs.output, ['ERROR:foo.bar:second message'])
@loggingtestcase.capturelogs('foo')
def test_default_log_level(self, logs):
"""Verify defaults to INFO."""
logging.getLogger('foo').info('first message')
logging.getLogger('foo.bar').error('second message')
logging.getLogger('foo').debug('third message')
self.assertEqual(logs.output, ['INFO:foo:first message',
'ERROR:foo.bar:second message'])
@loggingtestcase.capturelogs(logging.getLogger('foo'), 'INFO')
def test_logger_passed_in(self, logs):
"""Tests with a logger passed in, instead of a log name."""
logging.getLogger('foo').info('first message')
self.assertEqual(logs.output, ['INFO:foo:first message'])
def test_log_level_restored(self):
"""Verifies the log level is correct restored."""
foo_logger = logging.getLogger('foo')
foo_logger.setLevel(logging.DEBUG)
self._logging_test_function()
self.assertEqual(foo_logger.level, logging.DEBUG)
@loggingtestcase.capturelogs('foo', level='INFO')
def _logging_test_function(self, logs):
pass
def test_log_level_restored_after_exception(self):
"""Verifies the log level is correct restored, even after an exception."""
foo_logger = logging.getLogger('foo')
foo_logger.setLevel(logging.DEBUG)
with self.assertRaises(ValueError):
self._logging_test_function_exception()
self.assertEqual(foo_logger.level, logging.DEBUG)
@loggingtestcase.capturelogs('foo', level='INFO')
def _logging_test_function_exception(self, logs):
raise ValueError('test')
def test_arguments_and_return_value(self):
"""Verifies the arguments and return value are correctly preserved."""
return_value = self._arguments_and_return('one', keyword_one='two')
self.assertEqual(return_value, 'one | two')
# noinspection PyUnusedLocal
@loggingtestcase.capturelogs('foo', level='INFO')
def _arguments_and_return(self, argument1, logs, keyword_one='hello'):
return '{0} | {1}'.format(argument1, keyword_one)
class CaptureLogsAssertNoLogs(unittest.TestCase):
"""Tests parameter assert_no_logs."""
def test_assert_no_logs(self):
"""Tests when assert_no_logs=True, an AssertionError is raised if logs are emitted."""
with self.assertRaisesRegex(AssertionError,
r'In _assert_no_logs\(\), '
r'the follow messages were unexpectedly logged:\n'
r' INFO:foo:first message\n'
r' ERROR:foo:second message'):
self._assert_no_logs()
# noinspection PyUnusedLocal
@loggingtestcase.capturelogs('foo', level='INFO', assert_no_logs=True)
def _assert_no_logs(self, logs):
"""Log a a message, causing capturelogs to raise an AssertionError."""
logging.getLogger('foo').info('first message')
logging.getLogger('foo').error('second message')
def test_assert_no_logs_no_except(self):
"""Tests when assert_no_logs=True and there are no logs emitted,
no exceptions are thrown.
"""
self._assert_no_logs_no_logging()
# noinspection PyUnusedLocal
@loggingtestcase.capturelogs('foo', level='INFO', assert_no_logs=True)
def _assert_no_logs_no_logging(self, logs):
pass
class DisplayLogsTestCase(unittest.TestCase):
"""Tests for displaying the logs.
The code is actually very simple, but there is a lot of test code
so place this into another class.
"""
@classmethod
def _set_stream_handler(cls):
foo_logger = logging.getLogger('foo')
stream = StringIO()
stream_handler = logging.StreamHandler(stream)
stream_formatter = logging.Formatter("%(levelname)s:%(name)s:%(message)s")
stream_handler.setFormatter(stream_formatter)
foo_logger.addHandler(stream_handler)
return stream
def test_display_logs_if_failure(self):
"""If the test fails, the logs are displayed."""
stream = self._set_stream_handler()
# Run a test that writes to the logs and fails.
with self.assertRaises(AssertionError):
self._failed_test()
# Verify the logs are captured.
self.assertMultiLineEqual(stream.getvalue(),
'INFO:foo:Failed to open file!'
'\nDEBUG:foo:Check file permissions.\n')
# noinspection PyUnusedLocal
@loggingtestcase.capturelogs('foo', level='DEBUG')
def _failed_test(self, logs):
logging.getLogger('foo').info('Failed to open file!')
logging.getLogger('foo').debug('Check file permissions.')
self.assertTrue(False)
def test_discard_logs_if_failure(self):
"""If the test fails, the logs are discarded."""
stream = self._set_stream_handler()
# Run a test that writes to the logs and fails.
with self.assertRaises(AssertionError):
self._failed_test_discard()
# Verify the logs are not captured.
self.assertEqual(stream.getvalue(), '')
# noinspection PyUnusedLocal
@loggingtestcase.capturelogs('foo', level='DEBUG',
display_logs=loggingtestcase.DisplayLogs.NEVER)
def _failed_test_discard(self, logs):
logging.getLogger('foo').info('Failed to open file!')
logging.getLogger('foo').debug('Check file permissions.')
self.assertTrue(False)
def test_discard_logs_if_success(self):
"""If the test passes, the logs are discarded."""
stream = self._set_stream_handler()
        # Run a test that writes to the logs and passes.
self._success_test_discard()
# Verify the logs are not captured.
self.assertEqual(stream.getvalue(), '')
# noinspection PyUnusedLocal
@loggingtestcase.capturelogs('foo', level='DEBUG')
def _success_test_discard(self, logs):
logging.getLogger('foo').info('Failed to open file!')
logging.getLogger('foo').debug('Check file permissions.')
def test_display_logs_if_success(self):
"""If the test passes, the logs are displayed."""
stream = self._set_stream_handler()
        # Run a test that writes to the logs and passes.
        self._success_test_display()
        # Verify the logs are captured.
self.assertEqual(stream.getvalue(),
'INFO:foo:Failed to open file!\nDEBUG:foo:Check file permissions.\n')
# noinspection PyUnusedLocal
@loggingtestcase.capturelogs('foo', level='DEBUG',
display_logs=loggingtestcase.DisplayLogs.ALWAYS)
def _success_test_display(self, logs):
logging.getLogger('foo').info('Failed to open file!')
logging.getLogger('foo').debug('Check file permissions.')
if __name__ == '__main__':
unittest.main()
|
|
# Author: Kun Xi <[email protected]>
# License: Python Software Foundation License
"""
A Python wrapper to access the Amazon Web Services (AWS) E-Commerce Service APIs,
based upon pyamazon (http://www.josephson.org/projects/pyamazon/), enhanced
to meet the latest AWS specification (http://www.amazon.com/webservices).
This module defines the following classes:
- `Bag`, a generic container for the python objects
- `ListIterator`, a forward iterator adapter
- `PaginatedIterator`, a page-based iterator using lazy evaluation
Exception classes:
- `AWSException`
- `NoLicenseKey`
- `BadLocale`
- `BadOption`
- `ExactParameterRequirement`
- `ExceededMaximumParameterValues`
- `InsufficientParameterValues`
- `InternalError`
- `InvalidEnumeratedParameter`
- `InvalidISO8601Time`
- `InvalidOperationForMarketplace`
- `InvalidOperationParameter`
- `InvalidParameterCombination`
- `InvalidParameterValue`
- `InvalidResponseGroup`
- `InvalidServiceParameter`
- `InvalidSubscriptionId`
- `InvalidXSLTAddress`
- `MaximumParameterRequirement`
- `MinimumParameterRequirement`
- `MissingOperationParameter`
- `MissingParameterCombination`
- `MissingParameters`
- `MissingParameterValueCombination`
- `MissingServiceParameter`
- `ParameterOutOfRange`
- `ParameterRepeatedInRequest`
- `RestrictedParameterValueCombination`
- `XSLTTransformationError`
Functions:
- `setLocale`
- `getLocale`
- `setLicenseKey`
- `getLicenseKey`
- `getVersion`
- `setOptions`
- `getOptions`
- `buildRequest`
- `buildException`
- `query`
- `SimpleObject`
- `Collection`
- `Pagination`
- `unmarshal`
- `ItemLookup`
- `XMLItemLookup`
- `ItemSearch`
- `XMLItemSearch`
- `SimilarityLookup`
- `XMLSimilarityLookup`
- `ListLookup`
- `XMLListLookup`
- `ListSearch`
- `XMLListSearch`
- `CartCreate`
- `XMLCartCreate`
- `CartAdd`
- `XMLCartAdd`
- `CartGet`
- `XMLCartGet`
- `CartModify`
- `XMLCartModify`
- `CartClear`
- `XMLCartClear`
- `SellerLookup`
- `XMLSellerLookup`
- `SellerListingLookup`
- `XMLSellerListingLookup`
- `SellerListingSearch`
- `XMLSellerListingSearch`
- `CustomerContentSearch`
- `XMLCustomerContentSearch`
- `CustomerContentLookup`
- `XMLCustomerContentLookup`
- `BrowseNodeLookup`
- `XMLBrowseNodeLookup`
- `Help`
- `XMLHelp`
- `TransactionLookup`
- `XMLTransactionLookup`
According to the ECS specification, every operation has two implementations, foo and XMLfoo, for example `ItemLookup` and `XMLItemLookup`: foo returns a Python object, XMLfoo returns the raw XML document.
How To Use This Module
======================
(See the individual classes, methods, and attributes for details.)
1. Apply for an Amazon Web Services API key:
https://aws-portal.amazon.com/gp/aws/developer/registration/index.html
2. Import it: ``import pyaws.ecs``
3. Set the license key: ``ecs.setLicenseKey('YOUR-KEY-FROM-AWS')``
or call ``ecs.setLicenseKey()`` with no argument to read it from the
AWS_LICENSE_KEY environment variable.
Optional:
a) set other options, like AssociateTag, MerchantID, Validate
b) export the http_proxy environment variable if you want to use a proxy
c) set the locale if your locale is not ``us``
4. Send queries to AWS and manipulate the returned Python objects.
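A minimal usage sketch (the key values and the search keywords below are
illustrative placeholders, not working credentials, and the attribute names
available on the returned objects depend on the ResponseGroup)::

    import pyaws.ecs as ecs
    ecs.setLicenseKey('YOUR-ACCESS-KEY-ID')
    ecs.setSecretKey('YOUR-SECRET-ACCESS-KEY')
    books = ecs.ItemSearch('python programming', SearchIndex='Books')
    for book in books:
        print book.Title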
"""
__author__ = "Kun Xi <[email protected]>"
__version__ = "0.3.0"
__license__ = "Python Software Foundation"
__docformat__ = 'restructuredtext'
import os, urllib, string
from xml.dom import minidom
import hmac
import hashlib
import base64
from time import strftime
class Meta:
license_key = None
secret_key = None
locale = "us"
version = "2007-04-04"
options = {}
locales = {
None : "ecs.amazonaws.com",
"us" : "ecs.amazonaws.com",
"uk" : "ecs.amazonaws.co.uk",
"de" : "ecs.amazonaws.co.de",
"jp" : "ecs.amazonaws.co.jp",
"fr" : "ecs.amazonaws.co.fr",
"ca" : "ecs.amazonaws.co.ca",
}
def __buildPlugins():
"""
Build plugins used in unmarshal
    Return a dict keyed by operation name:
    Operation => { 'isBypassed': (...), 'isPivoted': (...),
                   'isCollective': (...), 'isCollected': (...),
                   'isPaged': { key1: (...), key2: (...), ... } }
"""
"""
    ResponseGroups hierarchy:
    Parent => children
    The benefit of this layer is to reduce redundancy: when a child
    ResponseGroup changes, the change propagates to the parent
    automatically.
"""
rgh = {
'CustomerFull': ('CustomerInfo', 'CustomerLists', 'CustomerReviews'),
'Large': ('Accessories', 'BrowseNodes', 'ListmaniaLists', 'Medium', 'Offers', 'Reviews', 'Similarities', 'Tracks'),
'ListFull': ('ListInfo', 'ListItems'),
'ListInfo': ('ListMinimum', ),
'ListItems': ('ListMinimum', ),
'Medium': ('EditorialReview', 'Images', 'ItemAttributes', 'OfferSummary', 'Request', 'SalesRank', 'Small'),
'OfferFull': ('Offers',),
'Offers': ('OfferSummary',),
'Variations': ('VariationMinimum', 'VariationSummary')
}
"""
ResponseGroup and corresponding plugins:
ResponseGroup=>(isBypassed, isPivoted, isCollective, isCollected, isPaged)
isPaged is defined as:
{ kwItems : (kwPage, kwTotalResults, pageSize) }
- kwItems: string, the tagname of collection
- kwPage: string, the tagname of page
- kwTotalResults: string, the tagname of length
- pageSize: constant integer, the size of each page
CODE DEBT:
- Do we need to remove the ResponseGroup in rgh.keys()? At least, Medium does not
introduce any new attributes.
"""
rgps = {
'Accessories': ((), (), ('Accessories',), ('Accessory',), {}),
'AlternateVersions': ((), (), (), (), {}),
'BrowseNodeInfo': ((), (), ('Children', 'Ancestors'), ('BrowseNode',), {}),
'BrowseNodes': ((), (), ('Children', 'Ancestors', 'BrowseNodes'), ('BrowseNode',), {}),
'Cart': ((), (), (), (), {}),
'CartNewReleases': ((), (), (), (), {}),
'CartTopSellers': ((), (), (), (), {}),
'CartSimilarities': ((), (), (), (), {}),
'Collections': ((), (), (), (), {}),
'CustomerFull': ((), (), (), (), {}),
'CustomerInfo': ((), (), ('Customers',), ('Customer',), {}),
'CustomerLists': ((), (), ('Customers',), ('Customer',), {}),
'CustomerReviews': ((), (), ('Customers', 'CustomerReviews',),('Customer', 'Review'),{}),
'EditorialReview': ((), (), ('EditorialReviews',), ('EditorialReview',), {}),
'Help': ((), (), ('RequiredParameters', 'AvailableParameters',
'DefaultResponseGroups', 'AvailableResponseGroups'),
('Parameter', 'ResponseGroup'), {}),
'Images': ((), (), ('ImageSets',), ('ImageSet',), {}),
'ItemAttributes': ((), ('ItemAttributes',), (), (), {}),
'ItemIds': ((), (), (), (), {}),
'ItemLookup.Small': ((), ('ItemAttributes',), (), ('Item',),
{'Items': ('OfferPage', 'OfferPages', 10) }),
'ItemSearch.Small': ((), ('ItemAttributes',), (), ('Item',),
{'Items': ('ItemPage', 'TotalPages', 10) }),
'Large': ((), (), (), (), {}),
'ListFull': ((), (), (), ('ListItem', ), {}),
'ListInfo': ((), (), (), (), {}),
'ListItems': ((), (), ('Lists',), ('ListItem', 'List'), {'List': ('ProductPage',
'TotalPages', 10)}),
'ListmaniaLists': ((), (), ('ListmaniaLists', ), ('ListmaniaList',), {}),
'ListMinimum': ((), (), (), (), {}),
'Medium': ((), (), (), (), {}),
'MerchantItemAttributes': ((), (), (), (), {}),
'NewReleases': ((), (), ('NewReleases',), ('NewRelease',), {}),
'OfferFull': ((), (), (), (), {}),
'OfferListings': ((), (), (), (), {}),
'Offers': ((), (), (), ('Offer',), {'Offers': ('OfferPage', 'TotalOfferPages', 10)}),
'OfferSummary': ((), (), (), (), {}),
'Request': (('Request',), (), (), (), {}),
'Reviews': ((), (), ('CustomerReviews', ),('Review',), {}),
'SalesRank': ((), (), (), (), {}),
'SearchBins': ((), (), ('SearchBinSets',), ('SearchBinSet',), {}),
'SimilarityLookup.Small': ((), ('ItemAttributes',), ('Items',), ('Item',), {}),
'Seller': ((), (), (), (), {}),
'SellerListing': ((), (), (), (), {}),
'Similarities': ((), (), ('SimilarProducts',), ('SimilarProduct',), {}),
'Small': ((), (), (), (), {}),
'Subjects': ((), (), ('Subjects',), ('Subject',), {}),
'TopSellers': ((), (), ('TopSellers',), ('TopSeller',), {}),
'Tracks': ((), ('Tracks',), (), (), {}),
'TransactionDetails': ((), (), ('Transactions', 'TransactionItems', 'Shipments'),
('Transaction', 'TransactionItem', 'Shipment'), {}),
'Variations': ((), (), (), (), {}),
'VariationMinimum': ((), (), ('Variations',), ('Variation',), {}),
'VariationImages': ((), (), (), (), {}),
'VariationSummary':((), (), (), (), {})
}
"""
Operation=>ResponseGroups
"""
orgs = {
'BrowseNodeLookup': ('Request', 'BrowseNodeInfo', 'NewReleases', 'TopSellers'),
'CartAdd': ('Cart', 'Request', 'CartSimilarities', 'CartTopSellers', 'NewReleases'),
'CartClear': ('Cart', 'Request'),
'CartCreate': ('Cart', 'Request', 'CartSimilarities', 'CartTopSellers', 'CartNewReleases'),
'CartGet': ('Cart', 'Request', 'CartSimilarities', 'CartTopSellers', 'CartNewReleases'),
'CartModify': ('Cart', 'Request', 'CartSimilarities', 'CartTopSellers', 'CartNewReleases'),
'CustomerContentLookup': ('Request', 'CustomerInfo', 'CustomerReviews', 'CustomerLists', 'CustomerFull'),
'CustomerContentSearch': ('Request', 'CustomerInfo'),
'Help': ('Request', 'Help'),
'ItemLookup': ('Request', 'ItemLookup.Small', 'Accessories', 'BrowseNodes', 'EditorialReview', 'Images', 'ItemAttributes', 'ItemIds', 'Large', 'ListmaniaLists', 'Medium', 'MerchantItemAttributes', 'OfferFull', 'Offers', 'OfferSummary', 'Reviews', 'SalesRank', 'Similarities', 'Subjects', 'Tracks', 'VariationImages', 'VariationMinimum', 'Variations', 'VariationSummary'),
'ItemSearch': ('Request', 'ItemSearch.Small', 'Accessories', 'BrowseNodes', 'EditorialReview', 'ItemAttributes', 'ItemIds', 'Large', 'ListmaniaLists', 'Medium', 'MerchantItemAttributes', 'OfferFull', 'Offers', 'OfferSummary', 'Reviews', 'SalesRank', 'SearchBins', 'Similarities', 'Subjects', 'Tracks', 'VariationMinimum', 'Variations', 'VariationSummary'),
'ListLookup': ('Request', 'ListInfo', 'Accessories', 'BrowseNodes', 'EditorialReview', 'Images', 'ItemAttributes', 'ItemIds', 'Large', 'ListFull', 'ListItems', 'ListmaniaLists', 'Medium', 'Offers', 'OfferSummary', 'Reviews', 'SalesRank', 'Similarities', 'Subjects', 'Tracks', 'VariationMinimum', 'Variations', 'VariationSummary'),
'ListSearch': ('Request', 'ListInfo', 'ListMinimum'),
'SellerListingLookup': ('Request', 'SellerListing'),
'SellerListingSearch': ('Request', 'SellerListing'),
'SellerLookup': ('Request', 'Seller'),
'SimilarityLookup': ('Request', 'SimilarityLookup.Small', 'Accessories', 'BrowseNodes', 'EditorialReview', 'Images', 'ItemAttributes', 'ItemIds', 'Large', 'ListmaniaLists', 'Medium', 'Offers', 'OfferSummary', 'Reviews', 'SalesRank', 'Similarities', 'Tracks', 'VariationMinimum', 'Variations', 'VariationSummary'),
'TransactionLookup':('Request', 'TransactionDetails')
}
def collapse(responseGroups):
l = []
for x in responseGroups:
l.append(x)
if x in rgh.keys():
l.extend( collapse(rgh[x]) )
return l
def mergePlugins(responseGroups, index):
#return reduce(lambda x, y: x.update(set(rgps[y][index])), responseGroups, set())
# this magic reduce does not work, using the primary implementation first.
# CODEDEBT: magic number !
if index == 4:
s = dict()
else:
s = set()
map(lambda x: s.update(rgps[x][index]), responseGroups)
return s
def unionPlugins(responseGroups):
return dict( [ (key, mergePlugins(collapse(responseGroups), index)) for index, key in enumerate(['isBypassed', 'isPivoted', 'isCollective', 'isCollected', 'isPaged']) ])
return dict( [ (k, unionPlugins(v)) for k, v in orgs.items() ] )
__plugins = __buildPlugins()
# Basic class for ECS
class Bag :
"""A generic container for the python objects"""
def __repr__(self):
return '<Bag instance: ' + self.__dict__.__repr__() + '>'
class ListIterator(list):
pass
class PaginatedIterator(ListIterator):
def __init__(self, XMLSearch, arguments, keywords, element, plugins):
"""
Initialize a `PaginatedIterator` object.
Parameters:
- `XMLSearch`: a function, the query to get the DOM
- `arguments`: a dictionary, `XMLSearch`'s arguments
- `keywords`: a tuple, (kwItems, (kwPage, kwTotalPages, pageSize) )
- `element`: a DOM element, the root of the collection
- `plugins`: a dictionary, collection of plugged objects
"""
kwItems, (kwPage, kwTotalPages, pageSize) = keywords
self.search = XMLSearch
self.arguments = arguments
self.plugins = plugins
self.keywords ={'Items':kwItems, 'Page':kwPage}
self.total_page = int(element.getElementsByTagName(kwTotalPages).item(0).firstChild.data)
self.page = 1
self.cache = unmarshal(XMLSearch, arguments, element, plugins, ListIterator())
def __iter__(self):
while True:
for x in self.cache:
yield x
self.page += 1
if self.page > self.total_page:
raise StopIteration
self.arguments[self.keywords['Page']] = self.page
dom = self.search(** self.arguments)
self.cache = unmarshal(self.search, self.arguments, dom.getElementsByTagName(self.keywords['Items']).item(0), self.plugins, ListIterator())
def SimpleObject(XMLSearch, arguments, kwItem, plugins=None):
"""Return simple object from `unmarshal`"""
dom = XMLSearch(** arguments)
return unmarshal(XMLSearch, arguments, dom.getElementsByTagName(kwItem).item(0), plugins)
def Collection(XMLSearch, arguments, kwItems, plugins=None):
"""Return collection of objects from `unmarshal` using ListIterator interface."""
dom = XMLSearch(** arguments)
return unmarshal(XMLSearch, arguments, dom.getElementsByTagName(kwItems).item(0), plugins, ListIterator())
def Pagination(XMLSearch, arguments, keywords, plugins):
return PaginatedIterator(XMLSearch, arguments, keywords,
XMLSearch(** arguments).getElementsByTagName(keywords[0]).item(0),
plugins)
# Exception classes
class AWSException(Exception) : pass
class NoLicenseKey(AWSException) : pass
class BadLocale(AWSException) : pass
class BadOption(AWSException): pass
# Runtime exception
class ExactParameterRequirement(AWSException): pass
class ExceededMaximumParameterValues(AWSException): pass
class InsufficientParameterValues(AWSException): pass
class InternalError(AWSException): pass
class InvalidEnumeratedParameter(AWSException): pass
class InvalidISO8601Time(AWSException): pass
class InvalidOperationForMarketplace(AWSException): pass
class InvalidOperationParameter(AWSException): pass
class InvalidParameterCombination(AWSException): pass
class InvalidParameterValue(AWSException): pass
class InvalidResponseGroup(AWSException): pass
class InvalidServiceParameter(AWSException): pass
class InvalidSubscriptionId(AWSException): pass
class InvalidXSLTAddress(AWSException): pass
class MaximumParameterRequirement(AWSException): pass
class MinimumParameterRequirement(AWSException): pass
class MissingOperationParameter(AWSException): pass
class MissingParameterCombination(AWSException): pass
class MissingParameters(AWSException): pass
class MissingParameterValueCombination(AWSException): pass
class MissingServiceParameter(AWSException): pass
class ParameterOutOfRange(AWSException): pass
class ParameterRepeatedInRequest(AWSException): pass
class RestrictedParameterValueCombination(AWSException): pass
class XSLTTransformationError(AWSException): pass
class RequestThrottled(AWSException): pass
# make a valid RESTful AWS query, that is signed, from a dictionary
# http://docs.amazonwebservices.com/AWSECommerceService/latest/DG/index.html?RequestAuthenticationArticle.html
# code by Robert Wallis: [email protected], your hourly software contractor
def amazonWebServicesUrl(aws_access_key_id, secret, query_dictionary):
query_dictionary["AWSAccessKeyId"] = aws_access_key_id
query_dictionary["Timestamp"] = strftime("%Y-%m-%dT%H:%M:%SZ")
query_pairs = []
for (k,v) in query_dictionary.items():
if v:
query_pairs.append(k+"="+urllib.quote(v))
# query_pairs = map(
# lambda (k,v):(k+"="+urllib.quote(v)),
# query_dictionary.items()
# )
# The Amazon specs require a sorted list of arguments
query_pairs.sort()
query_string = "&".join(query_pairs)
hm = hmac.new(
secret,
"GET\nwebservices.amazon.com\n/onca/xml\n"+query_string,
hashlib.sha256
)
signature = urllib.quote(base64.b64encode(hm.digest()))
query_string = "https://webservices.amazon.com/onca/xml?%s&Signature=%s" % (query_string, signature)
return query_string
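# Sketch of calling the signer directly (buildRequest() below normally does
# this for you); the access key, secret and ItemId are placeholders:
#
#   url = amazonWebServicesUrl('EXAMPLE-ACCESS-KEY', 'EXAMPLE-SECRET', {
#       'Service': 'AWSECommerceService',
#       'Operation': 'ItemLookup',
#       'ItemId': '0000000000',
#   })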
def buildRequest(argv):
"""Build the REST request URL from argv."""
url = "https://" + Meta.locales[getLocale()] + "/onca/xml?Service=AWSECommerceService&" + 'Version=%s&' % Meta.version
argv['Service'] = "AWSECommerceService"
argv['Version'] = Meta.version
if not argv['AWSAccessKeyId']:
argv['AWSAccessKeyId'] = getLicenseKey()
argv.update(getOptions())
# return url + '&'.join(['%s=%s' % (k,urllib.quote(str(v))) for (k,v) in argv.items() if v])
return amazonWebServicesUrl(getLicenseKey(), getSecretKey(), argv)
def buildException(els):
"""Build the exception from the returned DOM node
Note: only the first exception is raised."""
error = els[0]
class_name = error.childNodes[0].firstChild.data[4:]
msg = error.childNodes[1].firstChild.data
try:
e = globals()[ class_name ](msg)
except KeyError:
class_name = error.childNodes[0].firstChild.data
e = globals()[ class_name ](msg)
return e
def query(url):
"""Send the query url and return the DOM
Exception is raised if there are errors"""
u = urllib.FancyURLopener()
usock = u.open(url)
dom = minidom.parse(usock)
usock.close()
# print dom.toxml()
errors = dom.getElementsByTagName('Error')
if errors:
e = buildException(errors)
raise e
return dom
def unmarshal(XMLSearch, arguments, element, plugins=None, rc=None):
"""Return the `Bag` / `ListIterator` object with attributes
populated using DOM element.
Parameters:
- `XMLSearch`: callback function, used when construct PaginatedIterator
- `arguments`: arguments of `XMLSearch`
- `element`: DOM object, the DOM element interested in
- `plugins`: a dictionary, collection of plugged objects to fine-tune
the object attributes
- `rc`: Bag object, parent object
This core function is inspired by Mark Pilgrim ([email protected])
with some enhancement. Each node.tagName is evalued by plugins' callback
functions:
- if tagname in plugins['isBypassed']
this elment is ignored
- if tagname in plugins['isPivoted']
this children of this elment is moved to grandparents
this object is ignored.
- if tagname in plugins['isCollective']
this elment is mapped to []
- if tagname in plugins['isCollected']
this children of elment is appended to grandparent
this object is ignored.
- if tagname in plugins['isPaged'].keys():
this PaginatedIterator is constructed for the object
CODE DEBT:
- Use optimal search for optimization if necessary
"""
    if rc is None:
rc = Bag()
childElements = [e for e in element.childNodes if isinstance(e, minidom.Element)]
if childElements:
for child in childElements:
key = child.tagName
if hasattr(rc, key):
attr = getattr(rc, key)
                if type(attr) != type([]):
setattr(rc, key, [attr])
setattr(rc, key, getattr(rc, key) + [unmarshal(XMLSearch, arguments, child, plugins)])
elif isinstance(child, minidom.Element):
if child.tagName in plugins['isPivoted']:
unmarshal(XMLSearch, arguments, child, plugins, rc)
continue
elif child.tagName in plugins['isBypassed']:
continue
if child.tagName in plugins['isCollective']:
value = unmarshal(XMLSearch, arguments, child, plugins, ListIterator())
elif child.tagName in plugins['isPaged'].keys():
value = PaginatedIterator(XMLSearch, arguments, (child.tagName, plugins['isPaged'][child.tagName]), child, plugins)
else :
value = unmarshal(XMLSearch, arguments, child, plugins)
if child.tagName in plugins['isCollected']:
rc.append(value)
else :
setattr(rc, key, value)
else:
rc = "".join([e.data for e in element.childNodes if isinstance(e, minidom.Text)])
return rc
# User interfaces
def ItemLookup(ItemId, IdType=None, SearchIndex=None, MerchantId=None, Condition=None, DeliveryMethod=None, ISPUPostalCode=None, OfferPage=None, ReviewPage=None, ReviewSort=None, VariationPage=None, ResponseGroup=None, AWSAccessKeyId=None):
'''ItemLookup in ECS'''
return SimpleObject(XMLItemLookup, vars(), 'Item', __plugins['ItemLookup'])
def XMLItemLookup(ItemId, IdType=None, SearchIndex=None, MerchantId=None, Condition=None, DeliveryMethod=None, ISPUPostalCode=None, OfferPage=None, ReviewPage=None, ReviewSort=None, VariationPage=None, ResponseGroup=None, AWSAccessKeyId=None):
'''DOM representation of ItemLookup in ECS'''
Operation = "ItemLookup"
return query(buildRequest(vars()))
def ItemSearch(Keywords, SearchIndex="Blended", Availability=None, Title=None, Power=None, BrowseNode=None, Artist=None, Author=None, Actor=None, Director=None, AudienceRating=None, Manufacturer=None, MusicLabel=None, Composer=None, Publisher=None, Brand=None, Conductor=None, Orchestra=None, TextStream=None, ItemPage=None, OfferPage=None, ReviewPage=None, Sort=None, City=None, Cuisine=None, Neighborhood=None, MinimumPrice=None, MaximumPrice=None, MerchantId=None, Condition=None, DeliveryMethod=None, ResponseGroup=None, AWSAccessKeyId=None):
'''ItemSearch in ECS'''
return Pagination(XMLItemSearch, vars(),
('Items', __plugins['ItemSearch']['isPaged']['Items']), __plugins['ItemSearch'])
def XMLItemSearch(Keywords, SearchIndex="Blended", Availability=None, Title=None, Power=None, BrowseNode=None, Artist=None, Author=None, Actor=None, Director=None, AudienceRating=None, Manufacturer=None, MusicLabel=None, Composer=None, Publisher=None, Brand=None, Conductor=None, Orchestra=None, TextStream=None, ItemPage=None, OfferPage=None, ReviewPage=None, Sort=None, City=None, Cuisine=None, Neighborhood=None, MinimumPrice=None, MaximumPrice=None, MerchantId=None, Condition=None, DeliveryMethod=None, ResponseGroup=None, AWSAccessKeyId=None):
'''DOM representation of ItemSearch in ECS'''
Operation = "ItemSearch"
return query(buildRequest(vars()))
def SimilarityLookup(ItemId, SimilarityType=None, MerchantId=None, Condition=None, DeliveryMethod=None, ResponseGroup=None, OfferPage=None, AWSAccessKeyId=None):
'''SimilarityLookup in ECS'''
return Collection(XMLSimilarityLookup, vars(), 'Items', __plugins['SimilarityLookup'])
def XMLSimilarityLookup(ItemId, SimilarityType=None, MerchantId=None, Condition=None, DeliveryMethod=None, ResponseGroup=None, OfferPage=None, AWSAccessKeyId=None):
'''DOM representation of SimilarityLookup in ECS'''
Operation = "SimilarityLookup"
return query(buildRequest(vars()))
# List Operations
def ListLookup(ListType, ListId, ProductPage=None, ProductGroup=None, Sort=None, MerchantId=None, Condition=None, DeliveryMethod=None, ResponseGroup=None, AWSAccessKeyId=None):
'''ListLookup in ECS'''
return Pagination(XMLListLookup, vars(),
('List', __plugins['ListLookup']['isPaged']['List']),
__plugins['ListLookup'])
def XMLListLookup(ListType, ListId, ProductPage=None, ProductGroup=None, Sort=None, MerchantId=None, Condition=None, DeliveryMethod=None, ResponseGroup=None, AWSAccessKeyId=None):
'''DOM representation of ListLookup in ECS'''
Operation = "ListLookup"
return query(buildRequest(vars()))
def ListSearch(ListType, Name=None, FirstName=None, LastName=None, Email=None, City=None, State=None, ListPage=None, ResponseGroup=None, AWSAccessKeyId=None):
'''ListSearch in ECS'''
argv = vars()
plugins = {
'isBypassed': (),
'isPivoted': ('ItemAttributes',),
'isCollective': ('Lists',),
'isCollected': ('List',),
'isPaged' : { 'Lists': ('ListPage', 'TotalResults', 10) }
}
return Pagination(XMLListSearch, argv,
('Lists', plugins['isPaged']['Lists']), plugins)
def XMLListSearch(ListType, Name=None, FirstName=None, LastName=None, Email=None, City=None, State=None, ListPage=None, ResponseGroup=None, AWSAccessKeyId=None):
'''DOM representation of ListSearch in ECS'''
Operation = "ListSearch"
return query(buildRequest(vars()))
#Remote Shopping Cart Operations
def CartCreate(Items, Quantities, ResponseGroup=None, AWSAccessKeyId=None):
'''CartCreate in ECS'''
return __cartOperation(XMLCartCreate, vars())
def XMLCartCreate(Items, Quantities, ResponseGroup=None, AWSAccessKeyId=None):
'''DOM representation of CartCreate in ECS'''
Operation = "CartCreate"
argv = vars()
for x in ('Items', 'Quantities'):
del argv[x]
__fromListToItems(argv, Items, 'ASIN', Quantities)
return query(buildRequest(argv))
def CartAdd(Cart, Items, Quantities, ResponseGroup=None, AWSAccessKeyId=None):
'''CartAdd in ECS'''
return __cartOperation(XMLCartAdd, vars())
def XMLCartAdd(Cart, Items, Quantities, ResponseGroup=None, AWSAccessKeyId=None):
'''DOM representation of CartAdd in ECS'''
Operation = "CartAdd"
CartId = Cart.CartId
HMAC = Cart.HMAC
argv = vars()
for x in ('Items', 'Cart', 'Quantities'):
del argv[x]
__fromListToItems(argv, Items, 'ASIN', Quantities)
return query(buildRequest(argv))
def CartGet(Cart, ResponseGroup=None, AWSAccessKeyId=None):
'''CartGet in ECS'''
return __cartOperation(XMLCartGet, vars())
def XMLCartGet(Cart, ResponseGroup=None, AWSAccessKeyId=None):
'''DOM representation of CartGet in ECS'''
Operation = "CartGet"
CartId = Cart.CartId
HMAC = Cart.HMAC
argv = vars()
del argv['Cart']
return query(buildRequest(argv))
def CartModify(Cart, Items, Actions, ResponseGroup=None, AWSAccessKeyId=None):
'''CartModify in ECS'''
return __cartOperation(XMLCartModify, vars())
def XMLCartModify(Cart, Items, Actions, ResponseGroup=None, AWSAccessKeyId=None):
'''DOM representation of CartModify in ECS'''
Operation = "CartModify"
CartId = Cart.CartId
HMAC = Cart.HMAC
argv = vars()
for x in ('Cart', 'Items', 'Actions'):
del argv[x]
__fromListToItems(argv, Items, 'CartItemId', Actions)
return query(buildRequest(argv))
def CartClear(Cart, ResponseGroup=None, AWSAccessKeyId=None):
'''CartClear in ECS'''
return __cartOperation(XMLCartClear, vars())
def XMLCartClear(Cart, ResponseGroup=None, AWSAccessKeyId=None):
'''DOM representation of CartClear in ECS'''
Operation = "CartClear"
CartId = Cart.CartId
HMAC = Cart.HMAC
argv = vars()
del argv['Cart']
return query(buildRequest(argv))
def __fromListToItems(argv, items, id, actions):
'''Convert list to AWS REST arguments'''
for i in range(len(items)):
argv["Item.%d.%s" % (i+1, id)] = getattr(items[i], id);
action = actions[i]
if isinstance(action, int):
argv["Item.%d.Quantity" % (i+1)] = action
else:
argv["Item.%d.Action" % (i+1)] = action
def __cartOperation(XMLSearch, arguments):
'''Generic cart operation'''
plugins = {
'isBypassed': ('Request',),
'isPivoted': (),
'isCollective': ('CartItems', 'SavedForLaterItems'),
'isCollected': ('CartItem', 'SavedForLaterItem'),
'isPaged': {}
}
return SimpleObject(XMLSearch, arguments, 'Cart', plugins)
# Seller Operation
def SellerLookup(Sellers, FeedbackPage=None, ResponseGroup=None, AWSAccessKeyId=None):
'''SellerLookup in AWS'''
argv = vars()
plugins = {
'isBypassed': ('Request',),
'isPivoted': (),
'isCollective': ('Sellers',),
'isCollected': ('Seller',),
'isPaged': {}
}
return Collection(XMLSellerLookup, argv, 'Sellers', plugins)
def XMLSellerLookup(Sellers, FeedbackPage=None, ResponseGroup=None, AWSAccessKeyId=None):
'''DOM representation of SellerLookup in AWS'''
Operation = "SellerLookup"
SellerId = ",".join(Sellers)
argv = vars()
del argv['Sellers']
return query(buildRequest(argv))
def SellerListingLookup(SellerId, Id, IdType="Listing", ResponseGroup=None, AWSAccessKeyId=None):
'''SellerListingLookup in AWS
    Notice: although the response includes TotalPages and TotalResults,
    there is no ListingPage in the request, so we have to use Collection
    instead of PaginatedIterator. Hopefully Amazon will fix this inconsistency.'''
argv = vars()
plugins = {
'isBypassed': ('Request',),
'isPivoted': (),
'isCollective': ('SellerListings',),
'isCollected': ('SellerListing',),
'isPaged': {}
}
return Collection(XMLSellerListingLookup, argv, "SellerListings", plugins)
def XMLSellerListingLookup(SellerId, Id, IdType="Listing", ResponseGroup=None, AWSAccessKeyId=None):
'''DOM representation of SellerListingLookup in AWS'''
Operation = "SellerListingLookup"
return query(buildRequest(vars()))
def SellerListingSearch(SellerId, Title=None, Sort=None, ListingPage=None, OfferStatus=None, ResponseGroup=None, AWSAccessKeyId=None):
'''SellerListingSearch in AWS'''
argv = vars()
plugins = {
'isBypassed': ('Request',),
'isPivoted': (),
'isCollective': ('SellerListings',),
'isCollected': ('SellerListing',),
'isPaged' : { 'SellerListings': ('ListingPage', 'TotalResults', 10) }
}
return Pagination(XMLSellerListingSearch, argv,
('SellerListings', plugins['isPaged']['SellerListings']), plugins)
def XMLSellerListingSearch(SellerId, Title=None, Sort=None, ListingPage=None, OfferStatus=None, ResponseGroup=None, AWSAccessKeyId=None):
'''DOM representation of SellerListingSearch in AWS'''
Operation = "SellerListingSearch"
return query(buildRequest(vars()))
def CustomerContentSearch(Name=None, Email=None, CustomerPage=1, ResponseGroup=None, AWSAccessKeyId=None):
'''CustomerContentSearch in AWS'''
return Collection(XMLCustomerContentSearch, vars(), 'Customers', __plugins['CustomerContentSearch'])
def XMLCustomerContentSearch(Name=None, Email=None, CustomerPage=1, ResponseGroup=None, AWSAccessKeyId=None):
'''DOM representation of CustomerContentSearch in AWS'''
Operation = "CustomerContentSearch"
argv = vars()
for x in ('Name', 'Email'):
if not argv[x]:
del argv[x]
return query(buildRequest(argv))
def CustomerContentLookup(CustomerId, ReviewPage=1, ResponseGroup=None, AWSAccessKeyId=None):
'''CustomerContentLookup in AWS'''
argv = vars()
plugins = {
'isBypassed': ('Request',),
'isPivoted': (),
'isCollective': ('Customers',),
'isCollected': ('Customer',),
}
return Collection(XMLCustomerContentLookup, argv, 'Customers', __plugins['CustomerContentLookup'])
def XMLCustomerContentLookup(CustomerId, ReviewPage=1, ResponseGroup=None, AWSAccessKeyId=None):
'''DOM representation of CustomerContentLookup in AWS'''
Operation = "CustomerContentLookup"
return query(buildRequest(vars()))
# BrowseNode
def BrowseNodeLookup(BrowseNodeId, ResponseGroup=None, AWSAccessKeyId=None):
"""
BrowseNodeLookup in AWS
"""
return Collection(XMLBrowseNodeLookup, vars(), 'BrowseNodes', __plugins['BrowseNodeLookup'])
def XMLBrowseNodeLookup(BrowseNodeId, ResponseGroup=None, AWSAccessKeyId=None):
'''DOM representation of BrowseNodeLookup in AWS'''
Operation = "BrowseNodeLookup"
return query(buildRequest(vars()))
# Help
def Help(HelpType, About, ResponseGroup=None, AWSAccessKeyId=None):
'''Help in AWS'''
return SimpleObject(XMLHelp, vars(), 'Information', __plugins['Help'])
def XMLHelp(HelpType, About, ResponseGroup=None, AWSAccessKeyId=None):
'''DOM representation of Help in AWS'''
Operation = "Help"
return query(buildRequest(vars()))
# Transaction
def TransactionLookup(TransactionId, ResponseGroup=None, AWSAccessKeyId=None):
'''TransactionLookup in AWS'''
return Collection(XMLTransactionLookup, vars(), 'Transactions', __plugins['TransactionLookup'])
def XMLTransactionLookup(TransactionId, ResponseGroup=None, AWSAccessKeyId=None):
'''DOM representation of TransactionLookup in AWS'''
Operation = "TransactionLookup"
return query(buildRequest(vars()))
# helper functions
def setLocale(locale):
"""Set the locale
if unsupported locale is set, BadLocale is raised."""
if Meta.locales.has_key(locale):
Meta.locale = locale
else :
raise BadLocale, ("Unsupported locale. Locale must be one of: %s" %
', '.join([x for x in Meta.locales.keys() if x]))
def getLocale():
"""Get the locale"""
return Meta.locale
def setSecretKey(secret_key):
Meta.secret_key = secret_key
def getSecretKey():
return Meta.secret_key
def setLicenseKey(license_key=None):
"""Set AWS license key.
    If license_key is not specified, the key is read from the
    AWS_LICENSE_KEY environment variable; if neither is available,
    a later call to getLicenseKey() raises NoLicenseKey."""
if license_key:
Meta.license_key = license_key
else :
Meta.license_key = os.environ.get('AWS_LICENSE_KEY', None)
def getLicenseKey():
"""Get AWS license key.
If no license key is specified, NoLicenseKey is raised."""
if Meta.license_key:
return Meta.license_key
else :
raise NoLicenseKey, ("Please get the license key from http://www.amazon.com/webservices")
def getVersion():
"""Get the version of ECS specification"""
return Meta.version
def setOptions(options):
"""
    Set the general optional parameters; the supported options are:
    - AssociateTag
    - MerchantID
    - Validate
    Unsupported option names raise BadOption.
"""
if set(options.keys()).issubset( set(['AssociateTag', 'MerchantID', 'Validate']) ):
Meta.options.update(options)
else:
raise BadOption, ('Unsupported option')
def getOptions():
"""Get options"""
return Meta.options
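# Example sketch: attach an Associate tag (placeholder value) to every request
# built by buildRequest(); unsupported option names raise BadOption.
#
#   setOptions({'AssociateTag': 'mytag-20'})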
|
|
import pandas as pd
# If you'd like to try this lab with PCA instead of Isomap,
# as the dimensionality reduction technique:
Test_PCA = True
Test_K = True
Test_weights = True
def plotDecisionBoundary(model, X, y):
print("Plotting...")
import matplotlib.pyplot as plt
import matplotlib
matplotlib.style.use('ggplot') # Look Pretty
fig = plt.figure()
ax = fig.add_subplot(111)
padding = 0.1
resolution = 0.1
#(2 for benign, 4 for malignant)
colors = {2:'royalblue',4:'lightsalmon'}
    # Calculate the boundaries
x_min, x_max = X[:, 0].min(), X[:, 0].max()
y_min, y_max = X[:, 1].min(), X[:, 1].max()
x_range = x_max - x_min
y_range = y_max - y_min
x_min -= x_range * padding
y_min -= y_range * padding
x_max += x_range * padding
y_max += y_range * padding
# Create a 2D Grid Matrix. The values stored in the matrix
    # are the predictions of the class at said location
import numpy as np
xx, yy = np.meshgrid(np.arange(x_min, x_max, resolution),
np.arange(y_min, y_max, resolution))
# What class does the classifier say?
Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# Plot the contour map
plt.contourf(xx, yy, Z, cmap=plt.cm.seismic)
plt.axis('tight')
# Plot your testing points as well...
for label in np.unique(y):
indices = np.where(y == label)
plt.scatter(X[indices, 0], X[indices, 1], c=colors[label], alpha=0.8)
p = model.get_params()
plt.title('K = ' + str(p['n_neighbors']))
plt.show()
#
# TODO: Load in the dataset, identify nans, and set proper headers.
# Be sure to verify the rows line up by looking at the file in a text editor.
#
file_path = "/Users/szabolcs/dev/git/DAT210x/Module5/Datasets/"
file_name = "breast-cancer-wisconsin.data"
columns = ['sample', 'thickness', 'size', 'shape', 'adhesion', 'epithelial', 'nuclei', 'chromatin', 'nucleoli', 'mitoses', 'status']
# The .data file has no header row, so supply the column names explicitly
# instead of letting the first sample be consumed as a header.
X = pd.read_csv(file_path + file_name, header=None, names=columns)
print("X", X.shape)
X = X[X.nuclei != '?']
print("X", X.shape)
X.nuclei = X.nuclei.astype(int)
print("IsNull", X.isnull().sum())
print(X.head())
print("X", X.shape)
print(X.dtypes)
#
# TODO: Copy out the status column into a slice, then drop it from the main
# dataframe. Always verify you properly executed the drop by double checking
# (printing out the resulting operating)! Many people forget to set the right
# axis here.
#
# If you goofed up on loading the dataset and notice you have a `sample` column,
# this would be a good place to drop that too if you haven't already.
#
y = X["status"].copy()
X = X.drop(["status"], axis=1)
X = X.drop(["sample"], axis=1)
print("X", X.shape)
print("y", y.shape)
#
# TODO: With the labels safely extracted from the dataset, replace any nan values
# with the mean feature / column value
#
# .. your code here ..
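# A minimal sketch for the TODO above: fill any remaining NaNs with the
# per-column mean. The '?' rows in `nuclei` were already dropped, so on this
# dataset the call may be a no-op.
X = X.fillna(X.mean())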
#
# TODO: Do train_test_split. Use the same variable names as on the EdX platform in
# the reading material, but set the random_state=7 for reproduceability, and keep
# the test_size at 0.5 (50%).
#
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=7, test_size=0.5)
#
# TODO: Experiment with the basic SKLearn preprocessing scalers. We know that
# the features consist of different units mixed in together, so it might be
# reasonable to assume feature scaling is necessary. Print out a description
# of the dataset, post transformation. Recall: when you do pre-processing,
# which portion of the dataset is your model trained upon? Also which portion(s)
# of your dataset actually get transformed?
#
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import MaxAbsScaler
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import Normalizer
from sklearn.preprocessing import RobustScaler
scaler = MinMaxScaler()
#scaler = MaxAbsScaler()
#scaler = StandardScaler()
#scaler = Normalizer('l2')
#scaler = RobustScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
#
# PCA and Isomap are your new best friends
model = None
if Test_PCA:
print("Computing 2D Principle Components")
#
# TODO: Implement PCA here. Save your model into the variable 'model'.
# You should reduce down to two dimensions.
#
from sklearn.decomposition import PCA
pca = PCA(n_components=2)
pca.fit(X_train)
X_train = pca.transform(X_train)
X_test = pca.transform(X_test)
else:
print("Computing 2D Isomap Manifold")
#
# TODO: Implement Isomap here. Save your model into the variable 'model'
# Experiment with K values from 5-10.
# You should reduce down to two dimensions.
#
from sklearn.manifold import Isomap
isomap = Isomap(n_neighbors=5, n_components=2)
isomap.fit(X_train)
X_train = isomap.transform(X_train)
X_test = isomap.transform(X_test)
#
# TODO: Train your model against data_train, then transform both
# data_train and data_test using your model. You can save the results right
# back into the variables themselves.
#
from sklearn.neighbors import KNeighborsClassifier
uni_scores = []
dist_scores = []
if Test_weights:
weights = ['uniform', 'distance']
for weight in weights:
print(weight)
scores = []
if Test_K:
for k in range(1, 16):
                knn = KNeighborsClassifier(n_neighbors=k, weights=weight)
knn.fit(X_train, y_train)
#train_score = knn.score(X_train, y_train)
test_score = knn.score(X_test, y_test)
print(test_score)
scores.append(test_score)
else:
            knn = KNeighborsClassifier(n_neighbors=4, weights=weight)
knn.fit(X_train, y_train)
#train_score = knn.score(X_train, y_train)
test_score = knn.score(X_test, y_test)
scores.append(test_score)
if weight == 'uniform':
uni_scores = scores
else:
dist_scores = scores
#
# TODO: Implement and train KNeighborsClassifier on your projected 2D
# training data here. You can use any K value from 1 - 15, so play around
# with it and see what results you can come up. Your goal is to find a
# good balance where you aren't too specific (low-K), nor are you too
# general (high-K). You should also experiment with how changing the weights
# parameter affects the results.
#
# .. your code here ..
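# A minimal sketch for the TODO above (and for the "Calculate + Print the
# accuracy" TODO further below): train one final classifier on the projected
# 2D training data and print its testing-set accuracy. K=7 and 'distance'
# weights are illustrative choices, not the lab's required answer; a separate
# variable is used so the experiment loop above stays untouched.
final_knn = KNeighborsClassifier(n_neighbors=7, weights='distance')
final_knn.fit(X_train, y_train)
print("Final KNN testing-set accuracy:", final_knn.score(X_test, y_test))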
#
# INFO: Be sure to always keep the domain of the problem in mind! It's
# WAY more important to errantly classify a benign tumor as malignant,
# and have it removed, than to incorrectly leave a malignant tumor, believing
# it to be benign, and then having the patient progress in cancer. Since the UDF
# weights don't give you any class information, the only way to introduce this
# data into SKLearn's KNN Classifier is by "baking" it into your data. For
# example, randomly reducing the ratio of benign samples compared to malignant
# samples from the training set.
#
# TODO: Calculate + Print the accuracy of the testing set
#
if Test_K or Test_weights:
scores_vs_k = pd.DataFrame({'uni_score': uni_scores, 'dist_score': dist_scores, 'k': range(1, 16)})
scores_vs_k.plot(x='k', y=['uni_score', 'dist_score'])
print(scores_vs_k)
# Adjusted R^2 and R^2
import statsmodels.api as sm
X1 = sm.add_constant(X)
result = sm.OLS(y, X1).fit()
print("R^2:", result.rsquared)
print("Adjusted R^2:", result.rsquared_adj)
plotDecisionBoundary(knn, X_test, y_test)
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import re
import unittest
from contextlib import contextmanager
from tempfile import NamedTemporaryFile
from unittest import mock
import pytest
from parameterized import parameterized
from airflow.configuration import ensure_secrets_loaded
from airflow.exceptions import AirflowException, AirflowFileParseException, ConnectionNotUnique
from airflow.models import Variable
from airflow.secrets import local_filesystem
from airflow.secrets.local_filesystem import LocalFilesystemBackend
from tests.test_utils.config import conf_vars
@contextmanager
def mock_local_file(content):
with mock.patch(
"airflow.secrets.local_filesystem.open", mock.mock_open(read_data=content)
) as file_mock, mock.patch("airflow.secrets.local_filesystem.os.path.exists", return_value=True):
yield file_mock
class FileParsers(unittest.TestCase):
@parameterized.expand(
(
("AA", 'Invalid line format. The line should contain at least one equal sign ("=")'),
("=", "Invalid line format. Key is empty."),
)
)
def test_env_file_invalid_format(self, content, expected_message):
with mock_local_file(content):
with pytest.raises(AirflowFileParseException, match=re.escape(expected_message)):
local_filesystem.load_variables("a.env")
@parameterized.expand(
(
("[]", "The file should contain the object."),
("{AAAAA}", "Expecting property name enclosed in double quotes"),
("", "The file is empty."),
)
)
def test_json_file_invalid_format(self, content, expected_message):
with mock_local_file(content):
with pytest.raises(AirflowFileParseException, match=re.escape(expected_message)):
local_filesystem.load_variables("a.json")
class TestLoadVariables(unittest.TestCase):
@parameterized.expand(
(
("", {}),
("KEY=AAA", {"KEY": "AAA"}),
("KEY_A=AAA\nKEY_B=BBB", {"KEY_A": "AAA", "KEY_B": "BBB"}),
("KEY_A=AAA\n # AAAA\nKEY_B=BBB", {"KEY_A": "AAA", "KEY_B": "BBB"}),
("\n\n\n\nKEY_A=AAA\n\n\n\n\nKEY_B=BBB\n\n\n", {"KEY_A": "AAA", "KEY_B": "BBB"}),
)
)
def test_env_file_should_load_variables(self, file_content, expected_variables):
with mock_local_file(file_content):
variables = local_filesystem.load_variables("a.env")
assert expected_variables == variables
@parameterized.expand((("AA=A\nAA=B", "The \"a.env\" file contains multiple values for keys: ['AA']"),))
def test_env_file_invalid_logic(self, content, expected_message):
with mock_local_file(content):
with pytest.raises(AirflowException, match=re.escape(expected_message)):
local_filesystem.load_variables("a.env")
@parameterized.expand(
(
({}, {}),
({"KEY": "AAA"}, {"KEY": "AAA"}),
({"KEY_A": "AAA", "KEY_B": "BBB"}, {"KEY_A": "AAA", "KEY_B": "BBB"}),
({"KEY_A": "AAA", "KEY_B": "BBB"}, {"KEY_A": "AAA", "KEY_B": "BBB"}),
)
)
def test_json_file_should_load_variables(self, file_content, expected_variables):
with mock_local_file(json.dumps(file_content)):
variables = local_filesystem.load_variables("a.json")
assert expected_variables == variables
@mock.patch("airflow.secrets.local_filesystem.os.path.exists", return_value=False)
def test_missing_file(self, mock_exists):
with pytest.raises(
AirflowException,
match=re.escape("File a.json was not found. Check the configuration of your Secrets backend."),
):
local_filesystem.load_variables("a.json")
@parameterized.expand(
(
("KEY: AAA", {"KEY": "AAA"}),
(
"""
KEY_A: AAA
KEY_B: BBB
""",
{"KEY_A": "AAA", "KEY_B": "BBB"},
),
)
)
def test_yaml_file_should_load_variables(self, file_content, expected_variables):
with mock_local_file(file_content):
variables = local_filesystem.load_variables('a.yaml')
assert expected_variables == variables
class TestLoadConnection(unittest.TestCase):
@parameterized.expand(
(
("CONN_ID=mysql://host_1/", {"CONN_ID": "mysql://host_1"}),
(
"CONN_ID1=mysql://host_1/\nCONN_ID2=mysql://host_2/",
{"CONN_ID1": "mysql://host_1", "CONN_ID2": "mysql://host_2"},
),
(
"CONN_ID1=mysql://host_1/\n # AAAA\nCONN_ID2=mysql://host_2/",
{"CONN_ID1": "mysql://host_1", "CONN_ID2": "mysql://host_2"},
),
(
"\n\n\n\nCONN_ID1=mysql://host_1/\n\n\n\n\nCONN_ID2=mysql://host_2/\n\n\n",
{"CONN_ID1": "mysql://host_1", "CONN_ID2": "mysql://host_2"},
),
)
)
def test_env_file_should_load_connection(self, file_content, expected_connection_uris):
with mock_local_file(file_content):
connection_by_conn_id = local_filesystem.load_connections_dict("a.env")
connection_uris_by_conn_id = {
conn_id: connection.get_uri() for conn_id, connection in connection_by_conn_id.items()
}
assert expected_connection_uris == connection_uris_by_conn_id
@parameterized.expand(
(
("AA", 'Invalid line format. The line should contain at least one equal sign ("=")'),
("=", "Invalid line format. Key is empty."),
)
)
def test_env_file_invalid_format(self, content, expected_message):
with mock_local_file(content):
with pytest.raises(AirflowFileParseException, match=re.escape(expected_message)):
local_filesystem.load_connections_dict("a.env")
@parameterized.expand(
(
({"CONN_ID": "mysql://host_1"}, {"CONN_ID": "mysql://host_1"}),
({"CONN_ID": ["mysql://host_1"]}, {"CONN_ID": "mysql://host_1"}),
({"CONN_ID": {"uri": "mysql://host_1"}}, {"CONN_ID": "mysql://host_1"}),
({"CONN_ID": [{"uri": "mysql://host_1"}]}, {"CONN_ID": "mysql://host_1"}),
)
)
def test_json_file_should_load_connection(self, file_content, expected_connection_uris):
with mock_local_file(json.dumps(file_content)):
connections_by_conn_id = local_filesystem.load_connections_dict("a.json")
connection_uris_by_conn_id = {
conn_id: connection.get_uri() for conn_id, connection in connections_by_conn_id.items()
}
assert expected_connection_uris == connection_uris_by_conn_id
@parameterized.expand(
(
({"CONN_ID": None}, "Unexpected value type: <class 'NoneType'>."),
({"CONN_ID": 1}, "Unexpected value type: <class 'int'>."),
({"CONN_ID": [2]}, "Unexpected value type: <class 'int'>."),
({"CONN_ID": [None]}, "Unexpected value type: <class 'NoneType'>."),
({"CONN_ID": {"AAA": "mysql://host_1"}}, "The object have illegal keys: AAA."),
({"CONN_ID": {"conn_id": "BBBB"}}, "Mismatch conn_id."),
({"CONN_ID": ["mysql://", "mysql://"]}, "Found multiple values for CONN_ID in a.json."),
)
)
def test_env_file_invalid_input(self, file_content, expected_connection_uris):
with mock_local_file(json.dumps(file_content)):
with pytest.raises(AirflowException, match=re.escape(expected_connection_uris)):
local_filesystem.load_connections_dict("a.json")
@mock.patch("airflow.secrets.local_filesystem.os.path.exists", return_value=False)
def test_missing_file(self, mock_exists):
with pytest.raises(
AirflowException,
match=re.escape("File a.json was not found. Check the configuration of your Secrets backend."),
):
local_filesystem.load_connections_dict("a.json")
@parameterized.expand(
(
(
"""CONN_A: 'mysql://host_a'""",
{"CONN_A": {'conn_type': 'mysql', 'host': 'host_a'}},
),
(
"""
conn_a: mysql://hosta
conn_b:
conn_type: scheme
host: host
schema: lschema
login: Login
password: None
port: 1234
extra_dejson:
arbitrary_dict:
a: b
extra__google_cloud_platform__keyfile_dict: '{"a": "b"}'
extra__google_cloud_platform__keyfile_path: asaa""",
{
"conn_a": {'conn_type': 'mysql', 'host': 'hosta'},
"conn_b": {
'conn_type': 'scheme',
'host': 'host',
'schema': 'lschema',
'login': 'Login',
'password': 'None',
'port': 1234,
'extra_dejson': {
'arbitrary_dict': {"a": "b"},
'extra__google_cloud_platform__keyfile_dict': '{"a": "b"}',
'extra__google_cloud_platform__keyfile_path': 'asaa',
},
},
},
),
)
)
def test_yaml_file_should_load_connection(self, file_content, expected_attrs_dict):
with mock_local_file(file_content):
connections_by_conn_id = local_filesystem.load_connections_dict("a.yaml")
for conn_id, connection in connections_by_conn_id.items():
expected_attrs = expected_attrs_dict[conn_id]
actual_attrs = {k: getattr(connection, k) for k in expected_attrs.keys()}
assert actual_attrs == expected_attrs
@parameterized.expand(
(
(
"""
conn_c:
conn_type: scheme
host: host
schema: lschema
login: Login
password: None
port: 1234
extra_dejson:
aws_conn_id: bbb
region_name: ccc
""",
{"conn_c": {"aws_conn_id": "bbb", "region_name": "ccc"}},
),
(
"""
conn_d:
conn_type: scheme
host: host
schema: lschema
login: Login
password: None
port: 1234
extra_dejson:
extra__google_cloud_platform__keyfile_dict:
a: b
extra__google_cloud_platform__key_path: xxx
""",
{
"conn_d": {
"extra__google_cloud_platform__keyfile_dict": {"a": "b"},
"extra__google_cloud_platform__key_path": "xxx",
}
},
),
(
"""
conn_d:
conn_type: scheme
host: host
schema: lschema
login: Login
password: None
port: 1234
extra: '{\"extra__google_cloud_platform__keyfile_dict\": {\"a\": \"b\"}}'
""",
{"conn_d": {"extra__google_cloud_platform__keyfile_dict": {"a": "b"}}},
),
)
)
def test_yaml_file_should_load_connection_extras(self, file_content, expected_extras):
with mock_local_file(file_content):
connections_by_conn_id = local_filesystem.load_connections_dict("a.yaml")
connection_uris_by_conn_id = {
conn_id: connection.extra_dejson for conn_id, connection in connections_by_conn_id.items()
}
assert expected_extras == connection_uris_by_conn_id
@parameterized.expand(
(
(
"""conn_c:
conn_type: scheme
host: host
schema: lschema
login: Login
password: None
port: 1234
extra:
abc: xyz
extra_dejson:
aws_conn_id: bbb
region_name: ccc
""",
"The extra and extra_dejson parameters are mutually exclusive.",
),
)
)
def test_yaml_invalid_extra(self, file_content, expected_message):
with mock_local_file(file_content):
with pytest.raises(AirflowException, match=re.escape(expected_message)):
local_filesystem.load_connections_dict("a.yaml")
@parameterized.expand(
("CONN_ID=mysql://host_1/\nCONN_ID=mysql://host_2/",),
)
def test_ensure_unique_connection_env(self, file_content):
with mock_local_file(file_content):
with pytest.raises(ConnectionNotUnique):
local_filesystem.load_connections_dict("a.env")
@parameterized.expand(
(
({"CONN_ID": ["mysql://host_1", "mysql://host_2"]},),
({"CONN_ID": [{"uri": "mysql://host_1"}, {"uri": "mysql://host_2"}]},),
)
)
def test_ensure_unique_connection_json(self, file_content):
with mock_local_file(json.dumps(file_content)):
with pytest.raises(ConnectionNotUnique):
local_filesystem.load_connections_dict("a.json")
@parameterized.expand(
(
(
"""
conn_a:
- mysql://hosta
- mysql://hostb"""
),
),
)
def test_ensure_unique_connection_yaml(self, file_content):
with mock_local_file(file_content):
with pytest.raises(ConnectionNotUnique):
local_filesystem.load_connections_dict("a.yaml")
class TestLocalFileBackend(unittest.TestCase):
def test_should_read_variable(self):
with NamedTemporaryFile(suffix="var.env") as tmp_file:
tmp_file.write(b"KEY_A=VAL_A")
tmp_file.flush()
backend = LocalFilesystemBackend(variables_file_path=tmp_file.name)
assert "VAL_A" == backend.get_variable("KEY_A")
assert backend.get_variable("KEY_B") is None
@conf_vars(
{
(
"secrets",
"backend",
): "airflow.secrets.local_filesystem.LocalFilesystemBackend",
("secrets", "backend_kwargs"): '{"variables_file_path": "var.env"}',
}
)
def test_load_secret_backend_LocalFilesystemBackend(self):
with mock_local_file("KEY_A=VAL_A"):
backends = ensure_secrets_loaded()
backend_classes = [backend.__class__.__name__ for backend in backends]
assert 'LocalFilesystemBackend' in backend_classes
assert Variable.get("KEY_A") == "VAL_A"
def test_should_read_connection(self):
with NamedTemporaryFile(suffix=".env") as tmp_file:
tmp_file.write(b"CONN_A=mysql://host_a")
tmp_file.flush()
backend = LocalFilesystemBackend(connections_file_path=tmp_file.name)
assert "mysql://host_a" == backend.get_connection("CONN_A").get_uri()
assert backend.get_variable("CONN_B") is None
def test_files_are_optional(self):
backend = LocalFilesystemBackend()
assert None is backend.get_connection("CONN_A")
assert backend.get_variable("VAR_A") is None
|
|
# Copyright 2014 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The plugs module provides boilerplate for accessing hardware.
Most tests require interaction with external hardware. This module provides
framework support for such interfaces, allowing for automatic setup and
teardown of the objects.
Test phases can be decorated as using Plug objects, which then get passed
into the test via parameters. Plugs are all instantiated at the
beginning of a test, and all plugs' tearDown() methods are called at the
end of a test. It's up to the Plug implementation to do any sort of
is-ready check.
A plug may be made "frontend-aware", allowing it, in conjunction with the
Station API, to update any frontends each time the plug's state changes. See
FrontendAwareBasePlug for more info.
Example implementation of a plug:
from openhtf import plugs
class ExamplePlug(plugs.BasePlug):
'''A Plug that does nothing.'''
def __init__(self):
print 'Instantiating %s!' % type(self).__name__
def DoSomething(self):
print '%s doing something!' % type(self).__name__
def tearDown(self):
# This method is optional. If implemented, it will be called at the end
# of the test.
print 'Tearing down %s!' % type(self).__name__
Example usage of the above plug:
from openhtf import plugs
from my_custom_plugs_package import example
@plugs.plug(example=example.ExamplePlug)
def TestPhase(test, example):
print 'Test phase started!'
example.DoSomething()
print 'Test phase done!'
Putting all this together, when the test is run (with just that phase), you
would see the output (with other framework logs before and after):
Instantiating ExamplePlug!
Test phase started!
ExamplePlug doing something!
Test phase done!
Tearing down ExamplePlug!
Plugs will often need to use configuration values. The recommended way
of doing this is with the conf.inject_positional_args decorator:
from openhtf import plugs
from openhtf.util import conf
conf.declare('my_config_key', default_value='my_config_value')
class ExamplePlug(plugs.BasePlug):
'''A plug that requires some configuration.'''
@conf.inject_positional_args
  def __init__(self, my_config_key):
self._my_config = my_config_key
Note that Plug constructors shouldn't take any other arguments; the
framework won't pass any, so you'll get a TypeError. Any values that are only
known at run time must be either passed into other methods or set via explicit
setter methods. See openhtf/conf.py for details, but with the above
example, you would also need a configuration .yaml file with something like:
my_config_key: my_config_value
This will result in the ExamplePlug being constructed with
self._my_config having a value of 'my_config_value'.
"""
import collections
import functools
import inspect
import json
import logging
import threading
import time
import types
import mutablerecords
from openhtf import util
import openhtf.core.phase_descriptor
from openhtf.util import classproperty
from openhtf.util import conf
from openhtf.util import logs
from openhtf.util import threads
import six
_LOG = logging.getLogger(__name__)
conf.declare('plug_teardown_timeout_s', default_value=0, description=
'Timeout (in seconds) for each plug tearDown function if > 0; '
'otherwise, will wait an unlimited time.')
PlugDescriptor = collections.namedtuple('PlugDescriptor', ['mro']) # pylint: disable=invalid-name
# Placeholder for a specific plug to be provided before test execution.
#
# Use the with_plugs() method to provide the plug before test execution. The
# with_plugs() method checks to make sure the substitute plug is a subclass of
# the PlugPlaceholder's base_class.
PlugPlaceholder = collections.namedtuple('PlugPlaceholder', ['base_class']) # pylint: disable=invalid-name
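# A hedged sketch of how the placeholder is typically used (the class and
# phase names below are illustrative, not part of this module):
#
#   class BaseDut(BasePlug):
#     pass
#
#   @plug(dut=BaseDut.placeholder)
#   def my_phase(test, dut):
#     dut.do_something()
#
#   # Later, before execution, substitute a concrete subclass of BaseDut:
#   runnable_phase = my_phase.with_plugs(dut=ConcreteDut)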
class PhasePlug(mutablerecords.Record(
'PhasePlug', ['name', 'cls'], {'update_kwargs': True})):
"""Information about the use of a plug in a phase."""
class PlugOverrideError(Exception):
"""Raised when a plug would be overridden by a kwarg."""
class DuplicatePlugError(Exception):
"""Raised when the same plug is required multiple times on a phase."""
class InvalidPlugError(Exception):
"""Raised when a plug declaration or requested name is invalid."""
class BasePlug(object):
"""All plug types must subclass this type.
Attributes:
logger: This attribute will be set by the PlugManager (and as such it
doesn't appear here), and is the same logger as passed into test
phases via TestApi.
"""
# Override this to True in subclasses to support remote Plug access.
enable_remote = False
# Allow explicitly disabling remote access to specific attributes.
disable_remote_attrs = set()
# Override this to True in subclasses to support using with_plugs with this
# plug without needing to use placeholder. This will only affect the classes
# that explicitly define this; subclasses do not share the declaration.
auto_placeholder = False
# Default logger to be used only in __init__ of subclasses.
# This is overwritten both on the class and the instance so don't store
# a copy of it anywhere.
logger = _LOG
@classproperty
def placeholder(cls):
"""Returns a PlugPlaceholder for the calling class."""
return PlugPlaceholder(cls)
def _asdict(self):
"""Return a dictionary representation of this plug's state.
This is called repeatedly during phase execution on any plugs that are in
use by that phase. The result is reported via the Station API by the
PlugManager (if the Station API is enabled, which is the default).
    Note that this method is called in a tight loop; it is recommended that you
    decorate it with functions.call_at_most_every() to limit the frequency at
    which updates happen (pass a number of seconds to it to limit samples to
    once per that number of seconds).
"""
return {}
def tearDown(self):
"""This method is called automatically at the end of each Test execution."""
pass
@classmethod
def uses_base_tear_down(cls):
"""Checks whether the tearDown method is the BasePlug implementation."""
this_tear_down = getattr(cls, 'tearDown')
base_tear_down = getattr(BasePlug, 'tearDown')
return this_tear_down.__code__ is base_tear_down.__code__
class FrontendAwareBasePlug(BasePlug, util.SubscribableStateMixin):
"""A plug that notifies of any state updates.
Plugs inheriting from this class may be used in conjunction with the Station
API to update any frontends each time the plug's state changes. The plug
should call notify_update() when and only when the state returned by _asdict()
changes.
Since the Station API runs in a separate thread, the _asdict() method of
frontend-aware plugs should be written with thread safety in mind.
"""
enable_remote = True
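# A hedged sketch of a frontend-aware plug (illustrative only; notify_update()
# comes from util.SubscribableStateMixin):
#
#   class CounterPlug(FrontendAwareBasePlug):
#     def __init__(self):
#       super(CounterPlug, self).__init__()
#       self._count = 0
#     def increment(self):
#       self._count += 1
#       self.notify_update()  # the state returned by _asdict() just changed
#     def _asdict(self):
#       return {'count': self._count}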
def plug(update_kwargs=True, **plugs_map):
"""Creates a decorator that passes in plugs when invoked.
This function returns a decorator for a function that will replace positional
arguments to that function with the plugs specified. See the module
docstring for details and examples.
Note this decorator does not work with class or bound methods, but does work
with @staticmethod.
Args:
update_kwargs: If true, makes the decorated phase take this plug as a kwarg.
**plugs_map: Dict mapping name to Plug type.
Returns:
A PhaseDescriptor that will pass plug instances in as kwargs when invoked.
Raises:
InvalidPlugError: If a type is provided that is not a subclass of BasePlug.
"""
for a_plug in plugs_map.values():
if not (isinstance(a_plug, PlugPlaceholder)
or issubclass(a_plug, BasePlug)):
raise InvalidPlugError(
'Plug %s is not a subclass of plugs.BasePlug nor a placeholder '
'for one' % a_plug)
def result(func):
"""Wrap the given function and return the wrapper.
Args:
func: The function to wrap.
Returns:
      A PhaseDescriptor that, when called, will invoke the wrapped function,
passing plugs as keyword args.
Raises:
DuplicatePlugError: If a plug name is declared twice for the
same function.
"""
phase = openhtf.core.phase_descriptor.PhaseDescriptor.wrap_or_copy(func)
duplicates = (frozenset(p.name for p in phase.plugs) &
frozenset(plugs_map))
if duplicates:
raise DuplicatePlugError(
'Plugs %s required multiple times on phase %s' % (duplicates, func))
phase.plugs.extend([
PhasePlug(name, a_plug, update_kwargs=update_kwargs)
for name, a_plug in six.iteritems(plugs_map)])
return phase
return result
class _PlugTearDownThread(threads.KillableThread):
"""Killable thread that runs a plug's tearDown function."""
def __init__(self, a_plug, *args, **kwargs):
super(_PlugTearDownThread, self).__init__(*args, **kwargs)
self._plug = a_plug
def _thread_proc(self):
try:
self._plug.tearDown()
except Exception: # pylint: disable=broad-except
# Including the stack trace from ThreadTerminationErrors received when
# killed.
_LOG.warning('Exception calling tearDown on %s:',
self._plug, exc_info=True)
class PlugManager(object):
"""Class to manage the lifetimes of plugs.
This class handles instantiation of plugs at test start and calling
tearDown() on all plugs when the test completes. It is used by
the executor, and should not be instantiated outside the framework itself.
Note this class is not thread-safe. It should only ever be used by the
main framework thread anyway.
Attributes:
_plug_types: Initial set of plug types, additional plug types may be
passed into calls to initialize_plugs().
_logger_name: The name of this test's plug logger. The loggers passed to the
plugs will be children of this logger.
_plugs_by_type: Dict mapping plug type to plug instance.
_plugs_by_name: Dict mapping plug name to plug instance.
_plug_descriptors: Dict mapping plug type to plug descriptor.
"""
def __init__(self, plug_types=None, record_logger_name=None):
self._plug_types = plug_types or set()
for plug in self._plug_types:
if isinstance(plug, PlugPlaceholder):
raise InvalidPlugError('Plug %s is a placeholder, replace it using '
'with_plugs().' % plug)
self._plugs_by_type = {}
self._plugs_by_name = {}
self._plug_descriptors = {}
self.logger = logging.getLogger(record_logger_name).getChild('plug')
def _asdict(self):
return {
'plug_descriptors': {
name: dict(descriptor._asdict()) # Convert OrderedDict to dict.
for name, descriptor in six.iteritems(self._plug_descriptors)
},
'plug_states': {
name: plug._asdict()
for name, plug in six.iteritems(self._plugs_by_name)
},
}
def _make_plug_descriptor(self, plug_type):
"""Returns the plug descriptor, containing info about this plug type."""
return PlugDescriptor(self.get_plug_mro(plug_type))
def get_plug_mro(self, plug_type):
"""Returns a list of names identifying the plug classes in the plug's MRO.
For example:
['openhtf.plugs.user_input.UserInput']
Or:
['openhtf.plugs.user_input.UserInput',
'my_module.advanced_user_input.AdvancedUserInput']
"""
ignored_classes = (BasePlug, FrontendAwareBasePlug)
return [
self.get_plug_name(base_class) for base_class in plug_type.mro()
if (issubclass(base_class, BasePlug) and
base_class not in ignored_classes)
]
def get_plug_name(self, plug_type):
"""Returns the plug's name, which is the class name and module.
For example:
'openhtf.plugs.user_input.UserInput'
"""
return '%s.%s' % (plug_type.__module__, plug_type.__name__)
def initialize_plugs(self, plug_types=None):
"""Instantiate required plugs.
Instantiates plug types and saves the instances in self._plugs_by_type for
use in provide_plugs().
Args:
plug_types: Plug types may be specified here rather than passed
into the constructor (this is used primarily for unit testing
phases).
"""
types = plug_types if plug_types is not None else self._plug_types
for plug_type in types:
# Create a logger for this plug. All plug loggers go under the 'plug'
# sub-logger in the logger hierarchy.
plug_logger = self.logger.getChild(plug_type.__name__)
if plug_type in self._plugs_by_type:
continue
try:
if not issubclass(plug_type, BasePlug):
raise InvalidPlugError(
'Plug type "%s" is not an instance of BasePlug' % plug_type)
if plug_type.logger != _LOG:
# They put a logger attribute on the class itself, overriding ours.
raise InvalidPlugError(
'Do not override "logger" in your plugs.', plug_type)
# Override the logger so that __init__'s logging goes into the record.
plug_type.logger = plug_logger
try:
plug_instance = plug_type()
finally:
# Now set it back since we'll give the instance a logger in a moment.
plug_type.logger = _LOG
# Set the logger attribute directly (rather than in BasePlug) so we
# don't depend on subclasses' implementation of __init__ to have it
# set.
if plug_instance.logger != _LOG:
raise InvalidPlugError(
'Do not set "self.logger" in __init__ in your plugs', plug_type)
else:
# Now the instance has its own copy of the test logger.
plug_instance.logger = plug_logger
except Exception: # pylint: disable=broad-except
plug_logger.critical('Exception instantiating plug "%s"', plug_type.__name__)
self.tear_down_plugs()
raise
self.update_plug(plug_type, plug_instance)
def get_plug_by_class_path(self, plug_name):
"""Get a plug instance by name (class path).
This provides a way for extensions to OpenHTF to access plug instances for
a running test via that test's plug manager.
Args:
plug_name: Plug name, e.g. 'openhtf.plugs.user_input.UserInput'.
Returns:
The plug manager's instance of the specified plug.
"""
#_LOG.debug(self._plugs_by_name)
return self._plugs_by_name.get(plug_name)
def update_plug(self, plug_type, plug_value):
"""Update internal data stores with the given plug value for plug type.
Safely tears down the old instance if one was already created, but that's
generally not the case outside unittests. Also, we explicitly pass the
plug_type rather than detecting it from plug_value to allow unittests to
override plugs with Mock instances.
Note this should only be used inside unittests, as this mechanism is not
compatible with RemotePlug support.
"""
self._plug_types.add(plug_type)
if plug_type in self._plugs_by_type:
self._plugs_by_type[plug_type].tearDown()
plug_name = self.get_plug_name(plug_type)
self._plugs_by_type[plug_type] = plug_value
self._plugs_by_name[plug_name] = plug_value
self._plug_descriptors[plug_name] = self._make_plug_descriptor(plug_type)
def provide_plugs(self, plug_name_map):
"""Provide the requested plugs [(name, type),] as {name: plug instance}."""
return {name: self._plugs_by_type[cls] for name, cls in plug_name_map}
def tear_down_plugs(self):
"""Call tearDown() on all instantiated plugs.
Note that initialize_plugs must have been called before calling
this method, and initialize_plugs must be called again after calling
this method if you want to access the plugs attribute again.
Any exceptions in tearDown() methods are logged, but do not get raised
by this method.
"""
_LOG.debug('Tearing down all plugs.')
for plug_type, plug_instance in six.iteritems(self._plugs_by_type):
if plug_instance.uses_base_tear_down():
name = '<PlugTearDownThread: BasePlug No-Op for %s>' % plug_type
else:
name = '<PlugTearDownThread: %s>' % plug_type
thread = _PlugTearDownThread(plug_instance, name=name)
thread.start()
timeout_s = (conf.plug_teardown_timeout_s
if conf.plug_teardown_timeout_s
else None)
thread.join(timeout_s)
if thread.is_alive():
thread.kill()
_LOG.warning('Killed tearDown for plug %s after timeout.',
plug_instance)
self._plugs_by_type.clear()
self._plugs_by_name.clear()
def wait_for_plug_update(self, plug_name, remote_state, timeout_s):
"""Wait for a change in the state of a frontend-aware plug.
Args:
plug_name: Plug name, e.g. 'openhtf.plugs.user_input.UserInput'.
remote_state: The last observed state.
timeout_s: Number of seconds to wait for an update.
Returns:
An updated state, or None if the timeout runs out.
Raises:
InvalidPlugError: The plug can't be waited on either because it's not in
use or it's not a frontend-aware plug.
"""
plug = self._plugs_by_name.get(plug_name)
if plug is None:
raise InvalidPlugError('Cannot wait on unknown plug "%s".' % plug_name)
if not isinstance(plug, FrontendAwareBasePlug):
      raise InvalidPlugError('Cannot wait on a plug %s that is not a subclass '
'of FrontendAwareBasePlug.' % plug_name)
state, update_event = plug.asdict_with_event()
if state != remote_state:
return state
if update_event.wait(timeout_s):
return plug._asdict()
def get_frontend_aware_plug_names(self):
"""Returns the names of frontend-aware plugs."""
return [name for name, plug in six.iteritems(self._plugs_by_name)
if isinstance(plug, FrontendAwareBasePlug)]
|
|
# -*- coding: utf-8 -*-
# Copyright 2014 splinter authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
from __future__ import with_statement
import os.path
import re
import time
import sys
import lxml.html
from lxml.cssselect import CSSSelector
from splinter.driver import DriverAPI, ElementAPI
from splinter.element_list import ElementList
from splinter.exceptions import ElementDoesNotExist
class LxmlDriver(DriverAPI):
def __init__(self, user_agent=None, wait_time=2):
self.wait_time = wait_time
self._history = []
self._last_urls = []
self._forms = {}
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
pass
def _do_method(self, action, url, data=None):
raise NotImplementedError(
"%s doesn't support doing http methods." % self.driver_name)
def visit(self, url):
self._do_method('get', url)
def submit(self, form):
method = form.attrib.get('method', 'get').lower()
action = form.attrib.get('action', '')
if action.strip() != '.':
url = os.path.join(self._url, action)
else:
url = self._url
self._url = url
data = dict(((k, v) for k, v in form.fields.items() if v is not None))
for key in form.inputs.keys():
input = form.inputs[key]
if getattr(input, 'type', '') == 'file' and key in data:
data[key] = open(data[key], 'rb')
self._do_method(method, url, data=data)
return self._response
def submit_data(self, form):
raise NotImplementedError(
"%s doesn't support submitting then getting the data." % self.driver_name)
def back(self):
self._last_urls.insert(0, self.url)
self.visit(self._last_urls[1])
def forward(self):
try:
self.visit(self._last_urls.pop())
except IndexError:
pass
def reload(self):
self.visit(self._url)
def quit(self):
pass
@property
def htmltree(self):
try:
return self._html
except AttributeError:
self._html = lxml.html.fromstring(self.html)
return self._html
@property
def title(self):
html = self.htmltree
return html.xpath('//title')[0].text_content().strip()
@property
def html(self):
raise NotImplementedError(
"%s doesn't support getting the html of the response." %
self.driver_name)
@property
def url(self):
return self._url
def find_option_by_value(self, value):
html = self.htmltree
element = html.xpath('//option[@value="%s"]' % value)[0]
control = LxmlControlElement(element.getparent(), self)
return ElementList([LxmlOptionElement(element, control)], find_by="value", query=value)
def find_option_by_text(self, text):
html = self.htmltree
element = html.xpath('//option[normalize-space(text())="%s"]' % text)[0]
control = LxmlControlElement(element.getparent(), self)
return ElementList([LxmlOptionElement(element, control)], find_by="text", query=text)
def find_by_css(self, selector):
xpath = CSSSelector(selector).path
return self.find_by_xpath(xpath, original_find="css", original_selector=selector)
def find_by_xpath(self, xpath, original_find=None, original_selector=None):
html = self.htmltree
elements = []
for xpath_element in html.xpath(xpath):
if self._element_is_link(xpath_element):
return self._find_links_by_xpath(xpath)
elif self._element_is_control(xpath_element):
elements.append((LxmlControlElement, xpath_element))
else:
elements.append((LxmlElement, xpath_element))
find_by = original_find or "xpath"
query = original_selector or xpath
return ElementList(
[element_class(element, self) for element_class, element in elements],
find_by=find_by, query=query)
def find_by_tag(self, tag):
return self.find_by_xpath('//%s' % tag, original_find="tag", original_selector=tag)
def find_by_value(self, value):
return self.find_by_xpath('//*[@value="%s"]' % value, original_find="value", original_selector=value)
def find_by_text(self, text):
return self.find_by_xpath('//*[text()="%s"]' % text,
original_find="text", original_selector=text)
def find_by_id(self, id_value):
return self.find_by_xpath(
'//*[@id="%s"][1]' % id_value, original_find="id", original_selector=id_value)
def find_by_name(self, name):
html = self.htmltree
xpath = '//*[@name="%s"]' % name
elements = []
for xpath_element in html.xpath(xpath):
elements.append(xpath_element)
find_by = "name"
query = xpath
return ElementList(
[LxmlControlElement(element, self) for element in elements],
find_by=find_by, query=query)
def find_link_by_text(self, text):
return self._find_links_by_xpath("//a[text()='%s']" % text)
def find_link_by_href(self, href):
return self._find_links_by_xpath("//a[@href='%s']" % href)
def find_link_by_partial_href(self, partial_href):
return self._find_links_by_xpath("//a[contains(@href, '%s')]" % partial_href)
def find_link_by_partial_text(self, partial_text):
return self._find_links_by_xpath("//a[contains(normalize-space(.), '%s')]" % partial_text)
def fill(self, name, value):
self.find_by_name(name=name).first.fill(value)
def fill_form(self, field_values):
for name, value in field_values.items():
element = self.find_by_name(name)
control = element.first._control
control_type = control.get('type')
if control_type == 'checkbox':
if value:
control.value = value # control.options
else:
control.value = []
elif control_type == 'radio':
control.value = value # [option for option in control.options if option == value]
elif control_type == 'select':
control.value = [value]
else:
# text, textarea, password, tel
control.value = value
def choose(self, name, value):
self.find_by_name(name).first._control.value = value
def check(self, name):
control = self.find_by_name(name).first._control
control.value = ['checked']
def uncheck(self, name):
control = self.find_by_name(name).first._control
control.value = []
def attach_file(self, name, file_path):
control = self.find_by_name(name).first._control
control.value = file_path
def _find_links_by_xpath(self, xpath):
html = self.htmltree
links = html.xpath(xpath)
return ElementList(
[LxmlLinkElement(link, self) for link in links], find_by="xpath", query=xpath)
def select(self, name, value):
self.find_by_name(name).first._control.value = value
def is_text_present(self, text, wait_time=None):
wait_time = wait_time or self.wait_time
end_time = time.time() + wait_time
while time.time() < end_time:
if self._is_text_present(text):
return True
return False
def _is_text_present(self, text):
try:
body = self.find_by_tag('body').first
return text in body.text
except ElementDoesNotExist:
# This exception will be thrown if the body tag isn't present
# This has occasionally been observed. Assume that the
# page isn't fully loaded yet
return False
def is_text_not_present(self, text, wait_time=None):
wait_time = wait_time or self.wait_time
end_time = time.time() + wait_time
while time.time() < end_time:
if not self._is_text_present(text):
return True
return False
def _element_is_link(self, element):
return element.tag == 'a'
def _element_is_control(self, element):
return hasattr(element, 'type')
@property
def cookies(self):
return self._cookie_manager
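# Captures the inner HTML of an element: strips the outermost opening and
# closing tags from the serialized (outer) HTML. Used by LxmlElement.html below.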
re_extract_inner_html = re.compile(r'^<[^<>]+>(.*)</[^<>]+>$')
class LxmlElement(ElementAPI):
def __init__(self, element, parent):
self._element = element
self.parent = parent
def __getitem__(self, attr):
return self._element.attrib[attr]
def find_by_css(self, selector):
elements = self._element.cssselect(selector)
return ElementList([self.__class__(element, self) for element in elements])
def find_by_xpath(self, selector):
elements = self._element.xpath(selector)
return ElementList([self.__class__(element, self) for element in elements])
def find_by_name(self, name):
elements = self._element.cssselect('[name="%s"]' % name)
return ElementList([self.__class__(element, self) for element in elements])
def find_by_tag(self, name):
elements = self._element.cssselect(name)
return ElementList([self.__class__(element, self) for element in elements])
def find_by_value(self, value):
elements = self._element.cssselect('[value="%s"]' % value)
return ElementList([self.__class__(element, self) for element in elements])
def find_by_text(self, text):
return self.find_by_xpath('//*[text()="%s"]' % text)
def find_by_id(self, id):
elements = self._element.cssselect('#%s' % id)
return ElementList([self.__class__(element, self) for element in elements])
@property
def value(self):
return self._element.text_content()
@property
def text(self):
return self.value
@property
def outer_html(self):
return lxml.html.tostring(self._element, encoding='unicode').strip()
@property
def html(self):
return re_extract_inner_html.match(self.outer_html).group(1)
def has_class(self, class_name):
return len(self._element.find_class(class_name)) > 0
class LxmlLinkElement(LxmlElement):
def __init__(self, element, parent):
super(LxmlLinkElement, self).__init__(element, parent)
self._browser = parent
def __getitem__(self, attr):
return super(LxmlLinkElement, self).__getitem__(attr)
def click(self):
return self._browser.visit(self["href"])
class LxmlControlElement(LxmlElement):
def __init__(self, control, parent):
self._control = control
self.parent = parent
def __getitem__(self, attr):
return self._control.attrib[attr]
@property
def value(self):
return self._control.value
@property
def checked(self):
return bool(self._control.value)
def click(self):
parent_form = self._get_parent_form()
return self.parent.submit_data(parent_form)
def fill(self, value):
parent_form = self._get_parent_form()
if sys.version_info[0] > 2:
parent_form.fields[self['name']] = value
else:
parent_form.fields[self['name']] = value.decode('utf-8')
def select(self, value):
self._control.value = value
def _get_parent_form(self):
parent_form = next(self._control.iterancestors('form'))
return self.parent._forms.setdefault(parent_form._name(), parent_form)
class LxmlOptionElement(LxmlElement):
def __init__(self, control, parent):
self._control = control
self.parent = parent
def __getitem__(self, attr):
return self._control.attrib[attr]
@property
def text(self):
return self._control.text
@property
def value(self):
return self._control.attrib['value']
@property
def selected(self):
return self.parent.value == self.value
|
|
"""Support for interfacing with Monoprice Blackbird 4k 8x8 HDBaseT Matrix."""
import logging
import socket
from pyblackbird import get_blackbird
from serial import SerialException
import voluptuous as vol
from homeassistant.components.media_player import PLATFORM_SCHEMA, MediaPlayerDevice
from homeassistant.components.media_player.const import (
SUPPORT_SELECT_SOURCE,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_HOST,
CONF_NAME,
CONF_PORT,
CONF_TYPE,
STATE_OFF,
STATE_ON,
)
import homeassistant.helpers.config_validation as cv
from .const import DOMAIN, SERVICE_SETALLZONES
_LOGGER = logging.getLogger(__name__)
SUPPORT_BLACKBIRD = SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_SELECT_SOURCE
MEDIA_PLAYER_SCHEMA = vol.Schema({ATTR_ENTITY_ID: cv.comp_entity_ids})
ZONE_SCHEMA = vol.Schema({vol.Required(CONF_NAME): cv.string})
SOURCE_SCHEMA = vol.Schema({vol.Required(CONF_NAME): cv.string})
CONF_ZONES = "zones"
CONF_SOURCES = "sources"
DATA_BLACKBIRD = "blackbird"
ATTR_SOURCE = "source"
BLACKBIRD_SETALLZONES_SCHEMA = MEDIA_PLAYER_SCHEMA.extend(
{vol.Required(ATTR_SOURCE): cv.string}
)
# Valid zone ids: 1-8
ZONE_IDS = vol.All(vol.Coerce(int), vol.Range(min=1, max=8))
# Valid source ids: 1-8
SOURCE_IDS = vol.All(vol.Coerce(int), vol.Range(min=1, max=8))
PLATFORM_SCHEMA = vol.All(
cv.has_at_least_one_key(CONF_PORT, CONF_HOST),
PLATFORM_SCHEMA.extend(
{
vol.Exclusive(CONF_PORT, CONF_TYPE): cv.string,
vol.Exclusive(CONF_HOST, CONF_TYPE): cv.string,
vol.Required(CONF_ZONES): vol.Schema({ZONE_IDS: ZONE_SCHEMA}),
vol.Required(CONF_SOURCES): vol.Schema({SOURCE_IDS: SOURCE_SCHEMA}),
}
),
)
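# A hedged example configuration.yaml entry accepted by the schema above (the
# platform key is assumed from this component's domain; host, zone and source
# names are illustrative placeholders):
#
#   media_player:
#     - platform: blackbird
#       host: 192.168.1.50
#       zones:
#         1:
#           name: Living Room
#         2:
#           name: Kitchen
#       sources:
#         1:
#           name: Apple TV
#         2:
#           name: Chromecast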
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Monoprice Blackbird 4k 8x8 HDBaseT Matrix platform."""
if DATA_BLACKBIRD not in hass.data:
hass.data[DATA_BLACKBIRD] = {}
port = config.get(CONF_PORT)
host = config.get(CONF_HOST)
connection = None
if port is not None:
try:
blackbird = get_blackbird(port)
connection = port
except SerialException:
_LOGGER.error("Error connecting to the Blackbird controller")
return
if host is not None:
try:
blackbird = get_blackbird(host, False)
connection = host
except socket.timeout:
_LOGGER.error("Error connecting to the Blackbird controller")
return
sources = {
source_id: extra[CONF_NAME] for source_id, extra in config[CONF_SOURCES].items()
}
devices = []
for zone_id, extra in config[CONF_ZONES].items():
_LOGGER.info("Adding zone %d - %s", zone_id, extra[CONF_NAME])
unique_id = f"{connection}-{zone_id}"
device = BlackbirdZone(blackbird, sources, zone_id, extra[CONF_NAME])
hass.data[DATA_BLACKBIRD][unique_id] = device
devices.append(device)
add_entities(devices, True)
def service_handle(service):
"""Handle for services."""
entity_ids = service.data.get(ATTR_ENTITY_ID)
source = service.data.get(ATTR_SOURCE)
if entity_ids:
devices = [
device
for device in hass.data[DATA_BLACKBIRD].values()
if device.entity_id in entity_ids
]
else:
devices = hass.data[DATA_BLACKBIRD].values()
for device in devices:
if service.service == SERVICE_SETALLZONES:
device.set_all_zones(source)
hass.services.register(
DOMAIN, SERVICE_SETALLZONES, service_handle, schema=BLACKBIRD_SETALLZONES_SCHEMA
)
class BlackbirdZone(MediaPlayerDevice):
"""Representation of a Blackbird matrix zone."""
def __init__(self, blackbird, sources, zone_id, zone_name):
"""Initialize new zone."""
self._blackbird = blackbird
# dict source_id -> source name
self._source_id_name = sources
# dict source name -> source_id
self._source_name_id = {v: k for k, v in sources.items()}
# ordered list of all source names
self._source_names = sorted(
self._source_name_id.keys(), key=lambda v: self._source_name_id[v]
)
self._zone_id = zone_id
self._name = zone_name
self._state = None
self._source = None
def update(self):
"""Retrieve latest state."""
state = self._blackbird.zone_status(self._zone_id)
if not state:
return
self._state = STATE_ON if state.power else STATE_OFF
idx = state.av
if idx in self._source_id_name:
self._source = self._source_id_name[idx]
else:
self._source = None
@property
def name(self):
"""Return the name of the zone."""
return self._name
@property
def state(self):
"""Return the state of the zone."""
return self._state
@property
def supported_features(self):
"""Return flag of media commands that are supported."""
return SUPPORT_BLACKBIRD
@property
def media_title(self):
"""Return the current source as media title."""
return self._source
@property
def source(self):
"""Return the current input source of the device."""
return self._source
@property
def source_list(self):
"""List of available input sources."""
return self._source_names
def set_all_zones(self, source):
"""Set all zones to one source."""
if source not in self._source_name_id:
return
idx = self._source_name_id[source]
_LOGGER.debug("Setting all zones source to %s", idx)
self._blackbird.set_all_zone_source(idx)
def select_source(self, source):
"""Set input source."""
if source not in self._source_name_id:
return
idx = self._source_name_id[source]
_LOGGER.debug("Setting zone %d source to %s", self._zone_id, idx)
self._blackbird.set_zone_source(self._zone_id, idx)
def turn_on(self):
"""Turn the media player on."""
_LOGGER.debug("Turning zone %d on", self._zone_id)
self._blackbird.set_zone_power(self._zone_id, True)
def turn_off(self):
"""Turn the media player off."""
_LOGGER.debug("Turning zone %d off", self._zone_id)
self._blackbird.set_zone_power(self._zone_id, False)
|
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.robotframework
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Lexer for Robot Framework.
:copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# Copyright 2012 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from pygments.lexer import Lexer
from pygments.token import Token
from pygments.util import text_type
__all__ = ['RobotFrameworkLexer']
HEADING = Token.Generic.Heading
SETTING = Token.Keyword.Namespace
IMPORT = Token.Name.Namespace
TC_KW_NAME = Token.Generic.Subheading
KEYWORD = Token.Name.Function
ARGUMENT = Token.String
VARIABLE = Token.Name.Variable
COMMENT = Token.Comment
SEPARATOR = Token.Punctuation
SYNTAX = Token.Punctuation
GHERKIN = Token.Generic.Emph
ERROR = Token.Error
def normalize(string, remove=''):
string = string.lower()
for char in remove + ' ':
if char in string:
string = string.replace(char, '')
return string
class RobotFrameworkLexer(Lexer):
"""
For `Robot Framework <http://robotframework.org>`_ test data.
Supports both space and pipe separated plain text formats.
.. versionadded:: 1.6
"""
name = 'RobotFramework'
aliases = ['robotframework']
filenames = ['*.robot']
mimetypes = ['text/x-robotframework']
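    # A minimal usage sketch (illustrative; assumes Pygments is installed):
    #
    #   from pygments import highlight
    #   from pygments.formatters import TerminalFormatter
    #   code = "*** Test Cases ***\nExample\n    Log    Hello"
    #   print(highlight(code, RobotFrameworkLexer(), TerminalFormatter()))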
def __init__(self, **options):
options['tabsize'] = 2
options['encoding'] = 'UTF-8'
Lexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
row_tokenizer = RowTokenizer()
var_tokenizer = VariableTokenizer()
index = 0
for row in text.splitlines():
for value, token in row_tokenizer.tokenize(row):
for value, token in var_tokenizer.tokenize(value, token):
if value:
yield index, token, text_type(value)
index += len(value)
class VariableTokenizer(object):
def tokenize(self, string, token):
var = VariableSplitter(string, identifiers='$@%&')
if var.start < 0 or token in (COMMENT, ERROR):
yield string, token
return
for value, token in self._tokenize(var, string, token):
if value:
yield value, token
def _tokenize(self, var, string, orig_token):
before = string[:var.start]
yield before, orig_token
yield var.identifier + '{', SYNTAX
for value, token in self.tokenize(var.base, VARIABLE):
yield value, token
yield '}', SYNTAX
if var.index:
yield '[', SYNTAX
for value, token in self.tokenize(var.index, VARIABLE):
yield value, token
yield ']', SYNTAX
for value, token in self.tokenize(string[var.end:], orig_token):
yield value, token
class RowTokenizer(object):
def __init__(self):
self._table = UnknownTable()
self._splitter = RowSplitter()
testcases = TestCaseTable()
settings = SettingTable(testcases.set_default_template)
variables = VariableTable()
keywords = KeywordTable()
self._tables = {'settings': settings, 'setting': settings,
'metadata': settings,
'variables': variables, 'variable': variables,
'testcases': testcases, 'testcase': testcases,
'keywords': keywords, 'keyword': keywords,
'userkeywords': keywords, 'userkeyword': keywords}
def tokenize(self, row):
commented = False
heading = False
for index, value in enumerate(self._splitter.split(row)):
# First value, and every second after that, is a separator.
index, separator = divmod(index-1, 2)
if value.startswith('#'):
commented = True
elif index == 0 and value.startswith('*'):
self._table = self._start_table(value)
heading = True
for value, token in self._tokenize(value, index, commented,
separator, heading):
yield value, token
self._table.end_row()
def _start_table(self, header):
name = normalize(header, remove='*')
return self._tables.get(name, UnknownTable())
def _tokenize(self, value, index, commented, separator, heading):
if commented:
yield value, COMMENT
elif separator:
yield value, SEPARATOR
elif heading:
yield value, HEADING
else:
for value, token in self._table.tokenize(value, index):
yield value, token
class RowSplitter(object):
_space_splitter = re.compile('( {2,})')
_pipe_splitter = re.compile(r'((?:^| +)\|(?: +|$))')
def split(self, row):
splitter = (row.startswith('| ') and self._split_from_pipes
or self._split_from_spaces)
for value in splitter(row):
yield value
yield '\n'
def _split_from_spaces(self, row):
yield '' # Start with (pseudo)separator similarly as with pipes
for value in self._space_splitter.split(row):
yield value
def _split_from_pipes(self, row):
_, separator, rest = self._pipe_splitter.split(row, 1)
yield separator
while self._pipe_splitter.search(rest):
cell, separator, rest = self._pipe_splitter.split(rest, 1)
yield cell
yield separator
yield rest
class Tokenizer(object):
_tokens = None
def __init__(self):
self._index = 0
def tokenize(self, value):
values_and_tokens = self._tokenize(value, self._index)
self._index += 1
if isinstance(values_and_tokens, type(Token)):
values_and_tokens = [(value, values_and_tokens)]
return values_and_tokens
def _tokenize(self, value, index):
index = min(index, len(self._tokens) - 1)
return self._tokens[index]
def _is_assign(self, value):
if value.endswith('='):
value = value[:-1].strip()
var = VariableSplitter(value, identifiers='$@&')
return var.start == 0 and var.end == len(value)
class Comment(Tokenizer):
_tokens = (COMMENT,)
class Setting(Tokenizer):
_tokens = (SETTING, ARGUMENT)
_keyword_settings = ('suitesetup', 'suiteprecondition', 'suiteteardown',
'suitepostcondition', 'testsetup', 'testprecondition',
'testteardown', 'testpostcondition', 'testtemplate')
_import_settings = ('library', 'resource', 'variables')
_other_settings = ('documentation', 'metadata', 'forcetags', 'defaulttags',
'testtimeout')
_custom_tokenizer = None
def __init__(self, template_setter=None):
Tokenizer.__init__(self)
self._template_setter = template_setter
def _tokenize(self, value, index):
if index == 1 and self._template_setter:
self._template_setter(value)
if index == 0:
normalized = normalize(value)
if normalized in self._keyword_settings:
self._custom_tokenizer = KeywordCall(support_assign=False)
elif normalized in self._import_settings:
self._custom_tokenizer = ImportSetting()
elif normalized not in self._other_settings:
return ERROR
elif self._custom_tokenizer:
return self._custom_tokenizer.tokenize(value)
return Tokenizer._tokenize(self, value, index)
class ImportSetting(Tokenizer):
_tokens = (IMPORT, ARGUMENT)
class TestCaseSetting(Setting):
_keyword_settings = ('setup', 'precondition', 'teardown', 'postcondition',
'template')
_import_settings = ()
_other_settings = ('documentation', 'tags', 'timeout')
def _tokenize(self, value, index):
if index == 0:
type = Setting._tokenize(self, value[1:-1], index)
return [('[', SYNTAX), (value[1:-1], type), (']', SYNTAX)]
return Setting._tokenize(self, value, index)
class KeywordSetting(TestCaseSetting):
_keyword_settings = ('teardown',)
_other_settings = ('documentation', 'arguments', 'return', 'timeout', 'tags')
class Variable(Tokenizer):
_tokens = (SYNTAX, ARGUMENT)
def _tokenize(self, value, index):
if index == 0 and not self._is_assign(value):
return ERROR
return Tokenizer._tokenize(self, value, index)
class KeywordCall(Tokenizer):
_tokens = (KEYWORD, ARGUMENT)
def __init__(self, support_assign=True):
Tokenizer.__init__(self)
self._keyword_found = not support_assign
self._assigns = 0
def _tokenize(self, value, index):
if not self._keyword_found and self._is_assign(value):
self._assigns += 1
return SYNTAX # VariableTokenizer tokenizes this later.
if self._keyword_found:
return Tokenizer._tokenize(self, value, index - self._assigns)
self._keyword_found = True
return GherkinTokenizer().tokenize(value, KEYWORD)
class GherkinTokenizer(object):
_gherkin_prefix = re.compile('^(Given|When|Then|And) ', re.IGNORECASE)
def tokenize(self, value, token):
match = self._gherkin_prefix.match(value)
if not match:
return [(value, token)]
end = match.end()
return [(value[:end], GHERKIN), (value[end:], token)]
class TemplatedKeywordCall(Tokenizer):
_tokens = (ARGUMENT,)
class ForLoop(Tokenizer):
def __init__(self):
Tokenizer.__init__(self)
self._in_arguments = False
def _tokenize(self, value, index):
token = self._in_arguments and ARGUMENT or SYNTAX
if value.upper() in ('IN', 'IN RANGE'):
self._in_arguments = True
return token
class _Table(object):
_tokenizer_class = None
def __init__(self, prev_tokenizer=None):
self._tokenizer = self._tokenizer_class()
self._prev_tokenizer = prev_tokenizer
self._prev_values_on_row = []
def tokenize(self, value, index):
if self._continues(value, index):
self._tokenizer = self._prev_tokenizer
yield value, SYNTAX
else:
for value_and_token in self._tokenize(value, index):
yield value_and_token
self._prev_values_on_row.append(value)
def _continues(self, value, index):
return value == '...' and all(self._is_empty(t)
for t in self._prev_values_on_row)
def _is_empty(self, value):
return value in ('', '\\')
def _tokenize(self, value, index):
return self._tokenizer.tokenize(value)
def end_row(self):
self.__init__(prev_tokenizer=self._tokenizer)
class UnknownTable(_Table):
_tokenizer_class = Comment
def _continues(self, value, index):
return False
class VariableTable(_Table):
_tokenizer_class = Variable
class SettingTable(_Table):
_tokenizer_class = Setting
def __init__(self, template_setter, prev_tokenizer=None):
_Table.__init__(self, prev_tokenizer)
self._template_setter = template_setter
def _tokenize(self, value, index):
if index == 0 and normalize(value) == 'testtemplate':
self._tokenizer = Setting(self._template_setter)
return _Table._tokenize(self, value, index)
def end_row(self):
self.__init__(self._template_setter, prev_tokenizer=self._tokenizer)
class TestCaseTable(_Table):
_setting_class = TestCaseSetting
_test_template = None
_default_template = None
@property
def _tokenizer_class(self):
if self._test_template or (self._default_template and
self._test_template is not False):
return TemplatedKeywordCall
return KeywordCall
def _continues(self, value, index):
return index > 0 and _Table._continues(self, value, index)
def _tokenize(self, value, index):
if index == 0:
if value:
self._test_template = None
return GherkinTokenizer().tokenize(value, TC_KW_NAME)
if index == 1 and self._is_setting(value):
if self._is_template(value):
self._test_template = False
self._tokenizer = self._setting_class(self.set_test_template)
else:
self._tokenizer = self._setting_class()
if index == 1 and self._is_for_loop(value):
self._tokenizer = ForLoop()
if index == 1 and self._is_empty(value):
return [(value, SYNTAX)]
return _Table._tokenize(self, value, index)
def _is_setting(self, value):
return value.startswith('[') and value.endswith(']')
def _is_template(self, value):
return normalize(value) == '[template]'
def _is_for_loop(self, value):
return value.startswith(':') and normalize(value, remove=':') == 'for'
def set_test_template(self, template):
self._test_template = self._is_template_set(template)
def set_default_template(self, template):
self._default_template = self._is_template_set(template)
def _is_template_set(self, template):
return normalize(template) not in ('', '\\', 'none', '${empty}')
class KeywordTable(TestCaseTable):
_tokenizer_class = KeywordCall
_setting_class = KeywordSetting
def _is_template(self, value):
return False
# Following code copied directly from Robot Framework 2.7.5.
class VariableSplitter:
def __init__(self, string, identifiers):
self.identifier = None
self.base = None
self.index = None
self.start = -1
self.end = -1
self._identifiers = identifiers
self._may_have_internal_variables = False
try:
self._split(string)
except ValueError:
pass
else:
self._finalize()
def get_replaced_base(self, variables):
if self._may_have_internal_variables:
return variables.replace_string(self.base)
return self.base
def _finalize(self):
self.identifier = self._variable_chars[0]
self.base = ''.join(self._variable_chars[2:-1])
self.end = self.start + len(self._variable_chars)
if self._has_list_or_dict_variable_index():
self.index = ''.join(self._list_and_dict_variable_index_chars[1:-1])
self.end += len(self._list_and_dict_variable_index_chars)
def _has_list_or_dict_variable_index(self):
return self._list_and_dict_variable_index_chars\
and self._list_and_dict_variable_index_chars[-1] == ']'
def _split(self, string):
start_index, max_index = self._find_variable(string)
self.start = start_index
self._open_curly = 1
self._state = self._variable_state
self._variable_chars = [string[start_index], '{']
self._list_and_dict_variable_index_chars = []
self._string = string
start_index += 2
for index, char in enumerate(string[start_index:]):
index += start_index # Giving start to enumerate only in Py 2.6+
try:
self._state(char, index)
except StopIteration:
return
if index == max_index and not self._scanning_list_variable_index():
return
def _scanning_list_variable_index(self):
return self._state in [self._waiting_list_variable_index_state,
self._list_variable_index_state]
def _find_variable(self, string):
max_end_index = string.rfind('}')
if max_end_index == -1:
raise ValueError('No variable end found')
if self._is_escaped(string, max_end_index):
return self._find_variable(string[:max_end_index])
start_index = self._find_start_index(string, 1, max_end_index)
if start_index == -1:
raise ValueError('No variable start found')
return start_index, max_end_index
def _find_start_index(self, string, start, end):
index = string.find('{', start, end) - 1
if index < 0:
return -1
if self._start_index_is_ok(string, index):
return index
return self._find_start_index(string, index+2, end)
def _start_index_is_ok(self, string, index):
return string[index] in self._identifiers\
and not self._is_escaped(string, index)
def _is_escaped(self, string, index):
escaped = False
while index > 0 and string[index-1] == '\\':
index -= 1
escaped = not escaped
return escaped
def _variable_state(self, char, index):
self._variable_chars.append(char)
if char == '}' and not self._is_escaped(self._string, index):
self._open_curly -= 1
if self._open_curly == 0:
if not self._is_list_or_dict_variable():
raise StopIteration
self._state = self._waiting_list_variable_index_state
elif char in self._identifiers:
self._state = self._internal_variable_start_state
def _is_list_or_dict_variable(self):
return self._variable_chars[0] in ('@','&')
def _internal_variable_start_state(self, char, index):
self._state = self._variable_state
if char == '{':
self._variable_chars.append(char)
self._open_curly += 1
self._may_have_internal_variables = True
else:
self._variable_state(char, index)
def _waiting_list_variable_index_state(self, char, index):
if char != '[':
raise StopIteration
self._list_and_dict_variable_index_chars.append(char)
self._state = self._list_variable_index_state
def _list_variable_index_state(self, char, index):
self._list_and_dict_variable_index_chars.append(char)
if char == ']':
raise StopIteration
|
|
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from mock import patch
from oslo_serialization import jsonutils
from nailgun.test import base
from nailgun import consts
class TestPutSameJson(base.BaseIntegrationTest):
def setUp(self):
super(TestPutSameJson, self).setUp()
meta = self.env.default_metadata()
meta["interfaces"] = [
{'name': 'eth0', 'pxe': True},
{'name': 'eth1', 'mac': '08:01:5b:2d:62:70'},
{'name': 'eth2', 'mac': '55:62:3f:7b:3e:26'}
]
meta_p = self.env.default_metadata()
meta["interfaces"] = [
{'name': 'eth0', 'pxe': True},
{'name': 'eth1', 'mac': '08:01:5b:2d:62:7a'},
{'name': 'eth2', 'mac': '55:62:3f:7b:3e:2a'}
]
self.cluster = self.env.create(
cluster_kwargs={'api': True},
nodes_kwargs=[
{'api': True, 'meta': meta},
{'api': True, 'pending_addition': True, 'meta': meta_p},
]
)
self.cluster = self.env.clusters[0]
def assertHttpPut(self, name, arguments, data, expected_status):
"""Helper assert for checking HTTP PUT.
:param name: a handler name, for reversing url
:param arguments: arguments for reversing url
:param data: a data to be PUT
:param expected_status: expected HTTP response code
"""
response = self.app.put(
base.reverse(name, kwargs=arguments),
jsonutils.dumps(data),
headers=self.default_headers
)
if not isinstance(expected_status, list):
expected_status = [expected_status]
self.assertIn(response.status_code, expected_status)
# Heuristic checking if response is of task type
is_task = 'progress' in response.json_body and \
'status' in response.json_body and \
'uuid' in response.json_body
if is_task:
self.assertNotEqual(
response.json_body['status'],
consts.TASK_STATUSES.error
)
def http_get(self, name, arguments):
"""Makes a GET request to a resource with `name`
Returns a deserialized dict
"""
resp = self.app.get(
base.reverse(name, kwargs=arguments),
headers=self.default_headers
)
return resp.json_body
def test_release(self):
release = self.env.create_release()
release = self.http_get(
'ReleaseHandler', {
'obj_id': release['id']
}
)
self.assertHttpPut(
'ReleaseHandler', {
'obj_id': release['id']
},
release, 200
)
def test_cluster(self):
cluster = self.http_get(
'ClusterHandler', {
'obj_id': self.cluster.id
}
)
self.assertHttpPut(
'ClusterHandler', {
'obj_id': self.cluster.id
},
cluster, 200
)
@patch('nailgun.rpc.cast')
def test_cluster_changes(self, mock_rpc):
cluster = self.http_get(
'ClusterHandler', {
'obj_id': self.cluster.id
}
)
cluster_changes = cluster['changes']
self.assertHttpPut(
'ClusterChangesHandler',
{
'cluster_id': self.cluster.id
},
cluster_changes, [200, 202]
)
def test_cluster_attributes(self):
cluster_attributes = self.http_get(
'ClusterAttributesHandler', {
'cluster_id': self.cluster.id
}
)
self.assertHttpPut(
'ClusterAttributesHandler', {
'cluster_id': self.cluster.id
},
cluster_attributes, 200
)
def test_cluster_attributes_default(self):
cluster_attributes = self.http_get(
'ClusterAttributesDefaultsHandler', {
'cluster_id': self.cluster.id
}
)
self.assertHttpPut(
'ClusterAttributesDefaultsHandler', {
'cluster_id': self.cluster.id
},
cluster_attributes, 200
)
def test_nova_network_configuration(self):
self.cluster = self.env.create(
cluster_kwargs={
'api': True,
'net_provider': consts.CLUSTER_NET_PROVIDERS.nova_network,
},
)
nova_config = self.http_get(
'NovaNetworkConfigurationHandler', {
'cluster_id': self.cluster['id']
}
)
self.assertHttpPut(
'NovaNetworkConfigurationHandler', {
'cluster_id': self.cluster['id']
},
nova_config, 200
)
def test_neutron_network_configuration(self):
self.cluster = self.env.create(
cluster_kwargs={
'api': True,
'net_provider': consts.CLUSTER_NET_PROVIDERS.neutron,
},
)
neutron_config = self.http_get(
'NeutronNetworkConfigurationHandler', {
'cluster_id': self.cluster['id']
}
)
self.assertHttpPut(
'NeutronNetworkConfigurationHandler', {
'cluster_id': self.cluster['id']
},
neutron_config, 200
)
def test_deployment_info(self):
deployment_info = self.http_get(
'DeploymentInfo', {
'cluster_id': self.cluster.id
}
)
self.assertHttpPut(
'DeploymentInfo', {
'cluster_id': self.cluster.id
},
deployment_info, 200
)
def test_provisioning_info(self):
provisioning_info = self.http_get(
'ProvisioningInfo', {
'cluster_id': self.cluster.id
}
)
self.assertHttpPut(
'ProvisioningInfo', {
'cluster_id': self.cluster.id
},
provisioning_info, 200
)
def test_node_collection(self):
nodes = self.http_get(
'NodeCollectionHandler', {}
)
self.assertHttpPut(
'NodeCollectionHandler', {},
nodes, 200
)
def test_node(self):
node = self.http_get(
'NodeHandler', {
'obj_id': self.cluster.nodes[0].id
}
)
self.assertHttpPut(
'NodeHandler', {
'obj_id': self.cluster.nodes[0].id
},
node, 200
)
def test_node_disks(self):
node_disks = self.http_get(
'NodeDisksHandler', {
'node_id': self.cluster.nodes[0].id
}
)
self.assertHttpPut(
'NodeDisksHandler', {
'node_id': self.cluster.nodes[0].id
},
node_disks, 200
)
def test_node_nics(self):
node_nics = self.http_get(
'NodeNICsHandler', {
'node_id': self.cluster.nodes[0].id
}
)
self.assertHttpPut(
'NodeNICsHandler', {
'node_id': self.cluster.nodes[0].id
},
node_nics, 200
)
def test_task(self):
self.task = self.env.create_task(name='dump')
task = self.http_get(
'TaskHandler', {
'obj_id': self.task.id
}
)
self.assertHttpPut(
'TaskHandler', {
'obj_id': self.task.id
},
task, 200
)
def test_notification_collection(self):
self.env.create_notification(cluster_id=self.cluster.id)
self.env.create_notification(cluster_id=self.cluster.id)
notifications = self.http_get(
'NotificationCollectionHandler', {}
)
self.assertHttpPut(
'NotificationCollectionHandler', {},
notifications, 200
)
def test_notification(self):
self.notification = self.env.create_notification(
cluster_id=self.cluster.id
)
notification = self.http_get(
'NotificationHandler', {
'obj_id': self.notification.id
}
)
self.assertHttpPut(
'NotificationHandler', {
'obj_id': self.notification.id
},
notification, 200
)
|
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import paddle
import unittest
import numpy as np
import paddle.fluid as fluid
import paddle.fluid.core as core
from op_test import OpTest
from paddle.fluid import compiler, Program, program_guard
class TestLRNOp(OpTest):
def get_input(self):
        r''' TODO(gongweibao): why is its grad diff so large?
x = np.ndarray(
shape=(self.N, self.C, self.H, self.W), dtype=float, order='C')
for m in range(0, self.N):
for i in range(0, self.C):
for h in range(0, self.H):
for w in range(0, self.W):
x[m][i][h][w] = m * self.C * self.H * self.W + \
i * self.H * self.W + \
h * self.W + w + 1
'''
x = np.random.rand(self.N, self.C, self.H, self.W).astype("float32")
return x + 1
def get_out(self):
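        # Reference implementation of Local Response Normalization across
        # channels: mid = k + alpha * sum(x^2 over a window of n neighbouring
        # channels), out = x * mid^(-beta).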
start = -(self.n - 1) // 2
end = start + self.n
mid = np.empty((self.N, self.C, self.H, self.W)).astype("float32")
mid.fill(self.k)
for m in range(0, self.N):
for i in range(0, self.C):
for c in range(start, end):
ch = i + c
if ch < 0 or ch >= self.C:
continue
s = mid[m][i][:][:]
r = self.x[m][ch][:][:]
s += np.square(r) * self.alpha
mid2 = np.power(mid, -self.beta)
return np.multiply(self.x, mid2), mid
def get_attrs(self):
attrs = {
'n': self.n,
'k': self.k,
'alpha': self.alpha,
'beta': self.beta,
'data_format': self.data_format
}
return attrs
def setUp(self):
self.op_type = "lrn"
self.init_test_case()
self.N = 2
self.C = 3
self.H = 5
self.W = 5
self.n = 5
self.k = 2.0
self.alpha = 0.0001
self.beta = 0.75
self.x = self.get_input()
self.out, self.mid_out = self.get_out()
if self.data_format == 'NHWC':
self.x = np.transpose(self.x, [0, 2, 3, 1])
self.out = np.transpose(self.out, [0, 2, 3, 1])
self.mid_out = np.transpose(self.mid_out, [0, 2, 3, 1])
self.inputs = {'X': self.x}
self.outputs = {'Out': self.out, 'MidOut': self.mid_out}
self.attrs = self.get_attrs()
def init_test_case(self):
self.data_format = 'NCHW'
def test_check_output(self):
self.check_output()
def test_check_grad_normal(self):
self.check_grad(['X'], 'Out')
class TestLRNOpAttrDataFormat(TestLRNOp):
def init_test_case(self):
self.data_format = 'NHWC'
class TestLRNAPI(unittest.TestCase):
def test_case(self):
data1 = fluid.data(name='data1', shape=[2, 4, 5, 5], dtype='float32')
data2 = fluid.data(name='data2', shape=[2, 5, 5, 4], dtype='float32')
out1 = fluid.layers.lrn(data1, data_format='NCHW')
out2 = fluid.layers.lrn(data2, data_format='NHWC')
data1_np = np.random.random((2, 4, 5, 5)).astype("float32")
data2_np = np.transpose(data1_np, [0, 2, 3, 1])
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
else:
place = core.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
results = exe.run(fluid.default_main_program(),
feed={"data1": data1_np,
"data2": data2_np},
fetch_list=[out1, out2],
return_numpy=True)
self.assertTrue(
np.allclose(results[0], np.transpose(results[1], (0, 3, 1, 2))))
def test_exception(self):
input1 = fluid.data(name="input1", shape=[2, 4, 5, 5], dtype="float32")
input2 = fluid.data(
name="input2", shape=[2, 4, 5, 5, 5], dtype="float32")
        def _attr_data_format():
out = fluid.layers.lrn(input1, data_format='NDHW')
def _input_dim_size():
out = fluid.layers.lrn(input2)
        self.assertRaises(ValueError, _attr_data_format)
self.assertRaises(ValueError, _input_dim_size)
class TestLRNOpError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
# the input must be float32
in_w = fluid.data(name="in_w", shape=[None, 3, 3, 3], dtype="int64")
self.assertRaises(TypeError, fluid.layers.lrn, in_w)
class TestLocalResponseNormFAPI(unittest.TestCase):
def setUp(self):
np.random.seed(123)
self.places = [fluid.CPUPlace()]
if core.is_compiled_with_cuda():
self.places.append(fluid.CUDAPlace(0))
def check_static_3d_input(self, place):
with fluid.program_guard(fluid.Program(), fluid.Program()):
in_np1 = np.random.random([3, 40, 40]).astype("float32")
in_np2 = np.transpose(in_np1, (0, 2, 1))
input1 = fluid.data(
name="input1", shape=[3, 40, 40], dtype="float32")
input2 = fluid.data(
name="input2", shape=[3, 40, 40], dtype="float32")
res1 = paddle.nn.functional.local_response_norm(
x=input1, size=5, data_format='NCL')
res2 = paddle.nn.functional.local_response_norm(
x=input2, size=5, data_format='NLC')
exe = fluid.Executor(place)
fetches = exe.run(fluid.default_main_program(),
feed={"input1": in_np1,
"input2": in_np2},
fetch_list=[res1, res2])
fetches1_tran = np.transpose(fetches[1], (0, 2, 1))
self.assertTrue(np.allclose(fetches[0], fetches1_tran))
def check_static_4d_input(self, place):
with fluid.program_guard(fluid.Program(), fluid.Program()):
input1 = fluid.data(
name="input1", shape=[3, 3, 40, 40], dtype="float32")
input2 = fluid.data(
name="input2", shape=[3, 40, 40, 3], dtype="float32")
res1 = paddle.nn.functional.local_response_norm(
x=input1, size=5, data_format='NCHW')
res2 = paddle.nn.functional.local_response_norm(
x=input2, size=5, data_format='NHWC')
in_np1 = np.random.random([3, 3, 40, 40]).astype("float32")
in_np2 = np.transpose(in_np1, (0, 2, 3, 1))
exe = fluid.Executor(place)
fetches = exe.run(fluid.default_main_program(),
feed={"input1": in_np1,
"input2": in_np2},
fetch_list=[res1, res2])
fetches1_tran = np.transpose(fetches[1], (0, 3, 1, 2))
self.assertTrue(np.allclose(fetches[0], fetches1_tran))
def check_static_5d_input(self, place):
with fluid.program_guard(fluid.Program(), fluid.Program()):
input1 = fluid.data(
name="input1", shape=[3, 3, 3, 40, 40], dtype="float32")
input2 = fluid.data(
name="input2", shape=[3, 3, 40, 40, 3], dtype="float32")
res1 = paddle.nn.functional.local_response_norm(
x=input1, size=5, data_format='NCDHW')
res2 = paddle.nn.functional.local_response_norm(
x=input2, size=5, data_format='NDHWC')
in_np1 = np.random.random([3, 3, 3, 40, 40]).astype("float32")
in_np2 = np.transpose(in_np1, (0, 2, 3, 4, 1))
exe = fluid.Executor(place)
fetches = exe.run(fluid.default_main_program(),
feed={"input1": in_np1,
"input2": in_np2},
fetch_list=[res1, res2])
fetches1_tran = np.transpose(fetches[1], (0, 4, 1, 2, 3))
self.assertTrue(np.allclose(fetches[0], fetches1_tran))
def test_static(self):
for place in self.places:
self.check_static_3d_input(place=place)
self.check_static_4d_input(place=place)
self.check_static_5d_input(place=place)
def check_dygraph_3d_input(self, place):
with fluid.dygraph.guard(place):
in_np1 = np.random.random([3, 40, 40]).astype("float32")
in_np2 = np.transpose(in_np1, (0, 2, 1))
in1 = paddle.to_tensor(in_np1)
in2 = paddle.to_tensor(in_np2)
res1 = paddle.nn.functional.local_response_norm(
x=in1, size=5, data_format='NCL')
res2 = paddle.nn.functional.local_response_norm(
x=in2, size=5, data_format='NLC')
res2_tran = np.transpose(res2.numpy(), (0, 2, 1))
self.assertTrue(np.allclose(res1.numpy(), res2_tran))
def check_dygraph_4d_input(self, place):
with fluid.dygraph.guard(place):
in_np1 = np.random.random([3, 3, 40, 40]).astype("float32")
in_np2 = np.transpose(in_np1, (0, 2, 3, 1))
in1 = paddle.to_tensor(in_np1)
in2 = paddle.to_tensor(in_np2)
res1 = paddle.nn.functional.local_response_norm(
x=in1, size=5, data_format='NCHW')
res2 = paddle.nn.functional.local_response_norm(
x=in2, size=5, data_format='NHWC')
res2_tran = np.transpose(res2.numpy(), (0, 3, 1, 2))
self.assertTrue(np.allclose(res1.numpy(), res2_tran))
def check_dygraph_5d_input(self, place):
with fluid.dygraph.guard(place):
in_np1 = np.random.random([3, 3, 3, 40, 40]).astype("float32")
in_np2 = np.transpose(in_np1, (0, 2, 3, 4, 1))
in1 = paddle.to_tensor(in_np1)
in2 = paddle.to_tensor(in_np2)
res1 = paddle.nn.functional.local_response_norm(
x=in1, size=5, data_format='NCDHW')
res2 = paddle.nn.functional.local_response_norm(
x=in2, size=5, data_format='NDHWC')
res2_tran = np.transpose(res2.numpy(), (0, 4, 1, 2, 3))
self.assertTrue(np.allclose(res1.numpy(), res2_tran))
def test_dygraph(self):
for place in self.places:
self.check_dygraph_3d_input(place)
self.check_dygraph_4d_input(place)
self.check_dygraph_5d_input(place)
class TestLocalResponseNormFAPIError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
def test_Variable():
# the input of lrn must be Variable.
x1 = fluid.create_lod_tensor(
np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace())
paddle.nn.functional.local_response_norm(x1, size=5)
self.assertRaises(TypeError, test_Variable)
def test_datatype():
x = fluid.data(name='x', shape=[3, 4, 5, 6], dtype="int32")
paddle.nn.functional.local_response_norm(x, size=5)
self.assertRaises(TypeError, test_datatype)
def test_dataformat():
x = fluid.data(name='x', shape=[3, 4, 5, 6], dtype="float32")
paddle.nn.functional.local_response_norm(
x, size=5, data_format="NCTHW")
self.assertRaises(ValueError, test_dataformat)
def test_dim():
x = fluid.data(name='x', shape=[3, 4], dtype="float32")
paddle.nn.functional.local_response_norm(x, size=5)
self.assertRaises(ValueError, test_dim)
def test_shape():
x = paddle.rand(shape=[0, 0, 2, 3], dtype="float32")
paddle.nn.functional.local_response_norm(x, size=5)
self.assertRaises(ValueError, test_shape)
class TestLocalResponseNormCAPI(unittest.TestCase):
def setUp(self):
np.random.seed(123)
self.places = [fluid.CPUPlace()]
if core.is_compiled_with_cuda():
self.places.append(fluid.CUDAPlace(0))
def test_dygraph(self):
for place in self.places:
with fluid.dygraph.guard(place):
in1 = paddle.rand(shape=(3, 3, 40, 40), dtype="float32")
in2 = paddle.transpose(in1, [0, 2, 3, 1])
m1 = paddle.nn.LocalResponseNorm(size=5, data_format='NCHW')
m2 = paddle.nn.LocalResponseNorm(size=5, data_format='NHWC')
res1 = m1(in1)
res2 = m2(in2)
res2_tran = np.transpose(res2.numpy(), (0, 3, 1, 2))
self.assertTrue(np.allclose(res1.numpy(), res2_tran))
if __name__ == "__main__":
unittest.main()
|
|
"""Helper methods to handle the time in Home Assistant."""
from __future__ import annotations
from contextlib import suppress
import datetime as dt
import re
from typing import Any, cast
import ciso8601
import pytz
import pytz.exceptions as pytzexceptions
import pytz.tzinfo as pytzinfo
from homeassistant.const import MATCH_ALL
DATE_STR_FORMAT = "%Y-%m-%d"
NATIVE_UTC = dt.timezone.utc
UTC = pytz.utc
DEFAULT_TIME_ZONE: dt.tzinfo = pytz.utc
# Copyright (c) Django Software Foundation and individual contributors.
# All rights reserved.
# https://github.com/django/django/blob/master/LICENSE
DATETIME_RE = re.compile(
r"(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})"
r"[T ](?P<hour>\d{1,2}):(?P<minute>\d{1,2})"
r"(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?"
r"(?P<tzinfo>Z|[+-]\d{2}(?::?\d{2})?)?$"
)
def set_default_time_zone(time_zone: dt.tzinfo) -> None:
"""Set a default time zone to be used when none is specified.
Async friendly.
"""
global DEFAULT_TIME_ZONE # pylint: disable=global-statement
# NOTE: Remove in the future in favour of typing
assert isinstance(time_zone, dt.tzinfo)
DEFAULT_TIME_ZONE = time_zone
def get_time_zone(time_zone_str: str) -> dt.tzinfo | None:
"""Get time zone from string. Return None if unable to determine.
Async friendly.
"""
try:
return pytz.timezone(time_zone_str)
except pytzexceptions.UnknownTimeZoneError:
return None
def utcnow() -> dt.datetime:
"""Get now in UTC time."""
return dt.datetime.now(NATIVE_UTC)
def now(time_zone: dt.tzinfo | None = None) -> dt.datetime:
"""Get now in specified time zone."""
return dt.datetime.now(time_zone or DEFAULT_TIME_ZONE)
def as_utc(dattim: dt.datetime) -> dt.datetime:
"""Return a datetime as UTC time.
Assumes datetime without tzinfo to be in the DEFAULT_TIME_ZONE.
"""
if dattim.tzinfo == UTC:
return dattim
if dattim.tzinfo is None:
dattim = DEFAULT_TIME_ZONE.localize(dattim) # type: ignore
return dattim.astimezone(UTC)
def as_timestamp(dt_value: dt.datetime) -> float:
"""Convert a date/time into a unix time (seconds since 1970)."""
if hasattr(dt_value, "timestamp"):
parsed_dt: dt.datetime | None = dt_value
else:
parsed_dt = parse_datetime(str(dt_value))
if parsed_dt is None:
raise ValueError("not a valid date/time.")
return parsed_dt.timestamp()
def as_local(dattim: dt.datetime) -> dt.datetime:
"""Convert a UTC datetime object to local time zone."""
if dattim.tzinfo == DEFAULT_TIME_ZONE:
return dattim
if dattim.tzinfo is None:
dattim = UTC.localize(dattim)
return dattim.astimezone(DEFAULT_TIME_ZONE)
def utc_from_timestamp(timestamp: float) -> dt.datetime:
"""Return a UTC time from a timestamp."""
return UTC.localize(dt.datetime.utcfromtimestamp(timestamp))
def start_of_local_day(dt_or_d: dt.date | dt.datetime | None = None) -> dt.datetime:
"""Return local datetime object of start of day from date or datetime."""
if dt_or_d is None:
date: dt.date = now().date()
elif isinstance(dt_or_d, dt.datetime):
date = dt_or_d.date()
else:
date = dt_or_d
return DEFAULT_TIME_ZONE.localize( # type: ignore
dt.datetime.combine(date, dt.time())
)
# Copyright (c) Django Software Foundation and individual contributors.
# All rights reserved.
# https://github.com/django/django/blob/master/LICENSE
def parse_datetime(dt_str: str) -> dt.datetime | None:
"""Parse a string and return a datetime.datetime.
This function supports time zone offsets. When the input contains one,
the output uses a timezone with a fixed offset from UTC.
Raises ValueError if the input is well formatted but not a valid datetime.
Returns None if the input isn't well formatted.
"""
with suppress(ValueError, IndexError):
return ciso8601.parse_datetime(dt_str)
match = DATETIME_RE.match(dt_str)
if not match:
return None
kws: dict[str, Any] = match.groupdict()
if kws["microsecond"]:
kws["microsecond"] = kws["microsecond"].ljust(6, "0")
tzinfo_str = kws.pop("tzinfo")
tzinfo: dt.tzinfo | None = None
if tzinfo_str == "Z":
tzinfo = UTC
elif tzinfo_str is not None:
offset_mins = int(tzinfo_str[-2:]) if len(tzinfo_str) > 3 else 0
offset_hours = int(tzinfo_str[1:3])
offset = dt.timedelta(hours=offset_hours, minutes=offset_mins)
if tzinfo_str[0] == "-":
offset = -offset
tzinfo = dt.timezone(offset)
kws = {k: int(v) for k, v in kws.items() if v is not None}
kws["tzinfo"] = tzinfo
return dt.datetime(**kws)
def parse_date(dt_str: str) -> dt.date | None:
"""Convert a date string to a date object."""
try:
return dt.datetime.strptime(dt_str, DATE_STR_FORMAT).date()
except ValueError: # If dt_str did not match our format
return None
def parse_time(time_str: str) -> dt.time | None:
"""Parse a time string (00:20:00) into Time object.
Return None if invalid.
"""
parts = str(time_str).split(":")
if len(parts) < 2:
return None
try:
hour = int(parts[0])
minute = int(parts[1])
second = int(parts[2]) if len(parts) > 2 else 0
return dt.time(hour, minute, second)
except ValueError:
# ValueError if value cannot be converted to an int or not in range
return None
def get_age(date: dt.datetime) -> str:
"""
Take a datetime and return its "age" as a string.
The age can be in second, minute, hour, day, month or year. Only the
biggest unit is considered, e.g. if it's 2 days and 3 hours, "2 days" will
be returned.
Make sure date is not in the future, or else it won't work.
"""
def formatn(number: int, unit: str) -> str:
"""Add "unit" if it's plural."""
if number == 1:
return f"1 {unit}"
return f"{number:d} {unit}s"
delta = (now() - date).total_seconds()
rounded_delta = round(delta)
units = ["second", "minute", "hour", "day", "month"]
factors = [60, 60, 24, 30, 12]
selected_unit = "year"
for i, next_factor in enumerate(factors):
if rounded_delta < next_factor:
selected_unit = units[i]
break
delta /= next_factor
rounded_delta = round(delta)
return formatn(rounded_delta, selected_unit)
def parse_time_expression(parameter: Any, min_value: int, max_value: int) -> list[int]:
"""Parse the time expression part and return a list of times to match."""
if parameter is None or parameter == MATCH_ALL:
res = list(range(min_value, max_value + 1))
elif isinstance(parameter, str):
if parameter.startswith("/"):
parameter = int(parameter[1:])
res = [x for x in range(min_value, max_value + 1) if x % parameter == 0]
else:
res = [int(parameter)]
elif not hasattr(parameter, "__iter__"):
res = [int(parameter)]
else:
res = sorted(int(x) for x in parameter)
for val in res:
if val < min_value or val > max_value:
raise ValueError(
f"Time expression '{parameter}': parameter {val} out of range "
f"({min_value} to {max_value})"
)
return res
def find_next_time_expression_time(
now: dt.datetime, # pylint: disable=redefined-outer-name
seconds: list[int],
minutes: list[int],
hours: list[int],
) -> dt.datetime:
"""Find the next datetime from now for which the time expression matches.
The algorithm looks at each time unit separately and tries to find the
next one that matches for each. If any of them would roll over, all
time units below that are reset to the first matching value.
Timezones are also handled (the tzinfo of the now object is used),
including daylight saving time.
"""
if not seconds or not minutes or not hours:
raise ValueError("Cannot find a next time: Time expression never matches!")
def _lower_bound(arr: list[int], cmp: int) -> int | None:
"""Return the first value in arr greater or equal to cmp.
Return None if no such value exists.
"""
left = 0
right = len(arr)
while left < right:
mid = (left + right) // 2
if arr[mid] < cmp:
left = mid + 1
else:
right = mid
if left == len(arr):
return None
return arr[left]
result = now.replace(microsecond=0)
# Match next second
next_second = _lower_bound(seconds, result.second)
if next_second is None:
# No second to match in this minute. Roll-over to next minute.
next_second = seconds[0]
result += dt.timedelta(minutes=1)
result = result.replace(second=next_second)
# Match next minute
next_minute = _lower_bound(minutes, result.minute)
if next_minute != result.minute:
# We're in the next minute. Seconds needs to be reset.
result = result.replace(second=seconds[0])
if next_minute is None:
# No minute to match in this hour. Roll-over to next hour.
next_minute = minutes[0]
result += dt.timedelta(hours=1)
result = result.replace(minute=next_minute)
# Match next hour
next_hour = _lower_bound(hours, result.hour)
if next_hour != result.hour:
# We're in the next hour. Seconds+minutes needs to be reset.
result = result.replace(second=seconds[0], minute=minutes[0])
if next_hour is None:
# No minute to match in this day. Roll-over to next day.
next_hour = hours[0]
result += dt.timedelta(days=1)
result = result.replace(hour=next_hour)
if result.tzinfo is None:
return result
# Now we need to handle timezones. We will make this datetime object
# "naive" first and then re-convert it to the target timezone.
# This is so that we can call pytz's localize and handle DST changes.
tzinfo: pytzinfo.DstTzInfo = UTC if result.tzinfo == NATIVE_UTC else result.tzinfo
result = result.replace(tzinfo=None)
try:
result = tzinfo.localize(result, is_dst=None)
except pytzexceptions.AmbiguousTimeError:
# This happens when we're leaving daylight saving time and local
# clocks are rolled back. In this case, we want to trigger
# on both the DST and non-DST time. So when "now" is in the DST
# use the DST-on time, and if not, use the DST-off time.
use_dst = bool(now.dst())
result = tzinfo.localize(result, is_dst=use_dst)
except pytzexceptions.NonExistentTimeError:
# This happens when we're entering daylight saving time and local
# clocks are rolled forward, thus there are local times that do
# not exist. In this case, we want to trigger on the next time
# that *does* exist.
# In the worst case, this will run through all the seconds in the
# time shift, but that's max 3600 operations for once per year
result = result.replace(tzinfo=tzinfo) + dt.timedelta(seconds=1)
return find_next_time_expression_time(result, seconds, minutes, hours)
result_dst = cast(dt.timedelta, result.dst())
now_dst = cast(dt.timedelta, now.dst()) or dt.timedelta(0)
if result_dst >= now_dst:
return result
# Another edge-case when leaving DST:
# When now is in DST and ambiguous *and* the next trigger time we *should*
# trigger is ambiguous and outside DST, the excepts above won't catch it.
# For example: if triggering on 2:30 and now is 28.10.2018 2:30 (in DST)
# we should trigger next on 28.10.2018 2:30 (out of DST), but our
# algorithm above would produce 29.10.2018 2:30 (out of DST)
# Step 1: Check if now is ambiguous
try:
tzinfo.localize(now.replace(tzinfo=None), is_dst=None)
return result
except pytzexceptions.AmbiguousTimeError:
pass
# Step 2: Check if result of (now - DST) is ambiguous.
check = now - now_dst
check_result = find_next_time_expression_time(check, seconds, minutes, hours)
try:
tzinfo.localize(check_result.replace(tzinfo=None), is_dst=None)
return result
except pytzexceptions.AmbiguousTimeError:
pass
# OK, edge case does apply. We must override the DST to DST-off
check_result = tzinfo.localize(check_result.replace(tzinfo=None), is_dst=False)
return check_result
|
|
"""
PRIMEDesigner15.py
A SOLUZION problem formulation
It is important that COMMON_CODE come
before all the other sections (except METADATA), including COMMON_DATA.
"""
# <METADATA>
SOLUZION_VERSION = "0.01"
PROBLEM_NAME = "PRIME Designer 2015"
PROBLEM_VERSION = "0.1"
PROBLEM_AUTHORS = ["S. Tanimoto", "Dennis Orzikh", "Paul Curry"]
PROBLEM_CREATION_DATE = "13-APL-2015"
PROBLEM_DESC=\
"""This version is mainly for the Brython version of the solving client
and the Brython version of Python.
However, it should all be generic Python 3, and this file is intended
to work with a future Python+Tkinter client that runs on the desktop.
Anything specific to the Brython context should be in the separate
file MondrianVisForBRYTHON.py, which is imported by this file when
being used in the Brython SOLUZION client."""
#</METADATA>
#<COMMON_DATA>
#</COMMON_DATA>
#<COMMON_CODE>
BRYTHON = True
if(BRYTHON):
from PRIMEDesigner15VisForBrython import hide_loading, show_loading, url_is_valid
from PRIMEDesigner15VisForBrython import add_puzzle_menu, add_condition_menu, add_action_menu, edit_rule_menu
from PRIMEDesigner15VisForBrython import delete_condition_menu, delete_action_menu, open_or_closed_menu
from templateRoot.PRIMEDesigner15Operator import Operator as Operator
from templateRoot.PRIMEDesigner15Operator import AsyncOperator as AsyncOperator
from browser import document, window, alert, console, ajax
from javascript import JSObject, JSConstructor
import time, json, datetime
# Debug string
def dAlert(string):
alert(string)
# Performs a deep copy of the given state.
def copy_state(state):
oldRooms = state["Rooms"]
oldRules = state["Rules"]
oldImgPuzzles = state["Image_Puzzles"]
oldMusPuzzles = state["Music_Puzzles"]
newState = {}
newRooms = []
newRules = []
newImagePuzzles = {}
newMusicPuzzles = {}
'''
#Debug
print("image puzzles")
string = ""
for puzzle in state["Image_Puzzles"]:
string += puzzle.name + " ,"
print(string)
string = ""
print("music puzzles")
for puzzle in state["Music_Puzzles"]:
string += puzzle.name + " ,"
print(string)
'''
# Copy the rooms (without doors in their walls) and doors into the newState's dictionary.
for room in oldRooms:
newRooms.append(room.copy())
for name in oldImgPuzzles:
newImagePuzzles[name] = state["Image_Puzzles"][name].copy()
for name in oldMusPuzzles:
newMusicPuzzles[name] = state["Music_Puzzles"][name].copy()
for rule in oldRules:
newRules.append(rule.copy())
# Put the new lists into the new state's lists.
newState["Rooms"] = newRooms
newState["Rules"] = newRules
newState["Image_Puzzles"] = newImagePuzzles
newState["Music_Puzzles"] = newMusicPuzzles
# Primitives and operators do not need to be deep copied.
newState["Selected_Room"] = state["Selected_Room"]
newState["Selected_Image"] = state["Selected_Image"]
newState["Selected_Music"] = state["Selected_Music"]
newState["Role"] = state["Role"]
# These are constant so the pointer can be passed up.
newState['ConditionMaster'] = state['ConditionMaster']
newState['ActionMaster'] = state['ActionMaster']
# Operators is updated in set_operators.
newState["Operators"] = state["Operators"]
return newState
def describe_state(state):
""" Produces a textual description of a state.
Might not be needed in normal operation with GUIs."""
# Goes through the rules of a state and marks the conditions/actions
# that refer to non-existent objects as not applicable (app = False).
def check_rules(state):
rules = state["Rules"]
for rule in rules:
#Check condition
for condition in rule.conditions:
cdSplit = condition.text.split(" ")
# If condition is "Solved Puzzle:"
if(cdSplit[0] == "Solved"):
'''
puzzleName = condition.text.rsplit(':', 1)[1].strip()
found = False
for room in state["Rooms"]:
for wall in room.walls.values():
if wall.puzzle == puzzleName:
found = True
condition.app = found
'''
roomNum = cdSplit[4]
dir = cdSplit[6]
if(state["Rooms"][int(roomNum)-1].walls[dir].puzzle is None):
condition.app = False
#In the off chance that a puzzle is placed, a rule element added to it,
#And then the puzzle is removed, but then a puzzle is placed in the same spot
else:
condition.app = True
'''
#If we want to check that the puzzle exists at all
puzzleName = condition.text.rsplit(':', 1)[1].strip()
console.log(puzzleName)
if(puzzleName in state['Image_Puzzles'] or puzzleName in state['Music_Puzzles']):
condition.app = True
else:
condition.app = False
'''
for action in rule.actions:
# Check action
acSplit = action.text.split(" ")
# If action is opening or closing a door:
if(acSplit[0] == "Open" or acSplit[0] == "Close"):
roomNum1 = int(acSplit[4])
roomNum2 = int(acSplit[6])
#switch from visual index to array index
roomNumFinal = roomNum1 - 1
if roomNum2 - roomNum1 == 1:
dir = 'E'
else:
dir = 'S'
if state["Rooms"][roomNumFinal].walls[dir].hasDoor is False:
action.app = False
else:
action.app = True
'''
roomNum = acSplit[4]
dir = acSplit[6]
if(state["Rooms"][int(roomNum)-1].walls[dir].hasDoor is False):
action.app = False
'''
# If action is "Unsolve Puzzle:"
if(acSplit[0] == "Unsolve"):
roomNum = acSplit[4]
dir = acSplit[6]
if(state["Rooms"][int(roomNum)-1].walls[dir].puzzle is None):
action.app = False
#In the off chance that a puzzle is placed, a rule element added to it,
#And then the puzzle is removed, but then a puzzle is placed in the same spot
else:
action.app = True
#Template JSON Stuff
#try:
# from browser import window, alert
# window.SOLUZION_INITIAL_STATE = INITIAL_STATE
# window.IS_JSON = json_encode(INITIAL_STATE)
#alert("Inside of the template Mondrian.py, the INITIAL_STATE JSON is "+window.IS_JSON)
#print(INITIAL_STATE)
#except Exception as e:
# print("There was an exception when trying to communicate back from Python to Javascript.")
# print(e)
class Room:
""" A room in the game contains 4 walls that could have wallpapers or doors
and possible ambient audio, and a possible puzzle. """
def __init__(self, x1, y1, x2, y2, aAudio = None):
# Coordinates for display of the room.
self.x1 = x1
self.y1 = y1
self.x2 = x2
self.y2 = y2
# Ambient music, contains a url to a piece of music mp3
self.aAudio = aAudio
# 4 walls.
self.walls = {}
# Horizontal walls.
self.walls['N'] = Wall(x1 ,y1 ,x2 ,y1, 'N') #top
self.walls['S'] = Wall(x1 ,y2 ,x2 ,y2, 'S') #bottom
# Vertical walls.
self.walls['W'] = Wall(x1 ,y1 ,x1 ,y2, 'W') #left
self.walls['E'] = Wall(x2 ,y1 ,x2 ,y2, 'E') #right
def copy(self):
newRoom = Room(self.x1, self.y1, self.x2, self.y2, self.aAudio)
for direction in ['N','S','W','E']:
newRoom.walls[direction] = self.walls[direction].copy()
return newRoom
def encode(self):
return {#"Vector Coordinates" : {"x1": self.x1, "y1": self.y1, "x2": self.x2, "y2": self.y2},
"Walls" : {"N" : self.walls['N'].encode(), "S" : self.walls['S'].encode(),
"W" : self.walls['W'].encode(), "E" : self.walls['E'].encode()},
"Ambient Music" : self.aAudio}
""" A wall could contain a door and a wallpaper """
class Wall:
def __init__(self, x1, y1, x2, y2, loc, wallpaperurl = "images/wall.jpg", hasDoor = False, doorOpen = None, puzzle = None):
self.x1 = x1
self.y1 = y1
self.x2 = x2
self.y2 = y2
self.loc = loc
self.wallpaperurl = wallpaperurl
# Whether the wall contains a door and if its open or not
self.hasDoor = hasDoor
self.doorOpen = doorOpen
# Possible puzzle
self.puzzle = puzzle
# Returns a copy of itself. Does not copy its door.
def copy(self):
newWall = Wall(self.x1,self.y1,self.x2,self.y2,self.loc, self.wallpaperurl, self.hasDoor, self.doorOpen, self.puzzle)
return newWall
def encode(self):
return {#"Vector Coordinates" : {"x1": self.x1, "y1": self.y1, "x2": self.x2, "y2": self.y2},
"Location" : self.loc,
"Wallpaper" : self.wallpaperurl,
"HasDoor" : self.hasDoor,
"DoorOpen" : self.doorOpen,
"Puzzle" : self.puzzle}
'''
class Door:
def __init__(self, isOpen = False, url="images/door.jpg"):
self.isOpen = isOpen
self.url = url
# Closes the door if it is open.
# Opens the door if it is closed.
def open_or_close(self):
self.isOpen = not isOpen
# Returns a deep copy of itself.
def copy(self):
return Door(self.isOpen, self.url)
'''
class ImagePuzzle:
def __init__(self, url = "images/metalfencing.jpg", transformList = []):
self.url = url
# shallow copying a new list
self.transformList = transformList[:]
def add_transform(self, transform):
self.transformList.append(transform)
def copy(self):
return ImagePuzzle(self.url, self.transformList)
def encode(self):
return {"Type" : "image", "URL" : self.url, "Transform List" : self.transformList}
class MusicPuzzle:
def __init__(self, notes = [], transformList = []):
# shallow copying a new list
self.notes = notes[:]
# shallow copying a new list
self.transformList = transformList[:]
def add_transform(self, transform):
self.transformList.append(transform)
def copy(self):
# Deep copy note list
noteCopy = []
for note in self.notes:
noteCopy.append(note)
return MusicPuzzle(noteCopy, self.transformList)
def encode(self):
return {"Type" : "music", "Notes" : self.notes, "Transform List" : self.transformList}
# A rule contains two lists of RuleElements. Conditions and actions.
class Rule:
def __init__(self, conditions = [], actions = []):
self.conditions = conditions[:]
self.actions = actions[:]
self.name = "C: "
for condition in self.conditions:
self.name += condition.text + ","
self.name += " A: "
for action in self.actions:
self.name += action.text + ","
# Copies the rule, list is used to return a new list
def copy(self):
newConditions = []
newActions = []
for condition in self.conditions:
newConditions.append(condition.copy())
for action in self.actions:
newActions.append(action.copy())
return Rule(newConditions, newActions)
def encode(self):
conditionsJSON = [condition.encode() for condition in self.conditions]
actionsJSON = [action.encode() for action in self.actions]
return {"Conditions" : conditionsJSON,
"Actions" : actionsJSON,
"Name" : self.name}
class RuleElement:
def __init__(self, text, app = True):
self.text = text
self.app = app
def copy(self):
return RuleElement(self.text, self.app)
def encode(self):
result = {}
result["Applicable"] = self.app
textSplit = self.text.split(" ")
if(textSplit[0] == 'Entered'):
result["Condition"] = 'Entered Room'
result["Room"] = int(textSplit[2])
elif(textSplit[1] == 'puzzle'):
if(textSplit[0] == 'Solved'):
result["Condition"] = 'Solved puzzle'
else:
result["Action"] = 'Unsolve puzzle'
result["Room"] = int(textSplit[4])
result["Wall"] = textSplit[6]
elif(textSplit[0] == 'Had'):
result["Condition"] = 'Had Points'
result["Points"] = int(textSplit[1])
elif(textSplit[1] == 'minutes'):
result["Condition"] = 'Minutes elapsed'
result["Minutes"] = int(textSplit[0])
elif(textSplit[1] == 'door'):
result["Action"] = textSplit[0] + ' a door between two rooms'
result["Room"] = int(textSplit[4])
result["Room2"] = int(textSplit[6])
elif(textSplit[0] == 'Display'):
result["Action"] = 'Display a message'
result["Message"] = textSplit[2]
for i in range(3, len(textSplit)):
result["Message"] += " " + textSplit[i]
elif(textSplit[1] == 'Sound'):
result["Action"] = 'Play sound from URL'
result["URL"] = textSplit[4]
elif(textSplit[2] == 'Points'):
result["Action"] = textSplit[0] + " points"
result["Points"] = int(textSplit[1])
console.log(result)
return result
# Takes a room num from 0 to 8 and a side for the door to be on, [N, S, E, W]
# Optional newDoor parameter which allows you to pass which door the walls will point to.
# Is default set to the creation of a new door.
def add_door_to_room(state, room_num, side, openOrClosed):
ROOMS = state["Rooms"]
ROOMS[room_num].walls[side].hasDoor = True
ROOMS[room_num].walls[side].doorOpen = openOrClosed
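    # The nine rooms form a 3x3 grid (row-major, indices 0-8), so the room on
    # the other side of a wall is at -3 (N), +3 (S), +1 (E) or -1 (W); the
    # matching wall of that neighbour gets the same door state.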
if side == 'N':
ROOMS[room_num - 3].walls['S'].hasDoor = True
ROOMS[room_num - 3].walls['S'].doorOpen = openOrClosed
elif side == 'S':
ROOMS[room_num + 3].walls['N'].hasDoor = True
ROOMS[room_num + 3].walls['N'].doorOpen = openOrClosed
elif side == 'E':
ROOMS[room_num + 1].walls['W'].hasDoor = True
ROOMS[room_num + 1].walls['W'].doorOpen = openOrClosed
elif side == 'W':
ROOMS[room_num - 1].walls['E'].hasDoor = True
ROOMS[room_num - 1].walls['E'].doorOpen = openOrClosed
else:
alert("Error: Invalid direction passed to add_door")
check_rules(state)
# Operator version of add door that returns new state
def add_door_operator(state, room_num, side, sendBack):
def processState(openOrClosed):
newState = copy_state(state)
add_door_to_room(newState, room_num, side, openOrClosed)
sendBack(newState)
open_or_closed_menu(processState)
# Removes the door between two walls or a puzzle on a wall
def remove_wall_object_from_room(state, side):
room_num = state["Selected_Room"]
rooms = state["Rooms"]
wall = state["Rooms"][room_num].walls[side]
if(wall.hasDoor):
wall.hasDoor = False
if side == 'N':
wall = rooms[room_num - 3].walls['S']
elif side == 'S':
wall = rooms[room_num + 3].walls['N']
elif side == 'E':
wall = rooms[room_num + 1].walls['W']
elif side == 'W':
wall = rooms[room_num - 1].walls['E']
wall.hasDoor = False
check_rules(state)
elif(wall.puzzle is not None):
wall.puzzle = None
check_rules(state)
else:
alert("no puzzle or door to remove")
# Operator version of remove door that returns new state
def remove_wall_object_operator(state, side):
newState = copy_state(state)
remove_wall_object_from_room(newState,side)
return newState
# Checks if a door can be placed on a wall, meaning a door cannot already be on a wall
# and a puzzle cannot be on the wall or on the other side of the wall.
def add_doors_is_valid(state, side):
ROOMS = state["Rooms"]
room_num = state["Selected_Room"]
if side == 'N':
north_room = room_num - 3
if (north_room < 0):
return False
elif (ROOMS[room_num].walls['N'].hasDoor == True
or ROOMS[room_num].walls['N'].puzzle is not None
or ROOMS[north_room].walls['S'].puzzle is not None):
return False
else:
return True
elif side == 'S':
south_room = room_num + 3
if (south_room > 8):
return False
elif (ROOMS[room_num].walls['S'].hasDoor == True
or ROOMS[room_num].walls['S'].puzzle is not None
or ROOMS[south_room].walls['N'].puzzle is not None):
return False
else:
return True
elif side == 'E':
east_room = room_num + 1
        if (room_num + 1) % 3 == 0:
return False
elif (ROOMS[room_num].walls['E'].hasDoor == True
or ROOMS[room_num].walls['E'].puzzle is not None
or ROOMS[east_room].walls['W'].puzzle is not None):
return False
else:
return True
elif side == 'W':
west_room = room_num - 1
        if (room_num + 1) % 3 == 1:
return False
elif (ROOMS[room_num].walls['W'].hasDoor == True
or ROOMS[room_num].walls['W'].puzzle is not None
or ROOMS[west_room].walls['E'].puzzle is not None):
return False
else:
return True
else:
return False
# Return true if a door can be removed and false if it cant
def remove_wall_object_is_valid(state,side):
room = state['Rooms'][state["Selected_Room"]]
wall = room.walls[side]
return wall.hasDoor or wall.puzzle is not None
# Adds the passed puzzle name to the correct room and side of the
# passed state. Default is creation of new blank imagePuzzle.
def add_puzzle_to_room(room_num,side, state, name = None):
if name is None:
        # Create a default name and make sure it is unique.
name = "defaultImagePuzzle"
name = check_puzzle_name(state,name)
puzzle = ImagePuzzle()
state["Image_Puzzles"][name] = puzzle
state["Rooms"][room_num].walls[side].puzzle = name
check_rules(state)
def add_puzzle_operator(state, room_num, sendBack):
def processMenu(state,side,puzzleName):
newState = copy_state(state)
add_puzzle_to_room(room_num,side,newState,puzzleName)
sendBack(newState)
# Get banned directions
bannedDirections = puzzles_is_valid(state)
# Creates a menu with banned direction radio buttons disabled
add_puzzle_menu(state, processMenu,bannedDirections)
# returns a list of cardinals representing
# sides of a room that can not be used to place a puzzle
def puzzles_is_valid(state):
invalidCardinals = []
room_num = state["Selected_Room"]
selectedRoom = state["Rooms"][room_num]
for c in ['N','S','E','W']:
if (selectedRoom.walls[c].puzzle is not None or selectedRoom.walls[c].hasDoor == True):
invalidCardinals.append(c)
return invalidCardinals
# takes a room num from 0 to 8 and prompts the user for a url for the wallpaper
def add_wallpaper_to_room(state):
# Prompt the user for wallpaper url
url = window.prompt("Enter a complete URL for a wallpaper. Say 'cancel' to cancel.", "images/wall.jpg")
if(url is None):
return None
elif(url_is_valid(url)):
newState = copy_state(state)
room = newState["Rooms"][newState["Selected_Room"]]
for loc in room.walls:
room.walls[loc].wallpaperurl = url
return newState
else:
alert("URL was not valid. Try again.")
# Recurse
return add_wallpaper_to_room(state)
# Changes which room is currently selected and returns the new state.
def change_room_selection(state, room_num):
newState = copy_state(state)
newState["Selected_Room"] = room_num
return newState
def change_image_puzzle_selection(state, name):
newState = copy_state(state)
newState["Selected_Image"] = name
return newState
def change_music_puzzle_selection(state, name):
newState = copy_state(state)
newState["Selected_Music"] = name
return newState
def change_role(state, role):
global OPERATORS
newState = copy_state(state)
newState['Role'] = role
# reset the operators
newState['Operators'] = set_operators(newState)
return newState
def create_image_puzzle(state):
# Prompt the user for a image url
url = window.prompt("Enter a complete URL for a picture. Say 'cancel' to cancel.", "images/force.jpg")
if(url is None):
return None
elif(url_is_valid(url)):
newState = copy_state(state)
# Get name, make sure there are no copies
name = getName(url)
name = check_puzzle_name(state,name)
newPuzzle = ImagePuzzle(url)
# Add newPuzzle to dictionary
newState["Image_Puzzles"][name] = newPuzzle
newState["Selected_Image"] = name
return newState
else:
alert("URL was not valid. Try again.")
# Recurse
return create_image_puzzle(state)
# gets a name out of a url
def getName(url):
# Get name out of the url
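    # Illustrative example (assumption): getName("images/force.jpg") -> "force".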
name = ""
i = 0
foundDot = False
while foundDot == False:
char = url[i]
if(char == "/"):
name = ""
elif(char == "."):
foundDot = True
else:
name = name + char
i = i + 1
return name
def check_puzzle_name(state,name):
# Make sure there are no copies of the name in image puzzles
imageNames = state["Image_Puzzles"]
musicNames = state["Music_Puzzles"]
i = 1
newName = name
while(newName in imageNames or newName in musicNames):
newName = name + " (" + str(i) + ")"
i = i + 1
return newName
def check_if_puzzle_copy(state,name):
# Make sure there are no copies of the name in image puzzles
imageNames = state["Image_Puzzles"]
musicNames = state["Music_Puzzles"]
if(name is None):
return False
if(name in imageNames or name in musicNames):
return True
return False
def rename_image_puzzle(state, sendBack):
newName = window.prompt("Enter the new unique name for your puzzle: " + state["Selected_Image"], "")
while(newName is not None and check_if_puzzle_copy(state,newName) is True):
newName = window.prompt("There is already a puzzle with that name","")
if(newName is not None):
newState = copy_state(state)
puzzle = newState["Image_Puzzles"][newState["Selected_Image"]]
newState["Image_Puzzles"].pop(newState["Selected_Image"],None)
newState["Image_Puzzles"][newName] = puzzle
newState["Selected_Image"] = newName
sendBack(newState)
# NOTE: This operator requires Brython as it uses a JSON object.
def create_music_puzzle(state, sendBack):
url = window.prompt("Enter a complete URL for a json music file. Say 'cancel' to cancel.", "music/twinkleTwinkle.txt")
if(url is not None):
if(url_is_valid(url)):
# Double nested to allow use of name parameter
def requestSuccess(name):
# When the request is recieved
def requestSuccess2(req):
if(req.status == 200 or req.status == 0):
newState = copy_state(state)
# Assign name to song using data from JSON object
song = json.loads(req.responseText)
newPuzzle = MusicPuzzle(song["notes"])
newState["Music_Puzzles"][name] = newPuzzle
newState["Selected_Music"] = name
# Hide loading visualization
hide_loading()
sendBack(newState)
else:
print("request failure")
return requestSuccess2
# Show loading visualization
show_loading()
# Get name, make sure there are no copies
name = getName(url)
name = check_puzzle_name(state,name)
request = ajax.ajax()
request.open('GET',url,True)
request.bind("complete",requestSuccess(name))
request.send()
else:
alert("URL was not valid. Try again.")
create_music_puzzle(state, sendBack)
def rename_music_puzzle(state, sendBack):
newName = window.prompt("Enter the new unique name for your puzzle: " + state["Selected_Image"], "")
while(newName is not None and check_if_puzzle_copy(state,newName) is True):
newName = window.prompt("There is already a puzzle with that name","")
if(newName is not None):
newState = copy_state(state)
puzzle = newState["Music_Puzzles"][newState["Selected_Music"]]
newState["Music_Puzzles"].pop(newState["Selected_Music"],None)
newState["Music_Puzzles"][newName] = puzzle
newState["Selected_Music"] = newName
sendBack(newState)
# Adds ambient audio to a room. The rule designer chooses when and if it plays.
def add_ambient_music(state):
url = window.prompt("Enter a url for an mp3 to attach ambient audio to a room", "music\defaultAmbient.mp3")
if(url is None):
return None
elif(url_is_valid(url)):
newState = copy_state(state)
room_num = newState["Selected_Room"]
# Add ambient audio to room
newState["Rooms"][room_num].aAudio = url
return newState
else:
alert("URL was not valid. Try again.")
# Recurse
return add_ambient_music(state)
def create_json(state):
global SOLUZION_VERSION, PROBLEM_NAME, PROBLEM_VERSION, PROBLEM_AUTHORS, PROBLEM_DESC
#get the current date
now = datetime.datetime.today()
day = str(now.day)
month = str(now.month)
year = str(now.year)
creationDate = month + '-' + day + '-' + year
stateJSON = {"Soluzion Version" : SOLUZION_VERSION, "Problem Name" : PROBLEM_NAME,
"Problem Version" : PROBLEM_VERSION, "Problem Authors" : PROBLEM_AUTHORS,
"Problem Creation Date" : creationDate, "Problem Description" : PROBLEM_DESC}
#Rooms
stateJSON["Rooms"] = []
for room in state["Rooms"]:
stateJSON["Rooms"].append(room.encode())
#looks like stateJson = {"Rooms" : {1 : room1, 2 : room2, etc}, etc}
stateJSON["Rules"] = []
for rule in state["Rules"]:
stateJSON["Rules"].append(rule.encode())
stateJSON["Puzzles"] = {}
for puzzle in state["Image_Puzzles"]:
stateJSON["Puzzles"][puzzle] = state["Image_Puzzles"][puzzle].encode()
for puzzle in state["Music_Puzzles"]:
stateJSON["Puzzles"][puzzle] = state["Music_Puzzles"][puzzle].encode()
window.state_JSON = json.dumps(stateJSON)
#console.log(window.state_JSON)
req = ajax.ajax()
req.bind('complete', lambda e: console.log('finished on brython side'))
req.open('POST', 'dependencies//jsonPatch.php', True)
req.set_header('content-type','application/x-www-form-urlencoded')
req.send({'stateJSON' : window.state_JSON})
#Puzzles
#Rules
#We don't need to send the selected room, image/music puzzles, role,
#the operators, or the action/condition master
def addImageTransformation(state, transformation):
newState = copy_state(state)
# Add transform to newState list
newState["Image_Puzzles"][newState["Selected_Image"]].add_transform(transformation)
return newState
def addMusicTransformation(state, transformation):
newState = copy_state(state)
# Add transform to newState list
newState["Music_Puzzles"][newState["Selected_Music"]].add_transform(transformation)
return newState
def createRule(state):
newState = copy_state(state)
newRule = Rule()
newState["Rules"].append(newRule)
return newState
def deleteRule(state, index):
newState = copy_state(state)
del newState["Rules"][index]
return newState
def addCondition(state, index, sendBack):
def processCondition(condition):
newState = copy_state(state)
newState["Rules"][index].conditions.append(RuleElement(condition))
check_rules(newState)
sendBack(newState)
add_condition_menu(state, processCondition)
def addAction(state, index, sendBack):
def processAction(action):
newState = copy_state(state)
newState["Rules"][index].actions.append(RuleElement(action))
check_rules(newState)
sendBack(newState)
add_action_menu(state, processAction)
# Deletes a condition from the specified rule
def deleteCondition(state, index, sendBack):
def processDelete(conditionName):
newState = copy_state(state)
conditionList = newState["Rules"][index].conditions
for i, condition in enumerate(conditionList):
if(condition.text == conditionName):
conditionList.pop(i)
sendBack(newState)
delete_condition_menu(state, index, processDelete)
# Deletes an action from the specified rule
def deleteAction(state, index, sendBack):
def processDelete(actionName):
newState = copy_state(state)
actionList = newState["Rules"][index].actions
for i, action in enumerate(actionList):
if(action.text == actionName):
actionList.pop(i)
sendBack(newState)
delete_action_menu(state, index, processDelete)
# Concatenates several operators into one with a central menu.
def editRule(state, index, sendBack):
def processEdit(edit):
if(edit == "addAction"):
addAction(state,index,sendBack)
elif(edit == "addCondition"):
addCondition(state,index,sendBack)
elif(edit == "deleteAction"):
deleteAction(state,index,sendBack)
elif(edit == "deleteCondition"):
deleteCondition(state,index,sendBack)
elif(edit == "deleteRule"):
newState = deleteRule(state,index)
sendBack(newState)
else:
pass
edit_rule_menu(state, processEdit)
def doNothing():
pass
#</COMMON_CODE>
#<OPERATORS>
# Method that can be called to set the Operators
# of the current Role given the current State.
# Each AsyncOperators state transfer must have a callback function defined.
def set_operators(state):
# Sendback is the function given by the client which receives the modified state
sb = None
create_json_file =\
AsyncOperator("Create JSON file for the current state.",
lambda state: True,
lambda state, sb: create_json(state))
nothing_selected =\
[Operator("Nothing Selected",
lambda state: True,
lambda state: doNothing())]
role_operators =\
[Operator("Change Role to " + role + " Designer.",
            lambda state, r = role: state['Role'] != r,
lambda state, r = role: change_role(state, r))
for role in ["Architect", "Image Puzzle", "Music Puzzle", "Rules"]]
if (state['Role'] == "Architect"):
selection_operators =\
[Operator("Switch to room numbered " + str(num + 1) + " for editing.",
lambda state, n = num: n is not state["Selected_Room"],
lambda state, n = num: change_room_selection(state, n))
for num in range(9)]
add_door_operators =\
[AsyncOperator("Add door to current room on " + cardinal + " wall.",
lambda state, c = cardinal: add_doors_is_valid(state, c),
lambda state, sb, c = cardinal: add_door_operator(state, state["Selected_Room"], c, sb))
for cardinal in ['N', 'S', 'E', 'W']]
remove_object_operators =\
[Operator("Remove puzzle or door from room on " + cardinal + " wall.",
lambda state, c = cardinal: remove_wall_object_is_valid(state, c[0]),
lambda state, c = cardinal: remove_wall_object_operator(state, c[0]))
for cardinal in ['North', 'South', 'East', 'West']]
wallpaper_operators =\
Operator("Add wallpaper to current room.",
lambda state: True,
lambda state: add_wallpaper_to_room(state))
add_puzzle_operators =\
AsyncOperator("Add a puzzle to current room",
lambda state: puzzles_is_valid(state) != ['N','S','E','W'],
lambda state, sb: add_puzzle_operator(state, state["Selected_Room"], sb))
add_ambient_music_operator =\
Operator("Add ambient audio to current room.",
lambda state: True,
lambda state: add_ambient_music(state))
        # Build a fresh operator list for this role; single operators are wrapped in
        # lists so they can be concatenated with the generated operator lists, and a
        # new list is created so the shared nothing_selected list is never mutated.
        OPERATORS = nothing_selected + role_operators + [create_json_file]
        OPERATORS += selection_operators + add_door_operators + remove_object_operators
        OPERATORS += [wallpaper_operators, add_puzzle_operators, add_ambient_music_operator]
elif(state['Role'] == "Image Puzzle"):
puzzles = state["Image_Puzzles"]
numOfPuzzles = len(puzzles)
selection_operators =\
[Operator("Switch to puzzle \"" + name + "\" for editing",
lambda state, n = name: numOfPuzzles > 1 and n != state["Selected_Image"],
lambda state, n = name: change_image_puzzle_selection(state, n))
for name in puzzles.keys()]
create_new_puzzle =\
Operator("Create a new image puzzle.",
lambda state: True,
lambda state: create_image_puzzle(state))
rename_puzzle =\
AsyncOperator("Rename selected puzzle.",
lambda state: state["Selected_Image"] is not None,
lambda state, sb: rename_image_puzzle(state, sb))
horiz_flip =\
Operator("Flip the image horizontally.",
lambda state: state["Selected_Image"] is not None,
lambda state: addImageTransformation(state, "horizFlip"))
vert_flip =\
Operator("Flip the image vertically.",
lambda state: state["Selected_Image"] is not None,
lambda state: addImageTransformation(state, "vertFlip"))
shuff_rows =\
Operator("Shuffle the rows of the image.",
lambda state: state["Selected_Image"] is not None,
lambda state: addImageTransformation(state, "shuffleRows"))
invs_shuff_rows =\
Operator("Invert Row shuffling",
lambda state: state["Selected_Image"] is not None,
lambda state: addImageTransformation(state, "shuffleRowsInverse"))
invs_shuff_cols =\
Operator("Invert Column shuffling",
lambda state: state["Selected_Image"] is not None,
lambda state: addImageTransformation(state, "shuffleColumnsInverse"))
shuff_cols =\
Operator("Shuffle the columns of the image.",
lambda state: state["Selected_Image"] is not None,
lambda state: addImageTransformation(state, "shuffleColumns"))
pixel_crossover =\
Operator("Perform Pixel Crossover.",
lambda state: state["Selected_Image"] is not None,
lambda state: addImageTransformation(state, "pixelCrossover"))
pixel_crossover_inverse =\
Operator("Inverse Pixel Crossover.",
lambda state: state["Selected_Image"] is not None,
lambda state: addImageTransformation(state, "pixelCrossoverInverse"))
        OPERATORS = nothing_selected + [create_json_file] + role_operators
        OPERATORS += selection_operators + [create_new_puzzle, rename_puzzle]
        OPERATORS += [horiz_flip, vert_flip, shuff_rows, shuff_cols, invs_shuff_rows, invs_shuff_cols, pixel_crossover, pixel_crossover_inverse]
elif(state['Role'] == "Music Puzzle"):
puzzles = state["Music_Puzzles"]
numOfPuzzles = len(puzzles)
selection_operators =\
[Operator("Switch to puzzle \"" + name + "\" for editing.",
lambda state, n = name: numOfPuzzles > 1 and n != state["Selected_Music"],
lambda state, n = name: change_music_puzzle_selection(state, n))
for name in puzzles.keys()]
create_new_puzzle =\
AsyncOperator("Create a new music puzzle.",
lambda state: True,
lambda state, sb: create_music_puzzle(state, sb))
rename_puzzle =\
AsyncOperator("Rename selected puzzle.",
lambda state: state["Selected_Music"] is not None,
lambda state, sb: rename_music_puzzle(state, sb))
increase_pitch =\
Operator("Increase pitch of song",
lambda state: state["Selected_Music"] is not None,
lambda state: addMusicTransformation(state, "increasePitch"))
decrease_pitch =\
Operator("Decrease pitch of song",
lambda state: state["Selected_Music"] is not None,
lambda state: addMusicTransformation(state, "decreasePitch"))
increase_tempo =\
Operator("Increase tempo of song",
lambda state: state["Selected_Music"] is not None,
lambda state: addMusicTransformation(state, "increaseTempo"))
decrease_tempo =\
Operator("Decrease tempo of song",
lambda state: state["Selected_Music"] is not None,
lambda state: addMusicTransformation(state, "decreaseTempo"))
shuffle_notes =\
Operator("Shuffle notes of song",
lambda state: state["Selected_Music"] is not None,
lambda state: addMusicTransformation(state, "shuffleNotes"))
reverse_notes =\
Operator("Reverse notes of song",
lambda state: state["Selected_Music"] is not None,
lambda state: addMusicTransformation(state, "reverseNotes"))
        OPERATORS = nothing_selected + [create_json_file] + role_operators + selection_operators
        OPERATORS += [create_new_puzzle, rename_puzzle, increase_tempo, decrease_tempo, shuffle_notes, increase_pitch, decrease_pitch, reverse_notes]
elif(state['Role'] == "Rules"):
create_rule =\
Operator("Create new Rule.",
lambda state: True,
lambda state: createRule(state))
edit_rule =\
[AsyncOperator("Edit Rule " + str(index + 1) + ".",
lambda state: True,
lambda state, sb, i = index: editRule(state, i, sb))
for index, rule in enumerate(state["Rules"])]
        OPERATORS = nothing_selected + [create_json_file] + role_operators + [create_rule] + edit_rule
    else:
        alert("unsupported role")
        OPERATORS = nothing_selected  # fall back so the return below always has a value
return OPERATORS
#</OPERATORS>
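# Every operator above is constructed from (name, precondition, state_transfer).
# Plain Operators take a transfer of the form f(state) -> new_state, while
# AsyncOperators take f(state, send_back) and deliver the new state through the
# callback, as the comment at the top of set_operators requires. A minimal,
# self-contained illustration of those two shapes using bare lambdas (no
# Operator classes involved; the 'visited' key is made up for the example):
def _operator_shapes_sketch(state, send_back):
    sync_precond = lambda state: True
    sync_transfer = lambda state: dict(state, visited=True)
    async_transfer = lambda state, sb: sb(dict(state, visited=True))
    if sync_precond(state):
        send_back(sync_transfer(state))   # synchronous: the new state is returned directly
        async_transfer(state, send_back)  # asynchronous: the new state goes to the callback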
#<INITIAL_STATE> The game state holds 9 rooms stored as a list.
INITIAL_STATE = {}
INITIAL_STATE['Rooms'] = []
INITIAL_STATE['Image_Puzzles'] = {}
INITIAL_STATE['Music_Puzzles'] = {}
# ADD A BLANK MUSIC PUZZLE FOR DEBUG PURPOSES ONLY
INITIAL_STATE["Music_Puzzles"]["test puzzle1"] = MusicPuzzle()
INITIAL_STATE['Rules'] = []
# ADD BLANK RULES FOR DEBUG PURPOSES ONLY
INITIAL_STATE['Rules'].append(Rule())
'''INITIAL_STATE['Rules'].append(Rule())
INITIAL_STATE['Rules'].append(Rule(["Cool bean",'fdafsasdfadfasfdaf','aaaaaaaaa'],["cool cream"], True))
INITIAL_STATE['Rules'].append(Rule())
INITIAL_STATE['Rules'].append(Rule())
INITIAL_STATE['Rules'].append(Rule(["not cool bean"]))
INITIAL_STATE['Rules'].append(Rule())
INITIAL_STATE['Rules'].append(Rule())
INITIAL_STATE['Rules'].append(Rule())
INITIAL_STATE['Rules'].append(Rule())
INITIAL_STATE['Rules'].append(Rule())
INITIAL_STATE['Rules'].append(Rule())
INITIAL_STATE['Rules'].append(Rule())
INITIAL_STATE['Rules'].append(Rule())
'''
INITIAL_STATE['Selected_Room'] = 0
# Stores name of selected image and selected music
INITIAL_STATE['Selected_Image'] = None
INITIAL_STATE['Selected_Music'] = None
INITIAL_STATE['Role'] = "Rules"
INITIAL_STATE['Operators'] = set_operators(INITIAL_STATE)
INITIAL_STATE['ConditionMaster'] = ["Entered Room","Had Points","Time Elapsed", "Solved Puzzle"]
INITIAL_STATE['ActionMaster'] = ["Open Door", "Close Door", "Play Sound", "Display Message",
"Unsolve Puzzle", "Gain Points","Lose Points","Game Ends"]
# Create 9 rooms and add them to the state.
for j in range(3):
for i in range(3):
INITIAL_STATE['Rooms'].append( Room(i, j, i + 1, j + 1) )
# TEMP DEBUG ADD PUZZLE
#add_puzzle_to_room(0,'E',INITIAL_STATE)
# Temporary addition for debug purposes
#INITIAL_STATE["Rooms"][0].aAudio = "music\defaultAmbient.mp3"
# Now initialize operators.
OPERATORS = INITIAL_STATE['Operators']
#</INITIAL_STATE>
|
|
from __future__ import absolute_import
import os
import re
from OpenSSL import SSL
from netlib import http_auth, certutils, tcp
from .. import utils, platform, version
from .primitives import RegularProxyMode, SpoofMode, SSLSpoofMode, TransparentProxyMode, UpstreamProxyMode, ReverseProxyMode, Socks5ProxyMode
TRANSPARENT_SSL_PORTS = [443, 8443]
CONF_BASENAME = "mitmproxy"
CA_DIR = "~/.mitmproxy"
class HostMatcher(object):
def __init__(self, patterns=[]):
self.patterns = list(patterns)
self.regexes = [re.compile(p, re.IGNORECASE) for p in self.patterns]
def __call__(self, address):
address = tcp.Address.wrap(address)
host = "%s:%s" % (address.host, address.port)
if any(rex.search(host) for rex in self.regexes):
return True
else:
return False
def __nonzero__(self):
return bool(self.patterns)
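# A short usage sketch for HostMatcher: each pattern is a regular expression that
# is searched, case-insensitively, against the "host:port" form of the wrapped
# address. The hosts and ports below are illustrative only.
def _host_matcher_example():
    matcher = HostMatcher([r"example\.com:443", r":8080$"])
    assert matcher(("example.com", 443))     # first pattern matches host and port
    assert matcher(("internal.host", 8080))  # ":8080$" matches any host on port 8080
    assert not matcher(("example.com", 80))  # no pattern matches port 80
    assert bool(matcher)                     # truthy because patterns were supplied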
class ProxyConfig:
def __init__(
self,
host='',
port=8080,
server_version=version.NAMEVERSION,
cadir=CA_DIR,
clientcerts=None,
no_upstream_cert=False,
body_size_limit=None,
mode=None,
upstream_server=None,
http_form_in=None,
http_form_out=None,
authenticator=None,
ignore_hosts=[],
tcp_hosts=[],
ciphers_client=None,
ciphers_server=None,
certs=[],
certforward=False,
ssl_version_client=tcp.SSL_DEFAULT_METHOD,
ssl_version_server=tcp.SSL_DEFAULT_METHOD,
ssl_ports=TRANSPARENT_SSL_PORTS,
spoofed_ssl_port=None,
):
self.host = host
self.port = port
self.server_version = server_version
self.ciphers_client = ciphers_client
self.ciphers_server = ciphers_server
self.clientcerts = clientcerts
self.no_upstream_cert = no_upstream_cert
self.body_size_limit = body_size_limit
if mode == "transparent":
self.mode = TransparentProxyMode(platform.resolver(), ssl_ports)
elif mode == "socks5":
self.mode = Socks5ProxyMode(ssl_ports)
elif mode == "reverse":
self.mode = ReverseProxyMode(upstream_server)
elif mode == "upstream":
self.mode = UpstreamProxyMode(upstream_server)
elif mode == "spoof":
self.mode = SpoofMode()
elif mode == "sslspoof":
self.mode = SSLSpoofMode(spoofed_ssl_port)
else:
self.mode = RegularProxyMode()
# Handle manual overrides of the http forms
self.mode.http_form_in = http_form_in or self.mode.http_form_in
self.mode.http_form_out = http_form_out or self.mode.http_form_out
self.check_ignore = HostMatcher(ignore_hosts)
self.check_tcp = HostMatcher(tcp_hosts)
self.authenticator = authenticator
self.cadir = os.path.expanduser(cadir)
self.certstore = certutils.CertStore.from_store(
self.cadir,
CONF_BASENAME)
for spec, cert in certs:
self.certstore.add_cert_file(spec, cert)
self.certforward = certforward
self.ssl_ports = ssl_ports
if isinstance(ssl_version_client, int):
self.openssl_method_client = ssl_version_client
else:
self.openssl_method_client = tcp.SSL_VERSIONS[ssl_version_client]
if isinstance(ssl_version_server, int):
self.openssl_method_server = ssl_version_server
else:
self.openssl_method_server = tcp.SSL_VERSIONS[ssl_version_server]
self.openssl_options_client = tcp.SSL_DEFAULT_OPTIONS
self.openssl_options_server = tcp.SSL_DEFAULT_OPTIONS
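# A minimal construction sketch for ProxyConfig: the mode string selects one of
# the mode classes imported above and everything else keeps the defaults declared
# in __init__. This is an illustration only, not part of the mitmproxy API; the
# port value is arbitrary.
def _example_socks5_proxy_config():
    # SOCKS5 mode needs no upstream server, which keeps the example self-contained.
    return ProxyConfig(port=1080, mode="socks5")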
def process_proxy_options(parser, options):
body_size_limit = utils.parse_size(options.body_size_limit)
c = 0
mode, upstream_server, spoofed_ssl_port = None, None, None
if options.transparent_proxy:
c += 1
if not platform.resolver:
return parser.error(
"Transparent mode not supported on this platform.")
mode = "transparent"
if options.socks_proxy:
c += 1
mode = "socks5"
if options.reverse_proxy:
c += 1
mode = "reverse"
upstream_server = options.reverse_proxy
if options.upstream_proxy:
c += 1
mode = "upstream"
upstream_server = options.upstream_proxy
if options.spoof_mode:
c += 1
mode = "spoof"
if options.ssl_spoof_mode:
c += 1
mode = "sslspoof"
spoofed_ssl_port = options.spoofed_ssl_port
if c > 1:
return parser.error(
"Transparent, SOCKS5, reverse and upstream proxy mode "
"are mutually exclusive.")
if options.clientcerts:
options.clientcerts = os.path.expanduser(options.clientcerts)
if not os.path.exists(
options.clientcerts) or not os.path.isdir(
options.clientcerts):
return parser.error(
"Client certificate directory does not exist or is not a directory: %s" %
options.clientcerts)
if (options.auth_nonanonymous or options.auth_singleuser or options.auth_htpasswd):
if options.auth_singleuser:
if len(options.auth_singleuser.split(':')) != 2:
return parser.error(
"Invalid single-user specification. Please use the format username:password")
username, password = options.auth_singleuser.split(':')
password_manager = http_auth.PassManSingleUser(username, password)
elif options.auth_nonanonymous:
password_manager = http_auth.PassManNonAnon()
elif options.auth_htpasswd:
try:
password_manager = http_auth.PassManHtpasswd(
options.auth_htpasswd)
except ValueError as v:
return parser.error(v.message)
authenticator = http_auth.BasicProxyAuth(password_manager, "mitmproxy")
else:
authenticator = http_auth.NullProxyAuth(None)
certs = []
for i in options.certs:
parts = i.split("=", 1)
if len(parts) == 1:
parts = ["*", parts[0]]
parts[1] = os.path.expanduser(parts[1])
if not os.path.exists(parts[1]):
parser.error("Certificate file does not exist: %s" % parts[1])
certs.append(parts)
ssl_ports = options.ssl_ports
if options.ssl_ports != TRANSPARENT_SSL_PORTS:
        # argparse appends to the default value by default; strip that off.
# see http://bugs.python.org/issue16399
ssl_ports = ssl_ports[len(TRANSPARENT_SSL_PORTS):]
return ProxyConfig(
host=options.addr,
port=options.port,
cadir=options.cadir,
clientcerts=options.clientcerts,
no_upstream_cert=options.no_upstream_cert,
body_size_limit=body_size_limit,
mode=mode,
upstream_server=upstream_server,
http_form_in=options.http_form_in,
http_form_out=options.http_form_out,
ignore_hosts=options.ignore_hosts,
tcp_hosts=options.tcp_hosts,
authenticator=authenticator,
ciphers_client=options.ciphers_client,
ciphers_server=options.ciphers_server,
certs=certs,
certforward=options.certforward,
ssl_version_client=options.ssl_version_client,
ssl_version_server=options.ssl_version_server,
ssl_ports=ssl_ports,
spoofed_ssl_port=spoofed_ssl_port
)
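# The ssl_ports handling above works around the argparse quirk referenced in the
# comment (http://bugs.python.org/issue16399): with action="append", user-supplied
# values are appended to the default list instead of replacing it. A small,
# self-contained demonstration of the quirk and of the strip-the-default fix:
def _ssl_ports_append_quirk_demo():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("--ssl-port", action="append", type=int,
                        dest="ssl_ports", default=list(TRANSPARENT_SSL_PORTS))
    opts = parser.parse_args(["--ssl-port", "9443"])
    # argparse appended to the default, so the list still starts with 443 and 8443 ...
    assert opts.ssl_ports == TRANSPARENT_SSL_PORTS + [9443]
    # ... and stripping the default prefix leaves only the user-supplied ports.
    assert opts.ssl_ports[len(TRANSPARENT_SSL_PORTS):] == [9443]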
def ssl_option_group(parser):
group = parser.add_argument_group("SSL")
group.add_argument(
"--cert",
dest='certs',
default=[],
type=str,
metavar="SPEC",
action="append",
help='Add an SSL certificate. SPEC is of the form "[domain=]path". '
'The domain may include a wildcard, and is equal to "*" if not specified. '
'The file at path is a certificate in PEM format. If a private key is included in the PEM, '
'it is used, else the default key in the conf dir is used. '
'The PEM file should contain the full certificate chain, with the leaf certificate as the first entry. '
'Can be passed multiple times.')
group.add_argument(
"--cert-forward", action="store_true",
dest="certforward", default=False,
help="Simply forward SSL certificates from upstream."
)
group.add_argument(
"--ciphers-client", action="store",
type=str, dest="ciphers_client", default=None,
help="Set supported ciphers for client connections. (OpenSSL Syntax)"
)
group.add_argument(
"--ciphers-server", action="store",
type=str, dest="ciphers_server", default=None,
help="Set supported ciphers for server connections. (OpenSSL Syntax)"
)
group.add_argument(
"--client-certs", action="store",
type=str, dest="clientcerts", default=None,
help="Client certificate directory."
)
group.add_argument(
"--no-upstream-cert", default=False,
action="store_true", dest="no_upstream_cert",
help="Don't connect to upstream server to look up certificate details."
)
group.add_argument(
"--ssl-port",
action="append",
type=int,
dest="ssl_ports",
default=list(TRANSPARENT_SSL_PORTS),
metavar="PORT",
help="Can be passed multiple times. Specify destination ports which are assumed to be SSL. "
"Defaults to %s." %
str(TRANSPARENT_SSL_PORTS))
group.add_argument(
"--ssl-version-client", dest="ssl_version_client", type=str, default=tcp.SSL_DEFAULT_VERSION,
choices=tcp.SSL_VERSIONS.keys(),
help=""""
Use a specified protocol for client connections:
TLSv1.2, TLSv1.1, TLSv1, SSLv3, SSLv2, SSLv23.
Default to SSLv23."""
)
group.add_argument(
"--ssl-version-server", dest="ssl_version_server", type=str, default=tcp.SSL_DEFAULT_VERSION,
choices=tcp.SSL_VERSIONS.keys(),
help=""""
Use a specified protocol for server connections:
TLSv1.2, TLSv1.1, TLSv1, SSLv3, SSLv2, SSLv23.
Default to SSLv23."""
)
|
|
# -*- coding: utf-8 -*-
import httplib as http
import logging
from bs4 import BeautifulSoup
from flask import request
from framework.mongo.utils import to_mongo_key
from framework.exceptions import HTTPError
from framework.auth.utils import privacy_info_handle
from framework.auth.decorators import must_be_logged_in
from framework.flask import redirect
from website.addons.wiki import settings
from website.addons.wiki import utils as wiki_utils
from website.profile.utils import get_gravatar
from website.project.views.node import _view_project
from website.project.model import has_anonymous_link
from website.project.decorators import (
must_be_contributor_or_public,
must_have_addon, must_not_be_registration,
must_be_valid_project,
must_have_permission,
must_have_write_permission_or_public_wiki,
)
from website.exceptions import NodeStateError
from .exceptions import (
NameEmptyError,
NameInvalidError,
NameMaximumLengthError,
PageCannotRenameError,
PageConflictError,
PageNotFoundError,
InvalidVersionError,
)
from .model import NodeWikiPage
logger = logging.getLogger(__name__)
WIKI_NAME_EMPTY_ERROR = HTTPError(http.BAD_REQUEST, data=dict(
message_short='Invalid request',
message_long='The wiki page name cannot be empty.'
))
WIKI_NAME_MAXIMUM_LENGTH_ERROR = HTTPError(http.BAD_REQUEST, data=dict(
message_short='Invalid request',
message_long='The wiki page name cannot be more than 100 characters.'
))
WIKI_PAGE_CANNOT_RENAME_ERROR = HTTPError(http.BAD_REQUEST, data=dict(
message_short='Invalid request',
message_long='The wiki page cannot be renamed.'
))
WIKI_PAGE_CONFLICT_ERROR = HTTPError(http.CONFLICT, data=dict(
message_short='Page conflict',
message_long='A wiki page with that name already exists.'
))
WIKI_PAGE_NOT_FOUND_ERROR = HTTPError(http.NOT_FOUND, data=dict(
message_short='Not found',
message_long='A wiki page could not be found.'
))
WIKI_INVALID_VERSION_ERROR = HTTPError(http.BAD_REQUEST, data=dict(
message_short='Invalid request',
message_long='The requested version of this wiki page does not exist.'
))
def _get_wiki_versions(node, name, anonymous=False):
key = to_mongo_key(name)
# Skip if wiki_page doesn't exist; happens on new projects before
# default "home" page is created
if key not in node.wiki_pages_versions:
return []
versions = [
NodeWikiPage.load(version_wiki_id)
for version_wiki_id in node.wiki_pages_versions[key]
]
return [
{
'version': version.version,
'user_fullname': privacy_info_handle(version.user.fullname, anonymous, name=True),
'date': '{} UTC'.format(version.date.replace(microsecond=0).isoformat().replace('T', ' ')),
}
for version in reversed(versions)
]
def _get_wiki_pages_current(node):
return [
{
'name': sorted_page.page_name,
'url': node.web_url_for('project_wiki_view', wname=sorted_page.page_name, _guid=True),
'wiki_id': sorted_page._primary_key,
'wiki_content': wiki_page_content(sorted_page.page_name, node=node)
}
for sorted_page in [
node.get_wiki_page(sorted_key)
for sorted_key in sorted(node.wiki_pages_current)
]
# TODO: remove after forward slash migration
if sorted_page is not None
]
def _get_wiki_api_urls(node, name, additional_urls=None):
urls = {
'base': node.api_url_for('project_wiki_home'),
'delete': node.api_url_for('project_wiki_delete', wname=name),
'rename': node.api_url_for('project_wiki_rename', wname=name),
'content': node.api_url_for('wiki_page_content', wname=name),
'settings': node.api_url_for('edit_wiki_settings'),
'grid': node.api_url_for('project_wiki_grid_data', wname=name)
}
if additional_urls:
urls.update(additional_urls)
return urls
def _get_wiki_web_urls(node, key, version=1, additional_urls=None):
urls = {
'base': node.web_url_for('project_wiki_home', _guid=True),
'edit': node.web_url_for('project_wiki_view', wname=key, _guid=True),
'home': node.web_url_for('project_wiki_home', _guid=True),
'page': node.web_url_for('project_wiki_view', wname=key, _guid=True),
}
if additional_urls:
urls.update(additional_urls)
return urls
@must_be_contributor_or_public
@must_have_addon('wiki', 'node')
def wiki_widget(**kwargs):
node = kwargs['node'] or kwargs['project']
wiki = node.get_addon('wiki')
wiki_page = node.get_wiki_page('home')
# Show "Read more" link if there are multiple pages or has > 400 characters
more = len(node.wiki_pages_current.keys()) >= 2
MAX_DISPLAY_LENGTH = 400
use_python_render = False
if wiki_page and wiki_page.html(node):
wiki_html = wiki_page.html(node)
if len(wiki_html) > MAX_DISPLAY_LENGTH:
wiki_html = BeautifulSoup(wiki_html[:MAX_DISPLAY_LENGTH] + '...', 'html.parser')
more = True
else:
wiki_html = BeautifulSoup(wiki_html)
use_python_render = wiki_page.rendered_before_update
else:
wiki_html = None
ret = {
'complete': True,
'wiki_content': unicode(wiki_html) if wiki_html else None,
'wiki_content_url': node.api_url_for('wiki_page_content', wname='home'),
'use_python_render': use_python_render,
'more': more,
'include': False,
}
ret.update(wiki.config.to_json())
return ret
@must_be_valid_project
@must_have_write_permission_or_public_wiki
@must_have_addon('wiki', 'node')
def wiki_page_draft(wname, **kwargs):
node = kwargs['node'] or kwargs['project']
wiki_page = node.get_wiki_page(wname)
return {
'wiki_content': wiki_page.content if wiki_page else None,
'wiki_draft': (wiki_page.get_draft(node) if wiki_page
else wiki_utils.get_sharejs_content(node, wname)),
}
@must_be_valid_project
@must_be_contributor_or_public
@must_have_addon('wiki', 'node')
def wiki_page_content(wname, wver=None, **kwargs):
node = kwargs['node'] or kwargs['project']
wiki_page = node.get_wiki_page(wname, version=wver)
use_python_render = wiki_page.rendered_before_update if wiki_page else False
return {
'wiki_content': wiki_page.content if wiki_page else '',
# Only return rendered version if page was saved before wiki change
'wiki_rendered': wiki_page.html(node) if use_python_render else '',
}
@must_be_valid_project # injects project
@must_have_permission('write') # injects user, project
@must_not_be_registration
@must_have_addon('wiki', 'node')
def project_wiki_delete(auth, wname, **kwargs):
node = kwargs['node'] or kwargs['project']
wiki_name = wname.strip()
wiki_page = node.get_wiki_page(wiki_name)
sharejs_uuid = wiki_utils.get_sharejs_uuid(node, wiki_name)
if not wiki_page:
raise HTTPError(http.NOT_FOUND)
node.delete_node_wiki(wiki_name, auth)
wiki_utils.broadcast_to_sharejs('delete', sharejs_uuid, node)
return {}
@must_be_valid_project # returns project
@must_be_contributor_or_public
@must_have_addon('wiki', 'node')
def project_wiki_view(auth, wname, path=None, **kwargs):
node = kwargs['node'] or kwargs['project']
anonymous = has_anonymous_link(node, auth)
wiki_name = (wname or '').strip()
wiki_key = to_mongo_key(wiki_name)
wiki_page = node.get_wiki_page(wiki_name)
wiki_settings = node.get_addon('wiki')
can_edit = (
auth.logged_in
and not node.is_registration
and (
node.has_permission(auth.user, 'write')
or wiki_settings.is_publicly_editable
)
)
versions = _get_wiki_versions(node, wiki_name, anonymous=anonymous)
# Determine panels used in view
panels = {'view', 'edit', 'compare', 'menu'}
if request.args and set(request.args).intersection(panels):
panels_used = [panel for panel in request.args if panel in panels]
num_columns = len(set(panels_used).intersection({'view', 'edit', 'compare'}))
if num_columns == 0:
panels_used.append('view')
num_columns = 1
else:
panels_used = ['view', 'menu']
num_columns = 1
try:
view = wiki_utils.format_wiki_version(
version=request.args.get('view'),
num_versions=len(versions),
allow_preview=True,
)
compare = wiki_utils.format_wiki_version(
version=request.args.get('compare'),
num_versions=len(versions),
allow_preview=False,
)
except InvalidVersionError:
raise WIKI_INVALID_VERSION_ERROR
# ensure home is always lower case since it cannot be renamed
if wiki_name.lower() == 'home':
wiki_name = 'home'
if wiki_page:
version = wiki_page.version
is_current = wiki_page.is_current
content = wiki_page.html(node)
use_python_render = wiki_page.rendered_before_update
else:
version = 'NA'
is_current = False
content = ''
use_python_render = False
if can_edit:
if wiki_key not in node.wiki_private_uuids:
wiki_utils.generate_private_uuid(node, wiki_name)
sharejs_uuid = wiki_utils.get_sharejs_uuid(node, wiki_name)
else:
if wiki_key not in node.wiki_pages_current and wiki_key != 'home':
raise WIKI_PAGE_NOT_FOUND_ERROR
if 'edit' in request.args:
if wiki_settings.is_publicly_editable:
raise HTTPError(http.UNAUTHORIZED)
raise HTTPError(http.FORBIDDEN)
sharejs_uuid = None
# Opens 'edit' panel when home wiki is empty
if not content and can_edit and wiki_name == 'home':
panels_used.append('edit')
# Default versions for view and compare
version_settings = {
'view': view or ('preview' if 'edit' in panels_used else 'current'),
'compare': compare or 'previous',
}
ret = {
'wiki_id': wiki_page._primary_key if wiki_page else None,
'wiki_name': wiki_page.page_name if wiki_page else wiki_name,
'wiki_content': content,
'use_python_render': use_python_render,
'page': wiki_page,
'version': version,
'versions': versions,
'sharejs_uuid': sharejs_uuid or '',
'sharejs_url': settings.SHAREJS_URL,
'is_current': is_current,
'version_settings': version_settings,
'pages_current': _get_wiki_pages_current(node),
'category': node.category,
'panels_used': panels_used,
'num_columns': num_columns,
'urls': {
'api': _get_wiki_api_urls(node, wiki_name, {
'content': node.api_url_for('wiki_page_content', wname=wiki_name),
'draft': node.api_url_for('wiki_page_draft', wname=wiki_name),
}),
'web': _get_wiki_web_urls(node, wiki_name),
'gravatar': get_gravatar(auth.user, 25),
},
}
ret.update(_view_project(node, auth, primary=True))
ret['user']['can_edit_wiki_body'] = can_edit
return ret
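# The panel handling above reduces to: keep whichever of {'view', 'edit',
# 'compare', 'menu'} appear as query arguments, count how many of the three
# column panels were requested, and fall back to a single 'view' column when
# none were. A standalone sketch of that mapping, taking a plain iterable of
# query argument names:
def _panels_from_query_args_sketch(args):
    panels = {'view', 'edit', 'compare', 'menu'}
    if args and set(args).intersection(panels):
        panels_used = [panel for panel in args if panel in panels]
        num_columns = len(set(panels_used).intersection({'view', 'edit', 'compare'}))
        if num_columns == 0:
            panels_used.append('view')
            num_columns = 1
    else:
        panels_used = ['view', 'menu']
        num_columns = 1
    return panels_used, num_columns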
@must_be_valid_project # injects node or project
@must_have_write_permission_or_public_wiki # injects user
@must_not_be_registration
@must_have_addon('wiki', 'node')
def project_wiki_edit_post(auth, wname, **kwargs):
node = kwargs['node'] or kwargs['project']
wiki_name = wname.strip()
wiki_page = node.get_wiki_page(wiki_name)
redirect_url = node.web_url_for('project_wiki_view', wname=wiki_name, _guid=True)
form_wiki_content = request.form['content']
# ensure home is always lower case since it cannot be renamed
if wiki_name.lower() == 'home':
wiki_name = 'home'
if wiki_page:
# Only update node wiki if content has changed
if form_wiki_content != wiki_page.content:
node.update_node_wiki(wiki_page.page_name, form_wiki_content, auth)
ret = {'status': 'success'}
else:
ret = {'status': 'unmodified'}
else:
        # update_node_wiki will create a new wiki page because a page with this name does not exist yet
node.update_node_wiki(wiki_name, form_wiki_content, auth)
ret = {'status': 'success'}
return ret, http.FOUND, None, redirect_url
@must_be_valid_project # injects node or project
@must_have_permission('admin')
@must_not_be_registration
@must_have_addon('wiki', 'node')
def edit_wiki_settings(node, auth, **kwargs):
wiki_settings = node.get_addon('wiki')
permissions = request.get_json().get('permission', None)
if not wiki_settings:
raise HTTPError(http.BAD_REQUEST, data=dict(
message_short='Invalid request',
message_long='Cannot change wiki settings without a wiki'
))
if permissions == 'public':
permissions = True
elif permissions == 'private':
permissions = False
else:
raise HTTPError(http.BAD_REQUEST, data=dict(
message_short='Invalid request',
message_long='Permissions flag used is incorrect.'
))
try:
wiki_settings.set_editing(permissions, auth, log=True)
except NodeStateError as e:
raise HTTPError(http.BAD_REQUEST, data=dict(
message_short="Can't change privacy",
message_long=e.message
))
return {
'status': 'success',
'permissions': permissions,
}
@must_be_logged_in
@must_be_valid_project
def get_node_wiki_permissions(node, auth, **kwargs):
return wiki_utils.serialize_wiki_settings(auth.user, [node._id])
@must_be_valid_project
@must_have_addon('wiki', 'node')
def project_wiki_home(**kwargs):
node = kwargs['node'] or kwargs['project']
return redirect(node.web_url_for('project_wiki_view', wname='home', _guid=True))
@must_be_valid_project # injects project
@must_be_contributor_or_public
@must_have_addon('wiki', 'node')
def project_wiki_id_page(auth, wid, **kwargs):
node = kwargs['node'] or kwargs['project']
wiki_page = node.get_wiki_page(id=wid)
if wiki_page:
return redirect(node.web_url_for('project_wiki_view', wname=wiki_page.page_name, _guid=True))
else:
raise WIKI_PAGE_NOT_FOUND_ERROR
@must_be_valid_project
@must_have_write_permission_or_public_wiki
@must_not_be_registration
@must_have_addon('wiki', 'node')
def project_wiki_edit(wname, **kwargs):
node = kwargs['node'] or kwargs['project']
return redirect(node.web_url_for('project_wiki_view', wname=wname, _guid=True) + '?edit&view&menu')
@must_be_valid_project
@must_be_contributor_or_public
@must_have_addon('wiki', 'node')
def project_wiki_compare(wname, wver, **kwargs):
node = kwargs['node'] or kwargs['project']
return redirect(node.web_url_for('project_wiki_view', wname=wname, _guid=True) + '?view&compare={0}&menu'.format(wver))
@must_not_be_registration
@must_have_permission('write')
@must_have_addon('wiki', 'node')
def project_wiki_rename(auth, wname, **kwargs):
"""View that handles user the X-editable input for wiki page renaming.
:param wname: The target wiki page name.
:param-json value: The new wiki page name.
"""
node = kwargs['node'] or kwargs['project']
wiki_name = wname.strip()
new_wiki_name = request.get_json().get('value', None)
try:
node.rename_node_wiki(wiki_name, new_wiki_name, auth)
except NameEmptyError:
raise WIKI_NAME_EMPTY_ERROR
except NameInvalidError as error:
raise HTTPError(http.BAD_REQUEST, data=dict(
message_short='Invalid name',
message_long=error.args[0]
))
except NameMaximumLengthError:
raise WIKI_NAME_MAXIMUM_LENGTH_ERROR
except PageCannotRenameError:
raise WIKI_PAGE_CANNOT_RENAME_ERROR
except PageConflictError:
raise WIKI_PAGE_CONFLICT_ERROR
except PageNotFoundError:
raise WIKI_PAGE_NOT_FOUND_ERROR
else:
sharejs_uuid = wiki_utils.get_sharejs_uuid(node, new_wiki_name)
wiki_utils.broadcast_to_sharejs('redirect', sharejs_uuid, node, new_wiki_name)
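# Illustration of the rename request handled above: the X-editable widget submits
# a JSON body whose "value" field carries the new page name, so request.get_json()
# returns a dict shaped like the one below (the page name is made up).
def _example_rename_payload():
    return {'value': 'Data Analysis Notes'}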
@must_be_valid_project # returns project
@must_have_permission('write') # returns user, project
@must_not_be_registration
@must_have_addon('wiki', 'node')
def project_wiki_validate_name(wname, auth, node, **kwargs):
wiki_name = wname.strip()
wiki_key = to_mongo_key(wiki_name)
if wiki_key in node.wiki_pages_current or wiki_key == 'home':
raise HTTPError(http.CONFLICT, data=dict(
message_short='Wiki page name conflict.',
message_long='A wiki page with that name already exists.'
))
else:
node.update_node_wiki(wiki_name, '', auth)
return {'message': wiki_name}
@must_be_valid_project
@must_be_contributor_or_public
def project_wiki_grid_data(auth, node, **kwargs):
pages = []
project_wiki_pages = {
'title': 'Project Wiki Pages',
'kind': 'folder',
'type': 'heading',
'children': format_project_wiki_pages(node, auth)
}
pages.append(project_wiki_pages)
component_wiki_pages = {
'title': 'Component Wiki Pages',
'kind': 'folder',
'type': 'heading',
'children': format_component_wiki_pages(node, auth)
}
if len(component_wiki_pages['children']) > 0:
pages.append(component_wiki_pages)
return pages
def format_home_wiki_page(node):
home_wiki = node.get_wiki_page('home')
home_wiki_page = {
'page': {
'url': node.web_url_for('project_wiki_home'),
'name': 'Home',
'id': 'None',
}
}
if home_wiki:
home_wiki_page = {
'page': {
'url': node.web_url_for('project_wiki_view', wname='home', _guid=True),
'name': 'Home',
'id': home_wiki._primary_key,
}
}
return home_wiki_page
def format_project_wiki_pages(node, auth):
pages = []
can_edit = node.has_permission(auth.user, 'write') and not node.is_registration
project_wiki_pages = _get_wiki_pages_current(node)
home_wiki_page = format_home_wiki_page(node)
pages.append(home_wiki_page)
for wiki_page in project_wiki_pages:
if wiki_page['name'] != 'home':
has_content = bool(wiki_page['wiki_content'].get('wiki_content'))
page = {
'page': {
'url': wiki_page['url'],
'name': wiki_page['name'],
'id': wiki_page['wiki_id'],
}
}
if can_edit or has_content:
pages.append(page)
return pages
def format_component_wiki_pages(node, auth):
pages = []
for node in node.nodes:
if any([node.is_deleted,
not node.can_view(auth),
not node.has_addon('wiki')]):
continue
else:
serialized = serialize_component_wiki(node, auth)
if serialized:
pages.append(serialized)
return pages
def serialize_component_wiki(node, auth):
children = []
url = node.web_url_for('project_wiki_view', wname='home', _guid=True)
home_has_content = bool(wiki_page_content('home', node=node).get('wiki_content'))
component_home_wiki = {
'page': {
'url': url,
'name': 'Home',
# Handle pointers
'id': node._primary_key if node.primary else node.node._primary_key,
}
}
can_edit = node.has_permission(auth.user, 'write') and not node.is_registration
if can_edit or home_has_content:
children.append(component_home_wiki)
for page in _get_wiki_pages_current(node):
if page['name'] != 'home':
has_content = bool(page['wiki_content'].get('wiki_content'))
component_page = {
'page': {
'url': page['url'],
'name': page['name'],
'id': page['wiki_id'],
}
}
if can_edit or has_content:
children.append(component_page)
if len(children) > 0:
component = {
'page': {
'name': node.title,
'url': url,
},
'kind': 'component',
'category': node.category,
'pointer': not node.primary,
'children': children,
}
return component
return None
|
|
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
TOKEN-BASED AUTH MIDDLEWARE FOR SWIFT
Authentication on incoming request
* grab token from X-Auth-Token header
* TODO: grab the memcache servers from the request env
* TODO: check for auth information in memcache
* check for auth information from keystone
* return if unauthorized
* decorate the request for authorization in swift
* forward to the swift proxy app
Authorization via callback
* check the path and extract the tenant
* get the auth information stored in keystone.identity during
authentication
* TODO: check if the user is an account admin or a reseller admin
* determine what object-type to authorize (account, container, object)
* use knowledge of tenant, admin status, and container acls to authorize
"""
import json
from urlparse import urlparse
from webob.exc import HTTPUnauthorized, HTTPNotFound, HTTPExpectationFailed
from keystone.common.bufferedhttp import http_connect_raw as http_connect
from swift.common.middleware.acl import clean_acl, parse_acl, referrer_allowed
from swift.common.utils import get_logger, split_path
PROTOCOL_NAME = "Swift Token Authentication"
class AuthProtocol(object):
"""Handles authenticating and aurothrizing client calls.
Add to your pipeline in paste config like:
[pipeline:main]
pipeline = catch_errors healthcheck cache keystone proxy-server
[filter:keystone]
use = egg:keystone#swiftauth
keystone_url = http://127.0.0.1:8080
keystone_admin_token = 999888777666
"""
def __init__(self, app, conf):
"""Store valuable bits from the conf and set up logging."""
self.app = app
self.keystone_url = urlparse(conf.get('keystone_url'))
self.admin_token = conf.get('keystone_admin_token')
self.reseller_prefix = conf.get('reseller_prefix', 'AUTH')
self.log = get_logger(conf, log_route='keystone')
self.log.info('Keystone middleware started')
def __call__(self, env, start_response):
"""Authenticate the incoming request.
If authentication fails return an appropriate http status here,
otherwise forward through the rest of the app.
"""
self.log.debug('Keystone middleware called')
token = self._get_claims(env)
self.log.debug('token: %s', token)
if token:
identity = self._validate_claims(token)
if identity:
self.log.debug('request authenticated: %r', identity)
return self.perform_authenticated_request(identity, env,
start_response)
else:
self.log.debug('anonymous request')
return self.unauthorized_request(env, start_response)
self.log.debug('no auth token in request headers')
return self.perform_unidentified_request(env, start_response)
def unauthorized_request(self, env, start_response):
"""Clinet provided a token that wasn't acceptable, error out."""
return HTTPUnauthorized()(env, start_response)
def unauthorized(self, req):
"""Return unauthorized given a webob Request object.
        This can be stuffed into the environment for swift.authorize or
        called from the authorization callback when authorization fails.
"""
return HTTPUnauthorized(request=req)
def perform_authenticated_request(self, identity, env, start_response):
"""Client provieded a valid identity, so use it for authorization."""
env['keystone.identity'] = identity
env['swift.authorize'] = self.authorize
env['swift.clean_acl'] = clean_acl
self.log.debug('calling app: %s // %r', start_response, env)
rv = self.app(env, start_response)
self.log.debug('return from app: %r', rv)
return rv
def perform_unidentified_request(self, env, start_response):
"""Withouth authentication data, use acls for access control."""
env['swift.authorize'] = self.authorize_via_acl
        env['swift.clean_acl'] = clean_acl
return self.app(env, start_response)
def authorize(self, req):
"""Used when we have a valid identity from keystone."""
self.log.debug('keystone middleware authorization begin')
env = req.environ
tenant = env.get('keystone.identity', {}).get('tenant')
if not tenant:
self.log.warn('identity info not present in authorize request')
return HTTPExpectationFailed('Unable to locate auth claim',
request=req)
# TODO(todd): everyone under a tenant can do anything to that tenant.
# more realistic would be role/group checking to do things
# like deleting the account or creating/deleting containers
# esp. when owned by other users in the same tenant.
if req.path.startswith('/v1/%s_%s' % (self.reseller_prefix, tenant)):
self.log.debug('AUTHORIZED OKAY')
return None
self.log.debug('tenant mismatch: %r', tenant)
return self.unauthorized(req)
def authorize_via_acl(self, req):
"""Anon request handling.
For now this only allows anon read of objects. Container and account
actions are prohibited.
"""
self.log.debug('authorizing anonymous request')
try:
version, account, container, obj = split_path(req.path, 1, 4, True)
except ValueError:
return HTTPNotFound(request=req)
if obj:
return self._authorize_anon_object(req, account, container, obj)
if container:
return self._authorize_anon_container(req, account, container)
if account:
return self._authorize_anon_account(req, account)
return self._authorize_anon_toplevel(req)
def _authorize_anon_object(self, req, account, container, obj):
referrers, groups = parse_acl(getattr(req, 'acl', None))
if referrer_allowed(req.referer, referrers):
self.log.debug('anonymous request AUTHORIZED OKAY')
return None
return self.unauthorized(req)
def _authorize_anon_container(self, req, account, container):
return self.unauthorized(req)
def _authorize_anon_account(self, req, account):
return self.unauthorized(req)
def _authorize_anon_toplevel(self, req):
return self.unauthorized(req)
def _get_claims(self, env):
claims = env.get('HTTP_X_AUTH_TOKEN', env.get('HTTP_X_STORAGE_TOKEN'))
return claims
def _validate_claims(self, claims):
"""Ask keystone (as keystone admin) for information for this user."""
# TODO(todd): cache
self.log.debug('Asking keystone to validate token')
headers = {"Content-type": "application/json",
"Accept": "text/json",
"X-Auth-Token": self.admin_token}
self.log.debug('headers: %r', headers)
self.log.debug('url: %s', self.keystone_url)
conn = http_connect(self.keystone_url.hostname, self.keystone_url.port,
'GET', '/v2.0/tokens/%s' % claims, headers=headers)
resp = conn.getresponse()
data = resp.read()
conn.close()
# Check http status code for the "OK" family of responses
if not str(resp.status).startswith('20'):
return False
identity_info = json.loads(data)
roles = []
role_refs = identity_info["access"]["user"]["roles"]
if role_refs is not None:
for role_ref in role_refs:
roles.append(role_ref["id"])
try:
tenant = identity_info['access']['token']['tenantId']
        except (KeyError, TypeError):
tenant = None
if not tenant:
tenant = identity_info['access']['user']['tenantId']
# TODO(Ziad): add groups back in
identity = {'user': identity_info['access']['user']['username'],
'tenant': tenant,
'roles': roles}
return identity
def filter_factory(global_conf, **local_conf):
"""Returns a WSGI filter app for use with paste.deploy."""
conf = global_conf.copy()
conf.update(local_conf)
def auth_filter(app):
return AuthProtocol(app, conf)
return auth_filter
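# A minimal wiring sketch for the filter above: paste.deploy calls filter_factory
# with the [filter:keystone] options and wraps the proxy app with the returned
# callable. The URL and admin token below mirror the class docstring's example
# values and are placeholders, not working credentials.
def _example_keystone_filter(proxy_app):
    conf = {'keystone_url': 'http://127.0.0.1:8080',
            'keystone_admin_token': '999888777666',
            'reseller_prefix': 'AUTH'}
    return filter_factory({}, **conf)(proxy_app)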
|
|
#!/usr/bin/env python
# Copyright (c) 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import os
import sys
import tempfile
from mako import template as mako_template
from oslo_utils import fileutils
from six.moves import configparser
import yaml
from sahara.tests.scenario import validation
TEST_TEMPLATE_PATH = 'sahara/tests/scenario/testcase.py.mako'
def set_defaults(config):
# set up credentials
config['credentials'] = config.get('credentials', {})
creds = config['credentials']
creds['os_username'] = creds.get('os_username', 'admin')
creds['os_password'] = creds.get('os_password', 'nova')
creds['os_tenant'] = creds.get('os_tenant', 'admin')
creds['os_auth_url'] = creds.get('os_auth_url',
'http://localhost:5000/v2.0')
creds.setdefault('sahara_service_type', 'data-processing')
creds['sahara_url'] = creds.get('sahara_url', None)
# set up network
config['network'] = config.get('network', {})
net = config['network']
net['type'] = net.get('type', 'neutron')
net['private_network'] = net.get('private_network', 'private')
net['auto_assignment_floating_ip'] = net.get('auto_assignment_floating_ip',
False)
net['public_network'] = net.get('public_network', 'public')
default_scenario = ['run_jobs', 'scale', 'run_jobs']
# set up tests parameters
for testcase in config['clusters']:
testcase['class_name'] = "".join([
testcase['plugin_name'],
testcase['plugin_version'].replace('.', '_')])
testcase['retain_resources'] = testcase.get('retain_resources', False)
testcase['scenario'] = testcase.get('scenario', default_scenario)
testcase['edp_jobs_flow'] = (
config.get('edp_jobs_flow', {}).get(
testcase.get('edp_jobs_flow', None), None))
def _merge_dicts_sections(dict_with_section, dict_for_merge, section):
if dict_with_section.get(section) is not None:
for key in dict_with_section[section]:
if dict_for_merge[section].get(key) is not None:
if dict_for_merge[section][key] != (
dict_with_section[section][key]):
                    raise ValueError('Section %s is different' % section)
else:
dict_for_merge[section][key] = dict_with_section[section][key]
return dict_for_merge
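# A small worked example of _merge_dicts_sections: keys from an incoming scenario
# file are folded into the accumulated config, and a conflicting value for an
# already-present key raises ValueError. The dictionaries below are made up.
def _merge_example():
    config = {'credentials': {'os_username': 'admin'}}
    incoming = {'credentials': {'os_password': 'nova'}}
    merged = _merge_dicts_sections(incoming, config, 'credentials')
    assert merged['credentials'] == {'os_username': 'admin', 'os_password': 'nova'}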
def recursive_walk(directory):
list_of_files = []
for file in os.listdir(directory):
path = os.path.join(directory, file)
if os.path.isfile(path):
list_of_files.append(path)
else:
list_of_files += recursive_walk(path)
return list_of_files
def read_template_variables(variable_file, verbose=False):
variables = {}
try:
cp = configparser.ConfigParser()
        # keep option names case-sensitive
cp.optionxform = lambda option: option
cp.readfp(open(variable_file))
variables = cp.defaults()
except IOError as ioe:
print("WARNING: the input contains at least one template, but "
"the variable configuration file '%s' is not valid: %s" %
(variable_file, ioe))
except configparser.Error as cpe:
print("WARNING: the input contains at least one template, but "
"the variable configuration file '%s' can not be parsed: "
"%s" % (variable_file, cpe))
finally:
if verbose:
print("Template variables:\n%s" % (variables))
# continue anyway, as the templates could require no variables
return variables
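# The variable file parsed above is a plain INI file: because optionxform is
# overridden, option names keep their case, and ConfigParser.defaults() returns
# everything declared in the [DEFAULT] section. An illustrative file body (the
# variable names are made up, not required by any scenario):
def _example_variable_file_contents():
    return ("[DEFAULT]\n"
            "network_private_name = private\n"
            "ci_flavor_id = m1.small\n")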
def is_template_file(config_file):
return config_file.endswith(('.yaml.mako', '.yml.mako'))
def read_scenario_config(scenario_config, template_vars=None,
verbose=False):
"""Parse the YAML or the YAML template file.
If the file is a YAML template file, expand it first.
"""
yaml_file = ''
if is_template_file(scenario_config):
scenario_template = mako_template.Template(filename=scenario_config,
strict_undefined=True)
template = scenario_template.render_unicode(**template_vars)
yaml_file = yaml.load(template)
else:
with open(scenario_config, 'r') as yaml_file:
yaml_file = yaml.load(yaml_file)
if verbose:
print("YAML from %s:\n%s" % (scenario_config,
yaml.safe_dump(yaml_file,
allow_unicode=True,
default_flow_style=False)))
return yaml_file
def main():
# parse args
parser = argparse.ArgumentParser(description="Scenario tests runner.")
parser.add_argument('scenario_arguments', help="Path to scenario files",
nargs='+')
parser.add_argument('--variable_file', '-V', default='', nargs='?',
help='Path to the file with template variables')
parser.add_argument('--verbose', default=False, action='store_true',
help='Increase output verbosity')
args = parser.parse_args()
scenario_arguments = args.scenario_arguments
variable_file = args.variable_file
verbose_run = args.verbose
# parse config
config = {'credentials': {},
'network': {},
'clusters': [],
'edp_jobs_flow': {}}
files = []
for scenario_argument in scenario_arguments:
if os.path.isdir(scenario_argument):
files += recursive_walk(scenario_argument)
if os.path.isfile(scenario_argument):
files.append(scenario_argument)
template_variables = {}
if any(is_template_file(config_file) for config_file in files):
template_variables = read_template_variables(variable_file,
verbose_run)
for scenario_argument in files:
test_scenario = read_scenario_config(scenario_argument,
template_variables, verbose_run)
config = _merge_dicts_sections(test_scenario, config, 'credentials')
config = _merge_dicts_sections(test_scenario, config, 'network')
if test_scenario.get('clusters') is not None:
config['clusters'] += test_scenario['clusters']
if test_scenario.get('edp_jobs_flow') is not None:
for key in test_scenario['edp_jobs_flow']:
if key not in config['edp_jobs_flow']:
config['edp_jobs_flow'][key] = (
test_scenario['edp_jobs_flow'][key])
else:
                    raise ValueError('Job flow already exists')
# validate config
validation.validate(config)
set_defaults(config)
credentials = config['credentials']
network = config['network']
testcases = config['clusters']
# create testcase file
test_template = mako_template.Template(filename=TEST_TEMPLATE_PATH)
testcase_data = test_template.render(testcases=testcases,
credentials=credentials,
network=network)
test_dir_path = tempfile.mkdtemp()
print("The generated test file located at: %s" % test_dir_path)
fileutils.write_to_tempfile(testcase_data, prefix='test_', suffix='.py',
path=test_dir_path)
# run tests
concurrency = config.get('concurrency')
os.environ['DISCOVER_DIRECTORY'] = test_dir_path
command = 'bash tools/pretty_tox.sh'
if concurrency:
command = command + ' -- --concurrency %d' % concurrency
return_code = os.system(command)
sys.exit(return_code)
if __name__ == '__main__':
main()
|
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: qrlbase.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='qrlbase.proto',
package='qrl',
syntax='proto3',
serialized_pb=_b('\n\rqrlbase.proto\x12\x03qrl\"\x10\n\x0eGetNodeInfoReq\"5\n\x0fGetNodeInfoResp\x12\x0f\n\x07version\x18\x01 \x01(\t\x12\x11\n\tgrpcProto\x18\x02 \x01(\t2B\n\x04\x42\x61se\x12:\n\x0bGetNodeInfo\x12\x13.qrl.GetNodeInfoReq\x1a\x14.qrl.GetNodeInfoResp\"\x00\x62\x06proto3')
)
_GETNODEINFOREQ = _descriptor.Descriptor(
name='GetNodeInfoReq',
full_name='qrl.GetNodeInfoReq',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=22,
serialized_end=38,
)
_GETNODEINFORESP = _descriptor.Descriptor(
name='GetNodeInfoResp',
full_name='qrl.GetNodeInfoResp',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='version', full_name='qrl.GetNodeInfoResp.version', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='grpcProto', full_name='qrl.GetNodeInfoResp.grpcProto', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=40,
serialized_end=93,
)
DESCRIPTOR.message_types_by_name['GetNodeInfoReq'] = _GETNODEINFOREQ
DESCRIPTOR.message_types_by_name['GetNodeInfoResp'] = _GETNODEINFORESP
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GetNodeInfoReq = _reflection.GeneratedProtocolMessageType('GetNodeInfoReq', (_message.Message,), dict(
DESCRIPTOR = _GETNODEINFOREQ,
__module__ = 'qrlbase_pb2'
# @@protoc_insertion_point(class_scope:qrl.GetNodeInfoReq)
))
_sym_db.RegisterMessage(GetNodeInfoReq)
GetNodeInfoResp = _reflection.GeneratedProtocolMessageType('GetNodeInfoResp', (_message.Message,), dict(
DESCRIPTOR = _GETNODEINFORESP,
__module__ = 'qrlbase_pb2'
# @@protoc_insertion_point(class_scope:qrl.GetNodeInfoResp)
))
_sym_db.RegisterMessage(GetNodeInfoResp)
_BASE = _descriptor.ServiceDescriptor(
name='Base',
full_name='qrl.Base',
file=DESCRIPTOR,
index=0,
options=None,
serialized_start=95,
serialized_end=161,
methods=[
_descriptor.MethodDescriptor(
name='GetNodeInfo',
full_name='qrl.Base.GetNodeInfo',
index=0,
containing_service=None,
input_type=_GETNODEINFOREQ,
output_type=_GETNODEINFORESP,
options=None,
),
])
_sym_db.RegisterServiceDescriptor(_BASE)
DESCRIPTOR.services_by_name['Base'] = _BASE
try:
# THESE ELEMENTS WILL BE DEPRECATED.
# Please use the generated *_pb2_grpc.py files instead.
import grpc
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
class BaseStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetNodeInfo = channel.unary_unary(
'/qrl.Base/GetNodeInfo',
request_serializer=GetNodeInfoReq.SerializeToString,
response_deserializer=GetNodeInfoResp.FromString,
)
class BaseServicer(object):
# missing associated documentation comment in .proto file
pass
def GetNodeInfo(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_BaseServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetNodeInfo': grpc.unary_unary_rpc_method_handler(
servicer.GetNodeInfo,
request_deserializer=GetNodeInfoReq.FromString,
response_serializer=GetNodeInfoResp.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'qrl.Base', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
class BetaBaseServicer(object):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This class was generated
only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0."""
# missing associated documentation comment in .proto file
pass
def GetNodeInfo(self, request, context):
# missing associated documentation comment in .proto file
pass
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
class BetaBaseStub(object):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This class was generated
only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0."""
# missing associated documentation comment in .proto file
pass
def GetNodeInfo(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
# missing associated documentation comment in .proto file
pass
raise NotImplementedError()
GetNodeInfo.future = None
def beta_create_Base_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This function was
generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
request_deserializers = {
('qrl.Base', 'GetNodeInfo'): GetNodeInfoReq.FromString,
}
response_serializers = {
('qrl.Base', 'GetNodeInfo'): GetNodeInfoResp.SerializeToString,
}
method_implementations = {
('qrl.Base', 'GetNodeInfo'): face_utilities.unary_unary_inline(servicer.GetNodeInfo),
}
server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
return beta_implementations.server(method_implementations, options=server_options)
def beta_create_Base_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This function was
generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
request_serializers = {
('qrl.Base', 'GetNodeInfo'): GetNodeInfoReq.SerializeToString,
}
response_deserializers = {
('qrl.Base', 'GetNodeInfo'): GetNodeInfoResp.FromString,
}
cardinalities = {
'GetNodeInfo': cardinality.Cardinality.UNARY_UNARY,
}
stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
return beta_implementations.dynamic_stub(channel, 'qrl.Base', cardinalities, options=stub_options)
except ImportError:
pass
# @@protoc_insertion_point(module_scope)
|
|
import copy
import inspect
import logging
import sys
from future.backports.http.cookies import CookieError
from future.backports.http.cookies import SimpleCookie
from future.backports.urllib.parse import urlparse
from oic import oic
from oic.extension.oidc_fed import ProviderConfigurationResponse
from oic.oauth2 import ErrorResponse
from oic.utils.http_util import SeeOther
from otest import operation
from otest import OperationError
from otest.events import EV_HTTP_RESPONSE
from otest.events import EV_RESPONSE
from otest.events import OUTGOING
from otest.events import NoSuchEvent
from otest.events import EV_RUN
from otest.events import EV_PROTOCOL_RESPONSE
from otest.events import EV_REDIRECT_URL
from otest.events import EV_REQUEST
from otest.rp.response import Response
from otest.verify import Verify
from oic.oauth2.util import set_cookie
from oic.oic import message
__author__ = 'roland'
logger = logging.getLogger(__name__)
def save_response(events, resp, resp_cls, err_resp_cls):
if 'error' in resp.message:
_msgtype = err_resp_cls
else:
_msgtype = resp_cls
if isinstance(resp, SeeOther):
p = urlparse(resp.message)
if p.query:
_msg = _msgtype().from_urlencoded(p.query)
elif p.fragment:
_msg = _msgtype().from_urlencoded(p.fragment)
else:
_msg = ''
else:
_msg = copy.copy(resp.message)
try:
_msg = _msgtype().from_json(_msg)
except Exception as err:
_msg = _msgtype().from_urlencoded(_msg)
events.store(EV_PROTOCOL_RESPONSE, _msg, direction=OUTGOING)
events.store(EV_RESPONSE, resp.message, direction=OUTGOING)
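# Minimal usage sketch (assumed, for illustration only): a redirect response
# whose query string carries an OIDC authorization response would be parsed
# and stored roughly like this; `events` is an otest event store and the URL
# values are made up.
#
#   resp = SeeOther('https://rp.example.com/cb?code=abc&state=xyz')
#   save_response(events, resp, message.AuthorizationResponse,
#                 message.AuthorizationErrorResponse)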
class Operation(operation.Operation):
def __call__(self, *args, **kwargs):
if self.skip:
return
else:
cls_name = self.__class__.__name__
if self.tests["pre"]:
_ver = Verify(self.check_factory, self.conv, cls_name=cls_name)
_ver.test_sequence(self.tests["pre"])
self.conv.events.store(EV_RUN, cls_name)
res = self.run(*args, **kwargs)
if res:
return res
class Init(Operation):
start_page = ''
endpoint = ''
def run(self, **kwargs):
self.conv.events.store('start_page', self.start_page)
if self.internal:
res = self.conv.entity.server.http_request(self.start_page)
else:
res = SeeOther(self.start_page)
return res
self.conv.events.store(EV_HTTP_RESPONSE, res)
if res.status_code in [302, 303]:
loc = res.headers['location']
try:
self.conv.events.store('Cookie', res.headers['set-cookie'])
except KeyError:
pass
logger.info('Redirect to {}'.format(loc))
logger.debug('msg: {}'.format(res.text))
self.conv.events.store(EV_REDIRECT_URL, loc, sub='init')
elif res.status_code >= 400:
logger.info('Error {}'.format(res.text))
raise OperationError('Error response on HTTP request')
return res
def handle_response(self, resp, *args):
self.conv.events.store(EV_PROTOCOL_RESPONSE,
message.AuthorizationResponse(**resp))
class ConfigurationResponse(Response):
endpoint = 'providerinfo_endpoint'
def __init__(self, conv, inut, sh, **kwargs):
Response.__init__(self, conv, inut, sh, **kwargs)
try:
self.op_type = kwargs['op_type']
except KeyError:
self.op_type = ''
else:
del kwargs['op_type']
self.msg_args = kwargs
def handle_request(self, *args, **kwargs):
return None
def construct_message(self):
op = self.conv.entity
resp = op.providerinfo_endpoint()
if resp.status == '200 OK' or resp.status == '201 Created':
save_response(self.conv.events, resp, ProviderConfigurationResponse,
ErrorResponse)
return resp
class RegistrationResponse(Response):
endpoint = 'registration'
def __init__(self, conv, inut, sh, **kwargs):
Response.__init__(self, conv, inut, sh, **kwargs)
try:
self.op_type = kwargs['op_type']
except KeyError:
self.op_type = ''
else:
del kwargs['op_type']
self.msg_args = kwargs
def construct_message(self):
req = self.conv.events.last_item(EV_REQUEST)
resp = self.conv.entity.registration_endpoint(req)
if resp.status == '200 OK' or resp.status == '201 Created':
logging.debug('Registration response: {}'.format(resp.message))
save_response(self.conv.events, resp,
oic.message.RegistrationResponse,
oic.message.ClientRegistrationErrorResponse)
return resp
class AuthorizationResponse(Response):
endpoint = 'authorization'
def __init__(self, conv, inut, sh, **kwargs):
Response.__init__(self, conv, inut, sh, **kwargs)
try:
self.op_type = kwargs['op_type']
except KeyError:
self.op_type = ''
else:
del kwargs['op_type']
self.msg_args = kwargs
def construct_message(self):
_kwargs = {'request': self.conv.events.last_item(EV_REQUEST)}
_kwargs.update(self.msg_args)
_kwargs.update(self.op_args)
_op = self.conv.entity
try:
_cookie = self.conv.events.last_item('Cookie')
except NoSuchEvent:
pass
else:
try:
set_cookie(_op.server.cookiejar, SimpleCookie(_cookie))
except CookieError as err:
logger.error(err)
else:
_kwargs['cookie'] = _op.server._cookies()
resp = _op.authorization_endpoint(**_kwargs)
if isinstance(resp, SeeOther):
save_response(self.conv.events, resp,
oic.message.AuthorizationResponse,
oic.message.AuthorizationErrorResponse)
return resp
class AccessTokenResponse(Response):
endpoint = 'token'
def __init__(self, conv, inut, sh, **kwargs):
Response.__init__(self, conv, inut, sh, **kwargs)
try:
self.op_type = kwargs['op_type']
except KeyError:
self.op_type = ''
else:
del kwargs['op_type']
self.msg_args = kwargs
def construct_message(self):
_kwargs = {
'request': self.conv.events.last_item(EV_REQUEST),
}
try:
_kwargs['authn'] = self.conv.events.last_item('HTTP_AUTHORIZATION')
except NoSuchEvent:
pass
_kwargs.update(self.msg_args)
_kwargs.update(self.op_args)
resp = self.conv.entity.token_endpoint(**_kwargs)
save_response(self.conv.events, resp,
oic.message.AccessTokenResponse,
oic.message.TokenErrorResponse)
return resp
class UserInfoResponse(Response):
endpoint = 'userinfo'
def __init__(self, conv, inut, sh, **kwargs):
Response.__init__(self, conv, inut, sh, **kwargs)
try:
self.op_type = kwargs['op_type']
except KeyError:
self.op_type = ''
else:
del kwargs['op_type']
self.msg_args = kwargs
def construct_message(self):
_kwargs = {'request': self.conv.events.last_item(EV_REQUEST)}
try:
_kwargs['authn'] = self.conv.events.last_item('HTTP_AUTHORIZATION')
except NoSuchEvent:
pass
_kwargs.update(self.msg_args)
_kwargs.update(self.op_args)
resp = self.conv.entity.userinfo_endpoint(**_kwargs)
save_response(self.conv.events, resp,
oic.message.OpenIDSchema,
oic.message.UserInfoErrorResponse)
return resp
def factory(name):
for fname, obj in inspect.getmembers(sys.modules[__name__]):
if inspect.isclass(obj):
if name == fname:
return obj
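# Usage sketch (illustrative): factory() resolves an operation class defined in
# this module by name, e.g. when a test flow is described with plain strings.
#
#   op_cls = factory('Init')          # -> the Init class
#   assert factory('NoSuchOperation') is None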
|
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs an exe through Valgrind and puts the intermediate files in a
directory.
"""
import datetime
import glob
import logging
import optparse
import os
import re
import shutil
import stat
import subprocess
import sys
import tempfile
import common
import drmemory_analyze
import memcheck_analyze
class BaseTool(object):
"""Abstract class for running dynamic error detection tools.
Always subclass this and implement ToolCommand with framework- and
tool-specific stuff.
"""
def __init__(self):
temp_parent_dir = None
self.log_parent_dir = ""
if common.IsWindows():
# gpu process on Windows Vista+ runs at Low Integrity and can only
# write to certain directories (http://crbug.com/119131)
#
# TODO(bruening): if scripts die in middle and don't clean up temp
# dir, we'll accumulate files in profile dir. should remove
# really old files automatically.
profile = os.getenv("USERPROFILE")
if profile:
self.log_parent_dir = profile + "\\AppData\\LocalLow\\"
if os.path.exists(self.log_parent_dir):
self.log_parent_dir = common.NormalizeWindowsPath(self.log_parent_dir)
temp_parent_dir = self.log_parent_dir
# Generated every time (even when overridden)
self.temp_dir = tempfile.mkdtemp(prefix="vg_logs_", dir=temp_parent_dir)
self.log_dir = self.temp_dir # overridable by --keep_logs
self.option_parser_hooks = []
# TODO(glider): we may not need some of the env vars on some of the
# platforms.
self._env = {
"G_SLICE" : "always-malloc",
"NSS_DISABLE_UNLOAD" : "1",
"NSS_DISABLE_ARENA_FREE_LIST" : "1",
"GTEST_DEATH_TEST_USE_FORK": "1",
}
def ToolName(self):
raise NotImplementedError, "This method should be implemented " \
"in the tool-specific subclass"
def Analyze(self, check_sanity=False):
raise NotImplementedError, "This method should be implemented " \
"in the tool-specific subclass"
def RegisterOptionParserHook(self, hook):
# Frameworks and tools can add their own flags to the parser.
self.option_parser_hooks.append(hook)
def CreateOptionParser(self):
# Defines Chromium-specific flags.
self._parser = optparse.OptionParser("usage: %prog [options] <program to "
"test>")
self._parser.disable_interspersed_args()
self._parser.add_option("-t", "--timeout",
dest="timeout", metavar="TIMEOUT", default=10000,
help="timeout in seconds for the run (default 10000)")
self._parser.add_option("", "--build-dir",
help="the location of the compiler output")
self._parser.add_option("", "--source-dir",
help="path to top of source tree for this build"
"(used to normalize source paths in baseline)")
self._parser.add_option("", "--gtest_filter", default="",
help="which test case to run")
self._parser.add_option("", "--gtest_repeat",
help="how many times to run each test")
self._parser.add_option("", "--gtest_print_time", action="store_true",
default=False,
help="show how long each test takes")
self._parser.add_option("", "--ignore_exit_code", action="store_true",
default=False,
help="ignore exit code of the test "
"(e.g. test failures)")
self._parser.add_option("", "--keep_logs", action="store_true",
default=False,
help="store memory tool logs in the <tool>.logs "
"directory instead of /tmp.\nThis can be "
"useful for tool developers/maintainers.\n"
"Please note that the <tool>.logs directory "
"will be clobbered on tool startup.")
# To add framework- or tool-specific flags, please add a hook using
# RegisterOptionParserHook in the corresponding subclass.
# See ValgrindTool for an example.
for hook in self.option_parser_hooks:
hook(self, self._parser)
def ParseArgv(self, args):
self.CreateOptionParser()
# self._tool_flags will store those tool flags which we don't parse
# manually in this script.
self._tool_flags = []
known_args = []
""" We assume that the first argument not starting with "-" is a program
name and all the following flags should be passed to the program.
TODO(timurrrr): customize optparse instead
"""
while len(args) > 0 and args[0][:1] == "-":
arg = args[0]
if (arg == "--"):
break
if self._parser.has_option(arg.split("=")[0]):
known_args += [arg]
else:
self._tool_flags += [arg]
args = args[1:]
if len(args) > 0:
known_args += args
self._options, self._args = self._parser.parse_args(known_args)
self._timeout = int(self._options.timeout)
self._source_dir = self._options.source_dir
if self._options.keep_logs:
# log_parent_dir has trailing slash if non-empty
self.log_dir = self.log_parent_dir + "%s.logs" % self.ToolName()
if os.path.exists(self.log_dir):
shutil.rmtree(self.log_dir)
os.mkdir(self.log_dir)
logging.info("Logs are in " + self.log_dir)
self._ignore_exit_code = self._options.ignore_exit_code
if self._options.gtest_filter != "":
self._args.append("--gtest_filter=%s" % self._options.gtest_filter)
if self._options.gtest_repeat:
self._args.append("--gtest_repeat=%s" % self._options.gtest_repeat)
if self._options.gtest_print_time:
self._args.append("--gtest_print_time")
return True
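# Illustrative example (not executed; flags and paths are made up) of how
# ParseArgv splits a command line:
#
#   args = ["--timeout=60", "--show-below-main=yes", "out/Debug/base_unittests",
#           "--single-process-tests"]
#   -> self._options.timeout == "60"                 (known flag, parsed here)
#   -> self._tool_flags == ["--show-below-main=yes"] (passed through to the tool)
#   -> self._args == ["out/Debug/base_unittests", "--single-process-tests"]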
def Setup(self, args):
return self.ParseArgv(args)
def ToolCommand(self):
raise NotImplementedError, "This method should be implemented " \
"in the tool-specific subclass"
def Cleanup(self):
# You may override it in the tool-specific subclass
pass
def Execute(self):
""" Execute the app to be tested after successful instrumentation.
Full execution command-line provided by subclassers via proc."""
logging.info("starting execution...")
proc = self.ToolCommand()
for var in self._env:
common.PutEnvAndLog(var, self._env[var])
return common.RunSubprocess(proc, self._timeout)
def RunTestsAndAnalyze(self, check_sanity):
exec_retcode = self.Execute()
analyze_retcode = self.Analyze(check_sanity)
if analyze_retcode:
logging.error("Analyze failed.")
logging.info("Search the log for '[ERROR]' to see the error reports.")
return analyze_retcode
if exec_retcode:
if self._ignore_exit_code:
logging.info("Test execution failed, but the exit code is ignored.")
else:
logging.error("Test execution failed.")
return exec_retcode
else:
logging.info("Test execution completed successfully.")
if not analyze_retcode:
logging.info("Analysis completed successfully.")
return 0
def Main(self, args, check_sanity, min_runtime_in_seconds):
"""Call this to run through the whole process: Setup, Execute, Analyze"""
start_time = datetime.datetime.now()
retcode = -1
if self.Setup(args):
retcode = self.RunTestsAndAnalyze(check_sanity)
shutil.rmtree(self.temp_dir, ignore_errors=True)
self.Cleanup()
else:
logging.error("Setup failed")
end_time = datetime.datetime.now()
runtime_in_seconds = (end_time - start_time).seconds
hours = runtime_in_seconds / 3600
seconds = runtime_in_seconds % 3600
minutes = seconds / 60
seconds = seconds % 60
logging.info("elapsed time: %02d:%02d:%02d" % (hours, minutes, seconds))
if (min_runtime_in_seconds > 0 and
runtime_in_seconds < min_runtime_in_seconds):
logging.error("Layout tests finished too quickly. "
"It should have taken at least %d seconds. "
"Something went wrong?" % min_runtime_in_seconds)
retcode = -1
return retcode
def Run(self, args, module, min_runtime_in_seconds=0):
MODULES_TO_SANITY_CHECK = ["base"]
check_sanity = module in MODULES_TO_SANITY_CHECK
return self.Main(args, check_sanity, min_runtime_in_seconds)
class ValgrindTool(BaseTool):
"""Abstract class for running Valgrind tools.
Always subclass this and implement ToolSpecificFlags() and
ExtendOptionParser() for tool-specific stuff.
"""
def __init__(self):
super(ValgrindTool, self).__init__()
self.RegisterOptionParserHook(ValgrindTool.ExtendOptionParser)
def UseXML(self):
# Override if tool prefers nonxml output
return True
def ExtendOptionParser(self, parser):
parser.add_option("", "--suppressions", default=[],
action="append",
help="path to a valgrind suppression file")
parser.add_option("", "--indirect", action="store_true",
default=False,
help="set BROWSER_WRAPPER rather than "
"running valgrind directly")
parser.add_option("", "--indirect_webkit_layout", action="store_true",
default=False,
help="set --wrapper rather than running Dr. Memory "
"directly.")
parser.add_option("", "--trace_children", action="store_true",
default=False,
help="also trace child processes")
parser.add_option("", "--num-callers",
dest="num_callers", default=30,
help="number of callers to show in stack traces")
parser.add_option("", "--generate_dsym", action="store_true",
default=False,
help="Generate .dSYM file on Mac if needed. Slow!")
def Setup(self, args):
if not BaseTool.Setup(self, args):
return False
if common.IsMac():
self.PrepareForTestMac()
return True
def PrepareForTestMac(self):
"""Runs dsymutil if needed.
Valgrind for Mac OS X requires that debugging information be in a .dSYM
bundle generated by dsymutil. It is not currently able to chase DWARF
data into .o files like gdb does, so executables without .dSYM bundles or
with the Chromium-specific "fake_dsym" bundles generated by
build/mac/strip_save_dsym won't give source file and line number
information in valgrind.
This function will run dsymutil if the .dSYM bundle is missing or if
it looks like a fake_dsym. A non-fake dsym that already exists is assumed
to be up-to-date.
"""
test_command = self._args[0]
dsym_bundle = self._args[0] + '.dSYM'
dsym_file = os.path.join(dsym_bundle, 'Contents', 'Resources', 'DWARF',
os.path.basename(test_command))
dsym_info_plist = os.path.join(dsym_bundle, 'Contents', 'Info.plist')
needs_dsymutil = True
saved_test_command = None
if os.path.exists(dsym_file) and os.path.exists(dsym_info_plist):
# Look for the special fake_dsym tag in dsym_info_plist.
dsym_info_plist_contents = open(dsym_info_plist).read()
if not re.search('^\s*<key>fake_dsym</key>$', dsym_info_plist_contents,
re.MULTILINE):
# fake_dsym is not set, this is a real .dSYM bundle produced by
# dsymutil. dsymutil does not need to be run again.
needs_dsymutil = False
else:
# fake_dsym is set. dsym_file is a copy of the original test_command
# before it was stripped. Copy it back to test_command so that
# dsymutil has unstripped input to work with. Move the stripped
# test_command out of the way, it will be restored when this is
# done.
saved_test_command = test_command + '.stripped'
os.rename(test_command, saved_test_command)
shutil.copyfile(dsym_file, test_command)
shutil.copymode(saved_test_command, test_command)
if needs_dsymutil:
if self._options.generate_dsym:
# Remove the .dSYM bundle if it exists.
shutil.rmtree(dsym_bundle, True)
dsymutil_command = ['dsymutil', test_command]
# dsymutil is crazy slow. Ideally we'd have a timeout here,
# but common.RunSubprocess' timeout is only checked
# after each line of output; dsymutil is silent
# until the end, and is then killed, which is silly.
common.RunSubprocess(dsymutil_command)
if saved_test_command:
os.rename(saved_test_command, test_command)
else:
logging.info("No real .dSYM for test_command. Line numbers will "
"not be shown. Either tell xcode to generate .dSYM "
"file, or use --generate_dsym option to this tool.")
def ToolCommand(self):
"""Get the valgrind command to run."""
# Note that self._args begins with the exe to be run.
tool_name = self.ToolName()
# Construct the valgrind command.
if 'CHROME_VALGRIND' in os.environ:
path = os.path.join(os.environ['CHROME_VALGRIND'], "bin", "valgrind")
else:
path = "valgrind"
proc = [path, "--tool=%s" % tool_name]
proc += ["--num-callers=%i" % int(self._options.num_callers)]
if self._options.trace_children:
proc += ["--trace-children=yes"]
proc += ["--trace-children-skip='*dbus-daemon*'"]
proc += ["--trace-children-skip='*dbus-launch*'"]
proc += ["--trace-children-skip='*perl*'"]
proc += ["--trace-children-skip='*python*'"]
# This is really Python, but for some reason Valgrind follows it.
proc += ["--trace-children-skip='*lsb_release*'"]
proc += self.ToolSpecificFlags()
proc += self._tool_flags
suppression_count = 0
for suppression_file in self._options.suppressions:
if os.path.exists(suppression_file):
suppression_count += 1
proc += ["--suppressions=%s" % suppression_file]
if not suppression_count:
logging.warning("WARNING: NOT USING SUPPRESSIONS!")
logfilename = self.log_dir + ("/%s." % tool_name) + "%p"
if self.UseXML():
proc += ["--xml=yes", "--xml-file=" + logfilename]
else:
proc += ["--log-file=" + logfilename]
# The Valgrind command is constructed.
# Handle --indirect_webkit_layout separately.
if self._options.indirect_webkit_layout:
# Need to create the wrapper before modifying |proc|.
wrapper = self.CreateBrowserWrapper(proc, webkit=True)
proc = self._args
proc.append("--wrapper")
proc.append(wrapper)
return proc
if self._options.indirect:
wrapper = self.CreateBrowserWrapper(proc)
os.environ["BROWSER_WRAPPER"] = wrapper
logging.info('export BROWSER_WRAPPER=' + wrapper)
proc = []
proc += self._args
return proc
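# Example of a resulting command for the memcheck subclass (illustrative only;
# the suppressions path, log dir and test binary depend on the environment):
#
#   valgrind --tool=memcheck --num-callers=30 \
#       --suppressions=tools/valgrind/memcheck/suppressions.txt \
#       --xml=yes --xml-file=<log_dir>/memcheck.%p \
#       out/Debug/base_unittests --gtest_filter=Foo.*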
def ToolSpecificFlags(self):
raise NotImplementedError, "This method should be implemented " \
"in the tool-specific subclass"
def CreateBrowserWrapper(self, proc, webkit=False):
"""The program being run invokes Python or something else that can't stand
to be valgrinded, and also invokes the Chrome browser. In this case, use a
magic wrapper to only valgrind the Chrome browser. Build the wrapper here.
Returns the path to the wrapper. It's up to the caller to use the wrapper
appropriately.
"""
command = " ".join(proc)
# Add the PID of the browser wrapper to the logfile names so we can
# separate log files for different UI tests at the analyze stage.
command = command.replace("%p", "$$.%p")
(fd, indirect_fname) = tempfile.mkstemp(dir=self.log_dir,
prefix="browser_wrapper.",
text=True)
f = os.fdopen(fd, "w")
f.write('#!/bin/bash\n'
'echo "Started Valgrind wrapper for this test, PID=$$" >&2\n')
f.write('DIR=`dirname $0`\n'
'TESTNAME_FILE=$DIR/testcase.$$.name\n\n')
if webkit:
# Webkit layout_tests pass the URL as the first line of stdin.
f.write('tee $TESTNAME_FILE | %s "$@"\n' % command)
else:
# Try to get the test case name by looking at the program arguments.
# i.e. Chromium ui_tests used --test-name arg.
# TODO(timurrrr): This doesn't handle "--test-name Test.Name"
# TODO(timurrrr): ui_tests are dead. Where do we use the non-webkit
# wrapper now? browser_tests? What do they do?
f.write('for arg in $@\ndo\n'
' if [[ "$arg" =~ --test-name=(.*) ]]\n then\n'
' echo ${BASH_REMATCH[1]} >$TESTNAME_FILE\n'
' fi\n'
'done\n\n'
'%s "$@"\n' % command)
f.close()
os.chmod(indirect_fname, stat.S_IRUSR|stat.S_IXUSR)
return indirect_fname
def CreateAnalyzer(self):
raise NotImplementedError, "This method should be implemented " \
"in the tool-specific subclass"
def GetAnalyzeResults(self, check_sanity=False):
# Glob all the files in the log directory
filenames = glob.glob(self.log_dir + "/" + self.ToolName() + ".*")
# If we have browser wrapper, the logfiles are named as
# "toolname.wrapper_PID.valgrind_PID".
# Let's extract the list of wrapper_PIDs and name it ppids
ppids = set([int(f.split(".")[-2]) \
for f in filenames if re.search("\.[0-9]+\.[0-9]+$", f)])
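# e.g. a browser-wrapper run may produce "memcheck.1234.5678" and
# "memcheck.1234.5690"; both map to wrapper PID 1234 (illustrative values).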
analyzer = self.CreateAnalyzer()
if len(ppids) == 0:
# Fast path - no browser wrapper was set.
return analyzer.Report(filenames, None, check_sanity)
ret = 0
for ppid in ppids:
testcase_name = None
try:
f = open(self.log_dir + ("/testcase.%d.name" % ppid))
testcase_name = f.read().strip()
f.close()
wk_layout_prefix="third_party/WebKit/LayoutTests/"
wk_prefix_at = testcase_name.rfind(wk_layout_prefix)
if wk_prefix_at != -1:
testcase_name = testcase_name[wk_prefix_at + len(wk_layout_prefix):]
except IOError:
pass
print "====================================================="
print " Below is the report for valgrind wrapper PID=%d." % ppid
if testcase_name:
print " It was used while running the `%s` test." % testcase_name
else:
print " You can find the corresponding test"
print " by searching the above log for 'PID=%d'" % ppid
sys.stdout.flush()
ppid_filenames = [f for f in filenames \
if re.search("\.%d\.[0-9]+$" % ppid, f)]
# check_sanity won't work with browser wrappers
assert check_sanity == False
ret |= analyzer.Report(ppid_filenames, testcase_name)
print "====================================================="
sys.stdout.flush()
if ret != 0:
print ""
print "The Valgrind reports are grouped by test names."
print "Each test has its PID printed in the log when the test was run"
print "and at the beginning of its Valgrind report."
print "Hint: you can search for the reports by Ctrl+F -> `=#`"
sys.stdout.flush()
return ret
# TODO(timurrrr): Split into a separate file.
class Memcheck(ValgrindTool):
"""Memcheck
Dynamic memory error detector for Linux & Mac
http://valgrind.org/info/tools.html#memcheck
"""
def __init__(self):
super(Memcheck, self).__init__()
self.RegisterOptionParserHook(Memcheck.ExtendOptionParser)
def ToolName(self):
return "memcheck"
def ExtendOptionParser(self, parser):
parser.add_option("--leak-check", "--leak_check", type="string",
default="yes", # --leak-check=yes is equivalent of =full
help="perform leak checking at the end of the run")
parser.add_option("", "--show_all_leaks", action="store_true",
default=False,
help="also show less blatant leaks")
parser.add_option("", "--track_origins", action="store_true",
default=False,
help="Show whence uninitialized bytes came. 30% slower.")
def ToolSpecificFlags(self):
ret = ["--gen-suppressions=all", "--demangle=no"]
ret += ["--leak-check=%s" % self._options.leak_check]
if self._options.show_all_leaks:
ret += ["--show-reachable=yes"]
else:
ret += ["--show-possibly-lost=no"]
if self._options.track_origins:
ret += ["--track-origins=yes"]
# TODO(glider): this is a temporary workaround for http://crbug.com/51716
# Let's see whether it helps.
if common.IsMac():
ret += ["--smc-check=all"]
return ret
def CreateAnalyzer(self):
use_gdb = common.IsMac()
return memcheck_analyze.MemcheckAnalyzer(self._source_dir,
self._options.show_all_leaks,
use_gdb=use_gdb)
def Analyze(self, check_sanity=False):
ret = self.GetAnalyzeResults(check_sanity)
if ret != 0:
logging.info("Please see http://dev.chromium.org/developers/how-tos/"
"using-valgrind for the info on Memcheck/Valgrind")
return ret
class DrMemory(BaseTool):
"""Dr.Memory
Dynamic memory error detector for Windows.
http://dev.chromium.org/developers/how-tos/using-drmemory
It is not very mature at the moment, some things might not work properly.
"""
def __init__(self, full_mode, pattern_mode):
super(DrMemory, self).__init__()
self.full_mode = full_mode
self.pattern_mode = pattern_mode
self.RegisterOptionParserHook(DrMemory.ExtendOptionParser)
def ToolName(self):
return "drmemory"
def ExtendOptionParser(self, parser):
parser.add_option("", "--suppressions", default=[],
action="append",
help="path to a drmemory suppression file")
parser.add_option("", "--follow_python", action="store_true",
default=False, dest="follow_python",
help="Monitor python child processes. If off, neither "
"python children nor any children of python children "
"will be monitored.")
parser.add_option("", "--indirect", action="store_true",
default=False,
help="set BROWSER_WRAPPER rather than "
"running Dr. Memory directly on the harness")
parser.add_option("", "--indirect_webkit_layout", action="store_true",
default=False,
help="set --wrapper rather than running valgrind "
"directly.")
parser.add_option("", "--use_debug", action="store_true",
default=False, dest="use_debug",
help="Run Dr. Memory debug build")
parser.add_option("", "--trace_children", action="store_true",
default=True,
help="TODO: default value differs from Valgrind")
def ToolCommand(self):
"""Get the tool command to run."""
# WINHEAP is what Dr. Memory supports as there are issues w/ both
# jemalloc (https://github.com/DynamoRIO/drmemory/issues/320) and
# tcmalloc (https://github.com/DynamoRIO/drmemory/issues/314)
add_env = {
"CHROME_ALLOCATOR" : "WINHEAP",
"JSIMD_FORCEMMX" : "1", # https://github.com/DynamoRIO/drmemory/issues/540
}
for k,v in add_env.iteritems():
logging.info("export %s=%s", k, v)
os.putenv(k, v)
drmem_cmd = os.getenv("DRMEMORY_COMMAND")
if not drmem_cmd:
raise RuntimeError, "Please set DRMEMORY_COMMAND environment variable " \
"with the path to drmemory.exe"
proc = drmem_cmd.split(" ")
# By default, don't run python (this will exclude python's children as well)
# to reduce runtime. We're not really interested in spending time finding
# bugs in the python implementation.
# With file-based config we must update the file every time, and
# it will affect simultaneous drmem uses by this user. While file-based
# config has many advantages, here we may want this-instance-only
# (https://github.com/DynamoRIO/drmemory/issues/334).
drconfig_cmd = [ proc[0].replace("drmemory.exe", "drconfig.exe") ]
drconfig_cmd += ["-quiet"] # suppress errors about no 64-bit libs
run_drconfig = True
if self._options.follow_python:
logging.info("Following python children")
# -unreg fails if not already registered so query for that first
query_cmd = drconfig_cmd + ["-isreg", "python.exe"]
query_proc = subprocess.Popen(query_cmd, stdout=subprocess.PIPE,
shell=True)
(query_out, query_err) = query_proc.communicate()
if re.search("exe not registered", query_out):
run_drconfig = False # all set
else:
drconfig_cmd += ["-unreg", "python.exe"]
else:
logging.info("Excluding python children")
drconfig_cmd += ["-reg", "python.exe", "-norun"]
if run_drconfig:
drconfig_retcode = common.RunSubprocess(drconfig_cmd, self._timeout)
if drconfig_retcode:
logging.error("Configuring whether to follow python children failed " \
"with %d.", drconfig_retcode)
raise RuntimeError, "Configuring python children failed "
suppression_count = 0
supp_files = self._options.suppressions
if self.full_mode:
supp_files += [s.replace(".txt", "_full.txt") for s in supp_files]
for suppression_file in supp_files:
if os.path.exists(suppression_file):
suppression_count += 1
proc += ["-suppress", common.NormalizeWindowsPath(suppression_file)]
if not suppression_count:
logging.warning("WARNING: NOT USING SUPPRESSIONS!")
# Un-comment to dump Dr.Memory events on error
#proc += ["-dr_ops", "-dumpcore_mask", "-dr_ops", "0x8bff"]
# Un-comment and comment next line to debug Dr.Memory
#proc += ["-dr_ops", "-no_hide"]
#proc += ["-dr_ops", "-msgbox_mask", "-dr_ops", "15"]
#Proc += ["-dr_ops", "-stderr_mask", "-dr_ops", "15"]
# Ensure we see messages about Dr. Memory crashing!
proc += ["-dr_ops", "-stderr_mask", "-dr_ops", "12"]
if self._options.use_debug:
proc += ["-debug"]
proc += ["-logdir", common.NormalizeWindowsPath(self.log_dir)]
if self.log_parent_dir:
# gpu process on Windows Vista+ runs at Low Integrity and can only
# write to certain directories (http://crbug.com/119131)
symcache_dir = os.path.join(self.log_parent_dir, "drmemory.symcache")
elif self._options.build_dir:
# The other case is only possible with -t cmdline.
# Anyways, if we omit -symcache_dir the -logdir's value is used which
# should be fine.
symcache_dir = os.path.join(self._options.build_dir, "drmemory.symcache")
if symcache_dir:
if not os.path.exists(symcache_dir):
try:
os.mkdir(symcache_dir)
except OSError:
logging.warning("Can't create symcache dir?")
if os.path.exists(symcache_dir):
proc += ["-symcache_dir", common.NormalizeWindowsPath(symcache_dir)]
# Use -no_summary to suppress DrMemory's summary and init-time
# notifications. We generate our own with drmemory_analyze.py.
proc += ["-batch", "-no_summary"]
# Un-comment to disable interleaved output. Will also suppress error
# messages normally printed to stderr.
#proc += ["-quiet", "-no_results_to_stderr"]
proc += ["-callstack_max_frames", "40"]
# disable leak scan for now
proc += ["-no_count_leaks", "-no_leak_scan"]
# disable warnings about unaddressable prefetches
proc += ["-no_check_prefetch"]
# crbug.com/413215, no heap mismatch check for Windows release build binary
if common.IsWindows() and "Release" in self._options.build_dir:
proc += ["-no_check_delete_mismatch"]
# make callstacks easier to read
proc += ["-callstack_srcfile_prefix",
"build\\src,chromium\\src,crt_build\\self_x86"]
proc += ["-callstack_modname_hide",
"*drmemory*,chrome.dll"]
boring_callers = common.BoringCallers(mangled=False, use_re_wildcards=False)
# TODO(timurrrr): In fact, we want "starting from .." instead of "below .."
proc += ["-callstack_truncate_below", ",".join(boring_callers)]
if self.pattern_mode:
proc += ["-pattern", "0xf1fd", "-no_count_leaks", "-redzone_size", "0x20"]
elif not self.full_mode:
proc += ["-light"]
proc += self._tool_flags
# Dr.Memory requires -- to separate tool flags from the executable name.
proc += ["--"]
if self._options.indirect or self._options.indirect_webkit_layout:
wrapper_path = os.path.join(self._source_dir,
"tools", "valgrind", "browser_wrapper_win.py")
wrapper = " ".join(["python", wrapper_path] + proc)
self.CreateBrowserWrapper(wrapper)
logging.info("browser wrapper = " + " ".join(proc))
if self._options.indirect_webkit_layout:
proc = self._args
# Layout tests want forward slashes.
wrapper = wrapper.replace('\\', '/')
proc += ["--wrapper", wrapper]
return proc
else:
proc = []
# Note that self._args begins with the name of the exe to be run.
self._args[0] = common.NormalizeWindowsPath(self._args[0])
proc += self._args
return proc
def CreateBrowserWrapper(self, command):
os.putenv("BROWSER_WRAPPER", command)
def Analyze(self, check_sanity=False):
# Use one analyzer for all the log files to avoid printing duplicate reports
#
# TODO(timurrrr): unify this with Valgrind and other tools when we have
# https://github.com/DynamoRIO/drmemory/issues/684
analyzer = drmemory_analyze.DrMemoryAnalyzer()
ret = 0
if not self._options.indirect and not self._options.indirect_webkit_layout:
filenames = glob.glob(self.log_dir + "/*/results.txt")
ret = analyzer.Report(filenames, None, check_sanity)
else:
testcases = glob.glob(self.log_dir + "/testcase.*.logs")
# If we have browser wrapper, the per-test logdirs are named as
# "testcase.wrapper_PID.name".
# Let's extract the list of wrapper_PIDs and name it ppids.
# NOTE: ppids may contain '_', i.e. they are not ints!
ppids = set([f.split(".")[-2] for f in testcases])
for ppid in ppids:
testcase_name = None
try:
f = open("%s/testcase.%s.name" % (self.log_dir, ppid))
testcase_name = f.read().strip()
f.close()
except IOError:
pass
print "====================================================="
print " Below is the report for drmemory wrapper PID=%s." % ppid
if testcase_name:
print " It was used while running the `%s` test." % testcase_name
else:
# TODO(timurrrr): hm, the PID line is suppressed on Windows...
print " You can find the corresponding test"
print " by searching the above log for 'PID=%s'" % ppid
sys.stdout.flush()
ppid_filenames = glob.glob("%s/testcase.%s.logs/*/results.txt" %
(self.log_dir, ppid))
ret |= analyzer.Report(ppid_filenames, testcase_name, False)
print "====================================================="
sys.stdout.flush()
logging.info("Please see http://dev.chromium.org/developers/how-tos/"
"using-drmemory for the info on Dr. Memory")
return ret
class ToolFactory:
def Create(self, tool_name):
if tool_name == "memcheck":
return Memcheck()
if tool_name == "drmemory" or tool_name == "drmemory_light":
# TODO(timurrrr): remove support for "drmemory" when buildbots are
# switched to drmemory_light OR make drmemory==drmemory_full the default
# mode when the tool is mature enough.
return DrMemory(False, False)
if tool_name == "drmemory_full":
return DrMemory(True, False)
if tool_name == "drmemory_pattern":
return DrMemory(False, True)
try:
platform_name = common.PlatformNames()[0]
except common.NotImplementedError:
platform_name = sys.platform + "(Unknown)"
raise RuntimeError, "Unknown tool (tool=%s, platform=%s)" % (tool_name,
platform_name)
def CreateTool(tool):
return ToolFactory().Create(tool)
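# Usage sketch (illustrative; argument values are made up):
#
#   tool = CreateTool("memcheck")
#   sys.exit(tool.Run(sys.argv[1:], "base"))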
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
import six
from six.moves.urllib import parse as urlparse
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LW
from heat.engine import attributes
from heat.engine import properties
from heat.engine import resource
from heat.engine import support
LOG = logging.getLogger(__name__)
class SwiftContainer(resource.Resource):
PROPERTIES = (
NAME, X_CONTAINER_READ, X_CONTAINER_WRITE, X_CONTAINER_META,
X_ACCOUNT_META, PURGE_ON_DELETE,
) = (
'name', 'X-Container-Read', 'X-Container-Write', 'X-Container-Meta',
'X-Account-Meta', 'PurgeOnDelete',
)
ATTRIBUTES = (
DOMAIN_NAME, WEBSITE_URL, ROOT_URL, OBJECT_COUNT, BYTES_USED,
HEAD_CONTAINER,
) = (
'DomainName', 'WebsiteURL', 'RootURL', 'ObjectCount', 'BytesUsed',
'HeadContainer',
)
properties_schema = {
NAME: properties.Schema(
properties.Schema.STRING,
_('Name for the container. If not specified, a unique name will '
'be generated.')
),
X_CONTAINER_READ: properties.Schema(
properties.Schema.STRING,
_('Specify the ACL permissions on who can read objects in the '
'container.')
),
X_CONTAINER_WRITE: properties.Schema(
properties.Schema.STRING,
_('Specify the ACL permissions on who can write objects to the '
'container.')
),
X_CONTAINER_META: properties.Schema(
properties.Schema.MAP,
_('A map of user-defined meta data to associate with the '
'container. Each key in the map will set the header '
'X-Container-Meta-{key} with the corresponding value.'),
default={}
),
X_ACCOUNT_META: properties.Schema(
properties.Schema.MAP,
_('A map of user-defined meta data to associate with the '
'account. Each key in the map will set the header '
'X-Account-Meta-{key} with the corresponding value.'),
default={}
),
PURGE_ON_DELETE: properties.Schema(
properties.Schema.BOOLEAN,
_("If True, delete any objects in the container "
"when the container is deleted. "
"Otherwise, deleting a non-empty container "
"will result in an error."),
default=False,
support_status=support.SupportStatus(
version='2015.1')
),
}
attributes_schema = {
DOMAIN_NAME: attributes.Schema(
_('The host from the container URL.'),
type=attributes.Schema.STRING
),
WEBSITE_URL: attributes.Schema(
_('The URL of the container.'),
type=attributes.Schema.STRING
),
ROOT_URL: attributes.Schema(
_('The parent URL of the container.'),
type=attributes.Schema.STRING
),
OBJECT_COUNT: attributes.Schema(
_('The number of objects stored in the container.'),
type=attributes.Schema.INTEGER
),
BYTES_USED: attributes.Schema(
_('The number of bytes stored in the container.'),
type=attributes.Schema.INTEGER
),
HEAD_CONTAINER: attributes.Schema(
_('A map containing all headers for the container.'),
type=attributes.Schema.MAP
),
}
default_client_name = 'swift'
def physical_resource_name(self):
name = self.properties[self.NAME]
if name:
return name
return super(SwiftContainer, self).physical_resource_name()
@staticmethod
def _build_meta_headers(obj_type, meta_props):
"""Returns a new dict.
Each key of new dict is prepended with "X-Container-Meta-".
"""
if meta_props is None:
return {}
return dict(
('X-' + obj_type.title() + '-Meta-' + k, v)
for (k, v) in meta_props.items())
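# For example (illustrative values):
#   _build_meta_headers("container", {"color": "blue"})
#     -> {"X-Container-Meta-color": "blue"}
#   _build_meta_headers("account", {"temp-url-key": "secret"})
#     -> {"X-Account-Meta-temp-url-key": "secret"}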
def handle_create(self):
"""Create a container."""
container = self.physical_resource_name()
container_headers = SwiftContainer._build_meta_headers(
"container", self.properties[self.X_CONTAINER_META])
account_headers = SwiftContainer._build_meta_headers(
"account", self.properties[self.X_ACCOUNT_META])
for key in (self.X_CONTAINER_READ, self.X_CONTAINER_WRITE):
if self.properties[key] is not None:
container_headers[key] = self.properties[key]
LOG.debug('SwiftContainer create container %(container)s with '
'container headers %(container_headers)s and '
'account headers %(account_headers)s'
% {'container': container,
'account_headers': account_headers,
'container_headers': container_headers})
self.client().put_container(container, container_headers)
if account_headers:
self.client().post_account(account_headers)
self.resource_id_set(container)
def _get_objects(self):
try:
container, objects = self.client().get_container(self.resource_id)
except Exception as ex:
self.client_plugin().ignore_not_found(ex)
return None
return objects
def _deleter(self, obj=None):
"""Delete the underlying container or an object inside it."""
args = [self.resource_id]
if obj:
deleter = self.client().delete_object
args.append(obj['name'])
else:
deleter = self.client().delete_container
try:
deleter(*args)
except Exception as ex:
self.client_plugin().ignore_not_found(ex)
def handle_delete(self):
if self.resource_id is None:
return
objects = self._get_objects()
if objects:
if self.properties[self.PURGE_ON_DELETE]:
self._deleter(objects.pop()) # save first container refresh
else:
msg = _("Deleting non-empty container (%(id)s) "
"when %(prop)s is False") % {
'id': self.resource_id,
'prop': self.PURGE_ON_DELETE}
raise exception.ResourceActionNotSupported(action=msg)
# objects is either None (container is gone already) or (empty) list
if objects is not None:
objects = len(objects)
return objects
def check_delete_complete(self, objects):
if objects is None: # resource was not created or is gone already
return True
if objects: # integer >=0 from the first invocation
objs = self._get_objects()
if objs is None:
return True # container is gone already
if objs:
self._deleter(objs.pop())
if objs: # save one last _get_objects() API call
return False
self._deleter()
return True
def handle_check(self):
self.client().get_container(self.resource_id)
def get_reference_id(self):
return six.text_type(self.resource_id)
def _resolve_attribute(self, key):
parsed = list(urlparse.urlparse(self.client().url))
if key == self.DOMAIN_NAME:
return parsed[1].split(':')[0]
elif key == self.WEBSITE_URL:
return '%s://%s%s/%s' % (parsed[0], parsed[1], parsed[2],
self.resource_id)
elif key == self.ROOT_URL:
return '%s://%s%s' % (parsed[0], parsed[1], parsed[2])
elif self.resource_id and key in (
self.OBJECT_COUNT, self.BYTES_USED, self.HEAD_CONTAINER):
try:
headers = self.client().head_container(self.resource_id)
except Exception as ex:
if self.client_plugin().is_client_exception(ex):
LOG.warn(_LW("Head container failed: %s"), ex)
return None
raise
else:
if key == self.OBJECT_COUNT:
return headers['x-container-object-count']
elif key == self.BYTES_USED:
return headers['x-container-bytes-used']
elif key == self.HEAD_CONTAINER:
return headers
def _show_resource(self):
return self.client().head_container(self.resource_id)
def resource_mapping():
return {
'OS::Swift::Container': SwiftContainer,
}
|
|
"""
Includes functions for reading and writing graphs, in a very simple readable format.
loadgraph: reads from files
inputgraph: reads from terminal input / stdin
savegraph: writes to files
printgraph: writes to terminal / stdout
writeDOT: writes in .dot format; can be used for visualization.
The other functions are internal, to implement the above functions.
The graph objects returned by loadgraph and inputgraph are by default constructed using the <graph> class in the module
basicgraphs.py, but by using an optional argument you can use your own graph class (provided that it supports the same
methods/interface).
This module also supports edge weighted graphs: edges should/will have an (integer) attribute <weight>.
"""
# Version: 30-01-2015, Paul Bonsma
# updated 30-01-2015: writeDOT also writes color information for edges.
# updated 2-2-2015: writeDOT can also write directed graphs.
# updated 5-2-2015: no black fill color used, when more than numcolors**2 vertices.
from graphs import basicgraphs
defaultcolorscheme = "paired12"
numcolors = 12
# defaultcolorscheme="piyg11"
# numcolors=11
# defaultcolorscheme="spectral11"
# numcolors=11
def readgraph(graphclass, readline):
"""
For internal use.
:param readline:
:param graphclass:
"""
options = []
while True:
try:
s = readline()
n = int(s)
g = graphclass(n)
break
except ValueError:
if len(s) > 0 and s[-1] == '\n':
options.append(s[:-1])
else:
options.append(s)
s = readline()
edgelist = []
try:
while True:
comma = s.find(',')
if ':' in s:
colon = s.find(':')
edgelist.append((int(s[:comma]), int(s[comma + 1:colon]), int(s[colon + 1:])))
else:
edgelist.append((int(s[:comma]), int(s[comma + 1:]), None))
s = readline()
except Exception:
pass
for edge in edgelist:
# print("Adding edge (%d,%d)"%(edge[0],edge[1]))
e = g.addedge(g[edge[0]], g[edge[1]])
if edge[2] is not None:
e.weight = edge[2]
if s != '' and s[0] == '-':
return g, options, True
else:
return g, options, False
def readgraphlist(graphclass, readline):
"""
For internal use.
:param readline:
:param graphclass:
"""
options = []
l = []
contin = True
while contin:
g, newoptions, contin = readgraph(graphclass, readline)
options += newoptions
l.append(g)
return l, options
def loadgraph(filename, graphclass=basicgraphs.graph, readlist=False):
"""
Reads the file <filename>, and returns the corresponding graph object.
Optional second argument: you may use your own <graph> class, instead of
the one from basicgraphs.py (default).
Optional third argument: set to True if you want to read a list of graphs, and
options included in the file.
In that case, the output is a 2-tuple, where the first item is a list of graphs,
and the second is a list of options (strings).
:param readlist: boolean to determine if it is one graph or a list of graphs
:param graphclass: graph class of basicgraphs.py
:param filename: File of the graph(list) to be loaded
"""
readfile = open(filename, 'rt')
def readln():
s = readfile.readline()
while len(s) > 0 and s[0] == '#':
s = readfile.readline()
return s
if readlist:
gl, options = readgraphlist(graphclass, readln)
readfile.close()
return gl, options
else:
g, options, tmp = readgraph(graphclass, readln)
readfile.close()
return g # ,options
def inputgraph(graphclass=basicgraphs.graph, readlist=False):
"""
Reads a graph from stdin, and returns the corresponding graph object.
Optional first argument: you may use your own <graph> class, instead of
the one from basicgraphs.py.
Optional second argument: set to True if you want to read a list of graphs, and
options included in the file.
In that case, the output is a 2-tuple, where the first item is a list of graphs,
and the second is a list of options (strings).
:param graphclass:
:param readlist:
"""
def readln():
s = input()
while len(s) > 0 and s[0] == '#':
s = input()
return s
if readlist:
gl, options = readgraphlist(graphclass, readln)
return gl, options
else:
g, options, tmp = readgraph(graphclass, readln)
return g # ,options
def writegraphlist(gl, writeline, options=None):
"""
For internal use.
:param options:
:param writeline:
:param gl:
"""
# we may only write options that cannot be seen as an integer:
if options is None:
options = []
for S in options:
try:
x = int(S)
except ValueError:
writeline(str(S))
for i in range(len(gl)):
g = gl[i]
n = len(g.V())
writeline('# Number of vertices:')
writeline(str(n))
# Give the vertices (temporary) labels from 0 to n-1:
nl = {}
for j in range(n):
nl[g[j]] = j
writeline('# Edge list:')
for e in g.E():
if hasattr(e, 'weight'):
writeline(str(nl[e.tail()]) + ',' + str(nl[e.head()]) + ':' + str(e.weight))
else:
writeline(str(nl[e.tail()]) + ',' + str(nl[e.head()]))
if i + 1 < len(gl):
writeline('--- Next graph:')
def savegraph(gl, filename, options=None):
"""
Saves the given graph <GL> in the given <filename>.
Optional last argument: a list of options that will be included in the
file header.
Alternatively, <GL> may be a list of graphs, which are then all written to the
file.
:param options:
:param filename:
:param gl:
"""
if options is None:
options = []
writefile = open(filename, 'wt')
def writeln(s):
writefile.write(s + '\n')
if type(gl) is list:
writegraphlist(gl, writeln, options)
else:
writegraphlist([gl], writeln, options)
writefile.close()
def printgraph(gl, options=None):
"""
Writes the given graph <GL> to stdout.
Optional last argument: a list of options that will be included in the
header.
Alternatively, <GL> may be a list of graphs, which are then all written.
:param options:
:param gl:
"""
if options is None:
options = []
def writeln(s):
print(s)
if type(gl) is list:
writegraphlist(gl, writeln, options)
else:
writegraphlist([gl], writeln, options)
def writeDOT(g, filename, directed=False):
"""
Writes the given graph <G> in .dot format to <filename>.
If vertices contain attributes <label>, <colortext> or <colornum>, these are also
included in the file.
(<Colortext> should be something like "Blue"/"Magenta"/"Khaki"/"Peachpuff"/"Olivedrab",...
and a <colornum> should be an integer.)
If edges contain an attribute <weight> (integer), these are also included in the
file.
Optional argument: <directed>. If True, then the edges are written as directed edges.
Google GraphViz for more information on the .dot format.
:param directed:
:param g:
:param filename:
"""
writefile = open(filename, 'wt')
if directed:
writefile.write('digraph G {\n')
else:
writefile.write('graph G {\n')
name = {}
nextname = 0
for v in g.V():
name[v] = nextname
nextname += 1
options = 'penwidth=3,'
if hasattr(v, 'label'):
options += 'label="' + str(v.label) + '",'
if hasattr(v, 'colortext'):
options += 'color="' + v.colortext + '",'
elif hasattr(v, 'colornum'):
options += 'color=' + str(v.colornum % numcolors + 1) + ', colorscheme=' + defaultcolorscheme + ','
if v.colornum >= numcolors:
options += 'style=filled,fillcolor=' + str((v.colornum // numcolors) % numcolors + 1) + ','
if len(options) > 0:
writefile.write(' ' + str(name[v]) + ' [' + options[:-1] + ']\n')
else:
writefile.write(' ' + str(name[v]) + '\n')
writefile.write('\n')
for e in g.E():
options = 'penwidth=2,'
if hasattr(e, 'weight'):
options += 'label="' + str(e.weight) + '",'
if hasattr(e, 'colortext'):
options += 'color="' + e.colortext + '",'
elif hasattr(e, 'colornum'):
options += 'color=' + str(e.colornum % numcolors + 1) + ', colorscheme=' + defaultcolorscheme + ','
if len(options) > 0:
options = ' [' + options[:-1] + ']'
if directed:
writefile.write(' ' + str(name[e.tail()]) + ' -> ' + str(name[e.head()]) + options + '\n')
else:
writefile.write(' ' + str(name[e.tail()]) + '--' + str(name[e.head()]) + options + '\n')
writefile.write('}')
writefile.close()
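# Usage sketch (illustrative file names): write a graph in GraphViz format and
# render it with the external `dot` tool, e.g.
#
#   g = loadgraph("mygraph.gr")
#   writeDOT(g, "mygraph.dot")
#   # then on the command line:  dot -Tpng mygraph.dot -o mygraph.png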
|
|
"""Helpers for components that manage entities."""
from threading import Lock
from homeassistant import config as conf_util
from homeassistant.bootstrap import (prepare_setup_platform,
prepare_setup_component)
from homeassistant.const import (
ATTR_ENTITY_ID, CONF_SCAN_INTERVAL, CONF_ENTITY_NAMESPACE,
DEVICE_DEFAULT_NAME)
from homeassistant.exceptions import HomeAssistantError
from homeassistant.loader import get_component
from homeassistant.helpers import config_per_platform, discovery
from homeassistant.helpers.entity import generate_entity_id
from homeassistant.helpers.event import track_utc_time_change
from homeassistant.helpers.service import extract_entity_ids
DEFAULT_SCAN_INTERVAL = 15
class EntityComponent(object):
"""Helper class that will help a component manage its entities."""
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-arguments
def __init__(self, logger, domain, hass,
scan_interval=DEFAULT_SCAN_INTERVAL, group_name=None):
"""Initialize an entity component."""
self.logger = logger
self.hass = hass
self.domain = domain
self.entity_id_format = domain + '.{}'
self.scan_interval = scan_interval
self.group_name = group_name
self.entities = {}
self.group = None
self.config = None
self.lock = Lock()
self._platforms = {
'core': EntityPlatform(self, self.scan_interval, None),
}
self.add_entities = self._platforms['core'].add_entities
def setup(self, config):
"""Set up a full entity component.
Loads the platforms from the config and will listen for supported
discovered platforms.
"""
self.config = config
# Look in config for Domain, Domain 2, Domain 3 etc and load them
for p_type, p_config in config_per_platform(config, self.domain):
self._setup_platform(p_type, p_config)
# Generic discovery listener for loading platform dynamically
# Refer to: homeassistant.components.discovery.load_platform()
def component_platform_discovered(platform, info):
"""Callback to load a platform."""
self._setup_platform(platform, {}, info)
discovery.listen_platform(self.hass, self.domain,
component_platform_discovered)
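# Illustrative configuration handled by setup() (YAML; keys are examples):
# the component domain plus numbered variants each yield one platform entry
# via config_per_platform, e.g. for domain "light":
#
#   light:
#     platform: hue
#   light 2:
#     platform: mqtt
#     scan_interval: 30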
def extract_from_service(self, service):
"""Extract all known entities from a service call.
Will return all entities if no entities specified in call.
Will return an empty list if entities specified but unknown.
"""
with self.lock:
if ATTR_ENTITY_ID not in service.data:
return list(self.entities.values())
return [self.entities[entity_id] for entity_id
in extract_entity_ids(self.hass, service)
if entity_id in self.entities]
def _setup_platform(self, platform_type, platform_config,
discovery_info=None):
"""Setup a platform for this component."""
platform = prepare_setup_platform(
self.hass, self.config, self.domain, platform_type)
if platform is None:
return
# Config > Platform > Component
scan_interval = (platform_config.get(CONF_SCAN_INTERVAL) or
getattr(platform, 'SCAN_INTERVAL', None) or
self.scan_interval)
entity_namespace = platform_config.get(CONF_ENTITY_NAMESPACE)
key = (platform_type, scan_interval, entity_namespace)
if key not in self._platforms:
self._platforms[key] = EntityPlatform(self, scan_interval,
entity_namespace)
entity_platform = self._platforms[key]
try:
platform.setup_platform(self.hass, platform_config,
entity_platform.add_entities,
discovery_info)
self.hass.config.components.append(
'{}.{}'.format(self.domain, platform_type))
except Exception: # pylint: disable=broad-except
self.logger.exception(
'Error while setting up platform %s', platform_type)
def add_entity(self, entity, platform=None):
"""Add entity to component."""
if entity is None or entity in self.entities.values():
return False
entity.hass = self.hass
if getattr(entity, 'entity_id', None) is None:
object_id = entity.name or DEVICE_DEFAULT_NAME
if platform is not None and platform.entity_namespace is not None:
object_id = '{} {}'.format(platform.entity_namespace,
object_id)
entity.entity_id = generate_entity_id(
self.entity_id_format, object_id,
self.entities.keys())
self.entities[entity.entity_id] = entity
entity.update_ha_state()
return True
def update_group(self):
"""Set up and/or update component group."""
if self.group is None and self.group_name is not None:
group = get_component('group')
self.group = group.Group(self.hass, self.group_name,
user_defined=False)
if self.group is not None:
self.group.update_tracked_entity_ids(self.entities.keys())
def reset(self):
"""Remove entities and reset the entity component to initial values."""
with self.lock:
for platform in self._platforms.values():
platform.reset()
self._platforms = {
'core': self._platforms['core']
}
self.entities = {}
self.config = None
if self.group is not None:
self.group.stop()
self.group = None
def prepare_reload(self):
"""Prepare reloading this entity component."""
try:
path = conf_util.find_config_file(self.hass.config.config_dir)
conf = conf_util.load_yaml_config_file(path)
except HomeAssistantError as err:
self.logger.error(err)
return None
conf = prepare_setup_component(self.hass, conf, self.domain)
if conf is None:
return None
self.reset()
return conf
class EntityPlatform(object):
"""Keep track of entities for a single platform."""
# pylint: disable=too-few-public-methods
def __init__(self, component, scan_interval, entity_namespace):
"""Initalize the entity platform."""
self.component = component
self.scan_interval = scan_interval
self.entity_namespace = entity_namespace
self.platform_entities = []
self._unsub_polling = None
def add_entities(self, new_entities):
"""Add entities for a single platform."""
with self.component.lock:
for entity in new_entities:
if self.component.add_entity(entity, self):
self.platform_entities.append(entity)
self.component.update_group()
if self._unsub_polling is not None or \
not any(entity.should_poll for entity
in self.platform_entities):
return
self._unsub_polling = track_utc_time_change(
self.component.hass, self._update_entity_states,
second=range(0, 60, self.scan_interval))
def reset(self):
"""Remove all entities and reset data."""
for entity in self.platform_entities:
entity.remove()
if self._unsub_polling is not None:
self._unsub_polling()
self._unsub_polling = None
def _update_entity_states(self, now):
"""Update the states of all the polling entities."""
with self.component.lock:
# We copy the entities because new entities might be detected
# during state update causing deadlocks.
entities = list(entity for entity in self.platform_entities
if entity.should_poll)
for entity in entities:
entity.update_ha_state(True)
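# Typical use from a component's setup() function (sketch; DOMAIN, _LOGGER and
# MyEntity are assumed to be defined by the component using this helper):
#
#   component = EntityComponent(_LOGGER, DOMAIN, hass,
#                               scan_interval=30, group_name='All my things')
#   component.setup(config)
#   component.add_entities([MyEntity()])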
|
|
import pytest
import pandas as pd
import numpy as np
from plotly.callbacks import Points, BoxSelector
from tardis.visualization.widgets.line_info import LineInfoWidget
from tardis.util.base import species_string_to_tuple
@pytest.fixture(scope="class")
def line_info_widget(simulation_verysimple):
line_info_widget = LineInfoWidget.from_simulation(simulation_verysimple)
return line_info_widget
@pytest.mark.parametrize(
("wavelength_range", "filter_mode"),
[
([3000, 4000], "packet_out_nu"),
([3000, 4000], "packet_in_nu"),
([16200, 16300], "packet_out_nu"),
(None, "packet_in_nu"),
],
)
class TestLineInfoWidgetData:
"""Tests for methods that handles data in LineInfoWidget."""
def test_get_species_interactions(
self, line_info_widget, wavelength_range, filter_mode
):
"""
Test for get_species_interactions() method.
Checks the shape of the dataframe and, in cases where the resulting
dataframe should not be empty, whether all values sum up to 1.
"""
species_interactions_df = line_info_widget.get_species_interactions(
wavelength_range, filter_mode
)
if wavelength_range is None or wavelength_range == [16200, 16300]:
# Dataframe contains all falsy values (proxy for empty)
assert species_interactions_df.all(axis=None) == False
else:
# All values sum up to 1
assert np.isclose(species_interactions_df.iloc[:, 0].sum(), 1)
# Test shape of the dataframe
expected_df_length = (
line_info_widget.line_interaction_analysis[filter_mode]
.last_line_in.groupby(["atomic_number", "ion_number"])
.ngroups
)
assert species_interactions_df.shape == (expected_df_length, 1)
@pytest.fixture
def allowed_species(self, line_info_widget, wavelength_range, filter_mode):
"""
For different combinations of the wavelength_range and filter_mode
parameters, calls get_species_interactions() on line_info_widget and
returns the species that can be selected (or None if there are none).
"""
# Find species present within the selected wavelength range
species_interactions_df = line_info_widget.get_species_interactions(
wavelength_range, filter_mode
)
if species_interactions_df.all(axis=None) == False:
allowed_species = None # no species can be selected
else:
allowed_species = species_interactions_df.index
return allowed_species
@pytest.mark.parametrize("group_mode", ["exc", "de-exc", "both"])
def test_get_last_line_counts(
self, line_info_widget, allowed_species, filter_mode, group_mode
):
"""
Test for get_last_line_counts() method.
Since this method depends on get_species_interactions(), we need to
make sure that we select only allowed species, i.e. those present within
the wavelength range selected by get_species_interactions(); this is
done here by the allowed_species fixture.
"""
if allowed_species is None:
last_line_counts_df = line_info_widget.get_last_line_counts(
None, filter_mode, group_mode
)
# Dataframe contains all falsy values (proxy for empty)
assert last_line_counts_df.all(axis=None) == False
return
for selected_species in allowed_species:
last_line_counts_df = line_info_widget.get_last_line_counts(
selected_species, filter_mode, group_mode
)
last_lines_in = (
line_info_widget.line_interaction_analysis[filter_mode]
.last_line_in.xs(
key=species_string_to_tuple(selected_species),
level=["atomic_number", "ion_number"],
)
.reset_index()
)
last_lines_out = (
line_info_widget.line_interaction_analysis[filter_mode]
.last_line_out.xs(
key=species_string_to_tuple(selected_species),
level=["atomic_number", "ion_number"],
)
.reset_index()
)
if group_mode == "exc":
expected_df_length = last_lines_in.groupby("line_id").ngroups
elif group_mode == "de-exc":
expected_df_length = last_lines_out.groupby("line_id").ngroups
elif group_mode == "both":
expected_df_length = last_lines_in.groupby(
["line_id", last_lines_out["line_id"]]
).ngroups
# Test shape of the dataframe
assert last_line_counts_df.shape == (expected_df_length, 1)
class TestLineInfoWidgetEvents:
"""
Test changes in table widgets data by triggering all possible events.
This will make sure that all event listeners are working properly and
    updating data in tables accurately. Each of the following four methods
    triggers one of the events (interactions) possible in LineInfoWidget.
"""
@pytest.fixture(
scope="class",
params=[
[2500, 3500], # Wavelength range with plenty of line interactions
[16200, 16300], # Wavelength range with no line interactions
None, # No selection of wavelength range
],
)
def liw_with_selection(self, simulation_verysimple, request):
"""
Makes different wavelength range selection on figure (specified by
params) after creating a LineInfoWidget object.
"""
liw = LineInfoWidget.from_simulation(simulation_verysimple)
# To attach event listeners to component widgets of line_info_widget
_ = liw.display()
selection_range = request.param
        # Since we cannot programmatically make a box selection on the
        # spectrum, we directly call its event listener, passing the
        # selected wavelength range in a BoxSelector object
if selection_range:
liw._spectrum_selection_handler(
trace=liw.figure_widget.data[0],
points=Points(),
selector=BoxSelector(
xrange=selection_range,
yrange=[
-1.8e39,
1.8e40,
], # Not very relevant, approx height of box
),
)
return liw, selection_range
def test_selection_on_plot(self, liw_with_selection):
"""
Test if selection on spectrum plot, updates correct data in both
the tables and total packets label.
"""
# Since wavelength range selection is already made by liw_with_selection
# fixture, we don't need to trigger selection event here again
line_info_widget, selected_wavelength_range = liw_with_selection
expected_species_interactions = (
line_info_widget.get_species_interactions(
wavelength_range=selected_wavelength_range,
filter_mode=line_info_widget.FILTER_MODES[
line_info_widget.filter_mode_buttons.index
],
)
)
pd.testing.assert_frame_equal(
expected_species_interactions,
line_info_widget.species_interactions_table.df,
)
expected_last_line_counts = line_info_widget.get_last_line_counts(
selected_species=expected_species_interactions.index[0],
filter_mode=line_info_widget.FILTER_MODES[
line_info_widget.filter_mode_buttons.index
],
group_mode=line_info_widget.GROUP_MODES[
line_info_widget.group_mode_dropdown.index
],
)
pd.testing.assert_frame_equal(
expected_last_line_counts,
line_info_widget.last_line_counts_table.df,
)
if selected_wavelength_range in [None, [16200, 16300]]:
expected_total_packets = 0
else:
expected_total_packets = expected_last_line_counts.iloc[:, 0].sum()
assert expected_total_packets == int(
line_info_widget.total_packets_label.widget.children[1].value
)
@pytest.mark.parametrize("selected_filter_mode_idx", [0, 1])
def test_filter_mode_toggle(
self,
liw_with_selection,
selected_filter_mode_idx,
):
"""
Test if toggling filter_mode_buttons updates correct data in both
the tables and total packets label.
"""
line_info_widget, selected_wavelength_range = liw_with_selection
# Toggle the filter_mode_buttons
line_info_widget.filter_mode_buttons.index = selected_filter_mode_idx
expected_species_interactions = (
line_info_widget.get_species_interactions(
wavelength_range=selected_wavelength_range,
filter_mode=line_info_widget.FILTER_MODES[
selected_filter_mode_idx
],
)
)
pd.testing.assert_frame_equal(
expected_species_interactions,
line_info_widget.species_interactions_table.df,
)
expected_last_line_counts = line_info_widget.get_last_line_counts(
selected_species=expected_species_interactions.index[0],
filter_mode=line_info_widget.FILTER_MODES[selected_filter_mode_idx],
group_mode=line_info_widget.GROUP_MODES[
line_info_widget.group_mode_dropdown.index
],
)
pd.testing.assert_frame_equal(
expected_last_line_counts,
line_info_widget.last_line_counts_table.df,
)
if selected_wavelength_range in [None, [16200, 16300]]:
expected_total_packets = 0
else:
expected_total_packets = expected_last_line_counts.iloc[:, 0].sum()
assert expected_total_packets == int(
line_info_widget.total_packets_label.widget.children[1].value
)
def test_selection_on_species_intrctn_table(self, liw_with_selection):
"""
Test if selection on each row in species_interaction_table updates
correct data in last_line_counts_table and total packets label.
"""
line_info_widget, _ = liw_with_selection
for (
selected_species
) in line_info_widget.species_interactions_table.df.index:
# Select row in species_interactions_table
line_info_widget.species_interactions_table.change_selection(
[selected_species]
)
if bool(selected_species) == False:
# When selected_species is a falsy value due to empty
# species_interactions_table, use it as None in get_last_line_counts()
selected_species = None
expected_last_line_counts = line_info_widget.get_last_line_counts(
selected_species=selected_species,
filter_mode=line_info_widget.FILTER_MODES[
line_info_widget.filter_mode_buttons.index
],
group_mode=line_info_widget.GROUP_MODES[
line_info_widget.group_mode_dropdown.index
],
)
pd.testing.assert_frame_equal(
expected_last_line_counts,
line_info_widget.last_line_counts_table.df,
)
if selected_species is None:
expected_total_packets = 0
else:
expected_total_packets = expected_last_line_counts.iloc[
:, 0
].sum()
assert expected_total_packets == int(
line_info_widget.total_packets_label.widget.children[1].value
)
@pytest.mark.parametrize("selected_group_mode_idx", [0, 1, 2])
def test_group_mode_change(
self, liw_with_selection, selected_group_mode_idx
):
"""
Test if selecting an option from group_mode_dropdown updates
correct data in last_line_counts_table and total packets label.
"""
line_info_widget, _ = liw_with_selection
# Select the option in group_mode_dropdown
line_info_widget.group_mode_dropdown.index = selected_group_mode_idx
# For testing changes in last_line_counts_table data,
# we're only considering the 1st row (0th index species)
# in species_interactions_table
if line_info_widget.last_line_counts_table.df.all(axis=None) == False:
species0 = None
else:
species0 = line_info_widget.species_interactions_table.df.index[0]
# Select 1st row in species_interaction_table, if not selected
line_info_widget.species_interactions_table.change_selection(
[species0]
)
expected_last_line_counts = line_info_widget.get_last_line_counts(
selected_species=species0,
filter_mode=line_info_widget.FILTER_MODES[
line_info_widget.filter_mode_buttons.index
],
group_mode=line_info_widget.GROUP_MODES[selected_group_mode_idx],
)
pd.testing.assert_frame_equal(
expected_last_line_counts,
line_info_widget.last_line_counts_table.df,
)
if species0 is None:
expected_total_packets = 0
else:
expected_total_packets = expected_last_line_counts.iloc[:, 0].sum()
assert expected_total_packets == int(
line_info_widget.total_packets_label.widget.children[1].value
)
|
|
""" Code to grab the data from NeuroVault, and compute a map of
frequency of activation in the brain.
"""
# Authors: Chris Filo Gorgolewski, Gael Varoquaux
# License: BSD
import json
import urllib, os, errno
from urllib2 import Request, urlopen, HTTPError
import pandas as pd
from pandas.io.json import json_normalize
import numpy as np
import pylab as plt
from nipype.utils.filemanip import split_filename
import nibabel as nb
from joblib import Memory
from nilearn.image import resample_img
from nilearn.masking import compute_background_mask, _extrapolate_out_mask
from nilearn.plotting.img_plotting import plot_anat
# Use a joblib memory, to avoid depending on an Internet connection
mem = Memory(cachedir=os.path.join(os.getenv('HOME'), 'neurovault_analysis/cache'))
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else: raise
def get_collections_df():
"""Downloads metadata about collections/papers stored in NeuroVault and
return it as a pandas DataFrame"""
request = Request('http://neurovault.org/api/collections/?format=json')
response = urlopen(request)
elevations = response.read()
data = json.loads(elevations)
collections_df = json_normalize(data)
collections_df.rename(columns={'id':'collection_id'}, inplace=True)
    # keep collection_id as a regular column (rather than the index);
    # it is used as the merge key in get_images_with_collections_df()
return collections_df
def get_images_df():
"""Downloads metadata about images/statistical maps stored in NeuroVault and
return it as a pandas DataFrame"""
request=Request('http://neurovault.org/api/images/?format=json')
response = urlopen(request)
elevations = response.read()
data = json.loads(elevations)
images_df = json_normalize(data)
images_df['collection'] = images_df['collection'].apply(lambda x: int(x.split("/")[-2]))
images_df['image_id'] = images_df['url'].apply(lambda x: int(x.split("/")[-2]))
images_df.rename(columns={'collection':'collection_id'}, inplace=True)
return images_df
def get_images_with_collections_df():
"""Downloads metadata about images/statistical maps stored in NeuroVault and
and enriches it with metadata of the corresponding collections. The result
is returned as a pandas DataFrame"""
collections_df = get_collections_df()
images_df = get_images_df()
combined_df = pd.merge(images_df, collections_df, how='left', on='collection_id',
suffixes=('_image', '_collection'))
return combined_df
def download_and_resample(images_df, dest_dir, target):
"""Downloads all stat maps and resamples them to a common space.
"""
target_nii = nb.load(target)
orig_path = os.path.join(dest_dir, "original")
mkdir_p(orig_path)
resampled_path = os.path.join(dest_dir, "resampled")
mkdir_p(resampled_path)
    out_df = images_df.copy()
    for row in images_df.iterrows():
# Downloading the file to the "original" subfolder
_, _, ext = split_filename(row[1]['file'])
orig_file = os.path.join(orig_path, "%04d%s" % (row[1]['image_id'], ext))
if not os.path.exists(orig_file):
print "Downloading %s" % orig_file
urllib.urlretrieve(row[1]['file'], orig_file)
try:
# Compute the background and extrapolate outside of the mask
print "Extrapolating %s" % orig_file
niimg = nb.load(orig_file)
data = niimg.get_data().squeeze()
niimg = nb.Nifti1Image(data, niimg.affine,
header=niimg.get_header())
bg_mask = compute_background_mask(niimg).get_data()
# Test if the image has been masked:
out_of_mask = data[np.logical_not(bg_mask)]
if np.all(np.isnan(out_of_mask)) or len(np.unique(out_of_mask)) == 1:
# Need to extrapolate
data = _extrapolate_out_mask(data.astype(np.float), bg_mask,
iterations=3)[0]
niimg = nb.Nifti1Image(data, niimg.affine,
header=niimg.get_header())
del out_of_mask, bg_mask
# Resampling the file to target and saving the output in the "resampled"
# folder
resampled_file = os.path.join(resampled_path,
"%06d%s" % (row[1]['image_id'], ext))
print "Resampling %s" % orig_file
resampled_nii = resample_img(niimg, target_nii.get_affine(),
target_nii.shape)
resampled_nii = nb.Nifti1Image(resampled_nii.get_data().squeeze(),
resampled_nii.get_affine(),
header=niimg.get_header())
if len(resampled_nii.shape) == 3:
resampled_nii.to_filename(resampled_file)
else:
# We have a 4D file
assert len(resampled_nii.shape) == 4
resampled_data = resampled_nii.get_data()
affine = resampled_nii.affine
for index in range(resampled_nii.shape[-1]):
# First save the files separately
this_nii = nb.Nifti1Image(resampled_data[..., index],
affine)
this_id = int("%i%i" % (-row[1]['image_id'], index))
this_file = os.path.join(resampled_path,
"%06d%s" % (this_id, ext))
this_nii.to_filename(this_file)
# Second, fix the dataframe
out_df = out_df[out_df.image_id != row[1]['image_id']]
this_row = row[1].copy()
this_row.image_id = this_id
out_df = out_df.append(this_row)
except IOError as e:
# Fix the dataframe
out_df = out_df[out_df.image_id != row[1]['image_id']]
print "Could not load %s " % orig_file
print e
return out_df
def get_frequency_map(images_df, dest_dir, target):
"""
"""
mask_img = 'gm_mask.nii.gz'
mask = nb.load(mask_img).get_data().astype(np.bool)
target_nii = nb.load(target)
resampled_path = os.path.join(dest_dir, "resampled")
freq_map_data = np.zeros(target_nii.shape)
n_images = 0
    for row in images_df.iterrows():
_, _, ext = split_filename(row[1]['file'])
orig_file = os.path.join(resampled_path,
"%06d%s" % (row[1]['image_id'], ext))
        if not os.path.exists(orig_file):
            urllib.urlretrieve(row[1]['file'], orig_file)
resampled_nii = resample_img(orig_file, target_nii.get_affine(),
target_nii.shape,
interpolation="nearest")
data = resampled_nii.get_data().squeeze()
data[np.isnan(data)] = 0
data[np.logical_not(mask)] = 0
data = np.abs(data)
# Keep only things that are very significant
data = data > 3
if len(data.shape) == 4:
for d in np.rollaxis(data, -1):
freq_map_data += (d != 0)
n_images +=1
else:
freq_map_data += data
n_images += 1
freq_map_data *= 100. / n_images
return nb.Nifti1Image(freq_map_data, target_nii.get_affine())
def url_get(url):
request = Request(url)
response = urlopen(request)
return response.read()
def get_neurosynth_terms(combined_df):
""" Grab terms for each image, decoded with neurosynth"""
terms = list()
from sklearn.feature_extraction import DictVectorizer
vectorizer = DictVectorizer()
image_ids = list()
for row in combined_df.iterrows():
image_id = row[1]['image_id']
image_ids.append(int(image_id))
print "Fetching terms for image %i" % image_id
image_url = row[1]['url_image'].split('/')[-2]
try:
elevations = mem.cache(url_get)(
'http://neurosynth.org/decode/data/?neurovault=%s'
% image_url)
data = json.loads(elevations)['data']
data = dict([(i['analysis'], i['r']) for i in data])
except HTTPError:
data = {}
terms.append(data)
X = vectorizer.fit_transform(terms).toarray()
term_dframe = dict([('neurosynth decoding %s' % name, X[:, idx])
for name, idx in vectorizer.vocabulary_.items()])
term_dframe['image_id'] = image_ids
return pd.DataFrame(term_dframe)
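# Note (illustrative, not from the original script): the DataFrame returned by
# get_neurosynth_terms() has one row per image, an 'image_id' column, and one
# column per decoded term named 'neurosynth decoding <term>' holding the
# correlation r for that term (0 where decoding failed).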
if __name__ == '__main__':
#mem.clear()
combined_df = mem.cache(get_images_with_collections_df)()
# # The following maps are not brain maps
# faulty_ids = [96, 97, 98]
# # And the following are crap
# faulty_ids.extend([338, 339])
# # 335 is a duplicate of 336
# faulty_ids.extend([335, ])
# combined_df = combined_df[~combined_df.image_id.isin(faulty_ids)]
print combined_df[['DOI', 'url_collection', 'name_image', 'file']]
# #restrict to Z-, F-, or T-maps
# combined_df = combined_df[combined_df['map_type'].isin(["Z","F","T"])]
# terms_df = get_neurosynth_terms(combined_df)
# print combined_df["name_collection"].value_counts()
# combined_df = combined_df.merge(terms_df, on=['image_id', ])
dest_dir = os.path.join(os.getenv('HOME'), 'neurovault_analysis/data')
target = "/usr/share/fsl/data/standard/MNI152_T1_2mm.nii.gz"
combined_df = mem.cache(download_and_resample)(combined_df,
dest_dir, target)
# # Now remove -3360, -3362, and -3364, that are mean images, and not Z
# # scores
# not_Zscr = [-3360, -3362, -3364]
# combined_df = combined_df[~combined_df.image_id.isin(not_Zscr)]
# # Now remove images that are ugly, or obviously not z maps:
# broken = [1202, 1163, 1931, 1101, 1099]
# combined_df = combined_df[~combined_df.image_id.isin(broken)]
combined_df.to_csv('%s/metadata.csv' % dest_dir, encoding='utf8')
# #--------------------------------------------------
# # Plot a map of frequency of activation
# freq_nii = get_frequency_map(combined_df, dest_dir, target)
# freq_nii.to_filename("freq_map.nii.gz")
# display = plot_anat("/usr/share/fsl/data/standard/MNI152_T1_2mm.nii.gz",
# display_mode='z',
# cut_coords=np.linspace(-30, 60, 7))
# display.add_overlay(freq_nii, vmin=0, cmap=plt.cm.hot,
# colorbar=True)
# display._colorbar_ax.set_yticklabels(["% 3i" % float(t.get_text())
# for t in display._colorbar_ax.yaxis.get_ticklabels()])
# display.title('Percentage of activations (Z or T > 3)')
# display.savefig('activation_frequency.png')
# display.savefig('activation_frequency.pdf')
# #--------------------------------------------------
# # Plot the frequency of occurence of neurosynth terms
# # Use the terms from neurosynth to label the ICAs
# terms = combined_df[[c for c in combined_df.columns
# if c.startswith('neurosynth decoding')]]
# terms = terms.fillna(0)
# term_matrix = terms.as_matrix()
# # Labels that have a negative correlation are not present in the map
# term_matrix[term_matrix < 0] = 0
# term_names = [c[20:] for c in combined_df.columns
# if c.startswith('neurosynth decoding')]
# plt.figure(figsize=(5, 20))
# plt.barh(np.arange(len(term_names)), term_matrix.sum(axis=0))
# plt.axis('off')
# plt.axis('tight')
# plt.tight_layout()
# for idx, name in enumerate(term_names):
# plt.text(.1, idx + .1, name)
# plt.savefig('term_distribution.pdf')
# plt.show()
|
|
#!/usr/bin/env python
# we're using python 3.x style print but want it to work in python 2.x,
from __future__ import print_function
import re, os, argparse, sys, math, warnings, subprocess, threading, shutil
import tempfile
import platform
# make sure scripts/internal is on the pythonpath.
sys.path = [ os.path.abspath(os.path.dirname(sys.argv[0])) + "/internal" ] + sys.path
# for ExitProgram and RunCommand
from pocolm_common import *
parser = argparse.ArgumentParser(description="Usage: "
"get_counts.py [options] <source-int-dir> <ngram-order> <dest-count-dir>"
"e.g.: get_counts.py data/int 3 data/counts_3"
"This script computes data-counts of the specified n-gram order"
"for each data-source in <source-int-dir>, and puts them all in"
"<dest-counts-dir>.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--dump-counts-parallel", type=str, default='true',
choices=['true','false'],
help="If true, while obtaining the original counts, process multiple data-sources "
"in parallel (configurable because 'sort' may use a fair amount of memory).")
parser.add_argument("--verbose", type=str, default='false',
choices=['true','false'],
help="If true, print commands as we execute them.")
parser.add_argument("--cleanup", type=str, default='true',
choices=['true','false'],
help="If true, remove intermediate files (only relevant if --min-counts option "
"is supplied).")
parser.add_argument("--min-counts", type=str, default='',
help="This string allows you to specify minimum counts to be applied "
"to the stats. You may in general specify min-counts per n-gram order "
"and per data-source, but they applied 'jointly' in a smart way so "
"that, for example, for some order if all data-sources have a min-count "
"of 2, an n-gram will be pruned from all data-sources if the total count "
"over all data-sources is 2. Min-counts may be specified for order 3 "
"and above, in a comma-separated list, with values that must be "
"non-decreasing. E.g. --min-counts=2,3. In case of mismatch with "
"the actual n-gram order, excess min-counts will be truncated and "
"an deficit will be remedied by repeating the last min-count. You "
"may specify different min-counts for different data-sources, e.g. "
"--min-counts='fisher=2,3 swbd1=1,1'. You may also set min-counts for "
"some data-sources and use a default for others, as in "
"--min-counts='fisher=2,3 default=1,1'. You may not set min-counts for "
"the dev set.");
parser.add_argument("--num-min-count-jobs", type=int, default=5,
help="The number of parallel jobs used for applying min-counts (only "
"relevant if --min-counts option is given")
parser.add_argument("--num-count-jobs", type=int, default=4,
help="The number of parallel processes per data source used for "
"getting initial counts")
parser.add_argument("--max-memory", type=str, default='',
help="Memory limitation for sort.")
parser.add_argument("--limit-unk-history", type=str, default='false',
choices=['true','false'],
help="Truncate the left n-gram of an <unk> in history.")
parser.add_argument("source_int_dir",
help="Specify <source_int_dir> the data-source")
parser.add_argument("ngram_order", type=int,
help="Specify the order of ngram")
parser.add_argument("dest_count_dir",
help="Specify <dest_count_dir> the destination to puts the counts")
args = parser.parse_args()
# this reads the 'names' file (which has lines like "1 switchboard", "2 fisher"
# and so on), and returns a dictionary from integer id to name.
def ReadNames(names_file):
try:
f = open(names_file, "r");
except:
sys.exit("get_counts.py: failed to open --names={0}"
" for reading".format(names_file))
number_to_name = { }
for line in f:
try:
[ number, name ] = line.split();
number = int(number)
except:
sys.exit("get_counts.py: Bad line '{0}' in names file {1}".format(
line[0:-1], names_file))
if number in number_to_name:
sys.exit("get_counts.py: duplicate number {0} in names file {1}".format(
number, names_file))
number_to_name[number] = name
f.close()
return number_to_name
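# Illustrative usage of ReadNames() (hypothetical file contents, not from this script):
# if <source-int-dir>/names contains the lines "1 switchboard" and "2 fisher",
# then ReadNames() returns {1: 'switchboard', 2: 'fisher'}.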
def GetNumTrainSets(source_int_dir):
f = open(source_int_dir + '/num_train_sets')
# the following should not fail, since we validated source_int_dir.
num_train_sets = int(f.readline())
assert f.readline() == ''
f.close()
return num_train_sets
# copy over some meta-info into the 'counts' directory.
def CopyMetaInfo(source_int_dir, dest_count_dir):
for f in ['num_train_sets', 'num_words', 'names', 'words.txt' ]:
try:
src = source_int_dir + os.path.sep + f
dest = dest_count_dir + os.path.sep + f
shutil.copy(src, dest)
except:
ExitProgram('error copying {0} to {1}'.format(src, dest))
def IsCygwin():
return platform.system()[0:3].lower() == 'win' or platform.system()[0:3].lower() == 'cyg'
# This function, called from FormatMinCounts, takes an array of
# min-counts like [2,3], and normalizes its length to ngram_order - 2
# by either removing elements from the end, or duplicating the last
# element. If it makes any change, it prints a warning.
def NormalizeMinCountsLength(min_counts, ngram_order):
if len(min_counts) == 0:
# this point in the code should not be reached, actually.
sys.exit("get_counts.py: invalid --min-counts string or code error.")
ans = min_counts
# Check that the min-counts are non-decreasing and are >= 1.
for i in range(len(min_counts) - 1):
if min_counts[i] < 1:
sys.exit("get_counts.py: invalid --min-counts string, min-counts must "
"be >= 1.")
if min_counts[i] > min_counts[i+1]:
sys.exit("get_counts.py: invalid --min-counts string, min-counts must "
"not decrease from one n-gram order to the next.")
if len(ans) < ngram_order - 2:
while len(ans) < ngram_order - 2:
ans.append(ans[-1]) # duplicate the last element
print("get_counts.py: extending min-counts from {0} to {1} since "
"ngram order is {2}".format(','.join([str(x) for x in min_counts]),
','.join([str(x) for x in ans]), ngram_order))
if len(ans) > ngram_order - 2:
ans = ans[0:ngram_order-2]
print("get_counts.py: truncating min-counts from {0} to {1} since "
"ngram order is {2}".format(','.join([str(x) for x in min_counts]),
','.join([str(x) for x in ans]), ngram_order))
return ans
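# Worked examples of the normalization above (the function also prints a
# message whenever it changes the list):
#   NormalizeMinCountsLength([2, 3], ngram_order=5) -> [2, 3, 3]  (extended)
#   NormalizeMinCountsLength([2, 3, 4], ngram_order=4) -> [2, 3]  (truncated)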
# This function converts from the format of --min-counts string accepted by this
# program to the format that is accepted by int-counts-enforce-min-counts; it
# returns a string (such as --min-counts="2,3" -> "2 3", which would be a valid
# string for a 4-gram setup and arbitrary number of inputs; or, supposing
# --min-counts="fisher=2,3 swbd=1,2" and the "names" file maps "fisher" -> 1
# and "swbd" -> 2, this function would return the string "2,1 3,2".
# If the ngram-order is <3, this function will return the empty string, and
# in that case you shouldn't try to apply min-counts.
def FormatMinCounts(source_int_dir, num_train_sets, ngram_order, min_counts):
if len(min_counts) == 0:
sys.exit("get_counts.py: empty --min-counts string.")
if ngram_order < 3:
print("get_counts.py: ignoring --min-counts string since ngram "
"order is {0} and min-counts are only supported for orders "
"3 and greater.".format(ngram_order), file=sys.stderr)
return ''
pieces = min_counts.split()
# 'pieces' is the whitespace-separated pieces of the string.
if len(pieces) == 1 and len(pieces[0].split('=')) == 1:
# the user has specified something like --min-counts=2,3, and we have
# something like pieces = ['2,3']. So there is no attempt to have
# different min-counts for different data sources.
try:
min_counts_per_order = [ float(x) for x in pieces[0].split(',') ]
except:
sys.exit("get_counts.py: --min-counts={0} has unexpected format".format(
min_counts))
min_counts_per_order = NormalizeMinCountsLength(min_counts_per_order,
ngram_order)
ans = ' '.join([str(int(x)) if x == int(x) else str(x)
for x in min_counts_per_order])
else:
# we expect 'pieces' to be something like [ 'fisher=2,3' 'swbd=1,2' ].
# we'll set up a dictionary from name to min-count array, something
# like name_to_mincounts = [ 'fisher':[2,3], 'swbd':[1,2] ]
name_to_mincounts = dict()
for piece in pieces:
try:
[name,comma_separated_list] = piece.split('=')
this_mincounts = [ float(x) for x in comma_separated_list.split(',') ]
this_mincounts = NormalizeMinCountsLength(this_mincounts,
ngram_order)
except:
sys.exit("get_counts.py: could not parse --min-counts='{0}'.".format(
min_counts))
if name in name_to_mincounts:
sys.exit("get_counts.py: duplicate entry found in --min-counts='{0}'.".format(
min_counts))
name_to_mincounts[name] = this_mincounts
names_used = set() # the set of keys of 'name_to_mincounts' that have been used.
# names is a map from integer to name, e.g.
# names = [ 1:'fisher', 2:'swbd' ]
names = ReadNames(source_int_dir + "/names")
# min_counts_per_order will be an array (one per order from 2,...)
# of arrays, one per training set, of the respective min-counts per
# dataset, e.g. in our example it would be [ [ 2,1 ], [3,2] ]
min_counts_per_order = [ ]
for o in range(ngram_order - 2):
min_counts_per_order.append([])
for n in range(1, num_train_sets + 1):
# the next line shouldn't fail since the data-dir did validate correctly.
name = names[n]
if name in name_to_mincounts:
this_mincounts = name_to_mincounts[name]
names_used.add(name)
elif 'default' in name_to_mincounts:
this_mincounts = name_to_mincounts['default']
names_used.add('default')
else:
sys.exit("get_counts.py: invalid min-counts --min-counts='{0}' since there "
"is no min-count specified for {1}.".format(min_counts, name))
for o in range(ngram_order - 2):
min_counts_per_order[o].append(this_mincounts[o])
ans = ' '.join([ ','.join([str(int(x)) if x == int(x) else str(x)
for x in array])
for array in min_counts_per_order ])
for name in name_to_mincounts.keys():
if not name in names_used:
sys.exit("get_counts.py: invalid min-counts --min-counts='{0}' since the key "
"{1} is never used.".format(min_counts, name))
if args.verbose == 'true':
print("get_counts.py: converted min-counts from --min-counts='{0}' to '{1}'".format(
min_counts, ans))
# test whether ans is all ones, and warn if so.
a = ans.replace(',', ' ').split()
if a == [ '1' ] * len(a):
print("get_counts.py: **warning: --min-counts={0} is equivalent to not applying any "
"min-counts, it would be more efficient not to use the option at all, or "
"to set it to the empty string.".format(min_counts))
return ans
# save the n-gram order.
def SaveNgramOrder(dest_count_dir, ngram_order):
try:
f = open('{0}/ngram_order'.format(dest_count_dir), 'w')
except:
ExitProgram('error opening file {0}/ngram_order for writing'.format(dest_count_dir))
assert ngram_order >= 2
print(ngram_order, file=f)
f.close()
# this function dumps the counts to disk.
# if num_splits == 0 [relevant when we're not using min-counts], then it dumps
# its output to {dest_count_dir}/int.{n}.{o} with n = 1..num_train_sets,
# o=2..ngram_order. (note: n is supplied to this function).
#
# If num-splits >= 1 [relevant when we're using min-counts], then it dumps its output
## {dest_count_dir}/int.{n}.split{j} with n = 1..num_train_sets, j=1..num_splits.
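# For example (following the naming scheme above), with ngram_order=3, n=1 and
# num_splits=0 this writes {dest_count_dir}/int.1.2 and {dest_count_dir}/int.1.3;
# with num_splits=2 it writes {dest_count_dir}/int.1.split1 and
# {dest_count_dir}/int.1.split2.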
def GetCountsSingleProcess(source_int_dir, dest_count_dir, ngram_order, n, num_splits = 0):
if num_splits == 0:
int_counts_output = "/dev/null " + " ".join([ "{0}/int.{1}.{2}".format(dest_count_dir, n, o)
for o in range(2, ngram_order + 1) ])
else:
assert num_splits >= 1
int_counts_output = '/dev/stdout | split-int-counts ' + \
' '.join([ "{0}/int.{1}.split{2}".format(dest_count_dir, n, j)
for j in range(1, num_splits + 1) ])
command = "bash -c 'set -o pipefail; export LC_ALL=C; gunzip -c {source_int_dir}/{n}.txt.gz | "\
"get-text-counts {limit_unk_history} {ngram_order} | sort {mem_opt}| uniq -c | "\
"get-int-counts {int_counts_output}'".format(source_int_dir = source_int_dir,
n = n , ngram_order = ngram_order,
limit_unk_history = "--limit-unk-history" if args.limit_unk_history == 'true' else "",
mem_opt = sort_mem_opt,
int_counts_output = int_counts_output)
log_file = "{dest_count_dir}/log/get_counts.{n}.log".format(
dest_count_dir = dest_count_dir, n = n)
RunCommand(command, log_file, args.verbose == 'true')
# This function uses multiple parallel processes to dump the counts to files.
# if num_splits == 0 [relevant when we're not using min-counts], then it dumps its output to
# {dest_count_dir}/int.{n}.{o} with n = 1..num_train_sets, o=2..ngram_order.
# (note: n is supplied to this function).
#
# If num-splits >= 1 [relevant when we're using min-counts], then it dumps its output
## {dest_count_dir}/int.{n}.split{j} with n = 1..num_train_sets, j=1..num_splits.
# This function uses multiple processes (num_proc) in parallel to run
# 'get-text-counts' (this tends to be the bottleneck).
# It will use just one process if the amount of data is quite small or if
# the platform is Cygwin (where named pipes don't work)
def GetCountsMultiProcess(source_int_dir, dest_count_dir, ngram_order, n, num_proc,
num_splits = 0):
try:
file_size = os.path.getsize('{0}/{1}.txt.gz'.format(source_int_dir, n))
except:
ExitProgram('get_counts.py: error getting file size of '
'{0}/{1}.txt.gz'.format(source_int_dir, n))
if IsCygwin() or num_proc <= 1 or file_size < 1000000:
if num_proc > 1 and file_size >= 1000000:
            # if it's only because of Cygwin that we're not using multiple
            # processes, this merits a warning.
print("get_counts.py: cygwin platform detected so named pipes won't work; "
"using a single process (will be slower)")
return GetCountsSingleProcess(source_int_dir, dest_count_dir,
ngram_order, n, num_splits)
if num_splits == 0:
int_counts_output = "/dev/null " + " ".join([ "{0}/int.{1}.{2}".format(dest_count_dir, n, o)
for o in range(2, ngram_order + 1) ])
else:
assert num_splits >= 1
int_counts_output = '/dev/stdout | split-int-counts ' + \
' '.join([ "{0}/int.{1}.split{2}".format(dest_count_dir, n, j)
for j in range(1, num_splits + 1) ])
try:
        # we want a temporary directory on a local file system
        # for the named pipes (FIFOs) created below.
tempdir = tempfile.mkdtemp()
except Exception as e:
ExitProgram("Error creating temporary directory: " + str(e))
# This has several pipes for the internal processing that write to and read
# from other internal pipes; and we can't do this using '|' in the shell, we
# need to use mkfifo. This does not work properly on cygwin.
log_file = "{dest_count_dir}/log/get_counts.{n}.log".format(
dest_count_dir = dest_count_dir, n = n)
test_command = "bash -c 'set -o pipefail; (echo a; echo b) | "\
"distribute-input-lines /dev/null /dev/null'";
# We run the following command just to make sure distribute-input-lines is
# on the path and compiled, since we get hard-to-debug errors if it fails.
RunCommand(test_command, log_file)
# we use "bash -c '...'" to make sure it gets run in bash, since
# for example 'set -o pipefail' would only work in bash.
command = ("bash -c 'set -o pipefail; set -e; export LC_ALL=C; mkdir -p {0}; ".format(tempdir) +
''.join(['mkfifo {0}/{1}; '.format(tempdir, p)
for p in range(num_proc) ]) +
'trap "rm -r {0}" SIGINT SIGKILL SIGTERM EXIT; '.format(tempdir) +
'gunzip -c {0}/{1}.txt.gz | distribute-input-lines '.format(source_int_dir, n) +
' '.join(['{0}/{1}'.format(tempdir, p) for p in range(num_proc)]) + '& ' +
'sort -m {0}'.format(sort_mem_opt) +
' '.join([ '<(get-text-counts {4} {0} <{1}/{2} | sort {3})'.format(ngram_order, tempdir, p, sort_mem_opt,
"--limit-unk-history" if args.limit_unk_history == 'true' else "")
for p in range(num_proc) ]) +
'| uniq -c | get-int-counts {0}'.format(int_counts_output) +
"'") # end the quote from the 'bash -c'.
RunCommand(command, log_file, args.verbose=='true')
# This function applies the min-counts (it is only called if you supplied the
# --min-counts option to this script). It reads in the data dumped by
# GetCounts. It dumps the files into {dest_count_dir}/int.{n}.split{j}.{o}
# for n = 1...num_train_sets j = 1..num_jobs, and o=2..ngram_order. [note: j is
# supplied to this function].
def EnforceMinCounts(dest_count_dir, formatted_min_counts, ngram_order, num_train_sets, j):
inputs = ' '.join([ "{0}/int.{1}.split{2}".format(dest_count_dir, n, j)
for n in range(1, num_train_sets + 1) ])
outputs = ' '.join([' '.join([ '{0}/int.{1}.split{2}.{3}'.format(dest_count_dir, n, j, o)
for o in range(2, ngram_order + 1) ])
for n in range(1, num_train_sets + 1) ])
# e.g. suppose j is 2 and ngram_order is 4, outputs would be as follows
# [assuming brace expansion].:
# outputs = dir/int.1.split2.{2,3,4} dir/int.2.split2.{2,3,4} ...
# dir/int.{num_train_sets}.split2.{2,3,4}
command = "int-counts-enforce-min-counts {ngram_order} {formatted_min_counts} {inputs} "\
"{outputs}".format(
ngram_order = ngram_order, formatted_min_counts = formatted_min_counts,
inputs = inputs, outputs = outputs, j = j)
log_file = '{0}/log/enforce_min_counts.{1}.log'.format(dest_count_dir, j)
RunCommand(command, log_file, args.verbose=='true')
# This function merges counts from multiple jobs, that have been split up by
# most recent history-word (it is only called if you supplied the --min-counts
# option to this script). It reads in the data dumped by EnforceMinCounts.
# it merges the files into {dest_count_dir}/int.{n}.{o}.
def MergeCounts(dest_count_dir, num_jobs, n, o):
if num_jobs > 1:
command = ('merge-int-counts ' +
' '.join(['{0}/int.{1}.split{2}.{3}'.format(dest_count_dir, n, j, o)
for j in range(1, num_jobs + 1)]) +
'>{0}/int.{1}.{2}'.format(dest_count_dir, n, o))
log_file = '{0}/log/merge_counts.{1}.{2}.log'.format(dest_count_dir, n, o)
RunCommand(command, log_file, args.verbose=='true')
else:
assert num_jobs == 1
# we can just move the file if num-jobs == 1.
try:
os.remove('{0}/int.{1}.{2}'.format(dest_count_dir, n, o))
except:
pass
os.rename('{0}/int.{1}.split1.{2}'.format(dest_count_dir, n, o),
'{0}/int.{1}.{2}'.format(dest_count_dir, n, o))
# we also want to merge the files $dir/int.dev.{2,3,...} into a single file
# that contains all the dev-data's counts; this will be used in likelihood
# evaluation.
def MergeDevData(dest_count_dir, ngram_order):
command = ("merge-int-counts " + ' '.join([ dest_count_dir + "/int.dev." + str(n)
for n in range(2, ngram_order + 1) ]) +
">{0}/int.dev".format(dest_count_dir))
log_file = dest_count_dir + '/log/merge_dev_counts.log'
RunCommand(command, log_file, args.verbose=='true')
# this function returns the value and unit of max_memory:
# if max_memory is in the format "integer + letter/%", like "10G", it returns (10, 'G');
# if max_memory contains no letter, like "10000", it returns (10000, '').
# we assume the input string is not empty, since this function is never called with an empty string.
def ParseMemoryString(s):
if not s[-1].isdigit():
return (int(s[:-1]), s[-1])
else:
return (int(s), '')
# make sure 'scripts' and 'src' directory are on the path
os.environ['PATH'] = (os.environ['PATH'] + os.pathsep +
os.path.abspath(os.path.dirname(sys.argv[0])) + os.pathsep +
os.path.abspath(os.path.dirname(sys.argv[0])) + "/../src")
if os.system("validate_int_dir.py " + args.source_int_dir) != 0:
ExitProgram("command validate_int_dir.py {0} failed".format(args.source_int_dir))
if args.ngram_order < 2:
ExitProgram("ngram-order is {0}; it must be at least 2. If you "
"want a unigram LM, do it by hand".format(args.ngram_order))
# read the variable 'num_train_sets'
# from the corresponding file in source_int_dir. This shouldn't fail
# because we just called validate_int_dir.py.
f = open(args.source_int_dir + "/num_train_sets")
num_train_sets = int(f.readline())
f.close()
# set the memory restriction for "sort"
sort_mem_opt = ''
if args.max_memory != '':
if args.dump_counts_parallel == 'true':
(value, unit) = ParseMemoryString(args.max_memory)
sub_memory = value/num_train_sets
if sub_memory != float(value)/num_train_sets:
if unit in ['K', '']:
sub_memory = value*1024/num_train_sets
unit = 'b'
if unit == 'M':
sub_memory = value*1024/num_train_sets
unit = 'K'
if unit == 'G':
sub_memory = value*1024/num_train_sets
unit = 'M'
if (unit in ['b', '%']) and (sub_memory == 0):
ExitProgram("max_memory for each of the {0} train sets is {1}{2}."
"Please reset a larger max_memory value".format(
num_train_sets, float(value)/num_train_sets, unit))
sub_max_memory = str(int(sub_memory)) + unit
sort_mem_opt = ("--buffer-size={0} ".format(sub_max_memory))
else:
sort_mem_opt = ("--buffer-size={0} ".format(args.max_memory))
if not os.path.isdir(args.dest_count_dir):
try:
os.makedirs(args.dest_count_dir+'/log')
except:
ExitProgram("error creating directory " + args.dest_count_dir)
CopyMetaInfo(args.source_int_dir, args.dest_count_dir)
SaveNgramOrder(args.dest_count_dir, args.ngram_order)
if args.min_counts == '':
# no min-counts specified: use normal pipeline.
print("get_counts.py: dumping counts", file=sys.stderr)
threads = []
for n in [ "dev" ] + list(range(1, num_train_sets + 1)):
threads.append(threading.Thread(target = GetCountsMultiProcess,
args = [args.source_int_dir, args.dest_count_dir,
args.ngram_order, str(n), args.num_count_jobs] ))
threads[-1].start()
if args.dump_counts_parallel == 'false':
threads[-1].join()
if args.dump_counts_parallel == 'true':
for t in threads:
t.join()
MergeDevData(args.dest_count_dir, args.ngram_order)
print("get_counts.py: done", file=sys.stderr)
else:
# First process the dev data, the min-counts aren't relevant here.
GetCountsSingleProcess(args.source_int_dir, args.dest_count_dir,
args.ngram_order, 'dev')
MergeDevData(args.dest_count_dir, args.ngram_order)
num_mc_jobs = args.num_min_count_jobs
if num_mc_jobs < 1:
ExitProgram("bad option --num-min-count-jobs={0}".format(num_mc_jobs))
formatted_min_counts = FormatMinCounts(args.source_int_dir,
num_train_sets,
args.ngram_order,
args.min_counts)
if not num_mc_jobs >= 1:
sys.exit("get_counts.py: invalid option --num-jobs={0}".format(num_mc_jobs))
# First, dump the counts split up by most-recent-history instead of ngram-order.
print("get_counts.py: dumping counts", file=sys.stderr)
threads = []
for n in range(1, num_train_sets + 1):
threads.append(threading.Thread(target = GetCountsMultiProcess,
args = [args.source_int_dir, args.dest_count_dir,
args.ngram_order, str(n), args.num_count_jobs,
num_mc_jobs] ))
threads[-1].start()
if args.dump_counts_parallel == 'false':
threads[-1].join()
if args.dump_counts_parallel == 'true':
for t in threads:
t.join()
# Next, apply the min-counts.
print("get_counts.py: applying min-counts", file=sys.stderr)
threads = []
for j in range(1, num_mc_jobs + 1):
threads.append(threading.Thread(target = EnforceMinCounts,
args = [args.dest_count_dir, formatted_min_counts,
args.ngram_order, num_train_sets, j]))
threads[-1].start()
for t in threads:
t.join()
if args.cleanup == 'true':
for n in range(1, num_train_sets + 1):
for j in range(1, num_mc_jobs + 1):
os.remove("{0}/int.{1}.split{2}".format(
args.dest_count_dir, n, j))
print("get_counts.py: merging counts", file=sys.stderr)
threads = []
for n in range(1, num_train_sets + 1):
for o in range(2, args.ngram_order + 1):
threads.append(threading.Thread(target = MergeCounts,
args = [args.dest_count_dir,
num_mc_jobs, n, o]))
threads[-1].start()
for t in threads:
t.join()
if args.cleanup == 'true':
for n in range(1, num_train_sets + 1):
for j in range(1, args.num_min_count_jobs + 1):
for o in range(2, args.ngram_order + 1):
try:
os.remove("{0}/int.{1}.split{2}.{3}".format(
args.dest_count_dir, n, j, o))
except:
pass
print("get_counts.py: finished.", file=sys.stderr)
if os.system("validate_count_dir.py " + args.dest_count_dir) != 0:
ExitProgram("command validate_count_dir.py {0} failed".format(args.dest_count_dir))
|
|
from __future__ import absolute_import
from typing import Optional, Tuple, Mapping, Any, Text
from django.utils.translation import ugettext as _
from django.conf import settings
from django.template.defaultfilters import slugify
from django.core.files import File
from django.http import HttpRequest
from jinja2 import Markup as mark_safe
import unicodedata
from zerver.lib.avatar_hash import user_avatar_hash
from zerver.lib.request import JsonableError
from zerver.lib.str_utils import force_text, force_str, NonBinaryStr
from boto.s3.bucket import Bucket
from boto.s3.key import Key
from boto.s3.connection import S3Connection
from mimetypes import guess_type, guess_extension
from zerver.models import get_user_profile_by_email, get_user_profile_by_id
from zerver.models import Attachment
from zerver.models import Realm, UserProfile, Message
from six.moves import urllib
import base64
import os
import re
from PIL import Image, ImageOps
from six import binary_type
import io
import random
import logging
DEFAULT_AVATAR_SIZE = 100
MEDIUM_AVATAR_SIZE = 500
# Performance Note:
#
# For writing files to S3, the file could either be stored in RAM
# (if it is less than 2.5MiB or so) or an actual temporary file on disk.
#
# Because we set FILE_UPLOAD_MAX_MEMORY_SIZE to 0, only the latter case
# should occur in practice.
#
# This is great, because passing the pseudofile object that Django gives
# you to boto would be a pain.
# To come up with a s3 key we randomly generate a "directory". The
# "file name" is the original filename provided by the user run
# through a sanitization function.
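# For illustration (values made up; see upload_message_image below), a generated
# S3 key looks roughly like:
#   "<realm_id>/<random urlsafe-base64 string>/<sanitized original file name>"
# e.g. "2/wbGJ7Noyyqhw0NmZWJ8hYYdb/quarterly-report.pdf"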
attachment_url_re = re.compile(u'[/\-]user[\-_]uploads[/\.-].*?(?=[ )]|\Z)')
def attachment_url_to_path_id(attachment_url):
# type: (Text) -> Text
path_id_raw = re.sub(u'[/\-]user[\-_]uploads[/\.-]', u'', attachment_url)
    # Remove any extra '.' after the file extension; these are probably added by the user
    return re.sub(u'[.]+$', u'', path_id_raw, flags=re.M)
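# Illustrative example of the conversion above (made-up path):
#   attachment_url_to_path_id(u'/user_uploads/1/abc123/file.txt.')
#   returns u'1/abc123/file.txt'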
def sanitize_name(raw_value):
# type: (NonBinaryStr) -> Text
"""
Sanitizes a value to be safe to store in a Linux filesystem, in
S3, and in a URL. So unicode is allowed, but not special
characters other than ".", "-", and "_".
This implementation is based on django.utils.text.slugify; it is
modified by:
* hardcoding allow_unicode=True.
* adding '.' and '_' to the list of allowed characters.
* preserving the case of the value.
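    Illustrative example (not an exhaustive specification):
    sanitize_name(u'My File (1).txt') would return u'My-File-1.txt'.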
"""
value = force_text(raw_value)
value = unicodedata.normalize('NFKC', value)
value = re.sub('[^\w\s._-]', '', value, flags=re.U).strip()
return mark_safe(re.sub('[-\s]+', '-', value, flags=re.U))
def random_name(bytes=60):
# type: (int) -> Text
return base64.urlsafe_b64encode(os.urandom(bytes)).decode('utf-8')
class BadImageError(JsonableError):
pass
def resize_avatar(image_data, size=DEFAULT_AVATAR_SIZE):
# type: (binary_type, int) -> binary_type
try:
im = Image.open(io.BytesIO(image_data))
im = ImageOps.fit(im, (size, size), Image.ANTIALIAS)
except IOError:
raise BadImageError("Could not decode avatar image; did you upload an image file?")
out = io.BytesIO()
im.save(out, format='png')
return out.getvalue()
### Common
class ZulipUploadBackend(object):
def upload_message_image(self, uploaded_file_name, content_type, file_data, user_profile, target_realm=None):
# type: (Text, Optional[Text], binary_type, UserProfile, Optional[Realm]) -> Text
raise NotImplementedError()
def upload_avatar_image(self, user_file, user_profile, email):
# type: (File, UserProfile, Text) -> None
raise NotImplementedError()
def delete_message_image(self, path_id):
# type: (Text) -> bool
raise NotImplementedError()
def get_avatar_url(self, hash_key, medium=False):
# type: (Text, bool) -> Text
raise NotImplementedError()
def ensure_medium_avatar_image(self, email):
# type: (Text) -> None
raise NotImplementedError()
### S3
def get_bucket(conn, bucket_name):
# type: (S3Connection, Text) -> Bucket
# Calling get_bucket() with validate=True can apparently lead
# to expensive S3 bills:
# http://www.appneta.com/blog/s3-list-get-bucket-default/
# The benefits of validation aren't completely clear to us, and
# we want to save on our bills, so we set the validate flag to False.
# (We think setting validate to True would cause us to fail faster
# in situations where buckets don't exist, but that shouldn't be
# an issue for us.)
bucket = conn.get_bucket(bucket_name, validate=False)
return bucket
def upload_image_to_s3(
bucket_name,
file_name,
content_type,
user_profile,
contents):
# type: (NonBinaryStr, Text, Optional[Text], UserProfile, binary_type) -> None
conn = S3Connection(settings.S3_KEY, settings.S3_SECRET_KEY)
bucket = get_bucket(conn, force_str(bucket_name))
key = Key(bucket)
key.key = force_str(file_name)
key.set_metadata("user_profile_id", str(user_profile.id))
key.set_metadata("realm_id", str(user_profile.realm_id))
if content_type is not None:
headers = {'Content-Type': force_str(content_type)}
else:
headers = None
key.set_contents_from_string(contents, headers=headers)
def get_file_info(request, user_file):
# type: (HttpRequest, File) -> Tuple[Text, Optional[Text]]
uploaded_file_name = user_file.name
assert isinstance(uploaded_file_name, str)
content_type = request.GET.get('mimetype')
if content_type is None:
guessed_type = guess_type(uploaded_file_name)[0]
if guessed_type is not None:
content_type = force_text(guessed_type)
else:
uploaded_file_name = uploaded_file_name + guess_extension(content_type)
uploaded_file_name = urllib.parse.unquote(uploaded_file_name)
return uploaded_file_name, content_type
def get_signed_upload_url(path):
# type: (Text) -> Text
conn = S3Connection(settings.S3_KEY, settings.S3_SECRET_KEY)
return force_text(conn.generate_url(15, 'GET', bucket=settings.S3_AUTH_UPLOADS_BUCKET, key=force_str(path)))
def get_realm_for_filename(path):
# type: (Text) -> Optional[int]
conn = S3Connection(settings.S3_KEY, settings.S3_SECRET_KEY)
key = get_bucket(conn, settings.S3_AUTH_UPLOADS_BUCKET).get_key(path)
if key is None:
# This happens if the key does not exist.
return None
return get_user_profile_by_id(key.metadata["user_profile_id"]).realm_id
class S3UploadBackend(ZulipUploadBackend):
def upload_message_image(self, uploaded_file_name, content_type, file_data, user_profile, target_realm=None):
# type: (Text, Optional[Text], binary_type, UserProfile, Optional[Realm]) -> Text
bucket_name = settings.S3_AUTH_UPLOADS_BUCKET
s3_file_name = "/".join([
str(target_realm.id if target_realm is not None else user_profile.realm_id),
random_name(18),
sanitize_name(uploaded_file_name)
])
url = "/user_uploads/%s" % (s3_file_name)
upload_image_to_s3(
bucket_name,
s3_file_name,
content_type,
user_profile,
file_data
)
create_attachment(uploaded_file_name, s3_file_name, user_profile)
return url
def delete_message_image(self, path_id):
# type: (Text) -> bool
conn = S3Connection(settings.S3_KEY, settings.S3_SECRET_KEY)
bucket = get_bucket(conn, settings.S3_AUTH_UPLOADS_BUCKET)
# check if file exists
key = bucket.get_key(path_id)
if key is not None:
bucket.delete_key(key)
return True
file_name = path_id.split("/")[-1]
logging.warning("%s does not exist. Its entry in the database will be removed." % (file_name,))
return False
def upload_avatar_image(self, user_file, user_profile, email):
# type: (File, UserProfile, Text) -> None
content_type = guess_type(user_file.name)[0]
bucket_name = settings.S3_AVATAR_BUCKET
s3_file_name = user_avatar_hash(email)
image_data = user_file.read()
upload_image_to_s3(
bucket_name,
s3_file_name + ".original",
content_type,
user_profile,
image_data,
)
# custom 500px wide version
resized_medium = resize_avatar(image_data, MEDIUM_AVATAR_SIZE)
upload_image_to_s3(
bucket_name,
s3_file_name + "-medium.png",
"image/png",
user_profile,
resized_medium
)
resized_data = resize_avatar(image_data)
upload_image_to_s3(
bucket_name,
s3_file_name,
'image/png',
user_profile,
resized_data,
)
# See avatar_url in avatar.py for URL. (That code also handles the case
# that users use gravatar.)
def get_avatar_url(self, hash_key, medium=False):
# type: (Text, bool) -> Text
bucket = settings.S3_AVATAR_BUCKET
medium_suffix = "-medium" if medium else ""
# ?x=x allows templates to append additional parameters with &s
return u"https://%s.s3.amazonaws.com/%s%s?x=x" % (bucket, medium_suffix, hash_key)
def ensure_medium_avatar_image(self, email):
# type: (Text) -> None
user_profile = get_user_profile_by_email(email)
email_hash = user_avatar_hash(email)
s3_file_name = email_hash
bucket_name = settings.S3_AVATAR_BUCKET
conn = S3Connection(settings.S3_KEY, settings.S3_SECRET_KEY)
bucket = get_bucket(conn, force_str(bucket_name))
key = bucket.get_key(email_hash)
image_data = key.get_contents_as_string()
resized_medium = resize_avatar(image_data, MEDIUM_AVATAR_SIZE)
upload_image_to_s3(
bucket_name,
s3_file_name + "-medium.png",
"image/png",
user_profile,
resized_medium
)
### Local
def mkdirs(path):
# type: (Text) -> None
dirname = os.path.dirname(path)
if not os.path.isdir(dirname):
os.makedirs(dirname)
def write_local_file(type, path, file_data):
# type: (Text, Text, binary_type) -> None
file_path = os.path.join(settings.LOCAL_UPLOADS_DIR, type, path)
mkdirs(file_path)
with open(file_path, 'wb') as f:
f.write(file_data)
def get_local_file_path(path_id):
# type: (Text) -> Optional[Text]
local_path = os.path.join(settings.LOCAL_UPLOADS_DIR, 'files', path_id)
if os.path.isfile(local_path):
return local_path
else:
return None
class LocalUploadBackend(ZulipUploadBackend):
def upload_message_image(self, uploaded_file_name, content_type, file_data, user_profile, target_realm=None):
# type: (Text, Optional[Text], binary_type, UserProfile, Optional[Realm]) -> Text
# Split into 256 subdirectories to prevent directories from getting too big
path = "/".join([
str(user_profile.realm_id),
format(random.randint(0, 255), 'x'),
random_name(18),
sanitize_name(uploaded_file_name)
])
write_local_file('files', path, file_data)
create_attachment(uploaded_file_name, path, user_profile)
return '/user_uploads/' + path
def delete_message_image(self, path_id):
# type: (Text) -> bool
file_path = os.path.join(settings.LOCAL_UPLOADS_DIR, 'files', path_id)
if os.path.isfile(file_path):
# This removes the file but the empty folders still remain.
os.remove(file_path)
return True
file_name = path_id.split("/")[-1]
logging.warning("%s does not exist. Its entry in the database will be removed." % (file_name,))
return False
def upload_avatar_image(self, user_file, user_profile, email):
# type: (File, UserProfile, Text) -> None
email_hash = user_avatar_hash(email)
image_data = user_file.read()
write_local_file('avatars', email_hash+'.original', image_data)
resized_data = resize_avatar(image_data)
write_local_file('avatars', email_hash+'.png', resized_data)
resized_medium = resize_avatar(image_data, MEDIUM_AVATAR_SIZE)
write_local_file('avatars', email_hash+'-medium.png', resized_medium)
def get_avatar_url(self, hash_key, medium=False):
# type: (Text, bool) -> Text
# ?x=x allows templates to append additional parameters with &s
medium_suffix = "-medium" if medium else ""
return u"/user_avatars/%s%s.png?x=x" % (hash_key, medium_suffix)
def ensure_medium_avatar_image(self, email):
# type: (Text) -> None
email_hash = user_avatar_hash(email)
output_path = os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars", email_hash + "-medium.png")
if os.path.isfile(output_path):
return
image_path = os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars", email_hash + ".original")
image_data = open(image_path, "rb").read()
resized_medium = resize_avatar(image_data, MEDIUM_AVATAR_SIZE)
write_local_file('avatars', email_hash + '-medium.png', resized_medium)
# Common and wrappers
if settings.LOCAL_UPLOADS_DIR is not None:
upload_backend = LocalUploadBackend() # type: ZulipUploadBackend
else:
upload_backend = S3UploadBackend()
def delete_message_image(path_id):
# type: (Text) -> bool
return upload_backend.delete_message_image(path_id)
def upload_avatar_image(user_file, user_profile, email):
# type: (File, UserProfile, Text) -> None
upload_backend.upload_avatar_image(user_file, user_profile, email)
def upload_message_image(uploaded_file_name, content_type, file_data, user_profile, target_realm=None):
# type: (Text, Optional[Text], binary_type, UserProfile, Optional[Realm]) -> Text
return upload_backend.upload_message_image(uploaded_file_name, content_type, file_data,
user_profile, target_realm=target_realm)
def claim_attachment(user_profile, path_id, message, is_message_realm_public):
# type: (UserProfile, Text, Message, bool) -> bool
try:
attachment = Attachment.objects.get(path_id=path_id)
attachment.messages.add(message)
        # Only the owner of a file has the right to elevate its permissions.
        # This makes sure that a private file is not accidentally made public by
        # another user sending a message that refers to the private file to a public stream.
if attachment.owner == user_profile:
attachment.is_realm_public = attachment.is_realm_public or is_message_realm_public
attachment.save()
return True
except Attachment.DoesNotExist:
raise JsonableError(_("The upload was not successful. Please reupload the file again in a new message."))
return False
def create_attachment(file_name, path_id, user_profile):
# type: (Text, Text, UserProfile) -> bool
Attachment.objects.create(file_name=file_name, path_id=path_id, owner=user_profile, realm=user_profile.realm)
return True
def upload_message_image_from_request(request, user_file, user_profile):
# type: (HttpRequest, File, UserProfile) -> Text
uploaded_file_name, content_type = get_file_info(request, user_file)
return upload_message_image(uploaded_file_name, content_type, user_file.read(), user_profile)
|
|
'''
Created on 14.04.2016
@author: Tobias
'''
import sklearn_helpers as skhelper
from stacking.stacking_model import *
import pandas as pd
import numpy as np
from time import strptime
import datetime
from collections import Counter
from sklearn import preprocessing, naive_bayes
from sklearn.neighbors.classification import KNeighborsClassifier
from sklearn.ensemble.forest import RandomForestClassifier, ExtraTreesClassifier
from sklearn.ensemble.gradient_boosting import GradientBoostingClassifier
from sklearn.cross_validation import cross_val_score, StratifiedKFold,\
train_test_split
from sklearn.metrics import log_loss
from sklearn.naive_bayes import GaussianNB
from sklearn.pipeline import Pipeline
def create_sub(y_pred):
print("generating submission")
sample_sub = pd.read_csv("data/sample_submission.csv")
n = sample_sub.shape[0]
names = ["Adoption","Died","Euthanasia","Return_to_owner","Transfer"]
for i in range(n):
for col in range(5):
sample_sub.loc[i,names[col]] = y_pred[i,col]
sample_sub.to_csv("n_submission.csv", sep=",", index=False)
def preproc(df):
le = preprocessing.LabelEncoder()
print("preprocessing")
n = df.shape[0]
df = df.fillna("NaN")
    # flag whether the animal has a name, and record the name length
name_vec = np.zeros(n)
name_length = np.zeros(n)
name_list = []
i = 0
for name in df["Name"]:
if name == "NaN":
name_vec[i] = 0
name_length[i] = 0
else:
name_list.append(name)
name_vec[i] = 1
name_length[i] = len(name)
i += 1
df["hasName"] = name_vec
df["name_length"] = name_length
c = Counter(name_list)
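    # "name weirdness": 1 - (count of this name / number of rows), so unnamed
    # animals get 0 and more common names get slightly smaller values than rare ones.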
names_vec = np.zeros(n)
i = 0
for name in df["Name"].values:
if c[name] != 0:
names_vec[i] = 1 - c[name]/n
else:
names_vec[i] = 0
i += 1
df["name_weirdness"] = names_vec
df = df.drop("Name",axis=1)
# map animal
mapping = {'Dog': 0, 'Cat': 1}
df = df.replace({'AnimalType': mapping})
# color
i = 0
color1_vec = []
color2_vec = []
number_colors = np.zeros(n)
texture_vec = []
texture2_vec = []
for color in df["Color"].values:
if '/' in color:
number_colors[i] = 2
color1 = color.split("/")[0]
color2 = color.split("/")[1]
color1 = color1.split(" ")
color1_vec.append(color1[0])
color2 = color2.split(" ")
color2_vec.append(color2[0])
if len(color1) > 1 and len(color2) == 1:
texture_vec.append(color1[1])
texture2_vec.append("0")
if len(color2) > 1 and len(color1) == 1:
texture_vec.append(color2[1])
texture2_vec.append("0")
if len(color1) == 1 and len(color2) == 1:
texture_vec.append("0")
texture2_vec.append("0")
if len(color1) > 1 and len(color2) > 1:
texture_vec.append(color1[1])
texture2_vec.append(color2[1])
else:
color2_vec.append("0")
texture2_vec.append("0")
number_colors[i] = 1
color = color.split(" ")
if len(color) > 1:
texture_vec.append(color[1])
color1_vec.append(color[0])
else:
texture_vec.append("0")
color1_vec.append(color[0])
i += 1
color1_vec = le.fit_transform(color1_vec)
color2_vec = le.fit_transform(color2_vec)
texture_vec = le.fit_transform(texture_vec)
texture2_vec = le.fit_transform(texture2_vec)
df["color1"] = color1_vec
df["color2"] = color2_vec
df["number_of_colors"] = number_colors
df["texture"] = texture_vec
df["texture2"] = texture2_vec
# sex to male/female/unknown
sex_vec = np.zeros(n)
new_vec = np.zeros(n)
i = 0
for sex in df["SexuponOutcome"].values:
if sex == "Unknown" or sex == "NaN":
sex_vec[i] = 0
new_vec[i] = 0
else:
if sex.split(" ")[1] == "Male":
sex_vec[i] = 2
elif sex.split(" ")[1] == "Female":
sex_vec[i] = 1
if sex.split(" ")[0] == "Intact":
new_vec[i] = 2
elif sex.split(" ")[0] == "Spayed":
new_vec[i] = 1
elif sex.split(" ")[0] == "Neutered":
new_vec[i] = 3
i += 1
df["Sex"] = sex_vec
df["Sex_stat"] = new_vec
df = df.drop("SexuponOutcome",axis=1)
# mix
mix_vec = np.zeros(n)
i = 0
breed_list = []
for breed in df["Breed"].values:
if breed.split(" ")[-1] == "Mix":
mix_vec[i] = 1
breed_list.append(breed[:-4])
else:
breed_list.append(breed)
mix_vec[i] = 0
i += 1
df["Mix"] = mix_vec
c = Counter(breed_list)
breed_weird_vec = np.zeros(n)
i = 0
for breed in breed_list:
if c[breed] != 0:
            breed_weird_vec[i] = 1 - c[breed] / float(n)  # true division, as with name_weirdness above
else:
breed_weird_vec[i] = 0
i += 1
df["breed_weirdness"] = breed_weird_vec
# Age
age_week = np.zeros(n)
age_month = np.zeros(n)
age_year = np.zeros(n)
i = 0
for age in df["AgeuponOutcome"]:
age = age.split(" ")
if age[-1][0:4] == "week":
age_week[i] = int(age[0])
age_month[i] = 0
age_year[i] = 0
elif age[-1][0:5] == "month":
age_week[i] = 0
age_month[i] = int(age[0])
age_year[i] = 0
elif age[-1][0:4] == "year":
age_week[i] = 0
age_month[i] = 0
age_year[i] = int(age[0])
i += 1
df = df.drop("AgeuponOutcome",axis=1)
AgeuponOutcome_vec = np.zeros(n)
for i in range(n):
AgeuponOutcome_vec[i] = age_week[i]*7 + age_month[i]*30 + age_year[i]*365
df["AgeuponOutcome"] = AgeuponOutcome_vec
# use the time variable
hour_vec = np.zeros(n)
week_vec = np.zeros(n)
month_vec = np.zeros(n)
day_of_month_vec = np.zeros(n)
i = 0
for date in df["DateTime"].values:
date_ = date.split(" ")[0]
time_month = date_.split("-")[1]
day_of_month_vec[i] = date_.split("-")[2]
time_ = date.split(" ")[1]
time_hour = time_.split(":")[0]
hour_vec[i] = time_hour
month_vec[i] = time_month
# week
date = datetime.datetime(*(strptime(date, '%Y-%m-%d %H:%M:%S')[0:6]))
week_vec[i] = date.weekday()
i += 1
df["Hour"] = hour_vec
df["Weekday"] = week_vec
df["Month"] = month_vec
df["Day_of_month"] = day_of_month_vec
    # drop the raw text columns that have now been encoded
    df = df.drop(["DateTime"], axis=1)
    df = df.drop(["Breed", "Color"], axis=1)
return df
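# Tiny illustration of what preproc produces (sketch: the two records below are
# made up but follow the format of the Kaggle training data). Not called
# anywhere; run it manually to inspect the engineered columns.
def _preproc_demo():
    demo = pd.DataFrame({
        "Name": ["Rex", np.nan],
        "AnimalType": ["Dog", "Cat"],
        "SexuponOutcome": ["Neutered Male", "Intact Female"],
        "AgeuponOutcome": ["2 years", "3 weeks"],
        "Breed": ["Beagle Mix", "Domestic Shorthair"],
        "Color": ["Brown Tabby/White", "Black"],
        "DateTime": ["2015-07-12 14:30:00", "2016-01-03 09:05:00"],
    })
    out = preproc(demo)
    # hasName, name_length, name_weirdness, color1/color2/texture(2),
    # Sex/Sex_stat, Mix, breed_weirdness, AgeuponOutcome (in days) and the
    # Hour/Weekday/Month/Day_of_month columns are now all numeric.
    print(out.dtypes)
    return out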
if __name__ == "__main__":
print("load data")
df_train = pd.read_csv("data/train.csv")
df_test = pd.read_csv("data/test.csv")
# map the classes
mapping = {'Adoption': 0, 'Died': 1, 'Euthanasia': 2,
'Return_to_owner': 3, 'Transfer': 4}
df_train = df_train.replace({'OutcomeType': mapping})
y = df_train["OutcomeType"].values
df_train = df_train.drop(["OutcomeType","OutcomeSubtype","AnimalID"],axis=1)
n_train = df_train.shape[0]
df_all = df_train.append(df_test, ignore_index=True)
df_all = preproc(df_all)
df_all = df_all.drop("ID",axis=1)
df_train = df_all.iloc[:n_train]
df_test = df_all.iloc[n_train:]
X = df_train.values
X_test = df_test.values
feature_names = df_all.columns.values.tolist()
print(X.shape)
print("build the model")
clf1 = RandomForestClassifier(n_estimators=100,random_state=571,max_features=8,max_depth=13,n_jobs=1)
clf2 = KNeighborsClassifier(n_neighbors=250, p=1, weights="distance")
clf3 = ExtraTreesClassifier(n_estimators=200,max_depth=14, max_features=12,random_state=571,n_jobs=1)
clf4 = GaussianNB()
clf5 = GradientBoostingClassifier(n_estimators=100,random_state=571,max_depth=6, max_features=7)
clf6 = RandomForestClassifier(n_estimators=1000,max_features=10,max_depth=14,n_jobs=1) # feats = 10
clf7 = GradientBoostingClassifier(n_estimators=100,max_depth=9, max_features=7) # feats = 7
first_stage = [
("rf",clf1),
("knn",clf2),
("et",clf3),
("gnb",clf4),
("gbm",clf5)
]
second_stage = [
("gbm",clf7),
("rf",clf6)
]
weights = [3,1]
stack = StackingClassifier(stage_one_clfs=first_stage,stage_two_clfs=second_stage,weights=weights, n_runs=10, use_append=False, print_scores=True)
skf = StratifiedKFold(y, n_folds=5,random_state=571)
# print("Training")
# stack.fit(X,y)
# print("Predict")
# y_pred = stack.predict_proba(X_test)
# create_sub(y_pred)
# print("CV")
# scores = cross_val_score(stack,X,y,scoring="log_loss",cv=skf)
# print(scores)
# print("CV-Score: %.3f" % -scores.mean())
# with append: Score: 0.783
# without append: CV-Score: 0.843
# gridsearch
params1 = {
"max_depth": [4],
"max_features": [3]
}
params2 = {
"max_depth": [7],
"max_features": [4]
}
paramsset = [params1, params2]
stack = StackingClassifier(stage_one_clfs=first_stage,stage_two_clfs=second_stage,weights=weights, n_runs=10, use_append=False,
do_gridsearch=True, params=paramsset, cv=skf, scoring="log_loss", print_scores=False)
stack.fit(X,y)
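    # Sketch of how the second-stage weights above are typically interpreted
    # (an assumption about stacking_model internals): the two stage-two
    # probability matrices are blended by a weighted average, 3 parts GBM to
    # 1 part RF, e.g.
    #   p_final = np.average([p_gbm, p_rf], axis=0, weights=weights)
    # where p_gbm and p_rf are hypothetical (n_samples, 5) predict_proba outputs.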
|
|
# This file is part of beets.
# Copyright 2012, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Tests for MusicBrainz API wrapper.
"""
from _common import unittest
from beets.autotag import mb
class MBAlbumInfoTest(unittest.TestCase):
def _make_release(self, date_str='2009', tracks=None):
release = {
'title': 'ALBUM TITLE',
'id': 'ALBUM ID',
'asin': 'ALBUM ASIN',
'release-group': {
'type': 'Album',
'first-release-date': date_str,
'id': 'RELEASE GROUP ID',
'disambiguation': 'DISAMBIGUATION',
},
'artist-credit': [
{
'artist': {
'name': 'ARTIST NAME',
'id': 'ARTIST ID',
'sort-name': 'ARTIST SORT NAME',
},
'name': 'ARTIST CREDIT',
}
],
'date': '3001',
'medium-list': [],
'label-info-list': [{
'catalog-number': 'CATALOG NUMBER',
'label': {'name': 'LABEL NAME'},
}],
'text-representation': {
'script': 'SCRIPT',
'language': 'LANGUAGE',
},
'country': 'COUNTRY',
'status': 'STATUS',
}
if tracks:
track_list = []
for i, track in enumerate(tracks):
track_list.append({
'recording': track,
'position': str(i+1),
})
release['medium-list'].append({
'position': '1',
'track-list': track_list,
'format': 'FORMAT',
'title': 'MEDIUM TITLE',
})
return release
def _make_track(self, title, tr_id, duration, artist=False):
track = {
'title': title,
'id': tr_id,
}
if duration is not None:
track['length'] = duration
if artist:
track['artist-credit'] = [
{
'artist': {
'name': 'TRACK ARTIST NAME',
'id': 'TRACK ARTIST ID',
'sort-name': 'TRACK ARTIST SORT NAME',
},
'name': 'TRACK ARTIST CREDIT',
}
]
return track
def test_parse_release_with_year(self):
release = self._make_release('1984')
d = mb.album_info(release)
self.assertEqual(d.album, 'ALBUM TITLE')
self.assertEqual(d.album_id, 'ALBUM ID')
self.assertEqual(d.artist, 'ARTIST NAME')
self.assertEqual(d.artist_id, 'ARTIST ID')
self.assertEqual(d.year, 1984)
self.assertEqual(d.artist_credit, 'ARTIST CREDIT')
def test_parse_release_type(self):
release = self._make_release('1984')
d = mb.album_info(release)
self.assertEqual(d.albumtype, 'album')
def test_parse_release_full_date(self):
release = self._make_release('1987-03-31')
d = mb.album_info(release)
self.assertEqual(d.year, 1987)
self.assertEqual(d.month, 3)
self.assertEqual(d.day, 31)
def test_parse_tracks(self):
tracks = [self._make_track('TITLE ONE', 'ID ONE', 100.0 * 1000.0),
self._make_track('TITLE TWO', 'ID TWO', 200.0 * 1000.0)]
release = self._make_release(tracks=tracks)
d = mb.album_info(release)
t = d.tracks
self.assertEqual(len(t), 2)
self.assertEqual(t[0].title, 'TITLE ONE')
self.assertEqual(t[0].track_id, 'ID ONE')
self.assertEqual(t[0].length, 100.0)
self.assertEqual(t[1].title, 'TITLE TWO')
self.assertEqual(t[1].track_id, 'ID TWO')
self.assertEqual(t[1].length, 200.0)
def test_parse_track_indices(self):
tracks = [self._make_track('TITLE ONE', 'ID ONE', 100.0 * 1000.0),
self._make_track('TITLE TWO', 'ID TWO', 200.0 * 1000.0)]
release = self._make_release(tracks=tracks)
d = mb.album_info(release)
t = d.tracks
self.assertEqual(t[0].medium_index, 1)
self.assertEqual(t[1].medium_index, 2)
def test_parse_medium_numbers_single_medium(self):
tracks = [self._make_track('TITLE ONE', 'ID ONE', 100.0 * 1000.0),
self._make_track('TITLE TWO', 'ID TWO', 200.0 * 1000.0)]
release = self._make_release(tracks=tracks)
d = mb.album_info(release)
self.assertEqual(d.mediums, 1)
t = d.tracks
self.assertEqual(t[0].medium, 1)
self.assertEqual(t[1].medium, 1)
def test_parse_medium_numbers_two_mediums(self):
tracks = [self._make_track('TITLE ONE', 'ID ONE', 100.0 * 1000.0),
self._make_track('TITLE TWO', 'ID TWO', 200.0 * 1000.0)]
release = self._make_release(tracks=[tracks[0]])
second_track_list = [{
'recording': tracks[1],
'position': '1',
}]
release['medium-list'].append({
'position': '2',
'track-list': second_track_list,
})
d = mb.album_info(release)
self.assertEqual(d.mediums, 2)
t = d.tracks
self.assertEqual(t[0].medium, 1)
self.assertEqual(t[0].medium_index, 1)
self.assertEqual(t[1].medium, 2)
self.assertEqual(t[1].medium_index, 1)
def test_parse_release_year_month_only(self):
release = self._make_release('1987-03')
d = mb.album_info(release)
self.assertEqual(d.year, 1987)
self.assertEqual(d.month, 3)
def test_no_durations(self):
tracks = [self._make_track('TITLE', 'ID', None)]
release = self._make_release(tracks=tracks)
d = mb.album_info(release)
self.assertEqual(d.tracks[0].length, None)
def test_no_release_date(self):
release = self._make_release(None)
d = mb.album_info(release)
self.assertFalse(d.year)
self.assertFalse(d.month)
self.assertFalse(d.day)
def test_various_artists_defaults_false(self):
release = self._make_release(None)
d = mb.album_info(release)
self.assertFalse(d.va)
def test_detect_various_artists(self):
release = self._make_release(None)
release['artist-credit'][0]['artist']['id'] = \
mb.VARIOUS_ARTISTS_ID
d = mb.album_info(release)
self.assertTrue(d.va)
def test_parse_artist_sort_name(self):
release = self._make_release(None)
d = mb.album_info(release)
self.assertEqual(d.artist_sort, 'ARTIST SORT NAME')
def test_parse_releasegroupid(self):
release = self._make_release(None)
d = mb.album_info(release)
self.assertEqual(d.releasegroup_id, 'RELEASE GROUP ID')
def test_parse_asin(self):
release = self._make_release(None)
d = mb.album_info(release)
self.assertEqual(d.asin, 'ALBUM ASIN')
def test_parse_catalognum(self):
release = self._make_release(None)
d = mb.album_info(release)
self.assertEqual(d.catalognum, 'CATALOG NUMBER')
def test_parse_textrepr(self):
release = self._make_release(None)
d = mb.album_info(release)
self.assertEqual(d.script, 'SCRIPT')
self.assertEqual(d.language, 'LANGUAGE')
def test_parse_country(self):
release = self._make_release(None)
d = mb.album_info(release)
self.assertEqual(d.country, 'COUNTRY')
def test_parse_status(self):
release = self._make_release(None)
d = mb.album_info(release)
self.assertEqual(d.albumstatus, 'STATUS')
def test_parse_media(self):
tracks = [self._make_track('TITLE ONE', 'ID ONE', 100.0 * 1000.0),
self._make_track('TITLE TWO', 'ID TWO', 200.0 * 1000.0)]
release = self._make_release(None, tracks=tracks)
d = mb.album_info(release)
self.assertEqual(d.media, 'FORMAT')
def test_parse_disambig(self):
release = self._make_release(None)
d = mb.album_info(release)
self.assertEqual(d.albumdisambig, 'DISAMBIGUATION')
def test_parse_disctitle(self):
tracks = [self._make_track('TITLE ONE', 'ID ONE', 100.0 * 1000.0),
self._make_track('TITLE TWO', 'ID TWO', 200.0 * 1000.0)]
release = self._make_release(None, tracks=tracks)
d = mb.album_info(release)
t = d.tracks
self.assertEqual(t[0].disctitle, 'MEDIUM TITLE')
self.assertEqual(t[1].disctitle, 'MEDIUM TITLE')
def test_missing_language(self):
release = self._make_release(None)
del release['text-representation']['language']
d = mb.album_info(release)
self.assertEqual(d.language, None)
def test_parse_track_artist(self):
tracks = [self._make_track('a', 'b', 1, True)]
release = self._make_release(None, tracks=tracks)
track = mb.album_info(release).tracks[0]
self.assertEqual(track.artist, 'TRACK ARTIST NAME')
self.assertEqual(track.artist_id, 'TRACK ARTIST ID')
self.assertEqual(track.artist_sort, 'TRACK ARTIST SORT NAME')
self.assertEqual(track.artist_credit, 'TRACK ARTIST CREDIT')
class ArtistFlatteningTest(unittest.TestCase):
def _credit_dict(self, suffix=''):
return {
'artist': {
'name': 'NAME' + suffix,
'sort-name': 'SORT' + suffix,
},
'name': 'CREDIT' + suffix,
}
def test_single_artist(self):
a, s, c = mb._flatten_artist_credit([self._credit_dict()])
self.assertEqual(a, 'NAME')
self.assertEqual(s, 'SORT')
self.assertEqual(c, 'CREDIT')
def test_two_artists(self):
a, s, c = mb._flatten_artist_credit(
[self._credit_dict('a'), ' AND ', self._credit_dict('b')]
)
self.assertEqual(a, 'NAMEa AND NAMEb')
self.assertEqual(s, 'SORTa AND SORTb')
self.assertEqual(c, 'CREDITa AND CREDITb')
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class NetworkWatchersOperations(object):
"""NetworkWatchersOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2017-09-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2017-09-01"
self.config = config
def create_or_update(
self, resource_group_name, network_watcher_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Creates or updates a network watcher in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param parameters: Parameters that define the network watcher
resource.
:type parameters:
~azure.mgmt.network.v2017_09_01.models.NetworkWatcher
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: NetworkWatcher or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.network.v2017_09_01.models.NetworkWatcher or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.create_or_update.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'NetworkWatcher')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('NetworkWatcher', response)
if response.status_code == 201:
deserialized = self._deserialize('NetworkWatcher', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}'}
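    # Hypothetical usage sketch (not part of the generated client): this
    # operation group is normally reached through the service client, e.g.
    #
    #     from azure.mgmt.network import NetworkManagementClient
    #     client = NetworkManagementClient(credentials, subscription_id)
    #     watcher = client.network_watchers.create_or_update(
    #         'my-rg', 'my-watcher', models.NetworkWatcher(location='westus'))
    #     print(watcher.provisioning_state)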
def get(
self, resource_group_name, network_watcher_name, custom_headers=None, raw=False, **operation_config):
"""Gets the specified network watcher by resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: NetworkWatcher or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.network.v2017_09_01.models.NetworkWatcher or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('NetworkWatcher', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}'}
def _delete_initial(
self, resource_group_name, network_watcher_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete(
self, resource_group_name, network_watcher_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Deletes the specified network watcher resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}'}
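    # Illustrative sketch (assuming the client wiring shown above): delete()
    # returns an LROPoller, so callers block on .result() (which yields None
    # for this operation) or poll .status().
    #
    #     poller = client.network_watchers.delete('my-rg', 'my-watcher')
    #     poller.result()
    #     print(poller.status())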
def update_tags(
self, resource_group_name, network_watcher_name, tags=None, custom_headers=None, raw=False, **operation_config):
"""Updates a network watcher tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: NetworkWatcher or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.network.v2017_09_01.models.NetworkWatcher or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
parameters = models.TagsObject(tags=tags)
# Construct URL
url = self.update_tags.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'TagsObject')
# Construct and send request
request = self._client.patch(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('NetworkWatcher', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}'}
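    # Illustrative sketch (not generated code): update_tags sends a PATCH with
    # only the tags payload, leaving the rest of the resource untouched.
    #
    #     watcher = client.network_watchers.update_tags(
    #         'my-rg', 'my-watcher', tags={'env': 'dev', 'team': 'netops'})
    #     print(watcher.tags)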
def list(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Gets all network watchers by resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
        :return: An iterator-like instance of NetworkWatcher
:rtype:
~azure.mgmt.network.v2017_09_01.models.NetworkWatcherPaged[~azure.mgmt.network.v2017_09_01.models.NetworkWatcher]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.NetworkWatcherPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.NetworkWatcherPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers'}
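    # Illustrative sketch: the NetworkWatcherPaged object returned by list()
    # is a lazy iterator; further pages are fetched as iteration proceeds.
    #
    #     for watcher in client.network_watchers.list('my-rg'):
    #         print(watcher.name, watcher.location)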
def list_all(
self, custom_headers=None, raw=False, **operation_config):
"""Gets all network watchers by subscription.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
        :return: An iterator-like instance of NetworkWatcher
:rtype:
~azure.mgmt.network.v2017_09_01.models.NetworkWatcherPaged[~azure.mgmt.network.v2017_09_01.models.NetworkWatcher]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_all.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.NetworkWatcherPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.NetworkWatcherPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkWatchers'}
def get_topology(
self, resource_group_name, network_watcher_name, target_resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Gets the current network topology by resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
        :param target_resource_group_name: The name of the target resource
         group on which to run the topology query.
:type target_resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: Topology or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.network.v2017_09_01.models.Topology or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
parameters = models.TopologyParameters(target_resource_group_name=target_resource_group_name)
# Construct URL
url = self.get_topology.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'TopologyParameters')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Topology', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_topology.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/topology'}
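    # Illustrative sketch (not generated code): get_topology is a plain POST
    # whose result describes the resources of the target resource group.
    #
    #     topology = client.network_watchers.get_topology(
    #         'my-rg', 'my-watcher', target_resource_group_name='app-rg')
    #     for res in topology.resources:
    #         print(res.name, res.location)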
def _verify_ip_flow_initial(
self, resource_group_name, network_watcher_name, parameters, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.verify_ip_flow.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'VerificationIPFlowParameters')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VerificationIPFlowResult', response)
if response.status_code == 202:
deserialized = self._deserialize('VerificationIPFlowResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def verify_ip_flow(
self, resource_group_name, network_watcher_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Verify IP flow from the specified VM to a location given the currently
configured NSG rules.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param parameters: Parameters that define the IP flow to be verified.
:type parameters:
~azure.mgmt.network.v2017_09_01.models.VerificationIPFlowParameters
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns
VerificationIPFlowResult or
ClientRawResponse<VerificationIPFlowResult> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_09_01.models.VerificationIPFlowResult]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2017_09_01.models.VerificationIPFlowResult]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._verify_ip_flow_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('VerificationIPFlowResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
verify_ip_flow.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/ipFlowVerify'}
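    # Illustrative sketch of an IP-flow verification call; the parameter
    # values are made up and the field names are assumptions about the
    # VerificationIPFlowParameters model of this API version.
    #
    #     params = models.VerificationIPFlowParameters(
    #         target_resource_id=vm_id, direction='Outbound', protocol='TCP',
    #         local_port='*', remote_port='443',
    #         local_ip_address='10.0.0.4', remote_ip_address='8.8.8.8')
    #     result = client.network_watchers.verify_ip_flow(
    #         'my-rg', 'my-watcher', params).result()
    #     print(result.access)  # 'Allow' or 'Deny'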
def _get_next_hop_initial(
self, resource_group_name, network_watcher_name, parameters, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.get_next_hop.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'NextHopParameters')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('NextHopResult', response)
if response.status_code == 202:
deserialized = self._deserialize('NextHopResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_next_hop(
self, resource_group_name, network_watcher_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Gets the next hop from the specified VM.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param parameters: Parameters that define the source and destination
endpoint.
:type parameters:
~azure.mgmt.network.v2017_09_01.models.NextHopParameters
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns NextHopResult or
ClientRawResponse<NextHopResult> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_09_01.models.NextHopResult]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2017_09_01.models.NextHopResult]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._get_next_hop_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('NextHopResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
get_next_hop.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/nextHop'}
def _get_vm_security_rules_initial(
self, resource_group_name, network_watcher_name, target_resource_id, custom_headers=None, raw=False, **operation_config):
parameters = models.SecurityGroupViewParameters(target_resource_id=target_resource_id)
# Construct URL
url = self.get_vm_security_rules.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'SecurityGroupViewParameters')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('SecurityGroupViewResult', response)
if response.status_code == 202:
deserialized = self._deserialize('SecurityGroupViewResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_vm_security_rules(
self, resource_group_name, network_watcher_name, target_resource_id, custom_headers=None, raw=False, polling=True, **operation_config):
"""Gets the configured and effective security group rules on the specified
VM.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param target_resource_id: ID of the target VM.
:type target_resource_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns SecurityGroupViewResult
or ClientRawResponse<SecurityGroupViewResult> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_09_01.models.SecurityGroupViewResult]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2017_09_01.models.SecurityGroupViewResult]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._get_vm_security_rules_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
target_resource_id=target_resource_id,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('SecurityGroupViewResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
get_vm_security_rules.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/securityGroupView'}
def _get_troubleshooting_initial(
self, resource_group_name, network_watcher_name, parameters, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.get_troubleshooting.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'TroubleshootingParameters')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('TroubleshootingResult', response)
if response.status_code == 202:
deserialized = self._deserialize('TroubleshootingResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_troubleshooting(
self, resource_group_name, network_watcher_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Initiate troubleshooting on a specified resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher resource.
:type network_watcher_name: str
:param parameters: Parameters that define the resource to
troubleshoot.
:type parameters:
~azure.mgmt.network.v2017_09_01.models.TroubleshootingParameters
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns TroubleshootingResult
or ClientRawResponse<TroubleshootingResult> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_09_01.models.TroubleshootingResult]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2017_09_01.models.TroubleshootingResult]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._get_troubleshooting_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('TroubleshootingResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
get_troubleshooting.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/troubleshoot'}
def _get_troubleshooting_result_initial(
self, resource_group_name, network_watcher_name, target_resource_id, custom_headers=None, raw=False, **operation_config):
parameters = models.QueryTroubleshootingParameters(target_resource_id=target_resource_id)
# Construct URL
url = self.get_troubleshooting_result.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'QueryTroubleshootingParameters')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('TroubleshootingResult', response)
if response.status_code == 202:
deserialized = self._deserialize('TroubleshootingResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_troubleshooting_result(
self, resource_group_name, network_watcher_name, target_resource_id, custom_headers=None, raw=False, polling=True, **operation_config):
"""Get the last completed troubleshooting result on a specified resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher resource.
:type network_watcher_name: str
:param target_resource_id: The target resource ID to query the
troubleshooting result.
:type target_resource_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns TroubleshootingResult
or ClientRawResponse<TroubleshootingResult> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_09_01.models.TroubleshootingResult]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2017_09_01.models.TroubleshootingResult]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._get_troubleshooting_result_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
target_resource_id=target_resource_id,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('TroubleshootingResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
get_troubleshooting_result.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/queryTroubleshootResult'}
def _set_flow_log_configuration_initial(
self, resource_group_name, network_watcher_name, parameters, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.set_flow_log_configuration.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'FlowLogInformation')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('FlowLogInformation', response)
if response.status_code == 202:
deserialized = self._deserialize('FlowLogInformation', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def set_flow_log_configuration(
self, resource_group_name, network_watcher_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Configures flow log on a specified resource.
:param resource_group_name: The name of the network watcher resource
group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher resource.
:type network_watcher_name: str
:param parameters: Parameters that define the configuration of flow
log.
:type parameters:
~azure.mgmt.network.v2017_09_01.models.FlowLogInformation
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns FlowLogInformation or
ClientRawResponse<FlowLogInformation> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_09_01.models.FlowLogInformation]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2017_09_01.models.FlowLogInformation]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._set_flow_log_configuration_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('FlowLogInformation', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
set_flow_log_configuration.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/configureFlowLog'}
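# Usage sketch (assumption: `network_client` is set up as in the sketch above;
# the NSG and storage account IDs below are placeholders).
#
#     from azure.mgmt.network.v2017_09_01.models import FlowLogInformation
#
#     flow_log_parameters = FlowLogInformation(
#         target_resource_id='<network-security-group-resource-id>',
#         storage_id='<storage-account-resource-id>',
#         enabled=True,
#     )
#     poller = network_client.network_watchers.set_flow_log_configuration(
#         'my-rg', 'my-watcher', flow_log_parameters)
#     flow_log = poller.result()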
def _get_flow_log_status_initial(
self, resource_group_name, network_watcher_name, target_resource_id, custom_headers=None, raw=False, **operation_config):
parameters = models.FlowLogStatusParameters(target_resource_id=target_resource_id)
# Construct URL
url = self.get_flow_log_status.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'FlowLogStatusParameters')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('FlowLogInformation', response)
if response.status_code == 202:
deserialized = self._deserialize('FlowLogInformation', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_flow_log_status(
self, resource_group_name, network_watcher_name, target_resource_id, custom_headers=None, raw=False, polling=True, **operation_config):
"""Queries status of flow log on a specified resource.
:param resource_group_name: The name of the network watcher resource
group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher resource.
:type network_watcher_name: str
:param target_resource_id: The target resource for which to query the
flow log status.
:type target_resource_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns FlowLogInformation or
ClientRawResponse<FlowLogInformation> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_09_01.models.FlowLogInformation]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2017_09_01.models.FlowLogInformation]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._get_flow_log_status_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
target_resource_id=target_resource_id,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('FlowLogInformation', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
get_flow_log_status.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/queryFlowLogStatus'}
def _check_connectivity_initial(
self, resource_group_name, network_watcher_name, source, destination, custom_headers=None, raw=False, **operation_config):
parameters = models.ConnectivityParameters(source=source, destination=destination)
# Construct URL
url = self.check_connectivity.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'ConnectivityParameters')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ConnectivityInformation', response)
if response.status_code == 202:
deserialized = self._deserialize('ConnectivityInformation', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def check_connectivity(
self, resource_group_name, network_watcher_name, source, destination, custom_headers=None, raw=False, polling=True, **operation_config):
"""Verifies the possibility of establishing a direct TCP connection from a
virtual machine to a given endpoint, including another VM or an
arbitrary remote server.
:param resource_group_name: The name of the network watcher resource
group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher resource.
:type network_watcher_name: str
:param source: Parameters that define the source of the connection.
:type source:
~azure.mgmt.network.v2017_09_01.models.ConnectivitySource
:param destination: Parameters that define the destination of the
connection.
:type destination:
~azure.mgmt.network.v2017_09_01.models.ConnectivityDestination
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns ConnectivityInformation
or ClientRawResponse<ConnectivityInformation> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_09_01.models.ConnectivityInformation]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2017_09_01.models.ConnectivityInformation]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._check_connectivity_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
source=source,
destination=destination,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('ConnectivityInformation', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
check_connectivity.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectivityCheck'}
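# Usage sketch (assumption: `network_client` is set up as in the earlier sketch;
# the VM resource ID and endpoint below are placeholders).
#
#     from azure.mgmt.network.v2017_09_01.models import (
#         ConnectivitySource, ConnectivityDestination)
#
#     source = ConnectivitySource(resource_id='<virtual-machine-resource-id>')
#     destination = ConnectivityDestination(address='www.example.com', port=443)
#     poller = network_client.network_watchers.check_connectivity(
#         'my-rg', 'my-watcher', source, destination)
#     info = poller.result()
#     print(info.connection_status, info.avg_latency_in_ms)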
def _get_azure_reachability_report_initial(
self, resource_group_name, network_watcher_name, parameters, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.get_azure_reachability_report.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'AzureReachabilityReportParameters')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('AzureReachabilityReport', response)
if response.status_code == 202:
deserialized = self._deserialize('AzureReachabilityReport', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_azure_reachability_report(
self, resource_group_name, network_watcher_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Gets the relative latency score for internet service providers from a
specified location to Azure regions.
:param resource_group_name: The name of the network watcher resource
group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher resource.
:type network_watcher_name: str
:param parameters: Parameters that determine Azure reachability report
configuration.
:type parameters:
~azure.mgmt.network.v2017_09_01.models.AzureReachabilityReportParameters
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns AzureReachabilityReport
or ClientRawResponse<AzureReachabilityReport> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_09_01.models.AzureReachabilityReport]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2017_09_01.models.AzureReachabilityReport]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._get_azure_reachability_report_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('AzureReachabilityReport', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
get_azure_reachability_report.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/azureReachabilityReport'}
def _list_available_providers_initial(
self, resource_group_name, network_watcher_name, parameters, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.list_available_providers.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'AvailableProvidersListParameters')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('AvailableProvidersList', response)
if response.status_code == 202:
deserialized = self._deserialize('AvailableProvidersList', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def list_available_providers(
self, resource_group_name, network_watcher_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Lists all available internet service providers for a specified Azure
region.
:param resource_group_name: The name of the network watcher resource
group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher resource.
:type network_watcher_name: str
:param parameters: Parameters that scope the list of available
providers.
:type parameters:
~azure.mgmt.network.v2017_09_01.models.AvailableProvidersListParameters
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns AvailableProvidersList
or ClientRawResponse<AvailableProvidersList> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_09_01.models.AvailableProvidersList]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2017_09_01.models.AvailableProvidersList]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._list_available_providers_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('AvailableProvidersList', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
list_available_providers.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/availableProvidersList'}
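# Usage sketch for the ISP queries (assumption: `network_client` is set up as in
# the earlier sketches; country names and dates are placeholders).
#
#     import datetime
#     from azure.mgmt.network.v2017_09_01.models import (
#         AvailableProvidersListParameters, AzureReachabilityReportParameters,
#         AzureReachabilityReportLocation)
#
#     providers = network_client.network_watchers.list_available_providers(
#         'my-rg', 'my-watcher',
#         AvailableProvidersListParameters(country='United States'),
#     ).result()
#     report = network_client.network_watchers.get_azure_reachability_report(
#         'my-rg', 'my-watcher',
#         AzureReachabilityReportParameters(
#             provider_location=AzureReachabilityReportLocation(country='United States'),
#             start_time=datetime.datetime(2017, 9, 1),
#             end_time=datetime.datetime(2017, 9, 10),
#         ),
#     ).result()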
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains a CloudTasksHook
which allows you to connect to the Google Cloud Tasks service
and perform actions on queues and tasks.
"""
from typing import Dict, List, Optional, Sequence, Tuple, Union
from google.api_core.retry import Retry
from google.cloud.tasks_v2 import CloudTasksClient
from google.cloud.tasks_v2.types import Queue, Task
from google.protobuf.field_mask_pb2 import FieldMask
from airflow.exceptions import AirflowException
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
class CloudTasksHook(GoogleBaseHook):
"""
Hook for Google Cloud Tasks APIs. Cloud Tasks allows developers to manage
the execution of background work in their applications.
All the methods in the hook where project_id is used must be called with
keyword arguments rather than positional.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:type gcp_conn_id: str
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account.
:type impersonation_chain: Union[str, Sequence[str]]
"""
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
delegate_to: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
) -> None:
super().__init__(
gcp_conn_id=gcp_conn_id,
delegate_to=delegate_to,
impersonation_chain=impersonation_chain,
)
self._client = None
def get_conn(self) -> CloudTasksClient:
"""
Provides a client for interacting with the Google Cloud Tasks API.
:return: Google Cloud Tasks API Client
:rtype: google.cloud.tasks_v2.CloudTasksClient
"""
if not self._client:
self._client = CloudTasksClient(credentials=self._get_credentials(), client_info=self.client_info)
return self._client
@GoogleBaseHook.fallback_to_default_project_id
def create_queue(
self,
location: str,
task_queue: Union[dict, Queue],
project_id: str,
queue_name: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
) -> Queue:
"""
Creates a queue in Cloud Tasks.
:param location: The location name in which the queue will be created.
:type location: str
:param task_queue: The task queue to create.
The queue's name cannot be the same as that of an existing queue.
If a dict is provided, it must be of the same form as the protobuf message Queue.
:type task_queue: dict or google.cloud.tasks_v2.types.Queue
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:type project_id: str
:param queue_name: (Optional) The queue's name.
If provided, it will be used to construct the full queue path.
:type queue_name: str
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
:rtype: google.cloud.tasks_v2.types.Queue
"""
client = self.get_conn()
if queue_name:
full_queue_name = f"projects/{project_id}/locations/{location}/queues/{queue_name}"
if isinstance(task_queue, Queue):
task_queue.name = full_queue_name
elif isinstance(task_queue, dict):
task_queue['name'] = full_queue_name
else:
raise AirflowException('Unable to set queue_name.')
full_location_path = f"projects/{project_id}/locations/{location}"
return client.create_queue(
request={'parent': full_location_path, 'queue': task_queue},
retry=retry,
timeout=timeout,
metadata=metadata or (),
)
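# Usage sketch (assumption: an Airflow connection for Google Cloud exists under
# the default `google_cloud_default` conn id; project, location and queue names
# are placeholders).
#
#     hook = CloudTasksHook(gcp_conn_id="google_cloud_default")
#     queue = hook.create_queue(
#         location="europe-west1",
#         task_queue=Queue(),
#         queue_name="my-queue",
#         project_id="my-project",
#     )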
@GoogleBaseHook.fallback_to_default_project_id
def update_queue(
self,
task_queue: Queue,
project_id: str,
location: Optional[str] = None,
queue_name: Optional[str] = None,
update_mask: Optional[FieldMask] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
) -> Queue:
"""
Updates a queue in Cloud Tasks.
:param task_queue: The task queue to update.
This method creates the queue if it does not exist and updates the queue if
it does exist. The queue's name must be specified.
:type task_queue: dict or google.cloud.tasks_v2.types.Queue
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:type project_id: str
:param location: (Optional) The location name in which the queue will be updated.
If provided, it will be used to construct the full queue path.
:type location: str
:param queue_name: (Optional) The queue's name.
If provided, it will be used to construct the full queue path.
:type queue_name: str
:param update_mask: A mask used to specify which fields of the queue are being updated.
If empty, then all fields will be updated.
If a dict is provided, it must be of the same form as the protobuf message.
:type update_mask: dict or google.protobuf.field_mask_pb2.FieldMask
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
:rtype: google.cloud.tasks_v2.types.Queue
"""
client = self.get_conn()
if queue_name and location:
full_queue_name = f"projects/{project_id}/locations/{location}/queues/{queue_name}"
if isinstance(task_queue, Queue):
task_queue.name = full_queue_name
elif isinstance(task_queue, dict):
task_queue['name'] = full_queue_name
else:
raise AirflowException('Unable to set queue_name.')
return client.update_queue(
request={'queue': task_queue, 'update_mask': update_mask},
retry=retry,
timeout=timeout,
metadata=metadata or (),
)
@GoogleBaseHook.fallback_to_default_project_id
def get_queue(
self,
location: str,
queue_name: str,
project_id: str,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
) -> Queue:
"""
Gets a queue from Cloud Tasks.
:param location: The location name in which the queue was created.
:type location: str
:param queue_name: The queue's name.
:type queue_name: str
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:type project_id: str
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
:rtype: google.cloud.tasks_v2.types.Queue
"""
client = self.get_conn()
full_queue_name = f"projects/{project_id}/locations/{location}/queues/{queue_name}"
return client.get_queue(
request={'name': full_queue_name}, retry=retry, timeout=timeout, metadata=metadata or ()
)
@GoogleBaseHook.fallback_to_default_project_id
def list_queues(
self,
location: str,
project_id: str,
results_filter: Optional[str] = None,
page_size: Optional[int] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
) -> List[Queue]:
"""
Lists queues from Cloud Tasks.
:param location: The location name in which the queues were created.
:type location: str
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:type project_id: str
:param results_filter: (Optional) Filter used to specify a subset of queues.
:type results_filter: str
:param page_size: (Optional) The maximum number of resources contained in the
underlying API response.
:type page_size: int
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
:rtype: list[google.cloud.tasks_v2.types.Queue]
"""
client = self.get_conn()
full_location_path = f"projects/{project_id}/locations/{location}"
queues = client.list_queues(
request={'parent': full_location_path, 'filter': results_filter, 'page_size': page_size},
retry=retry,
timeout=timeout,
metadata=metadata or (),
)
return list(queues)
@GoogleBaseHook.fallback_to_default_project_id
def delete_queue(
self,
location: str,
queue_name: str,
project_id: str,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
) -> None:
"""
Deletes a queue from Cloud Tasks, even if it has tasks in it.
:param location: The location name in which the queue will be deleted.
:type location: str
:param queue_name: The queue's name.
:type queue_name: str
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:type project_id: str
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
"""
client = self.get_conn()
full_queue_name = f"projects/{project_id}/locations/{location}/queues/{queue_name}"
client.delete_queue(
request={'name': full_queue_name}, retry=retry, timeout=timeout, metadata=metadata or ()
)
@GoogleBaseHook.fallback_to_default_project_id
def purge_queue(
self,
location: str,
queue_name: str,
project_id: str,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
) -> List[Queue]:
"""
Purges a queue by deleting all of its tasks from Cloud Tasks.
:param location: The location name in which the queue will be purged.
:type location: str
:param queue_name: The queue's name.
:type queue_name: str
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:type project_id: str
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
:rtype: list[google.cloud.tasks_v2.types.Queue]
"""
client = self.get_conn()
full_queue_name = f"projects/{project_id}/locations/{location}/queues/{queue_name}"
return client.purge_queue(
request={'name': full_queue_name}, retry=retry, timeout=timeout, metadata=metadata or ()
)
@GoogleBaseHook.fallback_to_default_project_id
def pause_queue(
self,
location: str,
queue_name: str,
project_id: str,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
) -> List[Queue]:
"""
Pauses a queue in Cloud Tasks.
:param location: The location name in which the queue will be paused.
:type location: str
:param queue_name: The queue's name.
:type queue_name: str
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:type project_id: str
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
:rtype: list[google.cloud.tasks_v2.types.Queue]
"""
client = self.get_conn()
full_queue_name = f"projects/{project_id}/locations/{location}/queues/{queue_name}"
return client.pause_queue(
request={'name': full_queue_name}, retry=retry, timeout=timeout, metadata=metadata or ()
)
@GoogleBaseHook.fallback_to_default_project_id
def resume_queue(
self,
location: str,
queue_name: str,
project_id: str,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
) -> List[Queue]:
"""
Resumes a queue in Cloud Tasks.
:param location: The location name in which the queue will be resumed.
:type location: str
:param queue_name: The queue's name.
:type queue_name: str
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:type project_id: str
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
:rtype: list[google.cloud.tasks_v2.types.Queue]
"""
client = self.get_conn()
full_queue_name = f"projects/{project_id}/locations/{location}/queues/{queue_name}"
return client.resume_queue(
request={'name': full_queue_name}, retry=retry, timeout=timeout, metadata=metadata or ()
)
@GoogleBaseHook.fallback_to_default_project_id
def create_task(
self,
location: str,
queue_name: str,
task: Union[Dict, Task],
project_id: str,
task_name: Optional[str] = None,
response_view: Optional[Task.View] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
) -> Task:
"""
Creates a task in Cloud Tasks.
:param location: The location name in which the task will be created.
:type location: str
:param queue_name: The queue's name.
:type queue_name: str
:param task: The task to add.
If a dict is provided, it must be of the same form as the protobuf message Task.
:type task: dict or google.cloud.tasks_v2.types.Task
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:type project_id: str
:param task_name: (Optional) The task's name.
If provided, it will be used to construct the full task path.
:type task_name: str
:param response_view: (Optional) This field specifies which subset of the Task will
be returned.
:type response_view: google.cloud.tasks_v2.Task.View
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
:rtype: google.cloud.tasks_v2.types.Task
"""
client = self.get_conn()
if task_name:
full_task_name = (
f"projects/{project_id}/locations/{location}/queues/{queue_name}/tasks/{task_name}"
)
if isinstance(task, Task):
task.name = full_task_name
elif isinstance(task, dict):
task['name'] = full_task_name
else:
raise AirflowException('Unable to set task_name.')
full_queue_name = f"projects/{project_id}/locations/{location}/queues/{queue_name}"
return client.create_task(
request={'parent': full_queue_name, 'task': task, 'response_view': response_view},
retry=retry,
timeout=timeout,
metadata=metadata or (),
)
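# Usage sketch (assumption: `hook` is a CloudTasksHook as in the earlier sketch;
# the HTTP target, queue and task names are placeholders; the dict follows the
# google.cloud.tasks_v2.types.Task schema).
#
#     task_body = {
#         "http_request": {
#             "http_method": "POST",
#             "url": "https://example.com/task-handler",
#             "body": b"payload",
#         }
#     }
#     created_task = hook.create_task(
#         location="europe-west1",
#         queue_name="my-queue",
#         task=task_body,
#         task_name="my-task",
#         project_id="my-project",
#     )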
@GoogleBaseHook.fallback_to_default_project_id
def get_task(
self,
location: str,
queue_name: str,
task_name: str,
project_id: str,
response_view: Optional[Task.View] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
) -> Task:
"""
Gets a task from Cloud Tasks.
:param location: The location name in which the task was created.
:type location: str
:param queue_name: The queue's name.
:type queue_name: str
:param task_name: The task's name.
:type task_name: str
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:type project_id: str
:param response_view: (Optional) This field specifies which subset of the Task will
be returned.
:type response_view: google.cloud.tasks_v2.Task.View
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
:rtype: google.cloud.tasks_v2.types.Task
"""
client = self.get_conn()
full_task_name = f"projects/{project_id}/locations/{location}/queues/{queue_name}/tasks/{task_name}"
return client.get_task(
request={'name': full_task_name, 'response_view': response_view},
retry=retry,
timeout=timeout,
metadata=metadata or (),
)
@GoogleBaseHook.fallback_to_default_project_id
def list_tasks(
self,
location: str,
queue_name: str,
project_id: str,
response_view: Optional[Task.View] = None,
page_size: Optional[int] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
) -> List[Task]:
"""
Lists the tasks in Cloud Tasks.
:param location: The location name in which the tasks were created.
:type location: str
:param queue_name: The queue's name.
:type queue_name: str
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:type project_id: str
:param response_view: (Optional) This field specifies which subset of the Task will
be returned.
:type response_view: google.cloud.tasks_v2.Task.View
:param page_size: (Optional) The maximum number of resources contained in the
underlying API response.
:type page_size: int
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
:rtype: list[google.cloud.tasks_v2.types.Task]
"""
client = self.get_conn()
full_queue_name = f"projects/{project_id}/locations/{location}/queues/{queue_name}"
tasks = client.list_tasks(
request={'parent': full_queue_name, 'response_view': response_view, 'page_size': page_size},
retry=retry,
timeout=timeout,
metadata=metadata or (),
)
return list(tasks)
@GoogleBaseHook.fallback_to_default_project_id
def delete_task(
self,
location: str,
queue_name: str,
task_name: str,
project_id: str,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
) -> None:
"""
Deletes a task from Cloud Tasks.
:param location: The location name in which the task will be deleted.
:type location: str
:param queue_name: The queue's name.
:type queue_name: str
:param task_name: The task's name.
:type task_name: str
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:type project_id: str
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
"""
client = self.get_conn()
full_task_name = f"projects/{project_id}/locations/{location}/queues/{queue_name}/tasks/{task_name}"
client.delete_task(
request={'name': full_task_name}, retry=retry, timeout=timeout, metadata=metadata or ()
)
@GoogleBaseHook.fallback_to_default_project_id
def run_task(
self,
location: str,
queue_name: str,
task_name: str,
project_id: str,
response_view: Optional[Task.View] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
) -> Task:
"""
Forces a task to run in Cloud Tasks.
:param location: The location name in which the task was created.
:type location: str
:param queue_name: The queue's name.
:type queue_name: str
:param task_name: The task's name.
:type task_name: str
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:type project_id: str
:param response_view: (Optional) This field specifies which subset of the Task will
be returned.
:type response_view: google.cloud.tasks_v2.Task.View
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
:rtype: google.cloud.tasks_v2.types.Task
"""
client = self.get_conn()
full_task_name = f"projects/{project_id}/locations/{location}/queues/{queue_name}/tasks/{task_name}"
return client.run_task(
request={'name': full_task_name, 'response_view': response_view},
retry=retry,
timeout=timeout,
metadata=metadata or (),
)
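# Queue lifecycle sketch tying the methods above together (assumption: names are
# placeholders and the default Google Cloud connection is configured).
#
#     hook = CloudTasksHook()
#     hook.pause_queue(location="europe-west1", queue_name="my-queue", project_id="my-project")
#     hook.resume_queue(location="europe-west1", queue_name="my-queue", project_id="my-project")
#     hook.purge_queue(location="europe-west1", queue_name="my-queue", project_id="my-project")
#     hook.delete_queue(location="europe-west1", queue_name="my-queue", project_id="my-project")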
|
|
#!/usr/bin/python2.4
#
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
"""Generates a Group Policy template file for Google Update policies.
The resulting strings and files use CRLF as required by gpedit.msc.
To unit test this module, just run the file from the command line.
"""
import codecs
import filecmp
import os
import sys
HORIZONTAL_RULE = ';%s\n' % ('-' * 78)
MAIN_POLICY_KEY = r'Software\Policies\Google\Update'
# pylint: disable-msg=C6004
HEADER = """\
CLASS MACHINE
CATEGORY !!Cat_Google
CATEGORY !!Cat_GoogleUpdate
KEYNAME \"""" + MAIN_POLICY_KEY + """\"
EXPLAIN !!Explain_GoogleUpdate
"""
PREFERENCES = """
CATEGORY !!Cat_Preferences
KEYNAME \"""" + MAIN_POLICY_KEY + """\"
EXPLAIN !!Explain_Preferences
POLICY !!Pol_AutoUpdateCheckPeriod
#if version >= 4
SUPPORTED !!Sup_GoogleUpdate1_2_145_5
#endif
EXPLAIN !!Explain_AutoUpdateCheckPeriod
PART !!Part_AutoUpdateCheckPeriod NUMERIC
VALUENAME AutoUpdateCheckPeriodMinutes
DEFAULT 1400 ; 23 hours 20 minutes.
MIN 0
MAX 43200 ; 30 days.
SPIN 60 ; Increment in hour chunks.
END PART
PART !!Part_DisableAllAutoUpdateChecks CHECKBOX
VALUENAME DisableAutoUpdateChecksCheckboxValue ; Required, unused.
ACTIONLISTON
; Writes over Part_AutoUpdateCheckPeriod. Assumes this runs last.
VALUENAME AutoUpdateCheckPeriodMinutes VALUE NUMERIC 0
END ACTIONLISTON
ACTIONLISTOFF
; Do nothing. Let Part_AutoUpdateCheckPeriod take effect.
END ACTIONLISTOFF
VALUEOFF NUMERIC 0
VALUEON NUMERIC 1
END PART
END POLICY
POLICY !!Pol_DownloadPreference
#if version >= 4
SUPPORTED !!Sup_GoogleUpdate1_3_26_0
#endif
EXPLAIN !!Explain_DownloadPreference
PART !!Part_DownloadPreference DROPDOWNLIST
VALUENAME "DownloadPreference"
ITEMLIST
NAME !!DownloadPreference_Cacheable VALUE "cacheable"
END ITEMLIST
END PART
END POLICY
END CATEGORY ; Preferences
CATEGORY !!Cat_ProxyServer
KEYNAME \"""" + MAIN_POLICY_KEY + """\"
POLICY !!Pol_ProxyMode
#if version >= 4
SUPPORTED !!Sup_GoogleUpdate1_3_21_81
#endif
EXPLAIN !!Explain_ProxyMode
PART !!Part_ProxyMode DROPDOWNLIST
VALUENAME "ProxyMode"
ITEMLIST
NAME !!ProxyDisabled_DropDown VALUE "direct"
NAME !!ProxyAutoDetect_DropDown VALUE "auto_detect"
NAME !!ProxyPacScript_DropDown VALUE "pac_script"
NAME !!ProxyFixedServers_DropDown VALUE "fixed_servers"
NAME !!ProxyUseSystem_DropDown VALUE "system"
END ITEMLIST
END PART
END POLICY
POLICY !!Pol_ProxyServer
#if version >= 4
SUPPORTED !!Sup_GoogleUpdate1_3_21_81
#endif
EXPLAIN !!Explain_ProxyServer
PART !!Part_ProxyServer EDITTEXT
VALUENAME "ProxyServer"
END PART
END POLICY
POLICY !!Pol_ProxyPacUrl
#if version >= 4
SUPPORTED !!Sup_GoogleUpdate1_3_21_81
#endif
EXPLAIN !!Explain_ProxyPacUrl
PART !!Part_ProxyPacUrl EDITTEXT
VALUENAME "ProxyPacUrl"
END PART
END POLICY
END CATEGORY
"""
APPLICATIONS_HEADER = """
CATEGORY !!Cat_Applications
KEYNAME \"""" + MAIN_POLICY_KEY + """\"
EXPLAIN !!Explain_Applications
"""
UPDATE_POLICY_ITEMLIST = """\
ITEMLIST
NAME !!Name_UpdatesEnabled
VALUE NUMERIC 1
NAME !!Name_ManualUpdatesOnly
VALUE NUMERIC 2
NAME !!Name_AutomaticUpdatesOnly
VALUE NUMERIC 3
NAME !!Name_UpdatesDisabled
VALUE NUMERIC 0
END ITEMLIST
REQUIRED"""
APPLICATION_DEFAULTS = ("""
POLICY !!Pol_DefaultAllowInstallation
#if version >= 4
SUPPORTED !!Sup_GoogleUpdate1_2_145_5
#endif
EXPLAIN !!Explain_DefaultAllowInstallation
VALUENAME InstallDefault
VALUEOFF NUMERIC 0
VALUEON NUMERIC 1
END POLICY
POLICY !!Pol_DefaultUpdatePolicy
#if version >= 4
SUPPORTED !!Sup_GoogleUpdate1_2_145_5
#endif
EXPLAIN !!Explain_DefaultUpdatePolicy
PART !!Part_UpdatePolicy DROPDOWNLIST
VALUENAME UpdateDefault
""" +
UPDATE_POLICY_ITEMLIST + """
END PART
END POLICY
""")
APP_POLICIES_TEMPLATE = ("""
CATEGORY !!Cat_$AppLegalId$
KEYNAME \"""" + MAIN_POLICY_KEY + """\"
POLICY !!Pol_AllowInstallation
#if version >= 4
SUPPORTED !!Sup_GoogleUpdate1_2_145_5
#endif
EXPLAIN !!Explain_Install$AppLegalId$
VALUENAME Install$AppGuid$
VALUEOFF NUMERIC 0
VALUEON NUMERIC 1
END POLICY
POLICY !!Pol_UpdatePolicy
#if version >= 4
SUPPORTED !!Sup_GoogleUpdate1_2_145_5
#endif
EXPLAIN !!Explain_AutoUpdate$AppLegalId$
PART !!Part_UpdatePolicy DROPDOWNLIST
VALUENAME Update$AppGuid$
""" +
UPDATE_POLICY_ITEMLIST.replace(' ', ' ') + """
END PART
END POLICY
END CATEGORY ; $AppName$
""")
APPLICATIONS_FOOTER = """
END CATEGORY ; Applications
END CATEGORY ; GoogleUpdate
END CATEGORY ; Google
"""
# Policy names that are used in multiple locations.
ALLOW_INSTALLATION_POLICY = 'Allow installation'
DEFAULT_ALLOW_INSTALLATION_POLICY = ALLOW_INSTALLATION_POLICY + ' default'
UPDATE_POLICY = 'Update policy override'
DEFAULT_UPDATE_POLICY = UPDATE_POLICY + ' default'
# Update policy options that are used in multiple locations.
UPDATES_ENABLED = 'Always allow updates'
AUTOMATIC_UPDATES_ONLY = 'Automatic silent updates only'
MANUAL_UPDATES_ONLY = 'Manual updates only'
UPDATES_DISABLED = 'Updates disabled'
# Category names that are used in multiple locations.
PREFERENCES_CATEGORY = 'Preferences'
PROXYSERVER_CATEGORY = 'Proxy Server'
APPLICATIONS_CATEGORY = 'Applications'
# The captions for update policy were selected such that they appear in order of
# decreasing preference when organized alphabetically in gpedit.
STRINGS_HEADER_AND_COMMON = ('\n' +
HORIZONTAL_RULE +
"""
[strings]
Sup_GoogleUpdate1_2_145_5=At least Google Update 1.2.145.5
Sup_GoogleUpdate1_3_21_81=At least Google Update 1.3.21.81
Sup_GoogleUpdate1_3_26_0=At least Google Update 1.3.26.0
Cat_Google=Google
Cat_GoogleUpdate=Google Update
Cat_Preferences=""" + PREFERENCES_CATEGORY + """
Cat_ProxyServer=""" + PROXYSERVER_CATEGORY + """
Cat_Applications=""" + APPLICATIONS_CATEGORY + """
Pol_AutoUpdateCheckPeriod=Auto-update check period override
Pol_DownloadPreference=Download URL class override
Pol_ProxyMode=Choose how to specify proxy server settings
Pol_ProxyServer=Address or URL of proxy server
Pol_ProxyPacUrl=URL to a proxy .pac file
Pol_DefaultAllowInstallation=""" + DEFAULT_ALLOW_INSTALLATION_POLICY + """
Pol_AllowInstallation=""" + ALLOW_INSTALLATION_POLICY + """
Pol_DefaultUpdatePolicy=""" + DEFAULT_UPDATE_POLICY + """
Pol_UpdatePolicy=""" + UPDATE_POLICY + """
Part_AutoUpdateCheckPeriod=Minutes between update checks
Part_DownloadPreference=Type of download URL to request
Part_DisableAllAutoUpdateChecks=Disable all auto-update checks (not recommended)
Part_ProxyMode=Choose how to specify proxy server settings
Part_ProxyServer=Address or URL of proxy server
Part_ProxyPacUrl=URL to a proxy .pac file
Part_UpdatePolicy=Policy
Name_UpdatesEnabled=""" + UPDATES_ENABLED + """ (recommended)
Name_ManualUpdatesOnly=""" + MANUAL_UPDATES_ONLY + """
Name_AutomaticUpdatesOnly=""" + AUTOMATIC_UPDATES_ONLY + """
Name_UpdatesDisabled=""" + UPDATES_DISABLED + """
ProxyDisabled_DropDown=Never use a proxy
ProxyAutoDetect_DropDown=Auto detect proxy settings
ProxyPacScript_DropDown=Use a .pac proxy script
ProxyFixedServers_DropDown=Use fixed proxy servers
ProxyUseSystem_DropDown=Use system proxy settings
DownloadPreference_Cacheable=Cacheable download URLs
""")
STRINGS_APP_NAME_TEMPLATE = """\
Cat_$AppLegalId$=$AppName$
"""
# pylint: disable-msg=C6310
# pylint: disable-msg=C6013
# "application's" should be preceeded by a different word in different contexts.
# The word is specified by replacing the $PreApplicationWord$ token.
STRINGS_UPDATE_POLICY_OPTIONS = """\
\\n\\nOptions:\\
\\n - """ + UPDATES_ENABLED + """: Updates are always applied when found, either by periodic update check or by a manual update check.\\
\\n - """ + MANUAL_UPDATES_ONLY + """: Updates are only applied when the user does a manual update check. (Not all apps provide an interface for this.)\\
\\n - """ + AUTOMATIC_UPDATES_ONLY + """: Updates are only applied when they are found via the periodic update check.\\
\\n - """ + UPDATES_DISABLED + """: Never apply updates.\\
\\n\\nIf you select manual updates, you should periodically check for updates using $PreApplicationWord$ application's manual update mechanism if available. If you disable updates, you should periodically check for updates and distribute them to users."""
STRINGS_COMMON_EXPLANATIONS = ("""
Explain_GoogleUpdate=Policies to control the installation and updating of Google applications that use Google Update/Google Installer.
""" +
HORIZONTAL_RULE +
'; ' + PREFERENCES_CATEGORY + '\n' +
HORIZONTAL_RULE + """
Explain_Preferences=General policies for Google Update.
Explain_AutoUpdateCheckPeriod=Minimum number of minutes between automatic update checks.
Explain_DownloadPreference=If enabled, the Google Update server will attempt to provide cache-friendly URLs for update payloads in its responses.
Explain_ProxyMode=Allows you to specify the proxy server used by Google Update.\\n\\nIf you choose to never use a proxy server and always connect directly, all other options are ignored.\\n\\nIf you choose to use system proxy settings or auto detect the proxy server, all other options are ignored.\\n\\nIf you choose fixed server proxy mode, you can specify further options in 'Address or URL of proxy server'.\\n\\nIf you choose to use a .pac proxy script, you must specify the URL to the script in 'URL to a proxy .pac file'.
Explain_ProxyServer=You can specify the URL of the proxy server here.\\n\\nThis policy only takes effect if you have selected manual proxy settings at 'Choose how to specify proxy server settings'.
Explain_ProxyPacUrl=You can specify a URL to a proxy .pac file here.\\n\\nThis policy only takes effect if you have selected manual proxy settings at 'Choose how to specify proxy server settings'.
""" +
HORIZONTAL_RULE +
'; ' + APPLICATIONS_CATEGORY + '\n' +
HORIZONTAL_RULE + """
Explain_Applications=Policies for individual applications.\\
\\n\\nAn updated ADM template will be required to support Google applications released in the future.
Explain_DefaultAllowInstallation=Specifies the default behavior for whether Google software can be installed using Google Update/Google Installer.\\
\\n\\nCan be overridden by the \"""" + ALLOW_INSTALLATION_POLICY + """\" for individual applications.\\
\\n\\nOnly affects installation of Google software using Google Update/Google Installer. Cannot prevent running the application installer directly or installation of Google software that does not use Google Update/Google Installer for installation.
Explain_DefaultUpdatePolicy=Specifies the default policy for software updates from Google.\\
\\n\\nCan be overridden by the \"""" + UPDATE_POLICY + """\" for individual applications.\\
""" +
STRINGS_UPDATE_POLICY_OPTIONS.replace('$PreApplicationWord$', 'each') + """\\
\\n\\nOnly affects updates for Google software that uses Google Update for updates. Does not prevent auto-updates of Google software that does not use Google Update for updates.\\
\\n\\nUpdates for Google Update are not affected by this setting; Google Update will continue to update itself while it is installed.\\
\\n\\nWARNING: Disabling updates will also prevent updates of any new Google applications released in the future, possibly including dependencies for future versions of installed applications.
""" +
HORIZONTAL_RULE +
'; Individual Applications\n' +
HORIZONTAL_RULE)
STRINGS_APP_POLICY_EXPLANATIONS_TEMPLATE = ("""
; $AppName$
Explain_Install$AppLegalId$=Specifies whether $AppName$ can be installed using Google Update/Google Installer.\\
\\n\\nIf this policy is not configured, $AppName$ can be installed as specified by \"""" + DEFAULT_ALLOW_INSTALLATION_POLICY + """\".
Explain_AutoUpdate$AppLegalId$=Specifies how Google Update handles available $AppName$ updates from Google.\\
\\n\\nIf this policy is not configured, Google Update handles available updates as specified by \"""" + DEFAULT_UPDATE_POLICY + """\".\\
""" +
STRINGS_UPDATE_POLICY_OPTIONS.replace('$PreApplicationWord$', 'the') + '$AppUpdateExplainExtra$\n')
# pylint: enable-msg=C6013
# pylint: enable-msg=C6310
# pylint: enable-msg=C6004
def GenerateGroupPolicyTemplate(apps):
# pylint: disable-msg=C6114
"""Generates a Group Policy template (ADM format)for the specified apps.
Replaces LF in strings above with CRLF as required by gpedit.msc.
When writing the resulting contents to a file, use binary mode to ensure the
CRLFs are preserved.
Args:
apps: A list of tuples containing information about each app.
Each element of the list is a tuple of:
* app name
* app ID
* optional string to append to the auto-update explanation
- Should start with a space or double new line (\n\n).
Returns:
String containing the contents of the .ADM file.
"""
# pylint: enable-msg=C6114
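# (For a concrete example of the expected app tuples, see TEST_APPS in the
# unit test at the bottom of this module.)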
def _CreateLegalIdentifier(input_string):
"""Converts input_string to a legal identifier for ADM files.
Changes some characters that do not necessarily cause problems and may not
handle all cases.
Args:
input_string: Text to convert to a legal identifier.
Returns:
String containing a legal identifier based on input_string.
"""
# pylint: disable-msg=C6004
return (input_string.replace(' ', '')
.replace('&', '')
.replace('=', '')
.replace(';', '')
.replace(',', '')
.replace('.', '')
.replace('?', '')
.replace("'", '')
.replace('"', '')
.replace('\\', '')
.replace('/', '')
.replace('(', '')
.replace(')', '')
.replace('[', '')
.replace(']', '')
.replace('{', '')
.replace('}', '')
.replace('-', '')
.replace('!', '')
.replace('@', '')
.replace('#', '')
.replace('$', '')
.replace('%', '')
.replace('^', '')
.replace('*', '')
.replace('+', '')
.replace(u'\u00a9', '') # Copyright (C).
.replace(u'\u00ae', '') # Registered Trademark (R).
.replace(u'\u2122', '')) # Trademark (TM).
# pylint: enable-msg=C6004
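# For illustration: an app name such as u'Google Chrome\u00ae' would yield
# the legal identifier 'GoogleChrome'.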
def _WriteTemplateForApp(template, app):
"""Writes the text for the specified app based on the template.
Replaces $AppName$, $AppLegalId$, $AppGuid$, and $AppUpdateExplainExtra$.
Args:
template: text to process and write.
app: tuple containing information about the app.
Returns:
String containing a copy of the template populated with app-specific
strings.
"""
(app_name, app_guid, update_explain_extra) = app
# pylint: disable-msg=C6004
return (template.replace('$AppName$', app_name)
.replace('$AppLegalId$', _CreateLegalIdentifier(app_name))
.replace('$AppGuid$', app_guid)
.replace('$AppUpdateExplainExtra$', update_explain_extra)
)
# pylint: enable-msg=C6004
def _WriteTemplateForAllApps(template, apps):
"""Writes a copy of the template for each of the specified apps.
Args:
template: text to process and write.
apps: list of tuples containing information about the apps.
Returns:
String containing concatenated copies of the template for each app in
apps, each populated with the appropriate app-specific strings.
"""
content = [_WriteTemplateForApp(template, app) for app in apps]
return ''.join(content)
target_contents = [
HEADER,
PREFERENCES,
APPLICATIONS_HEADER,
APPLICATION_DEFAULTS,
_WriteTemplateForAllApps(APP_POLICIES_TEMPLATE, apps),
APPLICATIONS_FOOTER,
STRINGS_HEADER_AND_COMMON,
_WriteTemplateForAllApps(STRINGS_APP_NAME_TEMPLATE, apps),
STRINGS_COMMON_EXPLANATIONS,
_WriteTemplateForAllApps(STRINGS_APP_POLICY_EXPLANATIONS_TEMPLATE, apps),
]
# Join the sections of content then replace LF with CRLF.
return ''.join(target_contents).replace('\n', '\r\n')
def WriteGroupPolicyTemplate(target_path, apps):
"""Writes a Group Policy template (ADM format)for the specified apps.
The file is UTF-16 and contains CRLF on all platforms.
Args:
target_path: Output path of the .ADM template file.
apps: A list of tuples containing information about each app.
Each element of the list is a tuple of:
* app name
* app ID
* optional string to append to the auto-update explanation
- Should start with a space or double new line (\n\n).
""" # pylint: disable-msg=C6114
contents = GenerateGroupPolicyTemplate(apps)
f = codecs.open(target_path, 'wb', 'utf-16')
f.write(contents)
f.close()
# Run a unit test when the module is run directly.
if __name__ == '__main__':
TEST_APPS = [
('Google Test Foo',
'{D6B08267-B440-4c85-9F79-E195E80D9937}',
' Check http://www.google.com/test_foo/.'),
(u'Google User Test Foo\u00a9\u00ae\u2122',
'{104844D6-7DDA-460b-89F0-FBF8AFDD0A67}',
' Check http://www.google.com/user_test_foo/.'),
]
TEST_GOLD_FILENAME = 'test_gold.adm'
TEST_OUTPUT_FILENAME = 'test_out.adm'
module_dir = os.path.abspath(os.path.dirname(__file__))
gold_path = os.path.join(module_dir, TEST_GOLD_FILENAME)
output_path = os.path.join(module_dir, TEST_OUTPUT_FILENAME)
WriteGroupPolicyTemplate(output_path, TEST_APPS)
if filecmp.cmp(gold_path, output_path, shallow=False):
print 'PASS: Contents equal.'
else:
print 'FAIL: Contents not equal.'
sys.exit(-1)
|
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
import sys
import traceback
from zope.interface import implements
from twisted.trial.unittest import TestCase
from twisted.internet.defer import succeed, gatherResults
from twisted.web._stan import Tag
from twisted.web._flatten import flattenString
from twisted.web.error import UnfilledSlot, UnsupportedType, FlattenerError
from twisted.web.template import tags, Comment, CDATA, CharRef, slot
from twisted.web.iweb import IRenderable
from twisted.web.test._util import FlattenTestCase
class TestSerialization(FlattenTestCase):
"""
Tests for flattening various things.
"""
def test_nestedTags(self):
"""
Test that nested tags flatten correctly.
"""
return self.assertFlattensTo(
tags.html(tags.body('42'), hi='there'),
'<html hi="there"><body>42</body></html>')
def test_serializeString(self):
"""
Test that strings will be flattened and escaped correctly.
"""
return gatherResults([
self.assertFlattensTo('one', 'one'),
self.assertFlattensTo('<abc&&>123', '&lt;abc&amp;&amp;&gt;123'),
])
def test_serializeSelfClosingTags(self):
"""
Test that some tags are normally written out as self-closing tags.
"""
return self.assertFlattensTo(tags.img(src='test'), '<img src="test" />')
def test_serializeComment(self):
"""
Test that comments are correctly flattened and escaped.
"""
return self.assertFlattensTo(Comment('foo bar'), '<!--foo bar-->')
def test_commentEscaping(self):
"""
The data in a L{Comment} is escaped and mangled in the flattened output
so that the result is a legal SGML and XML comment.
SGML comment syntax is complicated and hard to use. This rule is more
restrictive, and more compatible:
Comments start with <!-- and end with --> and never contain -- or >.
Also by XML syntax, a comment may not end with '-'.
@see: U{http://www.w3.org/TR/REC-xml/#sec-comments}
"""
def verifyComment(c):
self.assertTrue(
c.startswith('<!--'),
"%r does not start with the comment prefix" % (c,))
self.assertTrue(
c.endswith('-->'),
"%r does not end with the comment suffix" % (c,))
# If it is shorter than 7, then the prefix and suffix overlap
# illegally.
self.assertTrue(
len(c) >= 7,
"%r is too short to be a legal comment" % (c,))
content = c[4:-3]
self.assertNotIn('--', content)
self.assertNotIn('>', content)
if content:
self.assertNotEqual(content[-1], '-')
results = []
for c in [
'',
'foo---bar',
'foo---bar-',
'foo>bar',
'foo-->bar',
'----------------',
]:
d = flattenString(None, Comment(c))
d.addCallback(verifyComment)
results.append(d)
return gatherResults(results)
def test_serializeCDATA(self):
"""
Test that CDATA is correctly flattened and escaped.
"""
return gatherResults([
self.assertFlattensTo(CDATA('foo bar'), '<![CDATA[foo bar]]>'),
self.assertFlattensTo(
CDATA('foo ]]> bar'),
'<![CDATA[foo ]]]]><![CDATA[> bar]]>'),
])
def test_serializeUnicode(self):
"""
Test that unicode is encoded correctly in the appropriate places, and
raises an error when it occurs in inappropriate places.
"""
snowman = u'\N{SNOWMAN}'
return gatherResults([
self.assertFlattensTo(snowman, '\xe2\x98\x83'),
self.assertFlattensTo(tags.p(snowman), '<p>\xe2\x98\x83</p>'),
self.assertFlattensTo(Comment(snowman), '<!--\xe2\x98\x83-->'),
self.assertFlattensTo(CDATA(snowman), '<![CDATA[\xe2\x98\x83]]>'),
self.assertFlatteningRaises(
Tag(snowman), UnicodeEncodeError),
self.assertFlatteningRaises(
Tag('p', attributes={snowman: ''}), UnicodeEncodeError),
])
def test_serializeCharRef(self):
"""
A character reference is flattened to a string using the I{&#NNNN;}
syntax.
"""
ref = CharRef(ord(u"\N{SNOWMAN}"))
return self.assertFlattensTo(ref, "&#9731;")
def test_serializeDeferred(self):
"""
Test that a deferred is substituted with the current value in the
callback chain when flattened.
"""
return self.assertFlattensTo(succeed('two'), 'two')
def test_serializeSameDeferredTwice(self):
"""
Test that the same deferred can be flattened twice.
"""
d = succeed('three')
return gatherResults([
self.assertFlattensTo(d, 'three'),
self.assertFlattensTo(d, 'three'),
])
def test_serializeIRenderable(self):
"""
Test that flattening respects all of the IRenderable interface.
"""
class FakeElement(object):
implements(IRenderable)
def render(ign, ored):
return tags.p(
'hello, ',
tags.transparent(render='test'), ' - ',
tags.transparent(render='test'))
def lookupRenderMethod(ign, name):
self.assertEqual(name, 'test')
return lambda ign, node: node('world')
return gatherResults([
self.assertFlattensTo(FakeElement(), '<p>hello, world - world</p>'),
])
def test_serializeSlots(self):
"""
Test that flattening a slot will use the slot value from the tag.
"""
t1 = tags.p(slot('test'))
t2 = t1.clone()
t2.fillSlots(test='hello, world')
return gatherResults([
self.assertFlatteningRaises(t1, UnfilledSlot),
self.assertFlattensTo(t2, '<p>hello, world</p>'),
])
def test_serializeDeferredSlots(self):
"""
Test that a slot with a deferred as its value will be flattened using
the value from the deferred.
"""
t = tags.p(slot('test'))
t.fillSlots(test=succeed(tags.em('four>')))
return self.assertFlattensTo(t, '<p><em>four&gt;</em></p>')
def test_unknownTypeRaises(self):
"""
Test that flattening an unknown type of thing raises an exception.
"""
return self.assertFlatteningRaises(None, UnsupportedType)
# Use the co_filename mechanism (instead of the __file__ mechanism) because
# it is the mechanism traceback formatting uses. The two do not necessarily
# agree with each other. This requires a code object compiled in this file.
# The easiest way to get a code object is with a new function. I'll use a
# lambda to avoid adding anything else to this namespace. The result will
# be a string which agrees with the one the traceback module will put into a
# traceback for frames associated with functions defined in this file.
HERE = (lambda: None).func_code.co_filename
class FlattenerErrorTests(TestCase):
"""
Tests for L{FlattenerError}.
"""
def test_string(self):
"""
If a L{FlattenerError} is created with a string root, up to around 40
bytes from that string are included in the string representation of the
exception.
"""
self.assertEqual(
str(FlattenerError(RuntimeError("reason"), ['abc123xyz'], [])),
"Exception while flattening:\n"
" 'abc123xyz'\n"
"RuntimeError: reason\n")
self.assertEqual(
str(FlattenerError(
RuntimeError("reason"), ['0123456789' * 10], [])),
"Exception while flattening:\n"
" '01234567890123456789<...>01234567890123456789'\n"
"RuntimeError: reason\n")
def test_unicode(self):
"""
If a L{FlattenerError} is created with a unicode root, up to around 40
characters from that string are included in the string representation
of the exception.
"""
self.assertEqual(
str(FlattenerError(
RuntimeError("reason"), [u'abc\N{SNOWMAN}xyz'], [])),
"Exception while flattening:\n"
" u'abc\\u2603xyz'\n" # Codepoint for SNOWMAN
"RuntimeError: reason\n")
self.assertEqual(
str(FlattenerError(
RuntimeError("reason"), [u'01234567\N{SNOWMAN}9' * 10],
[])),
"Exception while flattening:\n"
" u'01234567\\u2603901234567\\u26039<...>01234567\\u2603901234567"
"\\u26039'\n"
"RuntimeError: reason\n")
def test_renderable(self):
"""
If a L{FlattenerError} is created with an L{IRenderable} provider root,
the repr of that object is included in the string representation of the
exception.
"""
class Renderable(object):
implements(IRenderable)
def __repr__(self):
return "renderable repr"
self.assertEqual(
str(FlattenerError(
RuntimeError("reason"), [Renderable()], [])),
"Exception while flattening:\n"
" renderable repr\n"
"RuntimeError: reason\n")
def test_tag(self):
"""
If a L{FlattenerError} is created with a L{Tag} instance with source
location information, the source location is included in the string
representation of the exception.
"""
tag = Tag(
'div', filename='/foo/filename.xhtml', lineNumber=17, columnNumber=12)
self.assertEqual(
str(FlattenerError(RuntimeError("reason"), [tag], [])),
"Exception while flattening:\n"
" File \"/foo/filename.xhtml\", line 17, column 12, in \"div\"\n"
"RuntimeError: reason\n")
def test_tagWithoutLocation(self):
"""
If a L{FlattenerError} is created with a L{Tag} instance without source
location information, only the tagName is included in the string
representation of the exception.
"""
self.assertEqual(
str(FlattenerError(RuntimeError("reason"), [Tag('span')], [])),
"Exception while flattening:\n"
" Tag <span>\n"
"RuntimeError: reason\n")
def test_traceback(self):
"""
If a L{FlattenerError} is created with traceback frames, they are
included in the string representation of the exception.
"""
# Try to be realistic in creating the data passed in for the traceback
# frames.
def f():
g()
def g():
raise RuntimeError("reason")
try:
f()
except RuntimeError, exc:
# Get the traceback, minus the info for *this* frame
tbinfo = traceback.extract_tb(sys.exc_info()[2])[1:]
else:
self.fail("f() must raise RuntimeError")
self.assertEqual(
str(FlattenerError(exc, [], tbinfo)),
"Exception while flattening:\n"
" File \"%s\", line %d, in f\n"
" g()\n"
" File \"%s\", line %d, in g\n"
" raise RuntimeError(\"reason\")\n"
"RuntimeError: reason\n" % (
HERE, f.func_code.co_firstlineno + 1,
HERE, g.func_code.co_firstlineno + 1))
|
|
# Copyright (c) 2011-2014 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import unittest
from mock import patch, MagicMock
import calendar
from datetime import datetime
import mock
import requests
import json
import six
from paste.deploy import loadwsgi
from six.moves.urllib.parse import unquote, quote
import swift.common.middleware.s3api
from swift.common.middleware.s3api.utils import Config
from swift.common.middleware.keystoneauth import KeystoneAuth
from swift.common import swob, registry
from swift.common.swob import Request
from swift.common.utils import md5
from keystonemiddleware.auth_token import AuthProtocol
from keystoneauth1.access import AccessInfoV2
from test.debug_logger import debug_logger
from test.unit.common.middleware.s3api import S3ApiTestCase
from test.unit.common.middleware.s3api.helpers import FakeSwift
from test.unit.common.middleware.s3api.test_s3token import \
GOOD_RESPONSE_V2, GOOD_RESPONSE_V3
from swift.common.middleware.s3api.s3request import SigV4Request, S3Request
from swift.common.middleware.s3api.etree import fromstring
from swift.common.middleware.s3api.s3api import filter_factory, \
S3ApiMiddleware
from swift.common.middleware.s3api.s3token import S3Token
class TestListingMiddleware(S3ApiTestCase):
def test_s3_etag_in_json(self):
# This translation happens all the time, even on normal swift requests
body_data = json.dumps([
{'name': 'obj1', 'hash': '0123456789abcdef0123456789abcdef'},
{'name': 'obj2', 'hash': 'swiftetag; s3_etag=mu-etag'},
{'name': 'obj2', 'hash': 'swiftetag; something=else'},
{'subdir': 'path/'},
]).encode('ascii')
self.swift.register(
'GET', '/v1/a/c', swob.HTTPOk,
{'Content-Type': 'application/json; charset=UTF-8'},
body_data)
req = Request.blank('/v1/a/c')
status, headers, body = self.call_s3api(req)
self.assertEqual(json.loads(body), [
{'name': 'obj1', 'hash': '0123456789abcdef0123456789abcdef'},
{'name': 'obj2', 'hash': 'swiftetag', 's3_etag': '"mu-etag"'},
{'name': 'obj2', 'hash': 'swiftetag; something=else'},
{'subdir': 'path/'},
])
def test_s3_etag_non_json(self):
self.swift.register(
'GET', '/v1/a/c', swob.HTTPOk,
{'Content-Type': 'application/json; charset=UTF-8'},
b'Not actually JSON')
req = Request.blank('/v1/a/c')
status, headers, body = self.call_s3api(req)
self.assertEqual(body, b'Not actually JSON')
# Yes JSON, but wrong content-type
body_data = json.dumps([
{'name': 'obj1', 'hash': '0123456789abcdef0123456789abcdef'},
{'name': 'obj2', 'hash': 'swiftetag; s3_etag=mu-etag'},
{'name': 'obj2', 'hash': 'swiftetag; something=else'},
{'subdir': 'path/'},
]).encode('ascii')
self.swift.register(
'GET', '/v1/a/c', swob.HTTPOk,
{'Content-Type': 'text/plain; charset=UTF-8'},
body_data)
req = Request.blank('/v1/a/c')
status, headers, body = self.call_s3api(req)
self.assertEqual(body, body_data)
class TestS3ApiMiddleware(S3ApiTestCase):
def setUp(self):
super(TestS3ApiMiddleware, self).setUp()
self.swift.register('GET', '/something', swob.HTTPOk, {}, 'FAKE APP')
def test_init_config(self):
# verify config loading
# note: test confs do not have __file__ attribute so check_pipeline
# will be short-circuited
# check all defaults
expected = dict(Config())
expected.update({
'auth_pipeline_check': True,
'check_bucket_owner': False,
'max_bucket_listing': 1000,
'max_multi_delete_objects': 1000,
'max_parts_listing': 1000,
'max_upload_part_num': 1000,
'min_segment_size': 5242880,
'multi_delete_concurrency': 2,
's3_acl': False,
'cors_preflight_allow_origin': [],
'ratelimit_as_client_error': False,
})
s3api = S3ApiMiddleware(None, {})
self.assertEqual(expected, s3api.conf)
# check all non-defaults are loaded
conf = {
'storage_domain': 'somewhere,some.other.where',
'location': 'us-west-1',
'force_swift_request_proxy_log': True,
'dns_compliant_bucket_names': False,
'allow_multipart_uploads': False,
'allow_no_owner': True,
'allowable_clock_skew': 300,
'auth_pipeline_check': False,
'check_bucket_owner': True,
'max_bucket_listing': 500,
'max_multi_delete_objects': 600,
'max_parts_listing': 70,
'max_upload_part_num': 800,
'min_segment_size': 1000000,
'multi_delete_concurrency': 1,
's3_acl': True,
'cors_preflight_allow_origin': 'foo.example.com,bar.example.com',
'ratelimit_as_client_error': True,
}
s3api = S3ApiMiddleware(None, conf)
conf['cors_preflight_allow_origin'] = \
conf['cors_preflight_allow_origin'].split(',')
conf['storage_domains'] = conf.pop('storage_domain').split(',')
self.assertEqual(conf, s3api.conf)
# test that an allow_origin list containing '*' along with other entries fails.
conf = {
'storage_domain': 'somewhere',
'location': 'us-west-1',
'force_swift_request_proxy_log': True,
'dns_compliant_bucket_names': False,
'allow_multipart_uploads': False,
'allow_no_owner': True,
'allowable_clock_skew': 300,
'auth_pipeline_check': False,
'check_bucket_owner': True,
'max_bucket_listing': 500,
'max_multi_delete_objects': 600,
'max_parts_listing': 70,
'max_upload_part_num': 800,
'min_segment_size': 1000000,
'multi_delete_concurrency': 1,
's3_acl': True,
'cors_preflight_allow_origin': 'foo.example.com,bar.example.com,*',
}
with self.assertRaises(ValueError) as ex:
S3ApiMiddleware(None, conf)
self.assertIn("if cors_preflight_allow_origin should include all "
"domains, * must be the only entry", str(ex.exception))
def check_bad_positive_ints(**kwargs):
bad_conf = dict(conf, **kwargs)
self.assertRaises(ValueError, S3ApiMiddleware, None, bad_conf)
check_bad_positive_ints(allowable_clock_skew=-100)
check_bad_positive_ints(allowable_clock_skew=0)
check_bad_positive_ints(max_bucket_listing=-100)
check_bad_positive_ints(max_bucket_listing=0)
check_bad_positive_ints(max_multi_delete_objects=-100)
check_bad_positive_ints(max_multi_delete_objects=0)
check_bad_positive_ints(max_parts_listing=-100)
check_bad_positive_ints(max_parts_listing=0)
check_bad_positive_ints(max_upload_part_num=-100)
check_bad_positive_ints(max_upload_part_num=0)
check_bad_positive_ints(min_segment_size=-100)
check_bad_positive_ints(min_segment_size=0)
check_bad_positive_ints(multi_delete_concurrency=-100)
check_bad_positive_ints(multi_delete_concurrency=0)
def test_init_passes_wsgi_conf_file_to_check_pipeline(self):
# verify that check_pipeline is called during init: add __file__ attr
# to test config to make it more representative of middleware being
# init'd by wsgi
context = mock.Mock()
with patch("swift.common.middleware.s3api.s3api.loadcontext",
return_value=context) as loader, \
patch("swift.common.middleware.s3api.s3api.PipelineWrapper") \
as pipeline:
conf = dict(self.conf,
auth_pipeline_check=True,
__file__='proxy-conf-file')
pipeline.return_value = 's3api tempauth proxy-server'
self.s3api = S3ApiMiddleware(None, conf)
loader.assert_called_with(loadwsgi.APP, 'proxy-conf-file')
pipeline.assert_called_with(context)
def test_non_s3_request_passthrough(self):
req = Request.blank('/something')
status, headers, body = self.call_s3api(req)
self.assertEqual(body, b'FAKE APP')
def test_bad_format_authorization(self):
req = Request.blank('/something',
headers={'Authorization': 'hoge',
'Date': self.get_date_header()})
status, headers, body = self.call_s3api(req)
self.assertEqual(self._get_error_code(body), 'AccessDenied')
def test_bad_method(self):
req = Request.blank('/',
environ={'REQUEST_METHOD': 'PUT'},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header()})
status, headers, body = self.call_s3api(req)
self.assertEqual(self._get_error_code(body), 'MethodNotAllowed')
def test_bad_method_but_method_exists_in_controller(self):
req = Request.blank(
'/bucket',
environ={'REQUEST_METHOD': '_delete_segments_bucket'},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header()})
status, headers, body = self.call_s3api(req)
self.assertEqual(self._get_error_code(body), 'MethodNotAllowed')
def test_path_info_encode(self):
bucket_name = 'b%75cket'
object_name = 'ob%6aect:1'
self.swift.register('GET', '/v1/AUTH_test/bucket/object:1',
swob.HTTPOk, {}, None)
req = Request.blank('/%s/%s' % (bucket_name, object_name),
environ={'REQUEST_METHOD': 'GET'},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header()})
status, headers, body = self.call_s3api(req)
raw_path_info = "/%s/%s" % (bucket_name, object_name)
path_info = req.environ['PATH_INFO']
self.assertEqual(path_info, unquote(raw_path_info))
self.assertEqual(req.path, quote(path_info))
self.assertIn('swift.backend_path', req.environ)
self.assertEqual('/v1/AUTH_test/bucket/object:1',
req.environ['swift.backend_path'])
def test_canonical_string_v2(self):
"""
The hashes here were generated by running the same requests against
boto.utils.canonical_string
"""
def canonical_string(path, headers):
if '?' in path:
path, query_string = path.split('?', 1)
else:
query_string = ''
env = {
'REQUEST_METHOD': 'GET',
'PATH_INFO': path,
'QUERY_STRING': query_string,
'HTTP_AUTHORIZATION': 'AWS X:Y:Z',
}
for header, value in headers.items():
header = 'HTTP_' + header.replace('-', '_').upper()
if header in ('HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH'):
header = header[5:]
env[header] = value
with patch('swift.common.middleware.s3api.s3request.'
'S3Request._validate_headers'), \
patch('swift.common.middleware.s3api.s3request.'
'S3Request._validate_dates'):
req = S3Request(env)
return req.environ['s3api.auth_details']['string_to_sign']
def verify(hash, path, headers):
s = canonical_string(path, headers)
self.assertEqual(hash, md5(s, usedforsecurity=False).hexdigest())
verify('6dd08c75e42190a1ce9468d1fd2eb787', '/bucket/object',
{'Content-Type': 'text/plain', 'X-Amz-Something': 'test',
'Date': 'whatever'})
verify('c8447135da232ae7517328f3429df481', '/bucket/object',
{'Content-Type': 'text/plain', 'X-Amz-Something': 'test'})
verify('bf49304103a4de5c325dce6384f2a4a2', '/bucket/object',
{'content-type': 'text/plain'})
verify('be01bd15d8d47f9fe5e2d9248cc6f180', '/bucket/object', {})
verify('e9ec7dca45eef3e2c7276af23135e896', '/bucket/object',
{'Content-MD5': 'somestuff'})
verify('a822deb31213ad09af37b5a7fe59e55e', '/bucket/object?acl', {})
verify('cce5dd1016595cb706c93f28d3eaa18f', '/bucket/object',
{'Content-Type': 'text/plain', 'X-Amz-A': 'test',
'X-Amz-Z': 'whatever', 'X-Amz-B': 'lalala',
'X-Amz-Y': 'lalalalalalala'})
verify('7506d97002c7d2de922cc0ec34af8846', '/bucket/object',
{'Content-Type': None, 'X-Amz-Something': 'test'})
verify('28f76d6162444a193b612cd6cb20e0be', '/bucket/object',
{'Content-Type': None,
'X-Amz-Date': 'Mon, 11 Jul 2011 10:52:57 +0000',
'Date': 'Tue, 12 Jul 2011 10:52:57 +0000'})
verify('ed6971e3eca5af4ee361f05d7c272e49', '/bucket/object',
{'Content-Type': None,
'Date': 'Tue, 12 Jul 2011 10:52:57 +0000'})
verify('41ecd87e7329c33fea27826c1c9a6f91', '/bucket/object?cors', {})
verify('d91b062f375d8fab407d6dab41fd154e', '/bucket/object?tagging',
{})
verify('ebab878a96814b30eb178e27efb3973f', '/bucket/object?restore',
{})
verify('f6bf1b2d92b054350d3679d28739fc69', '/bucket/object?'
'response-cache-control&response-content-disposition&'
'response-content-encoding&response-content-language&'
'response-content-type&response-expires', {})
str1 = canonical_string('/', headers={'Content-Type': None,
'X-Amz-Something': 'test'})
str2 = canonical_string('/', headers={'Content-Type': '',
'X-Amz-Something': 'test'})
str3 = canonical_string('/', headers={'X-Amz-Something': 'test'})
self.assertEqual(str1, str2)
self.assertEqual(str2, str3)
# Note that boto does not do proper stripping (as of 2.42.0).
# These were determined by examining the StringToSignBytes element of
# resulting SignatureDoesNotMatch errors from AWS.
str1 = canonical_string('/', {'Content-Type': 'text/plain',
'Content-MD5': '##'})
str2 = canonical_string('/', {'Content-Type': '\x01\x02text/plain',
'Content-MD5': '\x1f ##'})
str3 = canonical_string('/', {'Content-Type': 'text/plain \x10',
'Content-MD5': '##\x18'})
self.assertEqual(str1, str2)
self.assertEqual(str2, str3)
def test_signed_urls_expired(self):
expire = '1000000000'
req = Request.blank('/bucket/object?Signature=X&Expires=%s&'
'AWSAccessKeyId=test:tester' % expire,
environ={'REQUEST_METHOD': 'GET'},
headers={'Date': self.get_date_header()})
req.headers['Date'] = datetime.utcnow()
req.content_type = 'text/plain'
status, headers, body = self.call_s3api(req)
self.assertEqual(self._get_error_code(body), 'AccessDenied')
def test_signed_urls(self):
# Set expire to the last 32-bit timestamp value.
# This number can't be higher, because it breaks tests on 32-bit systems.
expire = '2147483647' # 19 Jan 2038 03:14:07
utc_date = datetime.utcnow()
req = Request.blank('/bucket/object?Signature=X&Expires=%s&'
'AWSAccessKeyId=test:tester&Timestamp=%s' %
(expire, utc_date.isoformat().rsplit('.')[0]),
environ={'REQUEST_METHOD': 'GET'},
headers={'Date': self.get_date_header()})
req.content_type = 'text/plain'
status, headers, body = self.call_s3api(req)
self.assertEqual(status.split()[0], '200')
self.assertIn('swift.backend_path', req.environ)
self.assertEqual('/v1/AUTH_test/bucket/object',
req.environ['swift.backend_path'])
for _, path, headers in self.swift.calls_with_headers:
self.assertNotIn('Authorization', headers)
def test_signed_urls_no_timestamp(self):
expire = '2147483647' # 19 Jan 2038 03:14:07
req = Request.blank('/bucket/object?Signature=X&Expires=%s&'
'AWSAccessKeyId=test:tester' % expire,
environ={'REQUEST_METHOD': 'GET'})
req.content_type = 'text/plain'
status, headers, body = self.call_s3api(req)
# Curiously, S3 doesn't actually verify any x-amz-date/Date headers
# for signed-URL access, and it doesn't check the timestamp either.
self.assertEqual(status.split()[0], '200')
self.assertIn('swift.backend_path', req.environ)
self.assertEqual('/v1/AUTH_test/bucket/object',
req.environ['swift.backend_path'])
for _, _, headers in self.swift.calls_with_headers:
self.assertNotIn('Authorization', headers)
def test_signed_urls_invalid_expire(self):
expire = 'invalid'
req = Request.blank('/bucket/object?Signature=X&Expires=%s&'
'AWSAccessKeyId=test:tester' % expire,
environ={'REQUEST_METHOD': 'GET'},
headers={'Date': self.get_date_header()})
req.headers['Date'] = datetime.utcnow()
req.content_type = 'text/plain'
status, headers, body = self.call_s3api(req)
self.assertEqual(self._get_error_code(body), 'AccessDenied')
def test_signed_urls_no_sign(self):
expire = '2147483647' # 19 Jan 2038 03:14:07
req = Request.blank('/bucket/object?Expires=%s&'
'AWSAccessKeyId=test:tester' % expire,
environ={'REQUEST_METHOD': 'GET'},
headers={'Date': self.get_date_header()})
req.headers['Date'] = datetime.utcnow()
req.content_type = 'text/plain'
status, headers, body = self.call_s3api(req)
self.assertEqual(self._get_error_code(body), 'AccessDenied')
def test_signed_urls_no_access(self):
expire = '2147483647' # 19 Jan 2038 03:14:07
req = Request.blank('/bucket/object?Expires=%s&'
'AWSAccessKeyId=' % expire,
environ={'REQUEST_METHOD': 'GET'})
req.headers['Date'] = datetime.utcnow()
req.content_type = 'text/plain'
status, headers, body = self.call_s3api(req)
self.assertEqual(self._get_error_code(body), 'AccessDenied')
def test_signed_urls_v4(self):
req = Request.blank(
'/bucket/object'
'?X-Amz-Algorithm=AWS4-HMAC-SHA256'
'&X-Amz-Credential=test:tester/%s/us-east-1/s3/aws4_request'
'&X-Amz-Date=%s'
'&X-Amz-Expires=1000'
'&X-Amz-SignedHeaders=host'
'&X-Amz-Signature=X' % (
self.get_v4_amz_date_header().split('T', 1)[0],
self.get_v4_amz_date_header()),
headers={'Date': self.get_date_header()},
environ={'REQUEST_METHOD': 'GET'})
req.content_type = 'text/plain'
status, headers, body = self.call_s3api(req)
self.assertIn('swift.backend_path', req.environ)
self.assertEqual('/v1/AUTH_test/bucket/object',
req.environ['swift.backend_path'])
self.assertEqual(status.split()[0], '200', body)
for _, _, headers in self.swift.calls_with_headers:
self.assertNotIn('Authorization', headers)
self.assertNotIn('X-Auth-Token', headers)
def test_signed_urls_v4_bad_credential(self):
def test(credential, message, extra=b''):
req = Request.blank(
'/bucket/object'
'?X-Amz-Algorithm=AWS4-HMAC-SHA256'
'&X-Amz-Credential=%s'
'&X-Amz-Date=%s'
'&X-Amz-Expires=1000'
'&X-Amz-SignedHeaders=host'
'&X-Amz-Signature=X' % (
credential,
self.get_v4_amz_date_header()),
headers={'Date': self.get_date_header()},
environ={'REQUEST_METHOD': 'GET'})
req.content_type = 'text/plain'
status, headers, body = self.call_s3api(req)
self.assertEqual(status.split()[0], '400', body)
self.assertEqual(self._get_error_code(body),
'AuthorizationQueryParametersError')
self.assertEqual(self._get_error_message(body), message)
self.assertIn(extra, body)
dt = self.get_v4_amz_date_header().split('T', 1)[0]
test('test:tester/not-a-date/us-east-1/s3/aws4_request',
'Invalid credential date "not-a-date". This date is not the same '
'as X-Amz-Date: "%s".' % dt)
test('test:tester/%s/us-west-1/s3/aws4_request' % dt,
"Error parsing the X-Amz-Credential parameter; the region "
"'us-west-1' is wrong; expecting 'us-east-1'",
b'<Region>us-east-1</Region>')
test('test:tester/%s/us-east-1/not-s3/aws4_request' % dt,
'Error parsing the X-Amz-Credential parameter; incorrect service '
'"not-s3". This endpoint belongs to "s3".')
test('test:tester/%s/us-east-1/s3/not-aws4_request' % dt,
'Error parsing the X-Amz-Credential parameter; incorrect '
'terminal "not-aws4_request". This endpoint uses "aws4_request".')
def test_signed_urls_v4_missing_x_amz_date(self):
req = Request.blank(
'/bucket/object'
'?X-Amz-Algorithm=AWS4-HMAC-SHA256'
'&X-Amz-Credential=test/20T20Z/us-east-1/s3/aws4_request'
'&X-Amz-Expires=1000'
'&X-Amz-SignedHeaders=host'
'&X-Amz-Signature=X',
environ={'REQUEST_METHOD': 'GET'})
req.content_type = 'text/plain'
status, headers, body = self.call_s3api(req)
self.assertEqual(self._get_error_code(body), 'AccessDenied')
def test_signed_urls_v4_invalid_algorithm(self):
req = Request.blank(
'/bucket/object'
'?X-Amz-Algorithm=FAKE'
'&X-Amz-Credential=test/20T20Z/us-east-1/s3/aws4_request'
'&X-Amz-Date=%s'
'&X-Amz-Expires=1000'
'&X-Amz-SignedHeaders=host'
'&X-Amz-Signature=X' %
self.get_v4_amz_date_header(),
environ={'REQUEST_METHOD': 'GET'})
req.content_type = 'text/plain'
status, headers, body = self.call_s3api(req)
self.assertEqual(self._get_error_code(body), 'InvalidArgument')
def test_signed_urls_v4_missing_signed_headers(self):
req = Request.blank(
'/bucket/object'
'?X-Amz-Algorithm=AWS4-HMAC-SHA256'
'&X-Amz-Credential=test/20T20Z/us-east-1/s3/aws4_request'
'&X-Amz-Date=%s'
'&X-Amz-Expires=1000'
'&X-Amz-Signature=X' %
self.get_v4_amz_date_header(),
environ={'REQUEST_METHOD': 'GET'})
req.content_type = 'text/plain'
status, headers, body = self.call_s3api(req)
self.assertEqual(self._get_error_code(body),
'AuthorizationHeaderMalformed')
def test_signed_urls_v4_invalid_credentials(self):
req = Request.blank('/bucket/object'
'?X-Amz-Algorithm=AWS4-HMAC-SHA256'
'&X-Amz-Credential=test'
'&X-Amz-Date=%s'
'&X-Amz-Expires=1000'
'&X-Amz-SignedHeaders=host'
'&X-Amz-Signature=X' %
self.get_v4_amz_date_header(),
environ={'REQUEST_METHOD': 'GET'})
req.content_type = 'text/plain'
status, headers, body = self.call_s3api(req)
self.assertEqual(self._get_error_code(body), 'AccessDenied')
def test_signed_urls_v4_missing_signature(self):
req = Request.blank(
'/bucket/object'
'?X-Amz-Algorithm=AWS4-HMAC-SHA256'
'&X-Amz-Credential=test/20T20Z/us-east-1/s3/aws4_request'
'&X-Amz-Date=%s'
'&X-Amz-Expires=1000'
'&X-Amz-SignedHeaders=host' %
self.get_v4_amz_date_header(),
environ={'REQUEST_METHOD': 'GET'})
req.content_type = 'text/plain'
status, headers, body = self.call_s3api(req)
self.assertEqual(self._get_error_code(body), 'AccessDenied')
def test_bucket_virtual_hosted_style(self):
req = Request.blank('/',
environ={'HTTP_HOST': 'bucket.localhost:80',
'REQUEST_METHOD': 'HEAD',
'HTTP_AUTHORIZATION':
'AWS test:tester:hmac'},
headers={'Date': self.get_date_header()})
status, headers, body = self.call_s3api(req)
self.assertEqual(status.split()[0], '200')
self.assertIn('swift.backend_path', req.environ)
self.assertEqual('/v1/AUTH_test/bucket',
req.environ['swift.backend_path'])
def test_object_virtual_hosted_style(self):
req = Request.blank('/object',
environ={'HTTP_HOST': 'bucket.localhost:80',
'REQUEST_METHOD': 'HEAD',
'HTTP_AUTHORIZATION':
'AWS test:tester:hmac'},
headers={'Date': self.get_date_header()})
status, headers, body = self.call_s3api(req)
self.assertEqual(status.split()[0], '200')
self.assertIn('swift.backend_path', req.environ)
self.assertEqual('/v1/AUTH_test/bucket/object',
req.environ['swift.backend_path'])
def test_token_generation(self):
self.swift.register('HEAD', '/v1/AUTH_test/bucket+segments/'
'object/123456789abcdef',
swob.HTTPOk, {}, None)
self.swift.register('PUT', '/v1/AUTH_test/bucket+segments/'
'object/123456789abcdef/1',
swob.HTTPCreated, {}, None)
req = Request.blank('/bucket/object?uploadId=123456789abcdef'
'&partNumber=1',
environ={'REQUEST_METHOD': 'PUT'})
req.headers['Authorization'] = 'AWS test:tester:hmac'
date_header = self.get_date_header()
req.headers['Date'] = date_header
with mock.patch('swift.common.middleware.s3api.s3request.'
'S3Request.check_signature') as mock_cs:
status, headers, body = self.call_s3api(req)
self.assertIn('swift.backend_path', req.environ)
self.assertEqual(
'/v1/AUTH_test/bucket+segments/object/123456789abcdef/1',
req.environ['swift.backend_path'])
_, _, headers = self.swift.calls_with_headers[-1]
self.assertEqual(req.environ['s3api.auth_details'], {
'access_key': 'test:tester',
'signature': 'hmac',
'string_to_sign': b'\n'.join([
b'PUT', b'', b'', date_header.encode('ascii'),
b'/bucket/object?partNumber=1&uploadId=123456789abcdef']),
'check_signature': mock_cs})
def test_non_ascii_user(self):
self.swift.register('HEAD', '/v1/AUTH_test/bucket+segments/'
'object/123456789abcdef',
swob.HTTPOk, {}, None)
self.swift.register('PUT', '/v1/AUTH_test/bucket+segments/'
'object/123456789abcdef/1',
swob.HTTPCreated, {}, None)
req = Request.blank('/bucket/object?uploadId=123456789abcdef'
'&partNumber=1',
environ={'REQUEST_METHOD': 'PUT'})
# NB: WSGI string for a snowman
req.headers['Authorization'] = 'AWS test:\xe2\x98\x83:sig'
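# ('\xe2\x98\x83' is the UTF-8 encoding of U+2603 SNOWMAN, i.e. the
# native-str form in which a WSGI header carries it.)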
date_header = self.get_date_header()
req.headers['Date'] = date_header
with mock.patch('swift.common.middleware.s3api.s3request.'
'S3Request.check_signature') as mock_cs:
status, headers, body = self.call_s3api(req)
self.assertIn('swift.backend_path', req.environ)
self.assertEqual(
'/v1/AUTH_test/bucket+segments/object/123456789abcdef/1',
req.environ['swift.backend_path'])
_, _, headers = self.swift.calls_with_headers[-1]
self.assertEqual(req.environ['s3api.auth_details'], {
'access_key': (u'test:\N{SNOWMAN}'.encode('utf-8') if six.PY2
else u'test:\N{SNOWMAN}'),
'signature': 'sig',
'string_to_sign': b'\n'.join([
b'PUT', b'', b'', date_header.encode('ascii'),
b'/bucket/object?partNumber=1&uploadId=123456789abcdef']),
'check_signature': mock_cs})
def test_invalid_uri(self):
req = Request.blank('/bucket/invalid\xffname',
environ={'REQUEST_METHOD': 'GET'},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header()})
status, headers, body = self.call_s3api(req)
self.assertEqual(self._get_error_code(body), 'InvalidURI')
def test_object_create_bad_md5_unreadable(self):
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': 'PUT',
'HTTP_AUTHORIZATION': 'AWS X:Y:Z',
'HTTP_CONTENT_MD5': '#'},
headers={'Date': self.get_date_header()})
status, headers, body = self.call_s3api(req)
self.assertEqual(self._get_error_code(body), 'InvalidDigest')
def test_object_create_bad_md5_too_short(self):
too_short_digest = md5(b'hey', usedforsecurity=False).digest()[:-1]
md5_str = base64.b64encode(too_short_digest).strip()
if not six.PY2:
md5_str = md5_str.decode('ascii')
req = Request.blank(
'/bucket/object',
environ={'REQUEST_METHOD': 'PUT',
'HTTP_AUTHORIZATION': 'AWS X:Y:Z',
'HTTP_CONTENT_MD5': md5_str},
headers={'Date': self.get_date_header()})
status, headers, body = self.call_s3api(req)
self.assertEqual(self._get_error_code(body), 'InvalidDigest')
def test_object_create_bad_md5_bad_padding(self):
too_short_digest = md5(b'hey', usedforsecurity=False).digest()
md5_str = base64.b64encode(too_short_digest).strip(b'=\n')
if not six.PY2:
md5_str = md5_str.decode('ascii')
req = Request.blank(
'/bucket/object',
environ={'REQUEST_METHOD': 'PUT',
'HTTP_AUTHORIZATION': 'AWS X:Y:Z',
'HTTP_CONTENT_MD5': md5_str},
headers={'Date': self.get_date_header()})
status, headers, body = self.call_s3api(req)
self.assertEqual(self._get_error_code(body), 'InvalidDigest')
def test_object_create_bad_md5_too_long(self):
too_long_digest = md5(
b'hey', usedforsecurity=False).digest() + b'suffix'
md5_str = base64.b64encode(too_long_digest).strip()
if not six.PY2:
md5_str = md5_str.decode('ascii')
req = Request.blank(
'/bucket/object',
environ={'REQUEST_METHOD': 'PUT',
'HTTP_AUTHORIZATION': 'AWS X:Y:Z',
'HTTP_CONTENT_MD5': md5_str},
headers={'Date': self.get_date_header()})
status, headers, body = self.call_s3api(req)
self.assertEqual(self._get_error_code(body), 'InvalidDigest')
def test_invalid_metadata_directive(self):
req = Request.blank('/',
environ={'REQUEST_METHOD': 'GET',
'HTTP_AUTHORIZATION': 'AWS X:Y:Z',
'HTTP_X_AMZ_METADATA_DIRECTIVE':
'invalid'},
headers={'Date': self.get_date_header()})
status, headers, body = self.call_s3api(req)
self.assertEqual(self._get_error_code(body), 'InvalidArgument')
def test_invalid_storage_class(self):
req = Request.blank('/',
environ={'REQUEST_METHOD': 'GET',
'HTTP_AUTHORIZATION': 'AWS X:Y:Z',
'HTTP_X_AMZ_STORAGE_CLASS': 'INVALID'},
headers={'Date': self.get_date_header()})
status, headers, body = self.call_s3api(req)
self.assertEqual(self._get_error_code(body), 'InvalidStorageClass')
def test_invalid_ssc(self):
req = Request.blank('/',
environ={'REQUEST_METHOD': 'GET',
'HTTP_AUTHORIZATION': 'AWS X:Y:Z'},
headers={'x-amz-server-side-encryption': 'invalid',
'Date': self.get_date_header()})
status, headers, body = self.call_s3api(req)
self.assertEqual(self._get_error_code(body), 'InvalidArgument')
def _test_unsupported_header(self, header, value=None):
if value is None:
value = 'value'
req = Request.blank('/error',
environ={'REQUEST_METHOD': 'GET',
'HTTP_AUTHORIZATION': 'AWS X:Y:Z'},
headers={header: value,
'Date': self.get_date_header()})
status, headers, body = self.call_s3api(req)
self.assertEqual(self._get_error_code(body), 'NotImplemented')
def test_mfa(self):
self._test_unsupported_header('x-amz-mfa')
@mock.patch.object(registry, '_swift_admin_info', dict())
def test_server_side_encryption(self):
sse_header = 'x-amz-server-side-encryption'
self._test_unsupported_header(sse_header, 'AES256')
self._test_unsupported_header(sse_header, 'aws:kms')
registry.register_swift_info('encryption', admin=True, enabled=False)
self._test_unsupported_header(sse_header, 'AES256')
self._test_unsupported_header(sse_header, 'aws:kms')
registry.register_swift_info('encryption', admin=True, enabled=True)
# AES256 now works
self.swift.register('PUT', '/v1/AUTH_X/bucket/object',
swob.HTTPCreated, {}, None)
req = Request.blank('/bucket/object',
environ={'REQUEST_METHOD': 'PUT',
'HTTP_AUTHORIZATION': 'AWS X:Y:Z'},
headers={sse_header: 'AES256',
'Date': self.get_date_header()})
status, headers, body = self.call_s3api(req)
self.assertEqual(status, '200 OK')
self.assertIn('swift.backend_path', req.environ)
self.assertEqual('/v1/AUTH_X/bucket/object',
req.environ['swift.backend_path'])
# ...but aws:kms continues to fail
self._test_unsupported_header(sse_header, 'aws:kms')
def test_website_redirect_location(self):
self._test_unsupported_header('x-amz-website-redirect-location')
def test_aws_chunked(self):
self._test_unsupported_header('content-encoding', 'aws-chunked')
# https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html
# has a multi-encoding example:
#
# > Amazon S3 supports multiple content encodings. For example:
# >
# > Content-Encoding : aws-chunked,gzip
# > That is, you can specify your custom content-encoding when using
# > Signature Version 4 streaming API.
self._test_unsupported_header('Content-Encoding', 'aws-chunked,gzip')
# Some clients skip the content-encoding,
# such as minio-go and aws-sdk-java
self._test_unsupported_header('x-amz-content-sha256',
'STREAMING-AWS4-HMAC-SHA256-PAYLOAD')
self._test_unsupported_header('x-amz-decoded-content-length')
def test_object_tagging(self):
self._test_unsupported_header('x-amz-tagging')
def _test_unsupported_resource(self, resource):
req = Request.blank('/error?' + resource,
environ={'REQUEST_METHOD': 'GET',
'HTTP_AUTHORIZATION': 'AWS X:Y:Z'},
headers={'Date': self.get_date_header()})
status, headers, body = self.call_s3api(req)
self.assertEqual(self._get_error_code(body), 'NotImplemented')
def test_notification(self):
self._test_unsupported_resource('notification')
def test_policy(self):
self._test_unsupported_resource('policy')
def test_request_payment(self):
self._test_unsupported_resource('requestPayment')
def test_torrent(self):
self._test_unsupported_resource('torrent')
def test_website(self):
self._test_unsupported_resource('website')
def test_cors(self):
self._test_unsupported_resource('cors')
def test_tagging(self):
req = Request.blank('/bucket?tagging',
environ={'REQUEST_METHOD': 'GET'},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header()})
status, headers, body = self.call_s3api(req)
self.assertEqual(status.split()[0], '200')
req = Request.blank('/bucket?tagging',
environ={'REQUEST_METHOD': 'PUT'},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header()})
status, headers, body = self.call_s3api(req)
self.assertEqual(self._get_error_code(body), 'NotImplemented')
req = Request.blank('/bucket?tagging',
environ={'REQUEST_METHOD': 'DELETE'},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header()})
status, headers, body = self.call_s3api(req)
self.assertEqual(self._get_error_code(body), 'NotImplemented')
def test_restore(self):
self._test_unsupported_resource('restore')
def test_unsupported_method(self):
req = Request.blank('/bucket?acl',
environ={'REQUEST_METHOD': 'POST'},
headers={'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header()})
status, headers, body = self.call_s3api(req)
elem = fromstring(body, 'Error')
self.assertEqual(elem.find('./Code').text, 'MethodNotAllowed')
self.assertEqual(elem.find('./Method').text, 'POST')
self.assertEqual(elem.find('./ResourceType').text, 'ACL')
@mock.patch.object(registry, '_sensitive_headers', set())
@mock.patch.object(registry, '_sensitive_params', set())
def test_registered_sensitive_info(self):
self.assertFalse(registry.get_sensitive_headers())
self.assertFalse(registry.get_sensitive_params())
filter_factory(self.conf)
sensitive = registry.get_sensitive_headers()
self.assertIn('authorization', sensitive)
sensitive = registry.get_sensitive_params()
self.assertIn('X-Amz-Signature', sensitive)
self.assertIn('Signature', sensitive)
@mock.patch.object(registry, '_swift_info', dict())
def test_registered_defaults(self):
conf_from_file = {k: str(v) for k, v in self.conf.items()}
filter_factory(conf_from_file)
swift_info = registry.get_swift_info()
self.assertTrue('s3api' in swift_info)
registered_keys = [
'max_bucket_listing', 'max_parts_listing', 'max_upload_part_num',
'max_multi_delete_objects', 'allow_multipart_uploads',
'min_segment_size', 's3_acl']
expected = dict((k, self.conf[k]) for k in registered_keys)
self.assertEqual(expected, swift_info['s3api'])
def test_check_pipeline(self):
with patch("swift.common.middleware.s3api.s3api.loadcontext"), \
patch("swift.common.middleware.s3api.s3api.PipelineWrapper") \
as pipeline:
# cause check_pipeline to not return early...
self.conf['__file__'] = ''
# ...and enable pipeline auth checking
self.s3api.conf.auth_pipeline_check = True
pipeline.return_value = 's3api tempauth proxy-server'
self.s3api.check_pipeline(self.conf)
# This *should* still work; authtoken will remove our auth details,
# but the X-Auth-Token we drop in will remain
# if we found one in the response
pipeline.return_value = 's3api s3token authtoken keystoneauth ' \
'proxy-server'
self.s3api.check_pipeline(self.conf)
# This should work now; no more doubled-up requests to keystone!
pipeline.return_value = 's3api s3token keystoneauth proxy-server'
self.s3api.check_pipeline(self.conf)
pipeline.return_value = 's3api swauth proxy-server'
self.s3api.check_pipeline(self.conf)
# Note that authtoken would need to have delay_auth_decision=True
pipeline.return_value = 's3api authtoken s3token keystoneauth ' \
'proxy-server'
self.s3api.check_pipeline(self.conf)
pipeline.return_value = 's3api proxy-server'
with self.assertRaises(ValueError) as cm:
self.s3api.check_pipeline(self.conf)
self.assertIn('expected auth between s3api and proxy-server',
cm.exception.args[0])
pipeline.return_value = 'proxy-server'
with self.assertRaises(ValueError) as cm:
self.s3api.check_pipeline(self.conf)
self.assertIn("missing filters ['s3api']",
cm.exception.args[0])
def test_s3api_initialization_with_disabled_pipeline_check(self):
with patch("swift.common.middleware.s3api.s3api.loadcontext"), \
patch("swift.common.middleware.s3api.s3api.PipelineWrapper") \
as pipeline:
# cause check_pipeline to not return early...
self.conf['__file__'] = ''
# ...but disable pipeline auth checking
self.s3api.conf.auth_pipeline_check = False
pipeline.return_value = 's3api tempauth proxy-server'
self.s3api.check_pipeline(self.conf)
pipeline.return_value = 's3api s3token authtoken keystoneauth ' \
'proxy-server'
self.s3api.check_pipeline(self.conf)
pipeline.return_value = 's3api swauth proxy-server'
self.s3api.check_pipeline(self.conf)
pipeline.return_value = 's3api authtoken s3token keystoneauth ' \
'proxy-server'
self.s3api.check_pipeline(self.conf)
pipeline.return_value = 's3api proxy-server'
self.s3api.check_pipeline(self.conf)
pipeline.return_value = 'proxy-server'
with self.assertRaises(ValueError):
self.s3api.check_pipeline(self.conf)
def test_signature_v4(self):
environ = {
'REQUEST_METHOD': 'GET'}
authz_header = 'AWS4-HMAC-SHA256 ' + ', '.join([
'Credential=test:tester/%s/us-east-1/s3/aws4_request' %
self.get_v4_amz_date_header().split('T', 1)[0],
'SignedHeaders=host;x-amz-date',
'Signature=X',
])
headers = {
'Authorization': authz_header,
'X-Amz-Date': self.get_v4_amz_date_header(),
'X-Amz-Content-SHA256': '0123456789'}
req = Request.blank('/bucket/object', environ=environ, headers=headers)
req.content_type = 'text/plain'
status, headers, body = self.call_s3api(req)
self.assertEqual(status.split()[0], '200', body)
self.assertIn('swift.backend_path', req.environ)
self.assertEqual('/v1/AUTH_test/bucket/object',
req.environ['swift.backend_path'])
for _, _, headers in self.swift.calls_with_headers:
self.assertEqual(authz_header, headers['Authorization'])
self.assertNotIn('X-Auth-Token', headers)
def test_signature_v4_no_date(self):
environ = {
'REQUEST_METHOD': 'GET'}
headers = {
'Authorization':
'AWS4-HMAC-SHA256 '
'Credential=test:tester/20130524/us-east-1/s3/aws4_request, '
'SignedHeaders=host;range;x-amz-date,'
'Signature=X',
'X-Amz-Content-SHA256': '0123456789'}
req = Request.blank('/bucket/object', environ=environ, headers=headers)
req.content_type = 'text/plain'
status, headers, body = self.call_s3api(req)
self.assertEqual(status.split()[0], '403')
self.assertEqual(self._get_error_code(body), 'AccessDenied')
def test_signature_v4_no_payload(self):
environ = {
'REQUEST_METHOD': 'GET'}
headers = {
'Authorization':
'AWS4-HMAC-SHA256 '
'Credential=test:tester/%s/us-east-1/s3/aws4_request, '
'SignedHeaders=host;x-amz-date,'
'Signature=X' % self.get_v4_amz_date_header().split('T', 1)[0],
'X-Amz-Date': self.get_v4_amz_date_header()}
req = Request.blank('/bucket/object', environ=environ, headers=headers)
req.content_type = 'text/plain'
status, headers, body = self.call_s3api(req)
self.assertEqual(status.split()[0], '400')
self.assertEqual(self._get_error_code(body), 'InvalidRequest')
self.assertEqual(
self._get_error_message(body),
'Missing required header for this request: x-amz-content-sha256')
def test_signature_v4_bad_authorization_string(self):
def test(auth_str, error, msg, extra=b''):
environ = {
'REQUEST_METHOD': 'GET'}
headers = {
'Authorization': auth_str,
'X-Amz-Date': self.get_v4_amz_date_header(),
'X-Amz-Content-SHA256': '0123456789'}
req = Request.blank('/bucket/object', environ=environ,
headers=headers)
req.content_type = 'text/plain'
status, headers, body = self.call_s3api(req)
self.assertEqual(self._get_error_code(body), error)
self.assertEqual(self._get_error_message(body), msg)
self.assertIn(extra, body)
auth_str = ('AWS4-HMAC-SHA256 '
'SignedHeaders=host;x-amz-date,'
'Signature=X')
test(auth_str, 'AccessDenied', 'Access Denied.')
auth_str = (
'AWS4-HMAC-SHA256 '
'Credential=test:tester/20130524/us-east-1/s3/aws4_request, '
'Signature=X')
test(auth_str, 'AuthorizationHeaderMalformed',
'The authorization header is malformed; the authorization '
'header requires three components: Credential, SignedHeaders, '
'and Signature.')
auth_str = ('AWS4-HMAC-SHA256 '
'Credential=test:tester/%s/us-west-2/s3/aws4_request, '
'Signature=X, SignedHeaders=host;x-amz-date' %
self.get_v4_amz_date_header().split('T', 1)[0])
test(auth_str, 'AuthorizationHeaderMalformed',
"The authorization header is malformed; "
"the region 'us-west-2' is wrong; expecting 'us-east-1'",
b'<Region>us-east-1</Region>')
auth_str = ('AWS4-HMAC-SHA256 '
'Credential=test:tester/%s/us-east-1/not-s3/aws4_request, '
'Signature=X, SignedHeaders=host;x-amz-date' %
self.get_v4_amz_date_header().split('T', 1)[0])
test(auth_str, 'AuthorizationHeaderMalformed',
'The authorization header is malformed; '
'incorrect service "not-s3". This endpoint belongs to "s3".')
auth_str = ('AWS4-HMAC-SHA256 '
'Credential=test:tester/%s/us-east-1/s3/not-aws4_request, '
'Signature=X, SignedHeaders=host;x-amz-date' %
self.get_v4_amz_date_header().split('T', 1)[0])
test(auth_str, 'AuthorizationHeaderMalformed',
'The authorization header is malformed; '
'incorrect terminal "not-aws4_request". '
'This endpoint uses "aws4_request".')
auth_str = (
'AWS4-HMAC-SHA256 '
'Credential=test:tester/20130524/us-east-1/s3/aws4_request, '
'SignedHeaders=host;x-amz-date')
test(auth_str, 'AccessDenied', 'Access Denied.')
def test_canonical_string_v4(self):
def _get_req(path, environ):
if '?' in path:
path, query_string = path.split('?', 1)
else:
query_string = ''
env = {
'REQUEST_METHOD': 'GET',
'PATH_INFO': path,
'QUERY_STRING': query_string,
'HTTP_DATE': 'Mon, 09 Sep 2011 23:36:00 GMT',
'HTTP_X_AMZ_CONTENT_SHA256':
'e3b0c44298fc1c149afbf4c8996fb924'
'27ae41e4649b934ca495991b7852b855',
'HTTP_AUTHORIZATION':
'AWS4-HMAC-SHA256 '
'Credential=X:Y/20110909/us-east-1/s3/aws4_request, '
'SignedHeaders=content-md5;content-type;date, '
'Signature=x',
}
fake_time = calendar.timegm((2011, 9, 9, 23, 36, 0))
env.update(environ)
with patch('swift.common.middleware.s3api.s3request.'
'S3Request._validate_headers'), \
patch('swift.common.middleware.s3api.utils.time.time',
return_value=fake_time):
req = SigV4Request(env, conf=self.s3api.conf)
return req
def canonical_string(path, environ):
return _get_req(path, environ)._canonical_request()
def verify(hash_val, path, environ):
# See http://docs.aws.amazon.com/general/latest/gr
# /signature-v4-test-suite.html for where location, service, and
# signing key came from
with patch.object(self.s3api.conf, 'location', 'us-east-1'), \
patch.object(swift.common.middleware.s3api.s3request,
'SERVICE', 'host'):
req = _get_req(path, environ)
hash_in_sts = req._string_to_sign().split(b'\n')[3]
self.assertEqual(hash_val, hash_in_sts.decode('ascii'))
self.assertTrue(req.check_signature(
'wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY'))
# All of the following data comes from Amazon's aws4_testsuite:
# http://docs.aws.amazon.com/general/latest/gr/samples
# /aws4_testsuite.zip
# Each *expected* hash value is the 4th line in <test-name>.sts in the
# test suite.
# get-vanilla
env = {
'HTTP_AUTHORIZATION': (
'AWS4-HMAC-SHA256 '
'Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, '
'SignedHeaders=date;host, '
'Signature=b27ccfbfa7df52a200ff74193ca6e32d'
'4b48b8856fab7ebf1c595d0670a7e470'),
'HTTP_HOST': 'host.foo.com'}
verify('366b91fb121d72a00f46bbe8d395f53a'
'102b06dfb7e79636515208ed3fa606b1',
'/', env)
# get-header-value-trim
env = {
'REQUEST_METHOD': 'POST',
'HTTP_AUTHORIZATION': (
'AWS4-HMAC-SHA256 '
'Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, '
'SignedHeaders=date;host;p, '
'Signature=debf546796015d6f6ded8626f5ce9859'
'7c33b47b9164cf6b17b4642036fcb592'),
'HTTP_HOST': 'host.foo.com',
'HTTP_P': 'phfft'}
verify('dddd1902add08da1ac94782b05f9278c'
'08dc7468db178a84f8950d93b30b1f35',
'/', env)
# get-utf8 (not exact)
env = {
'HTTP_AUTHORIZATION': (
'AWS4-HMAC-SHA256 '
'Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, '
'SignedHeaders=date;host, '
'Signature=8d6634c189aa8c75c2e51e106b6b5121'
'bed103fdb351f7d7d4381c738823af74'),
'HTTP_HOST': 'host.foo.com',
'RAW_PATH_INFO': '/%E1%88%B4'}
# This might look odd because S3 itself does not accept multi-byte utf-8
# bucket names (the bucket-in-host naming constraint). However,
# aws4_testsuite only provides a sample hash that uses a utf-8 *bucket*
# name (the request was probably intended for an AWS service other than
# S3), so to exercise utf-8 handling we skip bucket name validation in
# the following test.
# NOTE: eventlet's PATH_INFO is unquoted
with patch('swift.common.middleware.s3api.s3request.'
'validate_bucket_name'):
verify('27ba31df5dbc6e063d8f87d62eb07143'
'f7f271c5330a917840586ac1c85b6f6b',
swob.wsgi_unquote('/%E1%88%B4'), env)
# get-vanilla-query-order-key
env = {
'HTTP_AUTHORIZATION': (
'AWS4-HMAC-SHA256 '
'Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, '
'SignedHeaders=date;host, '
'Signature=0dc122f3b28b831ab48ba65cb47300de'
'53fbe91b577fe113edac383730254a3b'),
'HTTP_HOST': 'host.foo.com'}
verify('2f23d14fe13caebf6dfda346285c6d9c'
'14f49eaca8f5ec55c627dd7404f7a727',
'/?a=foo&b=foo', env)
# post-header-value-case
env = {
'REQUEST_METHOD': 'POST',
'HTTP_AUTHORIZATION': (
'AWS4-HMAC-SHA256 '
'Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, '
'SignedHeaders=date;host;zoo, '
'Signature=273313af9d0c265c531e11db70bbd653'
'f3ba074c1009239e8559d3987039cad7'),
'HTTP_HOST': 'host.foo.com',
'HTTP_ZOO': 'ZOOBAR'}
verify('3aae6d8274b8c03e2cc96fc7d6bda4b9'
'bd7a0a184309344470b2c96953e124aa',
'/', env)
# post-x-www-form-urlencoded-parameters
env = {
'REQUEST_METHOD': 'POST',
'HTTP_AUTHORIZATION': (
'AWS4-HMAC-SHA256 '
'Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, '
'SignedHeaders=date;host;content-type, '
'Signature=b105eb10c6d318d2294de9d49dd8b031'
'b55e3c3fe139f2e637da70511e9e7b71'),
'HTTP_HOST': 'host.foo.com',
'HTTP_X_AMZ_CONTENT_SHA256':
'3ba8907e7a252327488df390ed517c45'
'b96dead033600219bdca7107d1d3f88a',
'CONTENT_TYPE':
'application/x-www-form-urlencoded; charset=utf8'}
verify('c4115f9e54b5cecf192b1eaa23b8e88e'
'd8dc5391bd4fde7b3fff3d9c9fe0af1f',
'/', env)
# post-x-www-form-urlencoded
env = {
'REQUEST_METHOD': 'POST',
'HTTP_AUTHORIZATION': (
'AWS4-HMAC-SHA256 '
'Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, '
'SignedHeaders=date;host;content-type, '
'Signature=5a15b22cf462f047318703b92e6f4f38'
'884e4a7ab7b1d6426ca46a8bd1c26cbc'),
'HTTP_HOST': 'host.foo.com',
'HTTP_X_AMZ_CONTENT_SHA256':
'3ba8907e7a252327488df390ed517c45'
'b96dead033600219bdca7107d1d3f88a',
'CONTENT_TYPE':
'application/x-www-form-urlencoded'}
verify('4c5c6e4b52fb5fb947a8733982a8a5a6'
'1b14f04345cbfe6e739236c76dd48f74',
'/', env)
# Note that boto does not do proper stripping (as of 2.42.0).
# These were determined by examining the StringToSignBytes element of
# resulting SignatureDoesNotMatch errors from AWS.
str1 = canonical_string('/', {'CONTENT_TYPE': 'text/plain',
'HTTP_CONTENT_MD5': '##'})
str2 = canonical_string('/', {'CONTENT_TYPE': '\x01\x02text/plain',
'HTTP_CONTENT_MD5': '\x1f ##'})
str3 = canonical_string('/', {'CONTENT_TYPE': 'text/plain \x10',
'HTTP_CONTENT_MD5': '##\x18'})
self.assertEqual(str1, str2)
self.assertEqual(str2, str3)
def test_mixture_param_v4(self):
# now we have an Authorization header
headers = {
'Authorization':
'AWS4-HMAC-SHA256 '
'Credential=test/20130524/us-east-1/s3/aws4_request_A, '
'SignedHeaders=hostA;rangeA;x-amz-dateA,'
'Signature=X',
'X-Amz-Date': self.get_v4_amz_date_header(),
'X-Amz-Content-SHA256': '0123456789'}
# and then, different auth info (Credential, SignedHeaders, Signature)
# in query
req = Request.blank(
'/bucket/object'
'?X-Amz-Algorithm=AWS4-HMAC-SHA256'
'&X-Amz-Credential=test/20T20Z/us-east-1/s3/aws4_requestB'
'&X-Amz-SignedHeaders=hostB'
'&X-Amz-Signature=Y',
environ={'REQUEST_METHOD': 'GET'},
headers=headers)
req.content_type = 'text/plain'
status, headers, body = self.call_s3api(req)
        # FIXME: should this fail as 400 or pass via query auth?
# for now, 403 forbidden for safety
self.assertEqual(status.split()[0], '403', body)
# But if we are missing Signature in query param
req = Request.blank(
'/bucket/object'
'?X-Amz-Algorithm=AWS4-HMAC-SHA256'
'&X-Amz-Credential=test/20T20Z/us-east-1/s3/aws4_requestB'
'&X-Amz-SignedHeaders=hostB',
environ={'REQUEST_METHOD': 'GET'},
headers=headers)
req.content_type = 'text/plain'
status, headers, body = self.call_s3api(req)
self.assertEqual(status.split()[0], '403', body)
def test_s3api_with_only_s3_token(self):
self.swift = FakeSwift()
self.keystone_auth = KeystoneAuth(
self.swift, {'operator_roles': 'swift-user'})
self.s3_token = S3Token(
self.keystone_auth, {'auth_uri': 'https://fakehost/identity'})
self.s3api = S3ApiMiddleware(self.s3_token, self.conf)
self.s3api.logger = debug_logger()
req = Request.blank(
'/bucket',
environ={'REQUEST_METHOD': 'PUT'},
headers={'Authorization': 'AWS access:signature',
'Date': self.get_date_header()})
self.swift.register('PUT', '/v1/AUTH_TENANT_ID/bucket',
swob.HTTPCreated, {}, None)
self.swift.register('HEAD', '/v1/AUTH_TENANT_ID',
swob.HTTPOk, {}, None)
with patch.object(self.s3_token, '_json_request') as mock_req:
mock_resp = requests.Response()
mock_resp._content = json.dumps(GOOD_RESPONSE_V2).encode('ascii')
mock_resp.status_code = 201
mock_req.return_value = mock_resp
status, headers, body = self.call_s3api(req)
self.assertEqual(body, b'')
self.assertEqual(1, mock_req.call_count)
self.assertIn('swift.backend_path', req.environ)
self.assertEqual('/v1/AUTH_TENANT_ID/bucket',
req.environ['swift.backend_path'])
def test_s3api_with_only_s3_token_v3(self):
self.swift = FakeSwift()
self.keystone_auth = KeystoneAuth(
self.swift, {'operator_roles': 'swift-user'})
self.s3_token = S3Token(
self.keystone_auth, {'auth_uri': 'https://fakehost/identity'})
self.s3api = S3ApiMiddleware(self.s3_token, self.conf)
self.s3api.logger = debug_logger()
req = Request.blank(
'/bucket',
environ={'REQUEST_METHOD': 'PUT'},
headers={'Authorization': 'AWS access:signature',
'Date': self.get_date_header()})
self.swift.register('PUT', '/v1/AUTH_PROJECT_ID/bucket',
swob.HTTPCreated, {}, None)
self.swift.register('HEAD', '/v1/AUTH_PROJECT_ID',
swob.HTTPOk, {}, None)
with patch.object(self.s3_token, '_json_request') as mock_req:
mock_resp = requests.Response()
mock_resp._content = json.dumps(GOOD_RESPONSE_V3).encode('ascii')
mock_resp.status_code = 200
mock_req.return_value = mock_resp
status, headers, body = self.call_s3api(req)
self.assertEqual(body, b'')
self.assertEqual(1, mock_req.call_count)
self.assertIn('swift.backend_path', req.environ)
self.assertEqual('/v1/AUTH_PROJECT_ID/bucket',
req.environ['swift.backend_path'])
def test_s3api_with_s3_token_and_auth_token(self):
self.swift = FakeSwift()
self.keystone_auth = KeystoneAuth(
self.swift, {'operator_roles': 'swift-user'})
self.auth_token = AuthProtocol(
self.keystone_auth, {'delay_auth_decision': 'True'})
self.s3_token = S3Token(
self.auth_token, {'auth_uri': 'https://fakehost/identity'})
self.s3api = S3ApiMiddleware(self.s3_token, self.conf)
self.s3api.logger = debug_logger()
req = Request.blank(
'/bucket',
environ={'REQUEST_METHOD': 'PUT'},
headers={'Authorization': 'AWS access:signature',
'Date': self.get_date_header()})
self.swift.register('PUT', '/v1/AUTH_TENANT_ID/bucket',
swob.HTTPCreated, {}, None)
self.swift.register('HEAD', '/v1/AUTH_TENANT_ID',
swob.HTTPOk, {}, None)
with patch.object(self.s3_token, '_json_request') as mock_req:
with patch.object(self.auth_token,
'_do_fetch_token') as mock_fetch:
# sanity check
self.assertIn('id', GOOD_RESPONSE_V2['access']['token'])
mock_resp = requests.Response()
mock_resp._content = json.dumps(
GOOD_RESPONSE_V2).encode('ascii')
mock_resp.status_code = 201
mock_req.return_value = mock_resp
mock_access_info = AccessInfoV2(GOOD_RESPONSE_V2)
mock_access_info.will_expire_soon = \
lambda stale_duration: False
mock_fetch.return_value = (MagicMock(), mock_access_info)
status, headers, body = self.call_s3api(req)
# Even though s3token got a token back from keystone, we drop
# it on the floor, resulting in a 401 Unauthorized at
# `swift.common.middleware.keystoneauth` because
# keystonemiddleware's auth_token strips out all auth headers,
# significantly 'X-Identity-Status'. Without a token, it then
# sets 'X-Identity-Status: Invalid' and never contacts
# Keystone.
self.assertEqual('403 Forbidden', status)
self.assertIn('swift.backend_path', req.environ)
self.assertEqual('/v1/AUTH_TENANT_ID/bucket',
req.environ['swift.backend_path'])
self.assertEqual(1, mock_req.call_count)
# it never even tries to contact keystone
self.assertEqual(0, mock_fetch.call_count)
def test_s3api_with_only_s3_token_in_s3acl(self):
self.swift = FakeSwift()
self.keystone_auth = KeystoneAuth(
self.swift, {'operator_roles': 'swift-user'})
self.s3_token = S3Token(
self.keystone_auth, {'auth_uri': 'https://fakehost/identity'})
self.conf['s3_acl'] = True
self.s3api = S3ApiMiddleware(self.s3_token, self.conf)
self.s3api.logger = debug_logger()
req = Request.blank(
'/bucket',
environ={'REQUEST_METHOD': 'PUT'},
headers={'Authorization': 'AWS access:signature',
'Date': self.get_date_header()})
self.swift.register('PUT', '/v1/AUTH_TENANT_ID/bucket',
swob.HTTPCreated, {}, None)
        # For now, s3 acl commits the bucket owner acl via POST
        # after PUT container, so we need to register the response here
self.swift.register('POST', '/v1/AUTH_TENANT_ID/bucket',
swob.HTTPNoContent, {}, None)
self.swift.register('TEST', '/v1/AUTH_TENANT_ID',
swob.HTTPMethodNotAllowed, {}, None)
with patch.object(self.s3_token, '_json_request') as mock_req:
mock_resp = requests.Response()
mock_resp._content = json.dumps(GOOD_RESPONSE_V2).encode('ascii')
mock_resp.status_code = 201
mock_req.return_value = mock_resp
status, headers, body = self.call_s3api(req)
self.assertEqual(body, b'')
self.assertIn('swift.backend_path', req.environ)
self.assertEqual('/v1/AUTH_TENANT_ID/bucket',
req.environ['swift.backend_path'])
self.assertEqual(1, mock_req.call_count)
if __name__ == '__main__':
unittest.main()
|
|
"""Iterative methods for solving linear systems"""
__all__ = ['bicg','bicgstab','cg','cgs','gmres','qmr']
import warnings
import numpy as np
from . import _iterative
from scipy.sparse.linalg._interface import LinearOperator
from .utils import make_system
from scipy._lib._util import _aligned_zeros
from scipy._lib._threadsafety import non_reentrant
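# Map NumPy dtype characters to the single-letter precision prefixes
# ('s', 'd', 'c', 'z') used to select the matching Fortran revcom routine.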
_type_conv = {'f':'s', 'd':'d', 'F':'c', 'D':'z'}
# Part of the docstring common to all iterative solvers
common_doc1 = \
"""
Parameters
----------
A : {sparse matrix, ndarray, LinearOperator}"""
common_doc2 = \
"""b : ndarray
Right hand side of the linear system. Has shape (N,) or (N,1).
Returns
-------
x : ndarray
The converged solution.
info : integer
Provides convergence information:
0 : successful exit
>0 : convergence to tolerance not achieved, number of iterations
<0 : illegal input or breakdown
Other Parameters
----------------
x0 : ndarray
Starting guess for the solution.
tol, atol : float, optional
Tolerances for convergence, ``norm(residual) <= max(tol*norm(b), atol)``.
The default for ``atol`` is ``'legacy'``, which emulates
a different legacy behavior.
.. warning::
The default value for `atol` will be changed in a future release.
For future compatibility, specify `atol` explicitly.
maxiter : integer
Maximum number of iterations. Iteration will stop after maxiter
steps even if the specified tolerance has not been achieved.
M : {sparse matrix, ndarray, LinearOperator}
Preconditioner for A. The preconditioner should approximate the
inverse of A. Effective preconditioning dramatically improves the
rate of convergence, which implies that fewer iterations are needed
to reach a given error tolerance.
callback : function
User-supplied function to call after each iteration. It is called
as callback(xk), where xk is the current solution vector.
"""
def _stoptest(residual, atol):
"""
Successful termination condition for the solvers.
"""
resid = np.linalg.norm(residual)
if resid <= atol:
return resid, 1
else:
return resid, 0
def _get_atol(tol, atol, bnrm2, get_residual, routine_name):
"""
Parse arguments for absolute tolerance in termination condition.
Parameters
----------
tol, atol : object
The arguments passed into the solver routine by user.
bnrm2 : float
2-norm of the rhs vector.
get_residual : callable
Callable ``get_residual()`` that returns the initial value of
the residual.
routine_name : str
Name of the routine.
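    Examples
    --------
    Illustrative values only (not taken from any particular solver run):
    >>> _get_atol(1e-5, 'legacy', 2.0, lambda: 1.0, 'cg')  # tol * norm(b)
    2e-05
    >>> _get_atol(1e-5, 1e-3, 2.0, lambda: 1.0, 'cg')  # max(atol, tol * norm(b))
    0.001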
"""
if atol is None:
warnings.warn("scipy.sparse.linalg.{name} called without specifying `atol`. "
"The default value will be changed in a future release. "
"For compatibility, specify a value for `atol` explicitly, e.g., "
"``{name}(..., atol=0)``, or to retain the old behavior "
"``{name}(..., atol='legacy')``".format(name=routine_name),
category=DeprecationWarning, stacklevel=4)
atol = 'legacy'
tol = float(tol)
if atol == 'legacy':
# emulate old legacy behavior
resid = get_residual()
if resid <= tol:
return 'exit'
if bnrm2 == 0:
return tol
else:
return tol * float(bnrm2)
else:
return max(float(atol), tol * float(bnrm2))
def set_docstring(header, Ainfo, footer='', atol_default='0'):
def combine(fn):
fn.__doc__ = '\n'.join((header, common_doc1,
' ' + Ainfo.replace('\n', '\n '),
common_doc2, footer))
return fn
return combine
@set_docstring('Use BIConjugate Gradient iteration to solve ``Ax = b``.',
'The real or complex N-by-N matrix of the linear system.\n'
'Alternatively, ``A`` can be a linear operator which can\n'
'produce ``Ax`` and ``A^T x`` using, e.g.,\n'
'``scipy.sparse.linalg.LinearOperator``.',
footer="""
Examples
--------
>>> from scipy.sparse import csc_matrix
>>> from scipy.sparse.linalg import bicg
>>> A = csc_matrix([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float)
>>> b = np.array([2, 4, -1], dtype=float)
>>> x, exitCode = bicg(A, b)
>>> print(exitCode) # 0 indicates successful convergence
0
>>> np.allclose(A.dot(x), b)
True
"""
)
@non_reentrant()
def bicg(A, b, x0=None, tol=1e-5, maxiter=None, M=None, callback=None, atol=None):
    A, M, x, b, postprocess = make_system(A, M, x0, b)
n = len(b)
if maxiter is None:
maxiter = n*10
matvec, rmatvec = A.matvec, A.rmatvec
psolve, rpsolve = M.matvec, M.rmatvec
ltr = _type_conv[x.dtype.char]
revcom = getattr(_iterative, ltr + 'bicgrevcom')
get_residual = lambda: np.linalg.norm(matvec(x) - b)
atol = _get_atol(tol, atol, np.linalg.norm(b), get_residual, 'bicg')
if atol == 'exit':
return postprocess(x), 0
resid = atol
ndx1 = 1
ndx2 = -1
# Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1
work = _aligned_zeros(6*n,dtype=x.dtype)
ijob = 1
info = 0
ftflag = True
iter_ = maxiter
while True:
olditer = iter_
x, iter_, resid, info, ndx1, ndx2, sclr1, sclr2, ijob = \
revcom(b, x, work, iter_, resid, info, ndx1, ndx2, ijob)
if callback is not None and iter_ > olditer:
callback(x)
slice1 = slice(ndx1-1, ndx1-1+n)
slice2 = slice(ndx2-1, ndx2-1+n)
if (ijob == -1):
if callback is not None:
callback(x)
break
elif (ijob == 1):
work[slice2] *= sclr2
work[slice2] += sclr1*matvec(work[slice1])
elif (ijob == 2):
work[slice2] *= sclr2
work[slice2] += sclr1*rmatvec(work[slice1])
elif (ijob == 3):
work[slice1] = psolve(work[slice2])
elif (ijob == 4):
work[slice1] = rpsolve(work[slice2])
elif (ijob == 5):
work[slice2] *= sclr2
work[slice2] += sclr1*matvec(x)
elif (ijob == 6):
if ftflag:
info = -1
ftflag = False
resid, info = _stoptest(work[slice1], atol)
ijob = 2
if info > 0 and iter_ == maxiter and not (resid <= atol):
# info isn't set appropriately otherwise
info = iter_
return postprocess(x), info
@set_docstring('Use BIConjugate Gradient STABilized iteration to solve '
'``Ax = b``.',
'The real or complex N-by-N matrix of the linear system.\n'
'Alternatively, ``A`` can be a linear operator which can\n'
'produce ``Ax`` using, e.g.,\n'
'``scipy.sparse.linalg.LinearOperator``.')
@non_reentrant()
def bicgstab(A, b, x0=None, tol=1e-5, maxiter=None, M=None, callback=None, atol=None):
A, M, x, b, postprocess = make_system(A, M, x0, b)
n = len(b)
if maxiter is None:
maxiter = n*10
matvec = A.matvec
psolve = M.matvec
ltr = _type_conv[x.dtype.char]
revcom = getattr(_iterative, ltr + 'bicgstabrevcom')
get_residual = lambda: np.linalg.norm(matvec(x) - b)
atol = _get_atol(tol, atol, np.linalg.norm(b), get_residual, 'bicgstab')
if atol == 'exit':
return postprocess(x), 0
resid = atol
ndx1 = 1
ndx2 = -1
# Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1
work = _aligned_zeros(7*n,dtype=x.dtype)
ijob = 1
info = 0
ftflag = True
iter_ = maxiter
while True:
olditer = iter_
x, iter_, resid, info, ndx1, ndx2, sclr1, sclr2, ijob = \
revcom(b, x, work, iter_, resid, info, ndx1, ndx2, ijob)
if callback is not None and iter_ > olditer:
callback(x)
slice1 = slice(ndx1-1, ndx1-1+n)
slice2 = slice(ndx2-1, ndx2-1+n)
if (ijob == -1):
if callback is not None:
callback(x)
break
elif (ijob == 1):
work[slice2] *= sclr2
work[slice2] += sclr1*matvec(work[slice1])
elif (ijob == 2):
work[slice1] = psolve(work[slice2])
elif (ijob == 3):
work[slice2] *= sclr2
work[slice2] += sclr1*matvec(x)
elif (ijob == 4):
if ftflag:
info = -1
ftflag = False
resid, info = _stoptest(work[slice1], atol)
ijob = 2
if info > 0 and iter_ == maxiter and not (resid <= atol):
# info isn't set appropriately otherwise
info = iter_
return postprocess(x), info
@set_docstring('Use Conjugate Gradient iteration to solve ``Ax = b``.',
'The real or complex N-by-N matrix of the linear system.\n'
'``A`` must represent a hermitian, positive definite matrix.\n'
'Alternatively, ``A`` can be a linear operator which can\n'
'produce ``Ax`` using, e.g.,\n'
'``scipy.sparse.linalg.LinearOperator``.')
@non_reentrant()
def cg(A, b, x0=None, tol=1e-5, maxiter=None, M=None, callback=None, atol=None):
A, M, x, b, postprocess = make_system(A, M, x0, b)
n = len(b)
if maxiter is None:
maxiter = n*10
matvec = A.matvec
psolve = M.matvec
ltr = _type_conv[x.dtype.char]
revcom = getattr(_iterative, ltr + 'cgrevcom')
get_residual = lambda: np.linalg.norm(matvec(x) - b)
atol = _get_atol(tol, atol, np.linalg.norm(b), get_residual, 'cg')
if atol == 'exit':
return postprocess(x), 0
resid = atol
ndx1 = 1
ndx2 = -1
# Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1
work = _aligned_zeros(4*n,dtype=x.dtype)
ijob = 1
info = 0
ftflag = True
iter_ = maxiter
while True:
olditer = iter_
x, iter_, resid, info, ndx1, ndx2, sclr1, sclr2, ijob = \
revcom(b, x, work, iter_, resid, info, ndx1, ndx2, ijob)
if callback is not None and iter_ > olditer:
callback(x)
slice1 = slice(ndx1-1, ndx1-1+n)
slice2 = slice(ndx2-1, ndx2-1+n)
if (ijob == -1):
if callback is not None:
callback(x)
break
elif (ijob == 1):
work[slice2] *= sclr2
work[slice2] += sclr1*matvec(work[slice1])
elif (ijob == 2):
work[slice1] = psolve(work[slice2])
elif (ijob == 3):
work[slice2] *= sclr2
work[slice2] += sclr1*matvec(x)
elif (ijob == 4):
if ftflag:
info = -1
ftflag = False
resid, info = _stoptest(work[slice1], atol)
if info == 1 and iter_ > 1:
# recompute residual and recheck, to avoid
# accumulating rounding error
work[slice1] = b - matvec(x)
resid, info = _stoptest(work[slice1], atol)
ijob = 2
if info > 0 and iter_ == maxiter and not (resid <= atol):
# info isn't set appropriately otherwise
info = iter_
return postprocess(x), info
@set_docstring('Use Conjugate Gradient Squared iteration to solve ``Ax = b``.',
'The real-valued N-by-N matrix of the linear system.\n'
'Alternatively, ``A`` can be a linear operator which can\n'
'produce ``Ax`` using, e.g.,\n'
'``scipy.sparse.linalg.LinearOperator``.')
@non_reentrant()
def cgs(A, b, x0=None, tol=1e-5, maxiter=None, M=None, callback=None, atol=None):
A, M, x, b, postprocess = make_system(A, M, x0, b)
n = len(b)
if maxiter is None:
maxiter = n*10
matvec = A.matvec
psolve = M.matvec
ltr = _type_conv[x.dtype.char]
revcom = getattr(_iterative, ltr + 'cgsrevcom')
get_residual = lambda: np.linalg.norm(matvec(x) - b)
atol = _get_atol(tol, atol, np.linalg.norm(b), get_residual, 'cgs')
if atol == 'exit':
return postprocess(x), 0
resid = atol
ndx1 = 1
ndx2 = -1
# Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1
work = _aligned_zeros(7*n,dtype=x.dtype)
ijob = 1
info = 0
ftflag = True
iter_ = maxiter
while True:
olditer = iter_
x, iter_, resid, info, ndx1, ndx2, sclr1, sclr2, ijob = \
revcom(b, x, work, iter_, resid, info, ndx1, ndx2, ijob)
if callback is not None and iter_ > olditer:
callback(x)
slice1 = slice(ndx1-1, ndx1-1+n)
slice2 = slice(ndx2-1, ndx2-1+n)
if (ijob == -1):
if callback is not None:
callback(x)
break
elif (ijob == 1):
work[slice2] *= sclr2
work[slice2] += sclr1*matvec(work[slice1])
elif (ijob == 2):
work[slice1] = psolve(work[slice2])
elif (ijob == 3):
work[slice2] *= sclr2
work[slice2] += sclr1*matvec(x)
elif (ijob == 4):
if ftflag:
info = -1
ftflag = False
resid, info = _stoptest(work[slice1], atol)
if info == 1 and iter_ > 1:
# recompute residual and recheck, to avoid
# accumulating rounding error
work[slice1] = b - matvec(x)
resid, info = _stoptest(work[slice1], atol)
ijob = 2
if info == -10:
# termination due to breakdown: check for convergence
resid, ok = _stoptest(b - matvec(x), atol)
if ok:
info = 0
if info > 0 and iter_ == maxiter and not (resid <= atol):
# info isn't set appropriately otherwise
info = iter_
return postprocess(x), info
@non_reentrant()
def gmres(A, b, x0=None, tol=1e-5, restart=None, maxiter=None, M=None, callback=None,
restrt=None, atol=None, callback_type=None):
"""
Use Generalized Minimal RESidual iteration to solve ``Ax = b``.
Parameters
----------
A : {sparse matrix, ndarray, LinearOperator}
The real or complex N-by-N matrix of the linear system.
Alternatively, ``A`` can be a linear operator which can
produce ``Ax`` using, e.g.,
``scipy.sparse.linalg.LinearOperator``.
b : ndarray
Right hand side of the linear system. Has shape (N,) or (N,1).
Returns
-------
x : ndarray
The converged solution.
info : int
Provides convergence information:
* 0 : successful exit
* >0 : convergence to tolerance not achieved, number of iterations
* <0 : illegal input or breakdown
    Other Parameters
----------------
x0 : ndarray
Starting guess for the solution (a vector of zeros by default).
tol, atol : float, optional
Tolerances for convergence, ``norm(residual) <= max(tol*norm(b), atol)``.
The default for ``atol`` is ``'legacy'``, which emulates
a different legacy behavior.
.. warning::
The default value for `atol` will be changed in a future release.
For future compatibility, specify `atol` explicitly.
restart : int, optional
Number of iterations between restarts. Larger values increase
iteration cost, but may be necessary for convergence.
Default is 20.
maxiter : int, optional
Maximum number of iterations (restart cycles). Iteration will stop
after maxiter steps even if the specified tolerance has not been
achieved.
M : {sparse matrix, ndarray, LinearOperator}
Inverse of the preconditioner of A. M should approximate the
inverse of A and be easy to solve for (see Notes). Effective
preconditioning dramatically improves the rate of convergence,
which implies that fewer iterations are needed to reach a given
error tolerance. By default, no preconditioner is used.
callback : function
User-supplied function to call after each iteration. It is called
as `callback(args)`, where `args` are selected by `callback_type`.
callback_type : {'x', 'pr_norm', 'legacy'}, optional
Callback function argument requested:
- ``x``: current iterate (ndarray), called on every restart
- ``pr_norm``: relative (preconditioned) residual norm (float),
called on every inner iteration
- ``legacy`` (default): same as ``pr_norm``, but also changes the
meaning of 'maxiter' to count inner iterations instead of restart
cycles.
restrt : int, optional
DEPRECATED - use `restart` instead.
See Also
--------
LinearOperator
Notes
-----
A preconditioner, P, is chosen such that P is close to A but easy to solve
for. The preconditioner parameter required by this routine is
``M = P^-1``. The inverse should preferably not be calculated
explicitly. Rather, use the following template to produce M::
# Construct a linear operator that computes P^-1 @ x.
import scipy.sparse.linalg as spla
M_x = lambda x: spla.spsolve(P, x)
M = spla.LinearOperator((n, n), M_x)
Examples
--------
>>> from scipy.sparse import csc_matrix
>>> from scipy.sparse.linalg import gmres
>>> A = csc_matrix([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float)
>>> b = np.array([2, 4, -1], dtype=float)
>>> x, exitCode = gmres(A, b)
>>> print(exitCode) # 0 indicates successful convergence
0
>>> np.allclose(A.dot(x), b)
True
"""
# Change 'restrt' keyword to 'restart'
if restrt is None:
restrt = restart
elif restart is not None:
raise ValueError("Cannot specify both restart and restrt keywords. "
"Preferably use 'restart' only.")
if callback is not None and callback_type is None:
# Warn about 'callback_type' semantic changes.
# Probably should be removed only in far future, Scipy 2.0 or so.
warnings.warn("scipy.sparse.linalg.gmres called without specifying `callback_type`. "
"The default value will be changed in a future release. "
"For compatibility, specify a value for `callback_type` explicitly, e.g., "
"``{name}(..., callback_type='pr_norm')``, or to retain the old behavior "
"``{name}(..., callback_type='legacy')``",
category=DeprecationWarning, stacklevel=3)
if callback_type is None:
callback_type = 'legacy'
if callback_type not in ('x', 'pr_norm', 'legacy'):
raise ValueError("Unknown callback_type: {!r}".format(callback_type))
if callback is None:
callback_type = 'none'
    A, M, x, b, postprocess = make_system(A, M, x0, b)
n = len(b)
if maxiter is None:
maxiter = n*10
if restrt is None:
restrt = 20
restrt = min(restrt, n)
matvec = A.matvec
psolve = M.matvec
ltr = _type_conv[x.dtype.char]
revcom = getattr(_iterative, ltr + 'gmresrevcom')
bnrm2 = np.linalg.norm(b)
Mb_nrm2 = np.linalg.norm(psolve(b))
get_residual = lambda: np.linalg.norm(matvec(x) - b)
atol = _get_atol(tol, atol, bnrm2, get_residual, 'gmres')
if atol == 'exit':
return postprocess(x), 0
if bnrm2 == 0:
return postprocess(b), 0
# Tolerance passed to GMRESREVCOM applies to the inner iteration
# and deals with the left-preconditioned residual.
ptol_max_factor = 1.0
ptol = Mb_nrm2 * min(ptol_max_factor, atol / bnrm2)
resid = np.nan
presid = np.nan
ndx1 = 1
ndx2 = -1
# Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1
work = _aligned_zeros((6+restrt)*n,dtype=x.dtype)
work2 = _aligned_zeros((restrt+1)*(2*restrt+2),dtype=x.dtype)
ijob = 1
info = 0
ftflag = True
iter_ = maxiter
old_ijob = ijob
first_pass = True
resid_ready = False
iter_num = 1
while True:
olditer = iter_
x, iter_, presid, info, ndx1, ndx2, sclr1, sclr2, ijob = \
revcom(b, x, restrt, work, work2, iter_, presid, info, ndx1, ndx2, ijob, ptol)
if callback_type == 'x' and iter_ != olditer:
callback(x)
slice1 = slice(ndx1-1, ndx1-1+n)
slice2 = slice(ndx2-1, ndx2-1+n)
if (ijob == -1): # gmres success, update last residual
if callback_type in ('pr_norm', 'legacy'):
if resid_ready:
callback(presid / bnrm2)
elif callback_type == 'x':
callback(x)
break
elif (ijob == 1):
work[slice2] *= sclr2
work[slice2] += sclr1*matvec(x)
elif (ijob == 2):
work[slice1] = psolve(work[slice2])
if not first_pass and old_ijob == 3:
resid_ready = True
first_pass = False
elif (ijob == 3):
work[slice2] *= sclr2
work[slice2] += sclr1*matvec(work[slice1])
if resid_ready:
if callback_type in ('pr_norm', 'legacy'):
callback(presid / bnrm2)
resid_ready = False
iter_num = iter_num+1
elif (ijob == 4):
if ftflag:
info = -1
ftflag = False
resid, info = _stoptest(work[slice1], atol)
# Inner loop tolerance control
if info or presid > ptol:
ptol_max_factor = min(1.0, 1.5 * ptol_max_factor)
else:
# Inner loop tolerance OK, but outer loop not.
ptol_max_factor = max(1e-16, 0.25 * ptol_max_factor)
if resid != 0:
ptol = presid * min(ptol_max_factor, atol / resid)
else:
ptol = presid * ptol_max_factor
old_ijob = ijob
ijob = 2
if callback_type == 'legacy':
# Legacy behavior
if iter_num > maxiter:
info = maxiter
break
if info >= 0 and not (resid <= atol):
# info isn't set appropriately otherwise
info = maxiter
return postprocess(x), info
@non_reentrant()
def qmr(A, b, x0=None, tol=1e-5, maxiter=None, M1=None, M2=None, callback=None,
atol=None):
"""Use Quasi-Minimal Residual iteration to solve ``Ax = b``.
Parameters
----------
A : {sparse matrix, ndarray, LinearOperator}
The real-valued N-by-N matrix of the linear system.
Alternatively, ``A`` can be a linear operator which can
produce ``Ax`` and ``A^T x`` using, e.g.,
``scipy.sparse.linalg.LinearOperator``.
b : ndarray
Right hand side of the linear system. Has shape (N,) or (N,1).
Returns
-------
x : ndarray
The converged solution.
info : integer
Provides convergence information:
0 : successful exit
>0 : convergence to tolerance not achieved, number of iterations
<0 : illegal input or breakdown
Other Parameters
----------------
x0 : ndarray
Starting guess for the solution.
tol, atol : float, optional
Tolerances for convergence, ``norm(residual) <= max(tol*norm(b), atol)``.
The default for ``atol`` is ``'legacy'``, which emulates
a different legacy behavior.
.. warning::
The default value for `atol` will be changed in a future release.
For future compatibility, specify `atol` explicitly.
maxiter : integer
Maximum number of iterations. Iteration will stop after maxiter
steps even if the specified tolerance has not been achieved.
M1 : {sparse matrix, ndarray, LinearOperator}
Left preconditioner for A.
M2 : {sparse matrix, ndarray, LinearOperator}
Right preconditioner for A. Used together with the left
        preconditioner M1. The matrix M1@A@M2 should be better
        conditioned than A alone.
callback : function
User-supplied function to call after each iteration. It is called
as callback(xk), where xk is the current solution vector.
See Also
--------
LinearOperator
Examples
--------
>>> from scipy.sparse import csc_matrix
>>> from scipy.sparse.linalg import qmr
>>> A = csc_matrix([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float)
>>> b = np.array([2, 4, -1], dtype=float)
>>> x, exitCode = qmr(A, b)
>>> print(exitCode) # 0 indicates successful convergence
0
>>> np.allclose(A.dot(x), b)
True
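    A hypothetical sketch of passing explicit (here, identity) left and right
    preconditioners via ``M1`` and ``M2``:
    >>> from scipy.sparse.linalg import LinearOperator
    >>> identity = LinearOperator(A.shape, matvec=lambda v: v, rmatvec=lambda v: v)
    >>> x, exitCode = qmr(A, b, M1=identity, M2=identity)
    >>> print(exitCode)
    0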
"""
A_ = A
A, M, x, b, postprocess = make_system(A, None, x0, b)
if M1 is None and M2 is None:
if hasattr(A_,'psolve'):
def left_psolve(b):
return A_.psolve(b,'left')
def right_psolve(b):
return A_.psolve(b,'right')
def left_rpsolve(b):
return A_.rpsolve(b,'left')
def right_rpsolve(b):
return A_.rpsolve(b,'right')
M1 = LinearOperator(A.shape, matvec=left_psolve, rmatvec=left_rpsolve)
M2 = LinearOperator(A.shape, matvec=right_psolve, rmatvec=right_rpsolve)
else:
def id(b):
return b
M1 = LinearOperator(A.shape, matvec=id, rmatvec=id)
M2 = LinearOperator(A.shape, matvec=id, rmatvec=id)
n = len(b)
if maxiter is None:
maxiter = n*10
ltr = _type_conv[x.dtype.char]
revcom = getattr(_iterative, ltr + 'qmrrevcom')
get_residual = lambda: np.linalg.norm(A.matvec(x) - b)
atol = _get_atol(tol, atol, np.linalg.norm(b), get_residual, 'qmr')
if atol == 'exit':
return postprocess(x), 0
resid = atol
ndx1 = 1
ndx2 = -1
# Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1
work = _aligned_zeros(11*n,x.dtype)
ijob = 1
info = 0
ftflag = True
iter_ = maxiter
while True:
olditer = iter_
x, iter_, resid, info, ndx1, ndx2, sclr1, sclr2, ijob = \
revcom(b, x, work, iter_, resid, info, ndx1, ndx2, ijob)
if callback is not None and iter_ > olditer:
callback(x)
slice1 = slice(ndx1-1, ndx1-1+n)
slice2 = slice(ndx2-1, ndx2-1+n)
if (ijob == -1):
if callback is not None:
callback(x)
break
elif (ijob == 1):
work[slice2] *= sclr2
work[slice2] += sclr1*A.matvec(work[slice1])
elif (ijob == 2):
work[slice2] *= sclr2
work[slice2] += sclr1*A.rmatvec(work[slice1])
elif (ijob == 3):
work[slice1] = M1.matvec(work[slice2])
elif (ijob == 4):
work[slice1] = M2.matvec(work[slice2])
elif (ijob == 5):
work[slice1] = M1.rmatvec(work[slice2])
elif (ijob == 6):
work[slice1] = M2.rmatvec(work[slice2])
elif (ijob == 7):
work[slice2] *= sclr2
work[slice2] += sclr1*A.matvec(x)
elif (ijob == 8):
if ftflag:
info = -1
ftflag = False
resid, info = _stoptest(work[slice1], atol)
ijob = 2
if info > 0 and iter_ == maxiter and not (resid <= atol):
# info isn't set appropriately otherwise
info = iter_
return postprocess(x), info
|
|
import logging
from datetime import datetime
from sqlalchemy import func
from sqlalchemy.orm import joinedload
from sqlalchemy.dialects.postgresql import JSONB, ARRAY
from aleph.core import db, schemata
from aleph.text import normalize_strong, string_value
from aleph.util import ensure_list
from aleph.model.collection import Collection
from aleph.model.reference import Reference
from aleph.model.entity_identity import EntityIdentity
from aleph.model.common import SoftDeleteModel, UuidModel
from aleph.model.common import make_textid, merge_data
log = logging.getLogger(__name__)
class Entity(db.Model, UuidModel, SoftDeleteModel):
STATE_ACTIVE = 'active'
STATE_PENDING = 'pending'
STATE_DELETED = 'deleted'
name = db.Column(db.Unicode)
type = db.Column(db.String(255), index=True)
state = db.Column(db.String(128), nullable=True, default=STATE_ACTIVE, index=True) # noqa
foreign_ids = db.Column(ARRAY(db.Unicode()))
data = db.Column('data', JSONB)
collection_id = db.Column(db.Integer, db.ForeignKey('collection.id'), index=True) # noqa
collection = db.relationship(Collection, backref=db.backref('entities', lazy='dynamic')) # noqa
def delete_references(self, origin=None):
pq = db.session.query(Reference)
pq = pq.filter(Reference.entity_id == self.id)
if origin is not None:
pq = pq.filter(Reference.origin == origin)
pq.delete(synchronize_session='fetch')
db.session.refresh(self)
def delete_identities(self):
pq = db.session.query(EntityIdentity)
pq = pq.filter(EntityIdentity.entity_id == self.id)
pq.delete(synchronize_session='fetch')
db.session.refresh(self)
def delete(self, deleted_at=None):
self.delete_references()
self.delete_identities()
deleted_at = deleted_at or datetime.utcnow()
for alert in self.alerts:
alert.delete(deleted_at=deleted_at)
self.state = self.STATE_DELETED
super(Entity, self).delete(deleted_at=deleted_at)
@classmethod
def delete_dangling(cls, collection_id):
"""Delete dangling entities.
Entities can dangle in pending state while they have no references
pointing to them, thus making it impossible to enable them. This is
a routine cleanup function.
"""
q = db.session.query(cls)
q = q.filter(cls.collection_id == collection_id)
q = q.filter(cls.state == cls.STATE_PENDING)
q = q.outerjoin(Reference)
q = q.group_by(cls)
q = q.having(func.count(Reference.id) == 0)
for entity in q.all():
entity.delete()
def merge(self, other):
if self.id == other.id:
raise ValueError("Cannot merge an entity with itself.")
if self.collection_id != other.collection_id:
raise ValueError("Cannot merge entities from different collections.") # noqa
data = merge_data(self.data, other.data)
if self.name.lower() != other.name.lower():
data = merge_data(data, {'alias': [other.name]})
self.data = data
self.state = self.STATE_ACTIVE
self.foreign_ids = self.foreign_ids or []
self.foreign_ids += other.foreign_ids or []
self.created_at = min((self.created_at, other.created_at))
self.updated_at = datetime.utcnow()
# update alerts
from aleph.model.alert import Alert
q = db.session.query(Alert).filter(Alert.entity_id == other.id)
q.update({'entity_id': self.id})
# update document references
from aleph.model.reference import Reference
q = db.session.query(Reference).filter(Reference.entity_id == other.id)
q.update({'entity_id': self.id})
# delete source entities
other.delete()
db.session.add(self)
db.session.commit()
db.session.refresh(other)
def update(self, entity):
data = entity.get('data') or {}
data['name'] = entity.get('name')
self.data = self.schema.validate(data)
self.name = self.data.pop('name')
fid = [string_value(f) for f in entity.get('foreign_ids') or []]
self.foreign_ids = list(set([f for f in fid if f is not None]))
self.state = entity.pop('state', self.STATE_ACTIVE)
self.updated_at = datetime.utcnow()
db.session.add(self)
@classmethod
def save(cls, data, collection, merge=False):
ent = cls.by_id(data.get('id'))
if ent is None:
ent = cls()
ent.type = data.pop('schema', None)
if ent.type is None:
raise ValueError("No schema provided.")
ent.id = make_textid()
if merge:
data = merge_data(data, ent.to_dict())
if collection is None:
raise ValueError("No collection specified.")
ent.collection = collection
ent.update(data)
return ent
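    # For illustration (hypothetical payload): Entity.save expects a dict with
    # a 'schema' key plus the fields consumed by update(), e.g.
    #     Entity.save({'schema': 'Person', 'name': 'Jane Doe', 'data': {}},
    #                 some_collection)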
@classmethod
def filter_collections(cls, q, collections=None):
if collections is None:
return q
collection_ids = []
for collection in collections:
if isinstance(collection, Collection):
collection = collection.id
collection_ids.append(collection)
q = q.filter(Entity.collection_id.in_(collection_ids))
return q
@classmethod
def by_id_set(cls, ids, collections=None):
if not len(ids):
return {}
q = cls.all()
q = cls.filter_collections(q, collections=collections)
q = q.options(joinedload('collection'))
q = q.filter(cls.id.in_(ids))
entities = {}
for ent in q:
entities[ent.id] = ent
return entities
@classmethod
def by_foreign_id(cls, foreign_id, collection_id, deleted=False):
foreign_id = string_value(foreign_id)
if foreign_id is None:
return None
q = cls.all(deleted=deleted)
q = q.filter(Entity.collection_id == collection_id)
foreign_id = func.cast([foreign_id], ARRAY(db.Unicode()))
q = q.filter(cls.foreign_ids.contains(foreign_id))
q = q.order_by(Entity.deleted_at.desc().nullsfirst())
return q.first()
@classmethod
def latest(cls):
q = db.session.query(func.max(cls.updated_at))
q = q.filter(cls.state == cls.STATE_ACTIVE)
return q.scalar()
@property
def schema(self):
return schemata.get(self.type)
@property
def terms(self):
terms = set([self.name])
for alias in ensure_list(self.data.get('alias')):
if alias is not None and len(alias):
terms.add(alias)
return terms
@property
def regex_terms(self):
# This is to find the shortest possible regex for each entity.
        # If, for example, an entity matches both "Al Qaeda" and
# "Al Qaeda in Iraq, Syria and the Levant", it is useless to
# search for the latter.
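        # For illustration (hypothetical values): if the normalized terms are
        # {"al qaeda", "al qaeda in iraq"}, only "al qaeda" is kept, since any
        # text matching the longer term also matches the shorter one.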
terms = set([normalize_strong(t) for t in self.terms])
regex_terms = set()
for term in terms:
if term is None or len(term) < 4 or len(term) > 120:
continue
contained = False
for other in terms:
if other is None or other == term:
continue
if other in term:
contained = True
if not contained:
regex_terms.add(term)
return regex_terms
def to_dict(self):
data = super(Entity, self).to_dict()
data.update({
'schema': self.type,
'name': self.name,
'state': self.state,
'data': self.data,
'foreign_ids': self.foreign_ids or [],
'collection_id': self.collection_id
})
return data
def to_index(self):
entity = self.to_dict()
entity['properties'] = {'name': [self.name]}
for k, v in self.data.items():
v = ensure_list(v)
if len(v):
entity['properties'][k] = v
return entity
def to_ref(self):
return {
'id': self.id,
'label': self.name,
'schema': self.type,
'collection_id': self.collection_id
}
def __unicode__(self):
return self.name
def __repr__(self):
return '<Entity(%r, %r)>' % (self.id, self.name)
|
|
"""
Copyright (C) 2013-2018 Calliope contributors listed in AUTHORS.
Licensed under the Apache 2.0 License (see LICENSE file).
model_data.py
~~~~~~~~~~~~~~~~~~
Functionality to build the model-internal data array and process
time-varying parameters.
"""
import collections
import ruamel.yaml
import xarray as xr
import numpy as np
from calliope.core.attrdict import AttrDict
from calliope._version import __version__
from calliope.core.preprocess import checks
from calliope.core.preprocess.util import split_loc_techs_transmission, concat_iterable
from calliope.core.preprocess.time import add_time_dimension
from calliope.core.preprocess.lookup import add_lookup_arrays
def build_model_data(model_run, debug=False):
"""
Take a Calliope model_run and convert it into an xarray Dataset, ready for
constraint generation. Timeseries data is also extracted from file at this
point, and the time dimension added to the data
Parameters
----------
model_run : AttrDict
preprocessed model_run dictionary, as produced by
Calliope.core.preprocess_model
debug : bool, default = False
Used to debug steps within build_model_data, particularly before/after
time dimension addition. If True, more information is returned
Returns
-------
data : xarray Dataset
Dataset with optimisation parameters as variables, optimisation sets as
coordinates, and other information in attributes.
data_dict : dict, only returned if debug = True
dictionary of parameters, prior to time dimension addition. Used here to
populate the Dataset (using `from_dict()`)
data_pre_time : xarray Dataset, only returned if debug = True
Dataset, prior to time dimension addition, with optimisation parameters
as variables, optimisation sets as coordinates, and other information
in attributes.
"""
# We build up a dictionary of the data, then convert it to an xarray Dataset
# before applying time dimensions
data = xr.Dataset(
coords=add_sets(model_run),
attrs=add_attributes(model_run)
)
data_dict = dict()
data_dict.update(constraints_to_dataset(model_run))
data_dict.update(costs_to_dataset(model_run))
data_dict.update(location_specific_to_dataset(model_run))
data_dict.update(tech_specific_to_dataset(model_run))
data_dict.update(carrier_specific_to_dataset(model_run))
data.merge(xr.Dataset.from_dict(data_dict), inplace=True)
add_lookup_arrays(data, model_run)
if debug:
data_pre_time = data.copy(deep=True)
add_time_dimension(data, model_run)
# Carrier information uses DataArray indexing in the function, so we merge
# these directly into the main xarray Dataset
if debug:
return data, data_dict, data_pre_time
else:
return data
def add_sets(model_run):
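    """
    Collect model_run.sets and model_run.constraint_sets into a single
    coordinates dict, skipping any sets that are empty.
    """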
coords = dict()
for key, value in model_run.sets.items():
if value:
coords[key] = value
for key, value in model_run.constraint_sets.items():
if value:
coords[key] = value
return coords
def constraints_to_dataset(model_run):
"""
Extract all constraints from the processed dictionary (model.model_run) and
return an xarray Dataset with all the constraints as DataArray variables and
model sets as Dataset dimensions.
Parameters
----------
model_run : AttrDict
processed Calliope model_run dict
Returns
-------
data_dict : dict conforming to xarray conventions
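        For illustration (hypothetical constraint and set members), each entry
        takes the form
        ``{'energy_cap_max': {'dims': 'loc_techs', 'data': [10, np.nan, ...]}}``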
"""
data_dict = dict()
# FIXME: hardcoding == bad
def _get_set(constraint):
"""
return the set of loc_techs over which the given constraint should be
built
"""
if '_area' in constraint:
return 'loc_techs_area'
elif any(i in constraint for i in ['resource_cap', 'parasitic', 'resource_min_use']):
return 'loc_techs_supply_plus'
elif 'resource' in constraint: # i.e. everything with 'resource' in the name that isn't resource_cap
return 'loc_techs_finite_resource'
elif 'storage' in constraint or 'charge_rate' in constraint:
return 'loc_techs_store'
elif 'purchase' in constraint:
return 'loc_techs_purchase'
elif 'units_' in constraint:
return 'loc_techs_milp'
elif 'export' in constraint:
return 'loc_techs_export'
else:
return 'loc_techs'
# find all constraints which are actually defined in the yaml file
relevant_constraints = set(i.split('.constraints.')[1]
for i in model_run.locations.as_dict_flat().keys()
if '.constraints.' in i and
'.carrier_ratios.'not in i)
for constraint in relevant_constraints:
data_dict[constraint] = dict(dims=_get_set(constraint), data=[])
for loc_tech in model_run.sets[_get_set(constraint)]:
loc, tech = loc_tech.split('::', 1)
# for transmission technologies, we also need to go into link nesting
if ':' in tech: # i.e. transmission technologies
tech, link = tech.split(':')
loc_tech_dict = model_run.locations[loc].links[link].techs[tech]
else: # all other technologies
loc_tech_dict = model_run.locations[loc].techs[tech]
constraint_value = loc_tech_dict.constraints.get(constraint, np.nan)
            # 'inf' is parsed as a string on import, so convert it to np.inf here
if constraint_value == 'inf':
constraint_value = np.inf
# add the value for the particular location & technology combination to the list
data_dict[constraint]['data'].append(constraint_value)
# once we've looped through all technology & location combinations, add the array to the dataset
# Additional system-wide constraints from model_run.model
# FIXME: hardcoding == bad
data_dict['reserve_margin'] = {
'data': [model_run.model.get('reserve_margin', {}).get(c, np.nan)
for c in model_run.sets['carriers']],
'dims': 'carriers'
}
group_share_data = {}
group_constraints = ['energy_cap_min', 'energy_cap_max', 'energy_cap_equals']
group_constraints_carrier = ['carrier_prod_min', 'carrier_prod_max', 'carrier_prod_equals']
for constraint in [ # Only process constraints that are defined
c for c in group_constraints
if c in ''.join(model_run.model.get_key('group_share', AttrDict()).keys_nested())]:
group_share_data[constraint] = [
model_run.model.get_key('group_share.{}.{}'.format(techlist, constraint), np.nan)
for techlist in model_run.sets['techlists']
]
for constraint in [ # Only process constraints that are defined
c for c in group_constraints_carrier
if c in ''.join(model_run.model.get_key('group_share', AttrDict()).keys_nested())]:
group_share_data[constraint] = [
[
model_run.model.get_key('group_share.{}.{}.{}'.format(techlist, constraint, carrier), np.nan)
for techlist in model_run.sets['techlists']
]
for carrier in model_run.sets['carriers']
]
# Add to data_dict and set dims correctly
for k in group_share_data:
data_dict['group_share_' + k] = {
'data': group_share_data[k],
'dims': 'techlists' if k in group_constraints else ('carriers', 'techlists')
}
return data_dict
def costs_to_dataset(model_run):
"""
Extract all costs from the processed dictionary (model.model_run) and
return an xarray Dataset with all the costs as DataArray variables. Variable
names will be prepended with `cost_` to differentiate from other constraints
Parameters
----------
model_run : AttrDict
processed Calliope model_run dict
Returns
-------
data_dict : dict conforming to xarray conventions
"""
data_dict = dict()
# FIXME: hardcoding == bad
def _get_set(cost):
"""
return the set of loc_techs over which the given cost should be built
"""
if any(i in cost for i in ['_cap', 'depreciation_rate', 'purchase', 'area']):
return 'loc_techs_investment_cost'
elif any(i in cost for i in ['om_', 'export']):
return 'loc_techs_om_cost'
else:
return 'loc_techs'
# find all cost classes and associated costs which are actually defined in the model_run
costs = set(i.split('.costs.')[1].split('.')[1]
for i in model_run.locations.as_dict_flat().keys()
if '.costs.' in i)
cost_classes = model_run.sets['costs']
# loop over unique costs, cost classes and technology & location combinations
for cost in costs:
data_dict['cost_' + cost] = dict(dims=["costs", _get_set(cost)], data=[])
for cost_class in cost_classes:
cost_class_array = []
for loc_tech in model_run.sets[_get_set(cost)]:
loc, tech = loc_tech.split('::', 1)
# for transmission technologies, we also need to go into link nesting
if ':' in tech: # i.e. transmission technologies
tech, link = tech.split(':')
loc_tech_dict = model_run.locations[loc].links[link].techs[tech]
else: # all other technologies
loc_tech_dict = model_run.locations[loc].techs[tech]
cost_dict = loc_tech_dict.get_key('costs.' + cost_class, None)
                # 'inf' is parsed as a string on import, so convert it to np.inf here
cost_value = np.nan if not cost_dict else cost_dict.get(cost, np.nan)
# add the value for the particular location & technology combination to the correct cost class list
cost_class_array.append(cost_value)
data_dict['cost_' + cost]['data'].append(cost_class_array)
return data_dict
def carrier_specific_to_dataset(model_run):
"""
Extract carrier information from the processed dictionary (model.model_run)
and return an xarray Dataset with DataArray variables describing carrier_in,
carrier_out, and carrier_ratio (for conversion plus technologies) information.
Parameters
----------
model_run : AttrDict
processed Calliope model_run dict
Returns
-------
data_dict : dict conforming to xarray conventions
"""
carrier_tiers = model_run.sets['carrier_tiers']
loc_tech_dict = {k: [] for k in model_run.sets['loc_techs_conversion_plus']}
data_dict = dict()
# Set information per carrier tier ('out', 'out_2', 'in', etc.)
# for conversion-plus technologies
if model_run.sets['loc_techs_conversion_plus']:
# carrier ratios are the floating point numbers used to compare one
# carrier_in/_out value with another carrier_in/_out value
data_dict['carrier_ratios'] = dict(
dims=['carrier_tiers', 'loc_tech_carriers_conversion_plus'], data=[]
)
data_dict['carrier_ratios_min'] = dict(
dims=['carrier_tiers', 'loc_techs_conversion_plus'], data=[]
)
for carrier_tier in carrier_tiers:
data = []
for loc_tech_carrier in model_run.sets['loc_tech_carriers_conversion_plus']:
loc, tech, carrier = loc_tech_carrier.split('::')
carrier_ratio = (
model_run.locations[loc].techs[tech].constraints.get_key(
'carrier_ratios.carrier_' + carrier_tier + '.' + carrier, 1
)
)
data.append(carrier_ratio)
loc_tech_dict[loc + '::' + tech].append(carrier_ratio)
data_dict['carrier_ratios']['data'].append(data)
data_dict['carrier_ratios_min']['data'].append(
[min(i) for i in loc_tech_dict.values()]
)
# Additional system-wide constraints from model_run.model
if 'reserve_margin' in model_run.model.keys():
data_dict['reserve_margin'] = {
'data': [model_run.model.reserve_margin.get(c, np.nan)
for c in model_run.sets['carriers']],
'dims': 'carriers'
}
return data_dict
def location_specific_to_dataset(model_run):
"""
Extract location specific information from the processed dictionary
(model.model_run) and return an xarray Dataset with DataArray variables
describing distance, coordinate and available area information.
Parameters
----------
model_run : AttrDict
processed Calliope model_run dict
Returns
-------
data_dict : dict conforming to xarray conventions
"""
# for every transmission technology, we extract distance information, if it
# is available
data_dict = dict()
data_dict['distance'] = dict(dims='loc_techs_transmission', data=[
model_run.get_key(
'locations.{loc_from}.links.{loc_to}.techs.{tech}.distance'
.format(**split_loc_techs_transmission(loc_tech)), np.nan)
for loc_tech in model_run.sets['loc_techs_transmission']
])
# If there is no distance information stored, distance array is deleted
if data_dict['distance']['data'].count(np.nan) == len(data_dict['distance']['data']):
del data_dict['distance']
data_dict['lookup_remotes'] = dict(
dims='loc_techs_transmission',
data=concat_iterable([
(k['loc_to'], k['tech'], k['loc_from'])
for k in [
split_loc_techs_transmission(loc_tech)
for loc_tech in model_run.sets['loc_techs_transmission']
]
], ['::', ':'])
)
# If there are no remote locations stored, lookup_remotes array is deleted
if data_dict['lookup_remotes']['data'].count(np.nan) == len(data_dict['lookup_remotes']['data']):
del data_dict['lookup_remotes']
data_dict['available_area'] = dict(dims='locs', data=[
model_run.locations[loc].get('available_area', np.nan)
for loc in model_run.sets['locs']
])
# remove this dictionary element if nothing is defined in it
if set(data_dict['available_area']['data']) == {np.nan}:
del data_dict['available_area']
# Coordinates are defined per location, but may not be defined at all for
# the model
if 'coordinates' in model_run.sets:
data_dict['loc_coordinates'] = dict(dims=['locs', 'coordinates'], data=[])
for loc in model_run.sets['locs']:
data_dict['loc_coordinates']['data'].append([
model_run.locations[loc].coordinates[coordinate]
for coordinate in model_run.sets.coordinates])
return data_dict
def tech_specific_to_dataset(model_run):
"""
Extract technology (location inspecific) information from the processed
dictionary (model.model_run) and return an xarray Dataset with DataArray
variables describing color and inheritance chain information.
Parameters
----------
model_run : AttrDict
processed Calliope model_run dict
Returns
-------
data_dict : dict conforming to xarray conventions
"""
data_dict = collections.defaultdict(
lambda: {'dims': ['techs'], 'data': []}
)
systemwide_constraints = set([
k.split('.')[-1] for k in model_run.techs.keys_nested()
if '.constraints.' in k and
k.endswith('_systemwide')
])
for tech in model_run.sets['techs']:
if tech in model_run.sets['techs_transmission']:
tech = tech.split(':')[0]
data_dict['colors']['data'].append(model_run.techs[tech].get_key(
'essentials.color'))
data_dict['inheritance']['data'].append('.'.join(
model_run.techs[tech].get_key('inheritance')))
data_dict['names']['data'].append(
# Default to tech ID if no name is set
model_run.techs[tech].get_key('essentials.name', tech))
for k in systemwide_constraints:
data_dict[k]['data'].append(
model_run.techs[tech].constraints.get_key(k, np.nan)
)
return data_dict
def add_attributes(model_run):
attr_dict = AttrDict()
attr_dict['model'] = model_run.model.copy()
attr_dict['run'] = model_run.run.copy()
# Some keys are killed right away
for k in ['model.time', 'model.data_path', 'model.timeseries_data_path',
'run.config_run_path', 'run.model']:
try:
attr_dict.del_key(k)
except KeyError:
pass
# Now we flatten the AttrDict into a dict
attr_dict = attr_dict.as_dict(flat=True)
# Anything empty or None in the flattened dict is also killed
for k in list(attr_dict.keys()):
val = attr_dict[k]
if val is None or (hasattr(val, '__iter__') and not val):
del attr_dict[k]
attr_dict['calliope_version'] = __version__
attr_dict['applied_overrides'] = model_run['applied_overrides']
attr_dict['scenario'] = model_run['scenario']
default_tech_dict = checks.defaults.default_tech.as_dict()
default_location_dict = checks.defaults.default_location.as_dict()
attr_dict['defaults'] = ruamel.yaml.dump({
**default_tech_dict['constraints'],
**{'cost_{}'.format(k): v for k, v in default_tech_dict['costs']['default'].items()},
**default_location_dict
})
return attr_dict
|
|
##
## This file is a layer that sits between challenge.py
## and the R scoring code. Its purpose is to be an
## adaptor between generic code and scoring code specific
## to a given challenge question.
## Communication with R is through the RPy2 package.
############################################################
import rpy2.robjects as robjects
import synapseclient
import math
# read in email templates
with open("templates/confirmation_email.txt") as f:
validation_confirmation_template = unicode(f.read())
with open("templates/validation_error_email.txt") as f:
validation_error_template = unicode(f.read())
with open("templates/scored_email.txt") as f:
scored_template = unicode(f.read())
with open("templates/scored_final_email.txt") as f:
scored_final_template = unicode(f.read())
with open("templates/scored_community_phase_email.txt") as f:
scored_community_phase_template = unicode(f.read())
with open("templates/scoring_error_email.txt") as f:
scoring_error_template = unicode(f.read())
## Configure scoring of evaluation queues
##
## These parameters link an evaluation queue to a validation
## function and a scoring function and supply other bits of
## configuration.
##
## The scoring functions defined below (score_q1, score_q2, etc.) insert
## statistics onto the submission status annotations. Later, the 'fields'
## named in config_evaluations are used to compute mean ranking.
config_evaluations = [
## Q1
{
'id':2480744,
'score_as_part_of_challenge': False,
'validation_function': 'validate_q1',
'validation_expected_format': 'q1.txt',
'scoring_function': 'score_q1',
'observed': 'q1.rosmap.csv',
'fields': ['correlation_pearson_clin',
'correlation_pearson_clin_gen',
'correlation_spearman_clin',
'correlation_spearman_clin_gen'],
'validation_error_template': validation_error_template,
'scored_template': scored_template,
'scoring_error_template': scoring_error_template,
'submission_quota': 105
},
## Q2
{
'id':2480748,
'score_as_part_of_challenge': False,
'validation_function': 'validate_q2',
'validation_expected_format': 'q2.txt',
'scoring_function': 'score_q2',
'observed': 'q2.observed.txt',
'fields': ['auc', 'accuracy'],
'validation_error_template': validation_error_template,
'scored_template': scored_template,
'scoring_error_template': scoring_error_template,
'submission_quota': 55
},
## Q3
{
'id':2480750,
'score_as_part_of_challenge': False,
'validation_function': 'validate_q3',
'validation_expected_format': 'q3.txt',
'scoring_function': 'score_q3',
'observed': 'q3.observed.csv',
'fields': ['pearson_mmse', 'ccc_mmse'],
'validation_error_template': validation_error_template,
'scored_template': scored_template,
'scoring_error_template': scoring_error_template,
'submission_quota': 55
},
## Q1 final
{
'id':2700269,
'score_as_part_of_challenge': False,
'validation_function': 'validate_q1',
'validation_expected_format': 'q1.final.example.txt',
'scoring_function': 'score_q1',
'observed': 'q1.final.observed.txt',
'fields': ['correlation_pearson_clin',
'correlation_pearson_clin_gen',
'correlation_spearman_clin',
'correlation_spearman_clin_gen'],
'validation_error_template': validation_error_template,
'scored_template': scored_final_template,
'scoring_error_template': scoring_error_template,
},
## Q2 final
{
'id':2700271,
'score_as_part_of_challenge': False,
'validation_function': 'validate_q2',
'validation_expected_format': 'q2.final.example.txt',
'scoring_function': 'score_q2',
'observed': 'q2.final.observed.txt',
'fields': ['auc', 'accuracy'],
'validation_error_template': validation_error_template,
'scored_template': scored_final_template,
'scoring_error_template': scoring_error_template,
},
## Q3 final
{
'id':2700273,
'score_as_part_of_challenge': False,
'validation_function': 'validate_q3',
'validation_expected_format': 'q3.final.example.txt',
'scoring_function': 'score_q3',
'observed': 'q3.final.observed.txt',
'fields': ['pearson_mmse', 'ccc_mmse'],
'validation_error_template': validation_error_template,
'scored_template': scored_final_template,
'scoring_error_template': scoring_error_template,
},
## Q1 community phase
{
'id':2924418,
'score_as_part_of_challenge': True,
'validation_function': 'validate_q1',
'validation_expected_format': 'q1.txt',
'scoring_function': 'score_q1',
'observed': 'q1.rosmap.csv',
'fields': ['correlation_pearson_clin',
'correlation_pearson_clin_gen',
'correlation_spearman_clin',
'correlation_spearman_clin_gen'],
'validation_error_template': validation_error_template,
'scored_template': scored_community_phase_template,
'scoring_error_template': scoring_error_template,
},
## Q2 community phase
{
'id':2924420,
'score_as_part_of_challenge': True,
'validation_function': 'validate_q2',
'validation_expected_format': 'q2.txt',
'scoring_function': 'score_q2',
'observed': 'q2.observed.txt',
'fields': ['auc', 'accuracy'],
'validation_error_template': validation_error_template,
'scored_template': scored_community_phase_template,
'scoring_error_template': scoring_error_template,
},
## Q3 community phase
{
'id':2924422,
'score_as_part_of_challenge': True,
'validation_function': 'validate_q3',
'validation_expected_format': 'q3.txt',
'scoring_function': 'score_q3',
'observed': 'q3.observed.csv',
'fields': ['pearson_mmse', 'ccc_mmse'],
'validation_error_template': validation_error_template,
'scored_template': scored_community_phase_template,
'scoring_error_template': scoring_error_template,
},
## testing
{
'id':2495614,
'score_as_part_of_challenge': False,
'validation_function': 'validate_q3',
'scoring_function': 'score_q3',
'fields': ['pearson_mmse', 'ccc_mmse'],
'validation_error_template': validation_error_template,
'scored_template': scored_template,
'scoring_error_template': scoring_error_template,
},
## use old Q1b queue for testing, too
{
'id':2480746,
'score_as_part_of_challenge': False,
'validation_function': 'validate_q1',
'scoring_function': 'score_q1',
'fields': ['correlation_pearson_clin',
'correlation_pearson_clin_gen',
'correlation_spearman_clin',
'correlation_spearman_clin_gen'],
'validation_error_template': validation_error_template,
'scored_template': scored_template,
'scoring_error_template': scoring_error_template,
'submission_quota': 50
}
]
config_evaluations_map = {ev['id']:ev for ev in config_evaluations}
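## Illustrative helper (a sketch, not part of the original harness): gathering the
## per-field statistics named in 'fields' into the dict-of-lists layout that
## mean_rank() below expects. The argument name score_dicts is an assumption for
## this example; each element is a dict like the 'result' built by score_submission.
def collect_field_scores(config, score_dicts):
    """Return {field: [value, ...]} for the fields configured for an evaluation."""
    return {field: [scores[field] for scores in score_dicts]
            for field in config['fields']}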
robjects.r('source("validate_and_score.R")')
r_mean_rank = robjects.r['mean_rank']
output_templates = {
    "score_q1":
        "Submission scored.\n\n Correlations are:\n" \
        " Pearson, clinical: {correlation_pearson_clin}\n" \
        " Pearson, clinical+genetic: {correlation_pearson_clin_gen}\n" \
        " Spearman, clinical: {correlation_spearman_clin}\n" \
        " Spearman, clinical+genetic: {correlation_spearman_clin_gen}\n",
    "score_q2":
        "Submission scored.\n\n" \
        " Accuracy = {accuracy}\n" \
        " AUC = {auc}\n" \
        " Brier score = {brier}\n" \
        " Somers' D = {somer}\n",
    "score_q3":
        "Submission scored.\n\n" \
        " Pearson correlation = {pearson_mmse}\n" \
        " Concordance correlation coefficient = {ccc_mmse}\n" \
        " Diagnosis percent correct = {percent_correct_diagnosis}\n"
}
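## Example (hypothetical values): each scoring function returns a dict of statistics
## and the matching template is filled by keyword, e.g.
##   output_templates['score_q2'].format(accuracy=0.82, auc=0.91, brier=0.12, somer=0.64)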
def as_dict(vector):
"""Convert an RPy2 ListVector to a Python dict"""
result = {}
for i, name in enumerate(vector.names):
if isinstance(vector[i], robjects.ListVector):
result[name] = as_dict(vector[i])
elif len(vector[i]) == 1:
result[name] = vector[i][0]
else:
result[name] = vector[i]
return result
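## Example (hypothetical): an R result such as list(valid=TRUE, message="bad header")
## converts to {'valid': True, 'message': 'bad header'}; length-1 vectors are
## unwrapped to scalars, nested lists recurse, and longer vectors are kept as-is.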
def validate_submission(evaluation, submission, status):
"""
To be called by challenge.py:validate()
"""
config = config_evaluations_map[int(evaluation.id)]
## get the R function that validates submissions for
## this evaluation
r_validate_submission = robjects.r[config['validation_function']]
## call an R function with signature: function(submission_path, expected_filename)
result = as_dict(r_validate_submission(submission.filePath, config['validation_expected_format']))
print result
status.status = "VALIDATED" if result['valid'] else "INVALID"
return status, result['message']
## these scoring statistics may return NaNs, which will be remapped to another value
correlation_keys = [
"correlation_pearson_clin",
"correlation_pearson_clin_gen",
"correlation_spearman_clin",
"correlation_spearman_clin_gen",
"pearson_mmse",
"ccc_mmse"]
def score_submission(evaluation, submission, status):
"""
To be called by challenge.py:score()
"""
config = config_evaluations_map[int(evaluation.id)]
## get the R function that scores submissions for this
## evaluation and a matching template for formatting the output
r_score_submission = robjects.r[config['scoring_function']]
template = output_templates[config['scoring_function']]
## call an R function with signature: function(submission_path, observed_path)
result = as_dict(r_score_submission(submission.filePath, config['observed']))
    ## change NaNs to -99, because JSON cannot represent NaN and -99 is
    ## outside the range of valid correlation values
for key,value in result.iteritems():
if key in correlation_keys and math.isnan(value):
result[key] = -99.0
print result
status.status = "SCORED"
## add scoring statistics to submission status annotations
if 'annotations' in status:
annotations = synapseclient.annotations.from_submission_status_annotations(status.annotations)
else:
annotations = {}
annotations.update(result)
status.annotations = synapseclient.annotations.to_submission_status_annotations(annotations, is_private=False)
    return status, template.format(**annotations)
def mean_rank(data):
## convert to an R data frame
df = robjects.DataFrame({key:robjects.FloatVector(values) for key,values in data.iteritems()})
## calculate the mean and final rankings
r_results = r_mean_rank(df)
return {name:col for name, col in r_results.items()}
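## Hypothetical usage, combining the pieces above:
##   data = collect_field_scores(config_evaluations_map[2480748],
##                               [{'auc': 0.91, 'accuracy': 0.82},
##                                {'auc': 0.88, 'accuracy': 0.85}])
##   ranks = mean_rank(data)  ## columns returned by the R mean_rank function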
|
|
import sys
import os
from PyQt4.QtSql import *
import ui_forms.ui_receiveform
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from sqlalchemy import *
from sqlalchemy.orm import *
from databaseschema import *
import modelsandviews
import genericdelegates
from functions import *
localTITLE = "Receiving"
#==============================================================================
### Model Setup ==============
(ITEM, DESCRIPTION, QTY, PRICE, SHIPPING, COST, TOTAL, CAD_TOTAL, MEMO) = range(9)
class ReceivingDetailModel(QAbstractTableModel):
### Model Initializer ==============
def __init__(self, session, parent=None):
super(ReceivingDetailModel, self).__init__(parent)
self.records = []
self.records.append(ReceiveRMD())
self.shippingRate = 0
self.currencyRate = 1
self.journal_date = None
self.session = session
def setDate(self, date):
self.journal_date = date
    ### Base Implementations ==============
def rowCount(self, index=QModelIndex()):
return len(self.records)
def columnCount(self, index=QModelIndex()):
return 9
def headerData(self, section, orientation, role):
if role != Qt.DisplayRole:
return QVariant()
if orientation == Qt.Horizontal:
if section == ITEM:
return QVariant("Item")
elif section == DESCRIPTION:
return QVariant("Description")
elif section == QTY:
return QVariant('Qty (KG,LT)')
elif section == PRICE:
return QVariant("Price")
elif section == SHIPPING:
return QVariant("Shipping")
elif section == COST:
return QVariant("Cost")
elif section == TOTAL:
return QVariant("Total")
elif section == MEMO:
return QVariant("Memo")
elif section == CAD_TOTAL:
return QVariant('CAD Total')
return QVariant(section + 1)
def flags(self, index):
flag = QAbstractTableModel.flags(self, index)
if index.column() not in (COST, SHIPPING, CAD_TOTAL):
flag |= Qt.ItemIsEditable
return flag
def data(self, index, role=Qt.DisplayRole):
if not index.isValid() or not (0 <= index.row() < len(self.records)):
return QVariant()
record = self.records[index.row()]
column = index.column()
if role == Qt.DisplayRole:
if column == ITEM:
item_no = dLookup(BOM.bom_no, BOM.bom_id==record.bom_id)
return QVariant(item_no)
elif column == DESCRIPTION:
return QVariant(record.rmd_desc)
elif column == QTY:
if not record.qty:
return QVariant(record.qty)
return QVariant(round(record.qty, 4))
elif column == PRICE:
if not record.cost_native:
return QVariant(record.cost_native)
return QVariant(round(getType(record.cost_native), 4))
elif column == SHIPPING:
if not record.rmd_shipping:
return QVariant(record.rmd_shipping)
return QVariant(round(getType(record.rmd_shipping), 2))
elif column == COST:
if not record.cost:
return QVariant(record.cost)
return QVariant(round(getType(record.cost), 2))
elif column == TOTAL:
if not record.native_total:
return QVariant(record.native_total)
return QVariant(round(getType(record.native_total), 2))
elif column == CAD_TOTAL:
if not record.total:
return QVariant(record.total)
return QVariant(round(getType(record.total), 2))
elif column == MEMO:
return QVariant(record.rmd_memo)
return QVariant()
def setData(self, index, value, role=Qt.EditRole):
if index.isValid() and role == Qt.EditRole:
record = self.records[index.row()]
column = index.column()
if column == ITEM:
item = value.toInt()[0]
record.bom_id = item
record.rmd_desc = dLookup(BOM.bom_desc,
BOM.bom_id==item)
record.cost_native = dLookup(BOM.bom_cost,
BOM.bom_id==item)
self.checkPrice(record)
elif column == DESCRIPTION:
record.rmd_desc = value.toString()
elif column == QTY:
qty, ok = value.toFloat()
if not ok:
return False
record.qty = qty
record.rmd_shipping = self.calcShipping(record)
record.cost = self.calcCost(record)
record.native_total = self.calcNTotal(record)
record.total = self.calcTotal(record)
elif column == PRICE:
price, ok = value.toFloat()
if not ok:
return False
record.cost_native = price
record.rmd_shipping = self.calcShipping(record)
record.cost = self.calcCost(record)
record.native_total = self.calcNTotal(record)
record.total = self.calcTotal(record)
self.checkPrice(record)
elif column == TOTAL:
total, ok = value.toFloat()
if not ok:
                    return False
record.cost_native = self.calcPrice(record, total)
record.rmd_shipping = self.calcShipping(record)
record.cost = self.calcCost(record)
record.native_total = self.calcNTotal(record)
record.total = self.calcTotal(record)
self.checkPrice(record)
elif column == MEMO:
record.rmd_memo = value.toString()
self.emit(SIGNAL("dataChanged(QModelIndex, QModelIndex)"), index, index)
return True
return False
def insertRows(self, position, rows=1, index=QModelIndex()):
self.beginInsertRows(QModelIndex(), position, position + rows - 1)
for row in range(rows):
self.records.insert(position + row + 1, ReceiveRMD())
self.endInsertRows()
return True
def removeRows(self, position, rows=1, index=QModelIndex()):
self.beginRemoveRows(QModelIndex(), position, position + rows - 1)
self.records = self.records[:position] + self.records[position + rows:]
self.endRemoveRows()
return True
### Data and Calculations ==============
def calcPrice(self, record, total):
qty = getType(record.qty)
return abs(total / qty)
def calcCost(self, record):
price = getType(record.cost_native)
shipping = getType(record.rmd_shipping)
return price + shipping
def calcShipping(self, record):
price = getType(record.cost_native)
rate = self.shippingRate
return price * rate
def calcNTotal(self, record):
price = getType(record.cost_native)
shipping = getType(record.rmd_shipping)
qty = getType(record.qty)
return (price + shipping) * qty
def calcTotal(self, record):
price = getType(record.cost_native)
shipping = getType(record.rmd_shipping)
qty = getType(record.qty)
exRate = getType(nonZero(self.currencyRate, 1))
return (price + shipping) * qty * exRate
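    ## Worked example of the calculations above (hypothetical numbers): with
    ## cost_native = 10.00, shippingRate = 0.05, qty = 3 and currencyRate = 1.30:
    ##   rmd_shipping = 10.00 * 0.05        = 0.50
    ##   cost         = 10.00 + 0.50        = 10.50
    ##   native_total = (10.00 + 0.50) * 3  = 31.50
    ##   total        = 31.50 * 1.30        = 40.95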
    def updateDetailModel(self, shipping=1.0, currency=1):
assert isinstance(shipping, float)
self.beginResetModel()
for record in self.records:
if record.bom_id:
record.rmd_shipping = getType(record.cost_native) * shipping
record.cost = self.calcCost(record)
record.native_total = self.calcNTotal(record)
record.total = self.calcTotal(record)
self.shippingRate = shipping
self.currencyRate = currency
self.endResetModel()
def getSumTotal(self):
return sum(map(calculate_total, self.records), 0.0)
def checkPrice(self, record):
if not self.journal_date:
return
where = Settings.setting=='check_price'
        on = dLookup(Settings.bool_value, where) # // this returns a string, so eval() is used to convert it to a boolean
if not eval(on):
return
price_query = self.session.query(ReceiveRMD.id, ReceiveRMD.bom_id, ReceiveRMD.cost_native, JournalHeader.journal_date) \
.join(JournalHeader) \
.filter(JournalHeader.journal_date<=self.journal_date) \
.filter(ReceiveRMD.bom_id==record.bom_id) \
.order_by(JournalHeader.journal_date.desc()).first()
if not price_query:
return
price_diff = dLookup(Settings.value_1, where)
price_diff = float(getType(price_diff))
price = float(getType(price_query[2]))
if abs(price - float(getType(record.cost_native))) >= price_diff:
            QMessageBox.information(None, 'Check Price', 'Last price I have for this item is: %s,\n' \
                                    'please make sure your price is correct' % price,
                                    QMessageBox.Ok)
### Operations ==============
def clear(self):
self.beginResetModel()
self.records = []
self.records.append(ReceiveRMD())
self.endResetModel()
def save(self, journal_id, date, journal_type='Bill'):
#// update price on bom_table if setting is on
update_price = dLookup(Settings.bool_value, Settings.setting=='update_price')
if eval(update_price):
for record in self.records:
self.session.query(BOM).filter(BOM.bom_id==record.bom_id).update({'bom_cost': unicode(record.cost_native)})
records_ = []
adjustments = []
for record in self.records:
if record.bom_id:
bom_id = int(record.bom_id)
rmd_desc = str(record.rmd_desc)
qty = float(record.qty)
price = unicode(record.cost_native)
cost = unicode(record.cost)
shipping = unicode(record.rmd_shipping)
rmd_memo = str(record.rmd_memo) if record.rmd_memo else ""
total = unicode(record.total)
native_total = unicode(record.native_total)
records_ += [ReceiveRMD(journal_id, bom_id, rmd_desc, qty, cost, price, shipping, rmd_memo, total, native_total)]
if journal_type == 'Credit':
cost = None
adjRmd = adjustAvgCost(self.session, bom_id, str(date), journal_id, cost)
if adjRmd:
adjustments += adjRmd
return (records_, adjustments)
def load(self, objectList):
self.beginResetModel()
self.records = []
self.endResetModel()
for item in objectList:
bom_id = item.bom_id
rmd_desc = item.rmd_desc
qty = float(item.qty)
rmd_shipping = float(nonZero(item.rmd_shipping, 0))
cost = float(item.cost)
cost_native = cost * (1 - (rmd_shipping / cost))
total = float(getType(item.total))
native_total = float(getType(item.native_total))
memo = item.rmd_memo
self.records.append(ReceiveRMD(None, bom_id, rmd_desc, qty, cost, cost_native, rmd_shipping, memo, total, native_total))
self.records.append(ReceiveRMD())
def copy(self, indexList):
clipboard = QApplication.clipboard()
clipText = QString()
indexList.sort()
previous = indexList[0]
for current in indexList:
text = self.data(current, Qt.DisplayRole).toString()
if current.row() != previous.row():
clipText.append('\n')
else:
clipText.append('\t')
clipText.append(text)
previous = current
clipText.remove(0, 1)
clipboard.setText(clipText)
def paste(self, position, index=QModelIndex()):
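        ## Clipboard text is parsed in the layout copy() produces: one row per line,
        ## columns separated by tabs, with the item number in the first column.
        ## A missing description falls back to the BOM description; missing qty and
        ## price are left as None.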
myList = []
clipboard = QApplication.clipboard()
text = clipboard.text()
rows = text.split('\n')
for rec in rows:
col = rec.split('\t')
bom_id = dLookup(BOM.bom_id, BOM.bom_no==str(col[0]))
if bom_id:
desc = col[1] if len(col) >= 2 else dLookup(BOM.bom_desc, BOM.bom_id==bom_id)
qty = float(getType(col[2])) if len(col) >= 3 else None
price = float(getType(col[3])) if len(col) >= 4 else None
myList += [ReceiveRMD(None, bom_id, desc, qty, price)]
rowCount = len(myList)
self.beginInsertRows(QModelIndex(), position, position + rowCount - 1)
for row in range(rowCount):
self.records.insert(position + row, myList[row])
self.endInsertRows()
self.updateDetailModel(1.0)
return True
#==============================================================================
### Form Setup ==============
def calculate_total(record):
assert isinstance(record, ReceiveRMD)
total = record.native_total
if isinstance(total, QString):
total = record.native_total.toFloat()[0]
elif not total:
total = 0
return float(total)
class ReceiveForm(QDialog, ui_forms.ui_receiveform.Ui_ReceiveForm):
### Initializer =============
def __init__(self, supplierModel, bomModel, parent=None):
super(ReceiveForm, self).__init__(parent)
self.setupUi(self)
self.session = Session()
self.my_parent = parent
self.supplierModel = supplierModel
self.supcom.setVisible(False)
self.supplier_comboBox = modelsandviews.SupplierComboBox(self.supplierModel)
self.supplier_comboBox.setMaximumSize(QSize(197, 25))
self.supplier_comboBox.setMinimumSize(QSize(197, 25))
self.gridLayout_2.addWidget(self.supplier_comboBox, 0, 1, 1, 2)
self.setTabOrder(self.note_textEdit, self.supplier_comboBox)
self.date_dateEdit.setDate(self.my_parent.getDate())
self.curr_lineEdit.setText("1")
self.amount_lineEdit.setText('0.00')
self.receive_radioButton.setChecked(True)
self.export_checkBox.setChecked(True)
self.supplier_comboBox.setFocus()
self.itemModel = bomModel
self.itemView = modelsandviews.SupplierView(self.supplierModel)
self.detailModel = ReceivingDetailModel(self.session)
self.receive_tableView.setModel(self.detailModel)
delegate = genericdelegates.GenericDelegate(self)
delegate.insertDelegate(ITEM, genericdelegates.ComboDelegate(self.itemModel, True))
delegate.insertDelegate(DESCRIPTION, genericdelegates.PlainTextDelegate())
delegate.insertDelegate(QTY, genericdelegates.NumberDelegate())
delegate.insertDelegate(PRICE, genericdelegates.NumberDelegate())
delegate.insertDelegate(MEMO, genericdelegates.PlainTextDelegate())
tblView = self.receive_tableView
tblView.setItemDelegate(delegate)
tblView.setColumnWidth(ITEM, 50)
tblView.setColumnWidth(DESCRIPTION, 200)
tblView.setColumnWidth(QTY, 70)
tblView.setColumnWidth(PRICE, 70)
tblView.setColumnWidth(SHIPPING, 70)
tblView.setColumnWidth(COST, 100)
tblView.setColumnWidth(TOTAL, 100)
tblView.setColumnWidth(MEMO, 200)
self.receive_tableView.setColumnHidden(CAD_TOTAL, True)
tblView.horizontalHeader().setStretchLastSection(True)
self.detailModel.dataChanged.connect(self.autoAddRow)
self.shipping_lineEdit.editingFinished.connect(self.updateDetailModel)
self.curr_lineEdit.editingFinished.connect(self.updateDetailModel)
self.detailModel.dataChanged.connect(self.updateSumTotal)
self.billno_lineEdit.editingFinished.connect(self.checkBillNo)
self.supplier_comboBox.currentIndexChanged.connect(self.changeLayout)
self.date_dateEdit.dateChanged.connect(self.setModelDate)
self.date_dateEdit.dateChanged.connect(self.setParentDate)
self.newButton.clicked.connect(self.clear)
self.saveButton.clicked.connect(self.save)
self.deleteButton.clicked.connect(self.delete)
self.calcButton.clicked.connect(self.updateDetailModel)
self.findButton.clicked.connect(self.find)
self.closeButton.clicked.connect(self.accept)
self.receive_tableView.doubleClicked.connect(self.findBomID)
self.setupConnection()
self.setModelDate()
self.dirty = False
self.editing = False
self.record_id = None
self.current_record = None
### Form Behaviour =============
    def setupConnection(self):
        """ connect every widget on the form to the data-changed handler,
        so any edit marks the form as dirty """
widgets = self.findChildren(QWidget)
for widget in widgets:
if isinstance(widget, (QLineEdit, QTextEdit)):
self.connect(widget, SIGNAL("textEdited(QString)"), self.setDirty)
elif isinstance(widget, QComboBox):
self.connect(widget, SIGNAL("activated(int)"), self.setDirty)
elif isinstance(widget, QCheckBox):
self.connect(widget, SIGNAL("stateChanged(int)"), self.setDirty)
def setDirty(self):
self.updateDetailModel()
self.dirty = True
self.setWindowTitle("%s - Editing..." % localTITLE)
def setParentDate(self):
date = self.date_dateEdit.date().toPyDate()
self.my_parent.setDate(date)
def setModelDate(self):
date = self.date_dateEdit.date().toPyDate()
self.detailModel.setDate(date)
def changeLayout(self):
supplier = str(self.supplier_comboBox.currentText())
if not supplier:
cur = 'CAD'
else:
cur = dLookup(Suppliers.currency, Suppliers.supplier_name==supplier)
if cur == 'USD':
self.receive_tableView.setColumnHidden(CAD_TOTAL, False)
else:
self.receive_tableView.setColumnHidden(CAD_TOTAL, True)
def contextMenuEvent(self, event):
menu = QMenu(self)
if self.receive_tableView.hasFocus():
copyAction = menu.addAction('Copy', QObject, 'Ctrl+C')
pasteAction = menu.addAction('Paste', QObject, 'Ctrl+V')
insertAction = menu.addAction("Insert Line", QObject, "Ctrl+I")
deleteAction = menu.addAction("Delete Line", QObject, "Ctrl+D")
copyAction.triggered.connect(self.copy)
pasteAction.triggered.connect(self.paste)
self.connect(insertAction, SIGNAL("triggered()"), self.insertRow)
self.connect(deleteAction, SIGNAL("triggered()"), self.removeRow)
addActions(self, self.receive_tableView, (insertAction, deleteAction))
menu.exec_(event.globalPos())
def copy(self):
if self.detailModel.rowCount() <= 1:
return
selectedItems = self.receive_tableView.selectionModel().selectedIndexes()
self.detailModel.copy(selectedItems)
def paste(self):
row = self.receive_tableView.currentIndex().row()
self.detailModel.paste(row)
self.updateSumTotal()
def autoAddRow(self):
view = self.receive_tableView
row = view.currentIndex().row()
if self.detailModel.rowCount() == row + 1:
self.insertRow()
def insertRow(self):
view = self.receive_tableView
index = view.currentIndex()
row = index.row()
self.detailModel.insertRows(row)
view.setFocus()
view.setCurrentIndex(index)
def removeRow(self):
view = self.receive_tableView
rowsSelected = view.selectionModel().selectedRows()
if not rowsSelected:
row = view.currentIndex().row()
rows = 1
else:
for i in rowsSelected:
row = i.row()
rows = len(rowsSelected)
row = row - rows + 1
self.detailModel.removeRows(row, rows)
if self.detailModel.rowCount() < 1:
self.insertRow()
def findBomID(self):
row = self.receive_tableView.currentIndex().row()
index = self.detailModel.index(row, 0)
self.my_parent.findItem(0, (self, index), localTITLE)
def enterBOMNo(self, index, bomID):
i = 0
ok = True
while ok:
myIndex = self.detailModel.index(index.row() + i, index.column())
bom = self.detailModel.data(myIndex).toString()
if not bom:
ok = False
i += 1
self.receive_tableView.setCurrentIndex(myIndex)
self.detailModel.setData(myIndex, QVariant(bomID))
### Data and Calculations =============
def updateDetailModel(self):
shipping_str = str(self.shipping_lineEdit.text()) if not self.shipping_lineEdit.text() == "" else 0
gst_str = str(self.gst_lineEdit.text()) if not self.gst_lineEdit.text() == "" else 0
qst_str = str(self.qst_lineEdit.text()) if not self.qst_lineEdit.text() == "" else 0
total_str = str(self.total_lineEdit.text()) if not self.total_lineEdit.text() == "" else 0
currency_str = str(self.curr_lineEdit.text()) if not self.curr_lineEdit.text() == "" else 1
try:
            shipping = float(shipping_str) / nonZero(
                nonZero(float(total_str), 0)
                - nonZero(float(gst_str), 0)
                - nonZero(float(qst_str), 0)
                - nonZero(float(shipping_str), 0), 1)
currency = float(currency_str)
except ValueError:
return
self.detailModel.updateDetailModel(shipping, currency)
self.updateSumTotal()
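    ## Worked example of the shipping proration above (hypothetical numbers): with a
    ## bill total of 115.00, GST 5.00, QST 10.00 and freight of 20.00, the goods value
    ## is 115 - 5 - 10 - 20 = 80.00, so the rate passed to the detail model is
    ## 20 / 80 = 0.25, i.e. each line absorbs shipping worth 25% of its price.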
def updateSumTotal(self):
sum_total = QString('%L1').arg(self.detailModel.getSumTotal(), 0, 'f', 2)
self.amount_lineEdit.setText(sum_total)
### Operations =============
def reject(self):
self.accept()
def accept(self):
if self.dirty:
answer = QMessageBox.question(self, "Editing - %s" % localTITLE, "Would you like to save your data?",
QMessageBox.Yes| QMessageBox.No| QMessageBox.Cancel)
if answer == QMessageBox.Cancel:
return
elif answer == QMessageBox.No:
QDialog.accept(self)
elif answer == QMessageBox.Yes:
self.save()
QDialog.accept(self)
self.my_parent.formClosed()
def checkBillNo(self):
if not self.billno_lineEdit.isModified():
return
self.billno_lineEdit.setModified(False)
billNo = str(self.billno_lineEdit.text())
supplier = str(self.supplier_comboBox.currentText())
supplier_id = dLookup(Suppliers.supplier_id, Suppliers.supplier_name==supplier)
        ckBillNo = dLookup(ReceiveHeader.journal_no,
                           and_(ReceiveHeader.journal_no==billNo, ReceiveHeader.supplier_id==supplier_id))
if ckBillNo:
QMessageBox.information(self, localTITLE, 'Bill Number already exists for this vendor.', buttons=QMessageBox.Ok)
def save(self):
#// Prepare the items to be recorded
journal_id = self.record_id
journal_type = 'Bill'
if self.return_radioButton.isChecked():
journal_type = 'Credit'
qDate = self.date_dateEdit.date()
journal_date = qDate.toPyDate()
journal_no = str(self.billno_lineEdit.text())
journal_memo = str(self.note_textEdit.toPlainText())
supplier = str(self.supplier_comboBox.currentText())
supplier_id = dLookup(Suppliers.supplier_id, Suppliers.supplier_name==supplier)
journal_total = unicode(self.total_lineEdit.text())
currency_rate = unicode(self.curr_lineEdit.text())
shipping = unicode(self.shipping_lineEdit.text())
gst = unicode(self.gst_lineEdit.text())
qst = unicode(self.qst_lineEdit.text())
export = self.export_checkBox.isChecked()
modified_date = QDateTime().currentDateTime().toPyDateTime()
log_memo = 'Created'
#// do some checks
if not supplier_id:
QMessageBox.information(self, 'Save bill - %s' % localTITLE, 'Please specify a supplier.', QMessageBox.Ok)
return
if self.detailModel.rowCount() <= 1:
QMessageBox.information(self, 'Save bill - %s' % localTITLE, 'No details found.', QMessageBox.Ok)
return
detailTotal = float(unicode(self.amount_lineEdit.text().replace(',','')))
detailTotal = float(detailTotal + getType(gst) + getType(qst))
totalDiff = float(getType(journal_total)) - detailTotal
if abs(totalDiff) > .05:
QMessageBox.information(self, 'Save bill - %s' % localTITLE, "Total doesn't match.")
return
#// do differently if new record or old record
if self.editing:
#// check for closing date issues
old_date = dLookup(JournalHeader.journal_date, JournalHeader.journal_id==self.record_id)
if not closingDate(old_date):
return
if not closingDate(journal_date):
return
log_memo = 'Modified'
self.current_record = self.session.query(ReceiveHeader).filter(ReceiveHeader.journal_id==journal_id)
self.current_record.update({'journal_type': journal_type, 'journal_date': journal_date, 'journal_no': journal_no,
'journal_memo': journal_memo, 'supplier_id': supplier_id, 'journal_total': journal_total,
'currency_rate': currency_rate, 'shipping': shipping, 'gst': gst, 'qst': qst,
'export': export, 'modified_date': modified_date})
self.session.query(ReceiveRMD).filter(ReceiveRMD.journal_id==self.record_id).delete()
else:
if not closingDate(journal_date):
return
journal_id = dMax(JournalHeader.journal_id) + 1
self.session.add(ReceiveHeader(journal_id, journal_type, journal_date, journal_no, journal_memo,
supplier_id, journal_total, currency_rate, shipping, gst, qst, modified_date, export))
details, adjustments = self.detailModel.save(journal_id, journal_date, journal_type)
self.session.add_all(details)
self.session.add_all(adjustments)
self.session.add(Logs(journal_id, self.my_parent.user_id, modified_date, log_memo))
self.sendToDB()
self.editing = True
self.record_id = journal_id
self.dirty = False
self.setWindowTitle('%s - (Data Saved)' % localTITLE)
def recall(self, journal_id):
        # // first find out if the user is in the middle of entering data.
if self.dirty:
answer = QMessageBox.question(self, "Editing - %s" % localTITLE, "Would you like to save your data?",
QMessageBox.Yes| QMessageBox.Discard| QMessageBox.Cancel)
if answer == QMessageBox.Cancel:
return
elif answer == QMessageBox.Yes:
self.save()
self.record_id = journal_id
self.current_record = self.session.query(ReceiveHeader).filter(ReceiveHeader.journal_id==journal_id)
for field in self.current_record:
text = dLookup(Suppliers.supplier_name, Suppliers.supplier_id==field.supplier_id)
if text:
index = self.supplier_comboBox.findText(text, Qt.MatchExactly)
self.supplier_comboBox.setCurrentIndex(index)
self.note_textEdit.setText(field.journal_memo)
self.curr_lineEdit.setText(str(field.currency_rate))
self.transid_lineEdit.setText(str(journal_id))
self.date_dateEdit.setDate(field.journal_date)
self.billno_lineEdit.setText(str(field.journal_no))
self.shipping_lineEdit.setText(str(field.shipping))
self.total_lineEdit.setText(str(field.journal_total))
self.receive_radioButton.setChecked(True)
if field.journal_type == 'Credit':
self.return_radioButton.setChecked(True)
self.gst_lineEdit.setText(str(field.gst))
self.qst_lineEdit.setText(str(field.qst))
self.export_checkBox.setChecked(field.export)
objectList = self.session.query(ReceiveRMD).filter(ReceiveRMD.journal_id==journal_id)
self.detailModel.load(objectList)
self.updateSumTotal()
self.editing = True
def delete(self):
if not self.record_id:
return
#// check for closing date issues
old_date = dLookup(JournalHeader.journal_date, JournalHeader.journal_id==self.record_id)
if not closingDate(old_date):
return
        answer = QMessageBox.question(self, "Delete - %s" % localTITLE, "Are you sure you " \
                                      "want to delete bill: %s, %s" % (self.supplier_comboBox.currentText(),
                                                                       self.billno_lineEdit.text()),
QMessageBox.Yes| QMessageBox.No, QMessageBox.NoButton)
if answer == QMessageBox.No:
return
self.session.query(ReceiveRMD).filter(ReceiveRMD.journal_id==self.record_id).delete()
self.current_record.delete()
log_memo = 'Deleted - Supplier: %s, Date: %s, Bill: %s Amount %s' % (str(self.supplier_comboBox.currentText()),
self.date_dateEdit.date().toPyDate(),
str(self.billno_lineEdit.text()),
str(self.total_lineEdit.text()))
self.session.add(Logs(self.record_id, self.my_parent.user_id, QDateTime().currentDateTime().toPyDateTime(), log_memo))
self.sendToDB()
self.clear()
def sendToDB(self):
try:
            self.session.flush()
self.session.commit()
except Exception, e:
self.session.rollback()
raise e
def find(self):
self.my_parent.findForm()
def clear(self):
widgets = self.findChildren(QWidget)
for widget in widgets:
if isinstance(widget, (QLineEdit, QTextEdit)):
widget.clear()
elif isinstance(widget, QComboBox):
widget.setCurrentIndex(-1)
elif isinstance(widget, QCheckBox):
widget.setChecked(False)
elif isinstance(widget, QLabel):
if widget.objectName()[:2] == 'v_':
widget.clear()
self.detailModel.clear()
if defaultDate() == 'current':
self.date_dateEdit.setDate(QDate.currentDate())
self.editing = False
self.dirty = False
self.setWindowTitle(localTITLE)
if __name__ == '__main__':
app = QApplication(sys.argv)
setupDatabase("Production.sqlite")
form = ReceiveForm()
form.show()
app.exec_()
|
|
import os
import time
import numpy as np
import OpenGL.GL as gl
import OpenGL.GLU as glu
import OpenGL.GLUT as glut
import Image
import PIL.ImageOps as iops
from fos.core.utils import list_indices as lind
from os.path import join as pjoin
import fos.core.pyramid as pyramid
from dipy.core import track_metrics as tm
import fos.core.collision as cll
#import fos.core.plots as plots
global angle_table
global anglex
global angley
global anglez
global angle_table_index
MS=1000
def make_angle_table(lists):
#angle_table = make_angle_table([[[0,0,0],[90,0,0],30],[[90,0,0],[90,90,0],30]])
table = []
for list in lists:
start,finish,n = list
sx,sy,sz = start
fx,fy,fz = finish
cx = np.linspace(sx,fx,n)
cy = np.linspace(sy,fy,n)
cz = np.linspace(sz,fz,n)
if table == []:
table = np.column_stack((cx,cy,cz))
else:
table = np.vstack((table,np.column_stack((cx,cy,cz))))
print 'angle table has length %d' % table.shape[0]
return table
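## Worked example: make_angle_table([[[0,0,0],[90,0,0],3]]) interpolates three
## frames, giving rows [0,0,0], [45,0,0], [90,0,0] (x rotating from 0 to 90 degrees).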
'''
angle_table = make_angle_table([
[[0,0,90],[90,0,90],200],
[[90,0,90],[90,90,90],200],
[[90,90,90],[90,90,360],200]
])
'''
angle_table = make_angle_table([
[[-90,0,180],[-90,0,180],130],
[[-90,0,180],[-90,0,180+360],260],
[[-90,0,180+360],[-90,0,180+360],900],
[[-90,0,1800],[-90,0,36000],17100]
])
'''
angle_table = make_angle_table([
[[-90,0,180],[-90,0,180],130],
[[-90,0,180],[0,0,0],260],
[[0,0,0],[-90,0,1800],900],
[[-90,0,1800],[-90,0,36000],17100]
])
'''
'''
angle_table = make_angle_table([
#[[0,0,0],[0,0,0],50],
[[0,0,0],[-90,0,1800],900],
[[-90,0,1800],[-90,0,36000],17100]
])
'''
'''
angle_table = make_angle_table([[[0,0,0],[-90,0,0],200],
[[-90,0,0],[-90,-90,0],200],
[[-90,-90,0],[-90,-90,90],200],
[[-90,-90,90],[0,-90,-90],400]])
'''
angle_table_index = 0
anglex = 0.
angley = 0.
anglez = 0.
data_path = pjoin(os.path.dirname(__file__), 'data')
class Ghost(object):
def __init__(self):
pass
def init(self):
pass
def display(self):
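        ## Ghost draws nothing; its only job is to advance the shared
        ## angle_table_index once per frame (clamped at the last row), so actors
        ## that read the global angle table (e.g. Tracks with an angle table)
        ## pick up the new orientation on their own display() call.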
global angle_table_index
global angle_table
angle_table_index += 1
if angle_table_index >= angle_table.shape[0]:
angle_table_index = angle_table.shape[0] - 1
class Empty(object):
def __init__(self):
self.slots = None
self.time = 0
self.near_pick = None
self.far_pick = None
def init(self):
pass
def display(self):
pass
'''
now = self.time
for s in self.slots:
if now >= self.slots[s]['slot'][0] and now <=self.slots[s]['slot'][1]:
self.slots[s]['actor'].near_pick = self.near_pick
self.slots[s]['actor'].far_pick = self.far_pick
self.slots[s]['actor'].display()
'''
#=======================================================
class Tracks(object):
def __init__(self,fname,ang_table=None,colormap=None, line_width=3., shrink=None,subset=None,data_ext=None):
self.position = (0,0,0)
self.fname = fname
self.manycolors = True
self.bbox = None
self.list_index = None
self.affine = None
self.data = None
self.list_index = None
self.rot_angle = 0
self.colormap = None
self.min = None
self.max = None
self.mean = None
self.material_color = False
self.fadeout = False
self.fadein = False
self.fadeout_speed = 0.
self.fadein_speed = 0.
self.min_length = 20.
self.data_ext = data_ext
self.angle = 0.
self.angular_speed = .5
self.line_width = line_width
self.opacity = 1.
self.near_pick = None
self.far_pick = None
self.near_pick_prev = None
self.far_pick_prev = None
self.picked_track = None
self.pick_color = [1,1,0]
self.brain_color = [1,1,1]
self.yellow_indices = None
self.dummy_data = False
if subset != None:
self.data_subset = subset #[0,20000]#None
else:
self.data_subset = None
self.orbit_demo = False
self.orbit_anglez = 0.
self.orbit_anglez_rate = 0.
self.orbit_anglex = 0.
self.orbit_anglex_rate = 0.
self.orbit_angley = 0.
self.orbit_angley_rate = 0.
self.angle_table = ang_table
self.angle_table_index = 0
self.shrink = shrink
self.picking_example = False
if self.data_ext!=None:
self.data=self.data_ext
else:
import dipy.io.trackvis as tv
lines,hdr = tv.read(self.fname)
ras = tv.aff_from_hdr(hdr)
self.affine=ras
tracks = [l[0] for l in lines]
if self.yellow_indices != None :
tracks = [t for t in tracks if tm.length(t) > 20]
print 'tracks loaded'
#self.data = [100*np.array([[0,0,0],[1,0,0],[2,0,0]]).astype(np.float32) ,100*np.array([[0,1,0],[0,2,0],[0,3,0]]).astype(np.float32)]#tracks[:20000]
if self.dummy_data:
self.data = [100*np.array([[0,0,0],[1,0,0],[2,0,0]]).astype(np.float32) ,100*np.array([[0,1,0],[0,2,0],[0,3,0]]).astype(np.float32)]
if self.data_subset!=None:
self.data = tracks[self.data_subset[0]:self.data_subset[1]]
else:
self.data = tracks
data_stats = np.concatenate(tracks)
self.min=np.min(data_stats,axis=0)
self.max=np.max(data_stats,axis=0)
self.mean=np.mean(data_stats,axis=0)
if self.shrink != None:
self.data = [ self.shrink*t for t in self.data]
del data_stats
del lines
def init(self):
if self.material_color:
self.material_colors()
else:
self.multiple_colors()
def display(self):
if self.near_pick!= None:
#print self.near_pick
if np.sum(np.equal(self.near_pick, self.near_pick_prev))< 3:
self.process_picking(self.near_pick, self.far_pick)
self.near_pick_prev = self.near_pick
self.far_pick_prev = self.far_pick
x,y,z=self.position
if self.orbit_demo and self.angle_table == None:
#print('Yo%f',self.position[0])
gl.glPushMatrix()
gl.glTranslatef(x,y,z)
gl.glPushMatrix()
#gl.glTranslatef(x,y,z)
self.orbit_anglex+=self.orbit_anglex_rate
gl.glRotatef(self.orbit_anglex,1,0,0)
#'''
gl.glPushMatrix()
self.orbit_angley+=self.orbit_angley_rate
gl.glRotatef(self.orbit_angley,0,1,0)
gl.glPushMatrix()
self.orbit_anglez+=self.orbit_anglez_rate
#x,y,z=self.position
gl.glRotatef(self.orbit_anglez,0,0,1)
#gl.glTranslatef(x,y,z)
#'''
#gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_LINE)
gl.glCallList(self.list_index)
gl.glFinish()
gl.glPopMatrix()
gl.glPopMatrix()
gl.glPopMatrix()
gl.glPopMatrix()
elif self.orbit_demo == True and self.angle_table != None:
gl.glPushMatrix()
#print angle_table
#print table_ind
global angle_table_index
global angle_table
table_ind=angle_table_index
#print 'ti',table_ind
anglex=angle_table[table_ind,0]
#print anglex
gl.glRotatef(anglex,1,0,0)
gl.glPushMatrix()
angley=angle_table[table_ind,1]
gl.glRotatef(angley,0,1,0)
gl.glPushMatrix()
anglez=angle_table[table_ind,2]
gl.glRotatef(anglez,0,0,1)
gl.glTranslate(x,y,z)
gl.glCallList(self.list_index)
gl.glFinish()
gl.glPopMatrix()
gl.glPopMatrix()
gl.glPopMatrix()
'''
angle_table_index += 1
if angle_table_index >= angle_table.shape[0]:
angle_table_index = angle_table.shape[0] - 1
'''
'''
gl.glPushMatrix()
gl.glRotatef(self.angle_table[self.angle_table_index,0],1,0,0)
#x,y,z = self.position
gl.glPushMatrix()
gl.glRotatef(self.angle_table[self.angle_table_index,1],0,1,0)
gl.glPushMatrix()
gl.glRotatef(self.angle_table[self.angle_table_index,2],0,0,1)
gl.glTranslate(x,y,z)
gl.glCallList(self.list_index)
gl.glFinish()
gl.glPopMatrix()
gl.glPopMatrix()
gl.glPopMatrix()
self.angle_table_index += 1
if self.angle_table_index >= self.angle_table.shape[0]:
self.angle_table_index = self.angle_table.shape[0] - 1
'''
else:
gl.glCallList(self.list_index)
if self.picked_track != None:
self.display_one_track(self.picked_track)
if self.yellow_indices != None:
for i in self.yellow_indices:
self.display_one_track(i)
gl.glFinish()
def process_picking(self,near,far):
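        ## Two-pass pick: first take the track with the smallest distance to the
        ## near->far picking segment, then, among all tracks closer than 1e-3, keep
        ## the one with the smallest second info value (presumably the position
        ## along the segment, i.e. the track nearest the viewer).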
print('process picking')
min_dist=[cll.mindistance_segment2track(near,far,xyz) for xyz in self.data]
min_dist=np.array(min_dist)
#print min_dist
self.picked_track=min_dist.argmin()
print 'min index',self.picked_track
min_dist_info=[cll.mindistance_segment2track_info(near,far,xyz) for xyz in self.data]
A = np.array(min_dist_info)
dist=10**(-3)
iA=np.where(A[:,0]<dist)
minA=A[iA]
print 'minA ', minA
miniA=minA[:,1].argmin()
print 'final min index ',iA[0][miniA]
self.picked_track=iA[0][miniA]
def display_one_track(self,track_index,color4=np.array([1,1,0,1],dtype=np.float32)):
x,y,z = self.position
if self.orbit_demo and self.angle_table == None:
#print('Yo%f',self.position[0])
gl.glPushMatrix()
gl.glTranslatef(x,y,z)
gl.glPushMatrix()
#gl.glTranslatef(x,y,z)
self.orbit_anglex+=self.orbit_anglex_rate
gl.glRotatef(self.orbit_anglex,1,0,0)
#'''
gl.glPushMatrix()
self.orbit_angley+=self.orbit_angley_rate
gl.glRotatef(self.orbit_angley,0,1,0)
gl.glPushMatrix()
self.orbit_anglez+=self.orbit_anglez_rate
#x,y,z=self.position
gl.glRotatef(self.orbit_anglez,0,0,1)
#gl.glTranslatef(x,y,z)
#'''
#gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_LINE)
#gl.glPushMatrix()
gl.glDisable(gl.GL_LIGHTING)
gl.glEnable(gl.GL_LINE_SMOOTH)
gl.glDisable(gl.GL_DEPTH_TEST)
#gl.glDepthFunc(gl.GL_NEVER)
gl.glEnable(gl.GL_BLEND)
gl.glBlendFunc(gl.GL_SRC_ALPHA,gl.GL_ONE_MINUS_SRC_ALPHA)
gl.glHint(gl.GL_LINE_SMOOTH_HINT,gl.GL_DONT_CARE)
gl.glLineWidth(3.)
gl.glEnableClientState(gl.GL_VERTEX_ARRAY)
gl.glColor4fv(color4)
d=self.data[track_index].astype(np.float32)
gl.glVertexPointerf(d)
gl.glDrawArrays(gl.GL_LINE_STRIP, 0, len(d))
gl.glDisableClientState(gl.GL_VERTEX_ARRAY)
gl.glEnable(gl.GL_LIGHTING)
gl.glEnable(gl.GL_DEPTH_TEST)
#gl.glPopMatrix()
gl.glFinish()
gl.glPopMatrix()
gl.glPopMatrix()
gl.glPopMatrix()
gl.glPopMatrix()
def multiple_colors(self):
from dipy.viz.colormaps import boys2rgb
from dipy.core.track_metrics import mean_orientation, length, downsample
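        ## Each track is compiled into the display list as a GL_LINE_STRIP. When
        ## manycolors is set, the track is downsampled to 6 points and the direction
        ## of its middle segment (normalised) is mapped through the boys2rgb
        ## colormap, so colour encodes local fibre orientation; otherwise the flat
        ## brain_color is drawn with the current opacity.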
colors=np.random.rand(1,3).astype(np.float32)
print colors
self.list_index = gl.glGenLists(1)
gl.glNewList( self.list_index,gl.GL_COMPILE)
#gl.glPushMatrix()
gl.glDisable(gl.GL_LIGHTING)
#!!!gl.glEnable(gl.GL_LINE_SMOOTH)
gl.glDisable(gl.GL_DEPTH_TEST)
#gl.glDepthFunc(gl.GL_NEVER)
gl.glEnable(gl.GL_BLEND)
gl.glBlendFunc(gl.GL_SRC_ALPHA,gl.GL_ONE_MINUS_SRC_ALPHA)
#gl.glBlendFunc(gl.GL_SRC_ALPHA_SATURATE,gl.GL_ONE_MINUS_SRC_ALPHA)
#gl.glBlendFunc(gl.GL_SRC_ALPHA,gl.GL_ONE)
#!!!gl.glHint(gl.GL_LINE_SMOOTH_HINT,gl.GL_DONT_CARE)
#gl.glHint(gl.GL_LINE_SMOOTH_HINT,gl.GL_NICEST)
gl.glLineWidth(self.line_width)
#gl.glDepthMask(gl.GL_FALSE)
gl.glEnableClientState(gl.GL_VERTEX_ARRAY)
for d in self.data:
if length(d)> self.min_length:
#mo=mean_orientation(d)
if self.manycolors:
ds=downsample(d,6)
mo=ds[3]-ds[2]
mo=mo/np.sqrt(np.sum(mo**2))
mo.shape=(1,3)
color=boys2rgb(mo)
color4=np.array([color[0][0],color[0][1],color[0][2],self.opacity],np.float32)
else:
color4=np.array([self.brain_color[0],self.brain_color[1],\
self.brain_color[2],self.opacity],np.float32)
if self.fadein == True:
color4[3] += self.fadein_speed
if self.fadeout == True:
color4[3] -= self.fadeout_speed
gl.glColor4fv(color4)
gl.glVertexPointerf(d)
gl.glDrawArrays(gl.GL_LINE_STRIP, 0, len(d))
gl.glDisableClientState(gl.GL_VERTEX_ARRAY)
#gl.glDisable(gl.GL_BLEND)
gl.glEnable(gl.GL_LIGHTING)
gl.glEnable(gl.GL_DEPTH_TEST)
#gl.glPopMatrix()
gl.glEndList()
def material_colors(self):
self.list_index = gl.glGenLists(1)
gl.glNewList( self.list_index,gl.GL_COMPILE)
gl.glMaterialfv( gl.GL_FRONT_AND_BACK, gl.GL_AMBIENT, [1,1,1,.1] )
gl.glMaterialfv( gl.GL_FRONT_AND_BACK, gl.GL_DIFFUSE, [1,1,1,.1] )
#gl.glMaterialf( gl.GL_FRONT_AND_BACK, gl.GL_SHININESS, 50. )
#gl.glMaterialfv(gl.GL_FRONT_AND_BACK, gl.GL_EMISSION, [1,1,1,1.])
gl.glEnable(gl.GL_LINE_SMOOTH)
gl.glEnable(gl.GL_BLEND)
gl.glBlendFunc(gl.GL_SRC_ALPHA,gl.GL_ONE_MINUS_SRC_ALPHA)
#gl.glMaterialfv( gl.GL_FRONT, gl.GL_SPECULAR, self.specular )
#gl.glMaterialf( gl.GL_FRONT, gl.GL_SHININESS, self.shininess )
#gl.glMaterialfv(gl.GL_FRONT, gl.GL_EMISSION, self.emission)
gl.glEnableClientState(gl.GL_VERTEX_ARRAY)
for d in self.data:
gl.glVertexPointerd(d)
gl.glDrawArrays(gl.GL_LINE_STRIP, 0, len(d))
gl.glDisableClientState(gl.GL_VERTEX_ARRAY)
gl.glEndList()
class TracksModified(object):
def __init__(self,fname,ang_table=None,colormap=None, line_width=3., shrink=None,subset=None,tracks=None,text=None,text_color=np.array([1,0,0])):
self.position = (0,0,0)
self.fname = fname
self.manycolors = True
self.bbox = None
self.list_index = None
self.affine = None
self.data = None
self.list_index = None
self.rot_angle = 0
self.colormap = None
self.text = text
self.min = None
self.max = None
self.mean = None
self.material_color = False
self.fadeout = False
self.fadein = False
self.fadeout_speed = 0.
self.fadein_speed = 0.
self.min_length = 20.
self.angle = 0.
self.angular_speed = .5
self.line_width = line_width
self.opacity = 1.
self.near_pick = None
self.far_pick = None
self.near_pick_prev = None
self.far_pick_prev = None
self.picked_track = None
self.pick_color = [1,1,0]
self.brain_color = [1,1,1]
self.yellow_indices = None
self.dummy_data = False
self.tracks = tracks
if subset != None:
self.data_subset = subset #[0,20000]#None
else:
self.data_subset = None
self.orbit_demo = False
self.orbit_anglez = 0.
self.orbit_anglez_rate = 10.
self.orbit_anglex = 0.
self.orbit_anglex_rate = 2.
self.angle_table = ang_table
self.angle_table_index = 0
self.shrink = shrink
self.picking_example = False
self.partial_colors = False
import dipy.io.trackvis as tv
if self.tracks == None:
lines,hdr = tv.read(self.fname)
ras = tv.aff_from_hdr(hdr)
self.affine=ras
tracks = [l[0] for l in lines]
del lines
else:
tracks = self.tracks
if self.yellow_indices != None :
tracks = [t for t in tracks if tm.length(t) > 20]
print '%d tracks loaded' % len(tracks)
#self.data = [100*np.array([[0,0,0],[1,0,0],[2,0,0]]).astype(np.float32) ,100*np.array([[0,1,0],[0,2,0],[0,3,0]]).astype(np.float32)]#tracks[:20000]
if self.dummy_data:
self.data = [100*np.array([[0,0,0],[1,0,0],[2,0,0]]).astype(np.float32) ,100*np.array([[0,1,0],[0,2,0],[0,3,0]]).astype(np.float32)]
if self.data_subset!=None:
self.data = tracks[self.data_subset[0]:self.data_subset[1]]
else:
self.data = tracks
if self.shrink != None:
self.data = [ self.shrink*t for t in self.data]
data_stats = np.concatenate(tracks)
self.min=np.min(data_stats,axis=0)
self.max=np.max(data_stats,axis=0)
self.mean=np.mean(data_stats,axis=0)
del data_stats
def init(self):
if self.material_color:
self.material_colors()
else:
self.multiple_colors()
def display(self):
if self.near_pick!= None:
#print self.near_pick
if np.sum(np.equal(self.near_pick, self.near_pick_prev))< 3:
self.process_picking(self.near_pick, self.far_pick)
self.near_pick_prev = self.near_pick
self.far_pick_prev = self.far_pick
x,y,z=self.position
if self.orbit_demo and self.angle_table == None:
gl.glPushMatrix()
self.orbit_anglex+=self.orbit_anglex_rate
gl.glRotatef(self.orbit_anglex,1,0,0)
gl.glPushMatrix()
self.orbit_anglez+=self.orbit_anglez_rate
x,y,z=self.position
gl.glRotatef(self.orbit_anglez,0,0,1)
gl.glTranslatef(x,y,z)
#gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_LINE)
gl.glCallList(self.list_index)
gl.glFinish()
gl.glPopMatrix()
gl.glPopMatrix()
elif self.orbit_demo == True and self.angle_table != None:
gl.glPushMatrix()
#print angle_table
#print table_ind
global angle_table_index
table_ind=angle_table_index
anglex=angle_table[table_ind,0]
#print anglex
gl.glRotatef(anglex,1,0,0)
gl.glPushMatrix()
angley=angle_table[table_ind,1]
gl.glRotatef(angley,0,1,0)
gl.glPushMatrix()
anglez=angle_table[table_ind,2]
gl.glRotatef(anglez,0,0,1)
gl.glTranslate(x,y,z)
gl.glCallList(self.list_index)
gl.glFinish()
gl.glPopMatrix()
gl.glPopMatrix()
gl.glPopMatrix()
angle_table_index += 1
if angle_table_index >= angle_table.shape[0]:
angle_table_index = angle_table.shape[0] - 1
'''
gl.glPushMatrix()
gl.glRotatef(self.angle_table[self.angle_table_index,0],1,0,0)
#x,y,z = self.position
gl.glPushMatrix()
gl.glRotatef(self.angle_table[self.angle_table_index,1],0,1,0)
gl.glPushMatrix()
gl.glRotatef(self.angle_table[self.angle_table_index,2],0,0,1)
gl.glTranslate(x,y,z)
gl.glCallList(self.list_index)
gl.glFinish()
gl.glPopMatrix()
gl.glPopMatrix()
gl.glPopMatrix()
self.angle_table_index += 1
if self.angle_table_index >= self.angle_table.shape[0]:
self.angle_table_index = self.angle_table.shape[0] - 1
'''
else:
gl.glCallList(self.list_index)
if self.picked_track != None:
self.display_one_track(self.picked_track)
if self.yellow_indices != None:
for i in self.yellow_indices:
self.display_one_track(i)
if self.text != None:
gl.glDisable(gl.GL_LIGHTING)
gl.glColor3f(0.,0.47,.76)
for (i,t) in enumerate(self.data):
#gl.glRasterPos3f(t[0][0],t[0][1],t[0][2])
label = self.text+str(i)
t2=tm.downsample(t,3)
gl.glRasterPos3f(t2[1][0],t2[1][1],t2[1][2])
for c in label:
glut.glutBitmapCharacter(glut.GLUT_BITMAP_TIMES_ROMAN_24, ord(c))
'''
if i == 22: #cortico spinal track
gl.glRasterPos3f(t[0][0],t[0][1],t[0][2])
label='spine'
for c in label:
glut.glutBitmapCharacter(glut.GLUT_BITMAP_TIMES_ROMAN_24, ord(c))
gl.glRasterPos3f(t[-1][0],t[-1][1],t[-1][2])
label='motor'
for c in label:
glut.glutBitmapCharacter(glut.GLUT_BITMAP_TIMES_ROMAN_24, ord(c))
label='corticospinal'
t2=tm.downsample(t,len(label)+3)
for (ci,c) in enumerate(label[::-1]):
gl.glRasterPos3f(t2[ci+2][0],t2[ci+2][1],t2[ci+2][2])
glut.glutBitmapCharacter(glut.GLUT_BITMAP_TIMES_ROMAN_24, ord(c))
else:
pass
'''
'''
gl.glRasterPos3f(t[0][0],t[0][1],t[0][2])
for c in label:
glut.glutBitmapCharacter(glut.GLUT_BITMAP_TIMES_ROMAN_24, ord(c))
'''
gl.glEnable(gl.GL_LIGHTING)
gl.glFinish()
def process_picking(self,near,far):
print('process picking')
min_dist=[cll.mindistance_segment2track(near,far,xyz) for xyz in self.data]
min_dist=np.array(min_dist)
#print min_dist
self.picked_track=min_dist.argmin()
print 'min index',self.picked_track
min_dist_info=[cll.mindistance_segment2track_info(near,far,xyz) for xyz in self.data]
A = np.array(min_dist_info)
dist=10**(-3)
iA=np.where(A[:,0]<dist)
minA=A[iA]
print 'minA ', minA
miniA=minA[:,1].argmin()
print 'final min index ',iA[0][miniA]
self.picked_track=iA[0][miniA]
def display_one_track(self,track_index,color4=np.array([1,1,0,1],dtype=np.float32)):
gl.glPushMatrix()
gl.glDisable(gl.GL_LIGHTING)
gl.glEnable(gl.GL_LINE_SMOOTH)
gl.glDisable(gl.GL_DEPTH_TEST)
#gl.glDepthFunc(gl.GL_NEVER)
gl.glEnable(gl.GL_BLEND)
gl.glBlendFunc(gl.GL_SRC_ALPHA,gl.GL_ONE_MINUS_SRC_ALPHA)
gl.glHint(gl.GL_LINE_SMOOTH_HINT,gl.GL_DONT_CARE)
gl.glLineWidth(7.)
gl.glEnableClientState(gl.GL_VERTEX_ARRAY)
gl.glColor4fv(color4)
d=self.data[track_index].astype(np.float32)
gl.glVertexPointerf(d)
gl.glDrawArrays(gl.GL_LINE_STRIP, 0, len(d))
gl.glDisableClientState(gl.GL_VERTEX_ARRAY)
gl.glEnable(gl.GL_LIGHTING)
gl.glEnable(gl.GL_DEPTH_TEST)
gl.glPopMatrix()
def multiple_colors(self):
from dipy.viz.colormaps import boys2rgb
from dipy.core.track_metrics import mean_orientation, length, downsample
colors=np.random.rand(1,3).astype(np.float32)
print colors
self.list_index = gl.glGenLists(1)
gl.glNewList( self.list_index,gl.GL_COMPILE)
#gl.glPushMatrix()
gl.glDisable(gl.GL_LIGHTING)
#!!!gl.glEnable(gl.GL_LINE_SMOOTH)
gl.glDisable(gl.GL_DEPTH_TEST)
#gl.glDepthFunc(gl.GL_NEVER)
gl.glEnable(gl.GL_BLEND)
gl.glBlendFunc(gl.GL_SRC_ALPHA,gl.GL_ONE_MINUS_SRC_ALPHA)
#gl.glBlendFunc(gl.GL_SRC_ALPHA_SATURATE,gl.GL_ONE_MINUS_SRC_ALPHA)
#gl.glBlendFunc(gl.GL_SRC_ALPHA,gl.GL_ONE)
#!!!gl.glHint(gl.GL_LINE_SMOOTH_HINT,gl.GL_DONT_CARE)
#gl.glHint(gl.GL_LINE_SMOOTH_HINT,gl.GL_NICEST)
gl.glLineWidth(self.line_width)
#gl.glDepthMask(gl.GL_FALSE)
gl.glEnableClientState(gl.GL_VERTEX_ARRAY)
for d in self.data:
if length(d)> self.min_length:
#mo=mean_orientation(d)
if self.manycolors:
ds=downsample(d,6)
mo=ds[3]-ds[2]
mo=mo/np.sqrt(np.sum(mo**2))
mo.shape=(1,3)
color=boys2rgb(mo)
color4=np.array([color[0][0],color[0][1],color[0][2],self.opacity],np.float32)
else:
color4=np.array([self.brain_color[0],self.brain_color[1],\
self.brain_color[2],self.opacity],np.float32)
if self.fadein == True:
color4[3] += self.fadein_speed
if self.fadeout == True:
color4[3] -= self.fadeout_speed
gl.glColor4fv(color4)
gl.glVertexPointerf(d)
gl.glDrawArrays(gl.GL_LINE_STRIP, 0, len(d))
gl.glDisableClientState(gl.GL_VERTEX_ARRAY)
#gl.glDisable(gl.GL_BLEND)
gl.glEnable(gl.GL_LIGHTING)
gl.glEnable(gl.GL_DEPTH_TEST)
#gl.glPopMatrix()
gl.glEndList()
def material_colors(self):
self.list_index = gl.glGenLists(1)
gl.glNewList( self.list_index,gl.GL_COMPILE)
gl.glMaterialfv( gl.GL_FRONT_AND_BACK, gl.GL_AMBIENT, [1,1,1,.1] )
gl.glMaterialfv( gl.GL_FRONT_AND_BACK, gl.GL_DIFFUSE, [1,1,1,.1] )
#gl.glMaterialf( gl.GL_FRONT_AND_BACK, gl.GL_SHININESS, 50. )
#gl.glMaterialfv(gl.GL_FRONT_AND_BACK, gl.GL_EMISSION, [1,1,1,1.])
gl.glEnable(gl.GL_LINE_SMOOTH)
gl.glEnable(gl.GL_BLEND)
gl.glBlendFunc(gl.GL_SRC_ALPHA,gl.GL_ONE_MINUS_SRC_ALPHA)
#gl.glMaterialfv( gl.GL_FRONT, gl.GL_SPECULAR, self.specular )
#gl.glMaterialf( gl.GL_FRONT, gl.GL_SHININESS, self.shininess )
#gl.glMaterialfv(gl.GL_FRONT, gl.GL_EMISSION, self.emission)
gl.glEnableClientState(gl.GL_VERTEX_ARRAY)
for d in self.data:
gl.glVertexPointerd(d)
gl.glDrawArrays(gl.GL_LINE_STRIP, 0, len(d))
gl.glDisableClientState(gl.GL_VERTEX_ARRAY)
gl.glEndList()
class ChromoTracks(object):
def __init__(self,fname,colormap=None, line_width=1., shrink=None, thinning = 0,
angle_table = None, manycolors = False, brain_color=[1,1,1]):
self.position = (0,0,0)
self.fname = fname
self.manycolors = manycolors
#self.color = monocolor
self.bbox = None
self.list_index = None
self.affine = None
self.data = None
self.list_index = None
self.rot_angle = 0
self.colormap = None
self.min = None
self.max = None
self.mean = None
self.material_color = False
self.fadeout = False
self.fadein = False
self.fadeout_speed = 0.
self.fadein_speed = 0.
self.min_length = 20.
self.angle = 0.
self.angular_speed = .5
self.line_width = line_width
self.opacity = 1.
self.opacity_rate = 0.
self.near_pick = None
self.far_pick = None
self.near_pick_prev = None
self.far_pick_prev = None
self.picked_track = None
self.pick_color = [1,1,0]
#self.brain_color = [1,1,1] # white
#self.brain_color = [.941,.862,.510] # buff
self.brain_color = brain_color
self.yellow_indices = None
self.dummy_data = False
self.data_subset = [0,20000]#None
        self.orbit_demo = False
        self.fade_demo = False  # fade in/out demo flag, checked by display() and multiple_colors()
self.orbit_anglez =0.
self.orbit_anglez_rate = 10.
self.orbit_anglex = 0.
self.orbit_anglex_rate = 2.
self.angle_table = angle_table
if angle_table != None:
print 'Tracks angle_table shape %s' % str(self.angle_table.shape)
self.angle_table_index = 0
#print 'angle_table_index %d' % self.angle_table_index
self.shrink = shrink
self.picking_example = False
import dipy.io.trackvis as tv
lines,hdr = tv.read(self.fname)
        ras = tv.aff_from_hdr(hdr)
self.affine=ras
tracks = [l[0] for l in lines]
        print '%d tracks loaded' % len(tracks)
self.thinning = thinning
if self.yellow_indices != None :
tracks = [t for t in tracks if tm.length(t) > 20]
if self.thinning != 0:
tracks = [tracks[k] for k in range(0,len(tracks),self.thinning)]
print '%d tracks active' % len(tracks)
#self.data = [100*np.array([[0,0,0],[1,0,0],[2,0,0]]).astype(np.float32) ,100*np.array([[0,1,0],[0,2,0],[0,3,0]]).astype(np.float32)]#tracks[:20000]
if self.dummy_data:
self.data = [100*np.array([[0,0,0],[1,0,0],[2,0,0]]).astype(np.float32) ,100*np.array([[0,1,0],[0,2,0],[0,3,0]]).astype(np.float32)]
if self.data_subset!=None:
self.data = tracks[self.data_subset[0]:self.data_subset[1]]
else:
self.data = tracks
if self.shrink != None:
self.data = [ self.shrink*t for t in self.data]
data_stats = np.concatenate(tracks)
self.min=np.min(data_stats,axis=0)
self.max=np.max(data_stats,axis=0)
self.mean=np.mean(data_stats,axis=0)
del data_stats
del lines
def init(self):
if self.material_color:
self.material_colors()
else:
self.multiple_colors()
def display(self):
if self.near_pick!= None:
#print self.near_pick
if np.sum(np.equal(self.near_pick, self.near_pick_prev))< 3:
self.process_picking(self.near_pick, self.far_pick)
self.near_pick_prev = self.near_pick
self.far_pick_prev = self.far_pick
x,y,z=self.position
if self.orbit_demo and self.angle_table == None:
gl.glPushMatrix()
self.orbit_anglex+=self.orbit_anglex_rate
gl.glRotatef(self.orbit_anglex,1,0,0)
gl.glPushMatrix()
self.orbit_anglez+=self.orbit_anglez_rate
#x,y,z=self.position
gl.glRotatef(self.orbit_anglez,0,0,1)
gl.glTranslatef(x,y,z)
#gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_LINE)
gl.glCallList(self.list_index)
gl.glFinish()
gl.glPopMatrix()
gl.glPopMatrix()
elif self.orbit_demo == True and self.angle_table != None:
gl.glPushMatrix()
gl.glRotatef(self.angle_table[self.angle_table_index,0],1,0,0)
#x,y,z = self.position
gl.glPushMatrix()
gl.glRotatef(self.angle_table[self.angle_table_index,1],0,1,0)
gl.glPushMatrix()
gl.glRotatef(self.angle_table[self.angle_table_index,2],0,0,1)
gl.glTranslate(x,y,z)
gl.glCallList(self.list_index)
gl.glFinish()
gl.glPopMatrix()
gl.glPopMatrix()
gl.glPopMatrix()
self.angle_table_index += 1
if self.angle_table_index >= self.angle_table.shape[0]:
self.angle_table_index = self.angle_table.shape[0] - 1
#print 'self.angle_table_index = %d' % self.angle_table_index
elif self.fade_demo:
#gl.glPushMatrix()
self.opacity += self.opacity_rate
if self.opacity <= 0.0:
self.opacity = 0.0
self.opacity_rate = -self.opacity_rate
elif self.opacity >= 1.0:
self.opacity = 1.0
self.opacity_rate = -self.opacity_rate
#print self.opacity
gl.glCallList(self.list_index)
gl.glFinish()
#gl.glPopMatrix()
else:
gl.glCallList(self.list_index)
if self.picked_track != None:
self.display_one_track(self.picked_track)
if self.yellow_indices != None:
for i in self.yellow_indices:
self.display_one_track(i)
gl.glFinish()
def process_picking(self,near,far):
print('process picking')
min_dist=[cll.mindistance_segment2track(near,far,xyz) for xyz in self.data]
min_dist=np.array(min_dist)
#print min_dist
self.picked_track=min_dist.argmin()
print 'min index',self.picked_track
min_dist_info=[cll.mindistance_segment2track_info(near,far,xyz) for xyz in self.data]
A = np.array(min_dist_info)
dist=10**(-3)
iA=np.where(A[:,0]<dist)
minA=A[iA]
print 'minA ', minA
miniA=minA[:,1].argmin()
print 'final min index ',iA[0][miniA]
self.picked_track=iA[0][miniA]
def display_one_track(self,track_index,color4=np.array([1,1,0,1],dtype=np.float32)):
gl.glPushMatrix()
gl.glDisable(gl.GL_LIGHTING)
gl.glEnable(gl.GL_LINE_SMOOTH)
gl.glEnable(gl.GL_BLEND)
gl.glBlendFunc(gl.GL_SRC_ALPHA,gl.GL_ONE_MINUS_SRC_ALPHA)
gl.glHint(gl.GL_LINE_SMOOTH_HINT,gl.GL_DONT_CARE)
gl.glLineWidth(7.)
gl.glEnableClientState(gl.GL_VERTEX_ARRAY)
gl.glColor4fv(color4)
d=self.data[track_index].astype(np.float32)
gl.glVertexPointerf(d)
gl.glDrawArrays(gl.GL_LINE_STRIP, 0, len(d))
gl.glDisableClientState(gl.GL_VERTEX_ARRAY)
gl.glEnable(gl.GL_LIGHTING)
gl.glPopMatrix()
def multiple_colors(self):
from dipy.viz.colormaps import boys2rgb
from dipy.core.track_metrics import mean_orientation, length, downsample
colors=np.random.rand(1,3).astype(np.float32)
print colors
self.list_index = gl.glGenLists(1)
if self.fade_demo:
#gl.glPushMatrix()
self.opacity += self.opacity_rate
if self.opacity <= 0.0:
self.opacity = 0.0
self.opacity_rate = -self.opacity_rate
elif self.opacity >= 1.0:
self.opacity = 1.0
self.opacity_rate = -self.opacity_rate
#print self.opacity
gl.glCallList(self.list_index)
gl.glFinish()
#gl.glPopMatrix()
gl.glNewList( self.list_index,gl.GL_COMPILE_AND_EXECUTE)
#gl.glPushMatrix()
gl.glDisable(gl.GL_LIGHTING)
#!!!gl.glEnable(gl.GL_LINE_SMOOTH)
gl.glDisable(gl.GL_DEPTH_TEST)
gl.glDepthFunc(gl.GL_NEVER)
gl.glEnable(gl.GL_BLEND)
gl.glBlendFunc(gl.GL_SRC_ALPHA,gl.GL_ONE_MINUS_SRC_ALPHA)
#gl.glBlendFunc(gl.GL_SRC_ALPHA_SATURATE,gl.GL_ONE_MINUS_SRC_ALPHA)
#gl.glBlendFunc(gl.GL_SRC_ALPHA,gl.GL_ONE)
#!!!gl.glHint(gl.GL_LINE_SMOOTH_HINT,gl.GL_DONT_CARE)
#gl.glHint(gl.GL_LINE_SMOOTH_HINT,gl.GL_NICEST)
gl.glLineWidth(self.line_width)
#gl.glDepthMask(gl.GL_FALSE)
gl.glEnableClientState(gl.GL_VERTEX_ARRAY)
for d in self.data:
if length(d)> self.min_length:
#mo=mean_orientation(d)
if self.manycolors:
ds=downsample(d,6)
mo=ds[3]-ds[2]
mo=mo/np.sqrt(np.sum(mo**2))
mo.shape=(1,3)
color=boys2rgb(mo)
color4=np.array([color[0][0],color[0][1],color[0][2],self.opacity],np.float32)
else:
color4=np.array([self.brain_color[0],self.brain_color[1],\
self.brain_color[2],self.opacity],np.float32)
if self.fadein == True:
color4[3] += self.fadein_speed
if self.fadeout == True:
color4[3] -= self.fadeout_speed
gl.glColor4fv(color4)
gl.glVertexPointerf(d)
gl.glDrawArrays(gl.GL_LINE_STRIP, 0, len(d))
gl.glDisableClientState(gl.GL_VERTEX_ARRAY)
#gl.glDisable(gl.GL_BLEND)
gl.glEnable(gl.GL_LIGHTING)
#gl.glPopMatrix()
gl.glEndList()
def material_colors(self):
self.list_index = gl.glGenLists(1)
gl.glNewList( self.list_index,gl.GL_COMPILE)
gl.glMaterialfv( gl.GL_FRONT_AND_BACK, gl.GL_AMBIENT, [1,1,1,.1] )
gl.glMaterialfv( gl.GL_FRONT_AND_BACK, gl.GL_DIFFUSE, [1,1,1,.1] )
#gl.glMaterialf( gl.GL_FRONT_AND_BACK, gl.GL_SHININESS, 50. )
#gl.glMaterialfv(gl.GL_FRONT_AND_BACK, gl.GL_EMISSION, [1,1,1,1.])
gl.glEnable(gl.GL_LINE_SMOOTH)
gl.glEnable(gl.GL_BLEND)
gl.glBlendFunc(gl.GL_SRC_ALPHA,gl.GL_ONE_MINUS_SRC_ALPHA)
#gl.glMaterialfv( gl.GL_FRONT, gl.GL_SPECULAR, self.specular )
#gl.glMaterialf( gl.GL_FRONT, gl.GL_SHININESS, self.shininess )
#gl.glMaterialfv(gl.GL_FRONT, gl.GL_EMISSION, self.emission)
gl.glEnableClientState(gl.GL_VERTEX_ARRAY)
for d in self.data:
gl.glVertexPointerd(d)
gl.glDrawArrays(gl.GL_LINE_STRIP, 0, len(d))
gl.glDisableClientState(gl.GL_VERTEX_ARRAY)
gl.glEndList()
|
|
#!/usr/bin/env python
#/************************************************************
#*
#* Licensed to the Apache Software Foundation (ASF) under one
#* or more contributor license agreements. See the NOTICE file
#* distributed with this work for additional information
#* regarding copyright ownership. The ASF licenses this file
#* to you under the Apache License, Version 2.0 (the
#* "License"); you may not use this file except in compliance
#* with the License. You may obtain a copy of the License at
#*
#* http://www.apache.org/licenses/LICENSE-2.0
#*
#* Unless required by applicable law or agreed to in writing,
#* software distributed under the License is distributed on an
#* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
#* KIND, either express or implied. See the License for the
#* specific language governing permissions and limitations
#* under the License.
#*
#*************************************************************/
'''
This script includes the Model class and its subclasses, with which
users can configure model parameters.
'''
import sys, re, subprocess
from singa.layer import *
from singa.utils.utility import *
from singa.utils.message import *
from google.protobuf import text_format
from singa.driver import Updater as SingaUpdater
class Model(object):
''' Configure model parameter
- add(): add layer
- compile(): specify Updater and Cluster protos
- build(): construct a model (i.e., NetProto)
- fit(): run singa for training
- evaluate(): run singa for testing
'''
def __init__(self, name='my model', argv=None, label=False):
'''
optional
name = (string) // name of model/job
argv // pass sys.argv to source
label = (bool) // whether a label layer exists (deprecated)
'''
self.jobconf = Message('Job', name=name).proto
self.layers = []
self.label = label
self.argv = argv
self.result = None
self.last_checkpoint_path = None
self.cudnn = False
self.accuracy = False
def add(self, layer):
'''
add layer
'''
pass
def exist_datalayer(self, phase):
'''
check if data layer exists
'''
for ly in self.layers:
if enumPhase(phase) in ly.layer.include:
return True
return False
def compile(self, optimizer=None, cluster=None,
loss=None, topk=1, **kwargs):
'''
required
optimizer = (Updater) // updater settings, e.g., SGD
cluster = (Cluster) // cluster settings
optional
loss = (string) // name of loss function type
topk = (int) // nb of results considered to compute accuracy
'''
assert optimizer != None, 'optimizer (Updater component) should be set'
assert cluster != None, 'cluster (Cluster component) should be set'
setval(self.jobconf, updater=optimizer.proto)
setval(self.jobconf, cluster=cluster.proto)
# take care of loss function layer
if loss == None:
print 'loss layer is not set'
else:
if hasattr(self.layers[-1], 'mask'):
ly = self.layers[-1].mask
else:
ly = self.layers[-1].layer
# take care of the last layer
if ly.type == enumLayerType('softmax'):
# revise the last layer
if loss == 'categorical_crossentropy':
setval(ly, type=enumLayerType('softmaxloss'))
setval(ly.softmaxloss_conf, topk=topk)
elif loss == 'mean_squared_error':
setval(ly, type=enumLayerType('euclideanloss'))
else:
# add new layer
if loss == 'categorical_crossentropy':
self.add(Loss('softmaxloss', topk=topk))
elif loss == 'mean_squared_error':
self.add(Loss('euclideanloss'))
elif loss == 'user_loss_rnnlm': # user-defined loss layer
self.add(UserLossRNNLM(nclass=kwargs['nclass'],
vocab_size=kwargs['in_dim']))
def build(self):
'''
construct neuralnet proto
'''
net = NetProto()
slyname = self.layers[0].layer.name
for i in range(len(self.layers)):
ly = net.layer.add()
ly.CopyFrom(self.layers[i].layer)
lastly = ly
if self.layers[i].is_datalayer == True:
continue
getattr(ly, 'srclayers').append(slyname)
slyname = ly.name
if hasattr(self.layers[i], 'mask'):
mly = net.layer.add()
mly.CopyFrom(self.layers[i].mask)
getattr(mly, 'srclayers').append(slyname)
slyname = mly.name
lastly = mly
if hasattr(self.layers[i], 'bidirect'):
bly = net.layer.add()
bly.CopyFrom(self.layers[i].bidirect)
getattr(bly, 'srclayers').append(slyname)
# deal with label layer (deprecated)
if self.label == True:
label_layer = Layer(name='label', type=kLabel)
ly = net.layer.add()
ly.CopyFrom(label_layer.layer)
getattr(ly, 'srclayers').append(self.layers[0].layer.name)
getattr(lastly, 'srclayers').append(label_layer.layer.name)
else:
if lastly.name == 'RBMVis':
getattr(lastly, 'srclayers').append(bly.name)
else:
getattr(lastly, 'srclayers').append(self.layers[0].layer.name)
if self.accuracy == True:
smly = net.layer.add()
smly.CopyFrom(Layer(name='softmax', type=kSoftmax).layer)
setval(smly, include=kTest)
getattr(smly, 'srclayers').append(self.layers[-1].layer.name)
aly = net.layer.add()
aly.CopyFrom(Accuracy().layer)
setval(aly, include=kTest)
getattr(aly, 'srclayers').append('softmax')
getattr(aly, 'srclayers').append(self.layers[0].layer.name)
# use of cudnn
if self.cudnn == True:
self.set_cudnn_layer_type(net)
setval(self.jobconf, neuralnet=net)
def fit(self, data=None, alg='bp', nb_epoch=0,
with_test=False, execpath='', device=None, **fields):
'''
required
data = (Data) // Data class object for training data
alg = (string) // algorithm, e.g., 'bp', 'cd'
nb_epoch = (int) // the number of training steps
optional
with_test = (bool) // flag if singa runs for test data
execpath = (string) // path to user own singa (executable file)
device = (int/list) // a list of gpu ids
**fields (KEY=VALUE)
batch_size = (int) // batch size for training data
train_steps = (int) // nb of steps for training, i.e., epoch
disp_freq = (int) // frequency to display training info
disp_after = (int) // display after this number
validate_data = (Data) // valid data, specified in load_data()
validate_freq = (int) // frequency of validation
validate_steps = (int) // total number of steps for validation
validate_after = (int) // start validation after this number
checkpoint_path = (string) // path to checkpoint file
checkpoint_freq = (int) // frequency for checkpoint
checkpoint_after = (int) // start checkpointing after this number
'''
assert data != None, 'Training data should be set'
assert nb_epoch > 0, 'Training steps should be set'
if 'batch_size' in fields: # if new value is set, replace it
setval(data.layer.store_conf, batchsize=fields['batch_size'])
# insert layer for training
if self.exist_datalayer('train') == False:
self.layers.insert(0, data)
setval(self.jobconf, train_steps=nb_epoch)
setval(self.jobconf, disp_freq=nb_epoch/10)
if 'disp_freq' in fields:
setval(self.jobconf, disp_freq=fields['disp_freq'])
if 'validate_data' in fields:
self.layers.insert(1, fields['validate_data'])
setval(self.jobconf, validate_freq=nb_epoch/10)
setval(self.jobconf, **fields)
# loading checkpoint if it is set
if data.checkpoint != None:
setval(self.jobconf, checkpoint_path=data.checkpoint)
# save model parameter (i.e., checkpoint_path)
setval(self.jobconf, checkpoint_freq=nb_epoch)
self.last_checkpoint_path = '{0}/step{1}-worker0'.format(
self.jobconf.cluster.workspace, nb_epoch)
# set Train_one_batch component, using backpropagation by default
setval(self.jobconf,
train_one_batch=Algorithm(type=enumAlgType(alg)).proto)
# use of cudnn
if device != None:
setval(self.jobconf, gpu=device)
self.cudnn = True
# start to run singa for training
if with_test == False:
self.build() # construct the NeuralNet component
#self.display()
return SingaRun(jobproto=self.jobconf,
argv=self.argv, execpath=execpath)
else:
# run singa in evaluate() with test data
pass
def evaluate(self, data=None, alg='bp',
checkpoint_path=None, execpath='',
device=None, show_acc=False, **fields):
'''
required
data = (Data) // Data class object for testing data
optional
alg = (string) // algorithm type, (bp at default)
checkpoint_path = (list) // checkpoint path
execpath = (string) // path to user's own executable
device = (int/list) // a list of gpu ids
show_acc = (bool) // compute and show the accuracy
**fields (KEY=VALUE)
batch_size = (int) // batch size for testing data
test_freq = (int) // frequency of testing
test_steps = (int) // total number of steps for testing
test_after = (int) // start testing after this number of steps
'''
assert data != None, 'Testing data should be set'
is_testonly = False
if 'batch_size' in fields: # if new value is set, replace it
setval(data.layer.store_conf, batchsize=fields['batch_size'])
# insert layer for testing
if self.exist_datalayer('test') == False:
self.layers.insert(0, data)
# loading checkpoint if singa runs only for testing
if self.exist_datalayer('train') == False:
is_testonly = True
if checkpoint_path == None:
print 'checkpoint_path has not been specified'
else:
setval(self.jobconf, checkpoint_path=checkpoint_path)
steps = fields['test_steps'] if 'test_steps' in fields else 10
setval(self.jobconf, test_steps=steps)
setval(self.jobconf, **fields)
# set Train_one_batch component, using backpropagation by default
setval(self.jobconf,
train_one_batch=Algorithm(type=enumAlgType(alg)).proto)
# use of cudnn
if device != None:
setval(self.jobconf, gpu=device)
self.cudnn = True
# set True if showing the accuracy
self.accuracy = show_acc
self.build() # construct the NeuralNet component
#--- generate job.conf file for debug purpose
#filename = 'job.conf'
#with open(filename, 'w') as f:
# f.write(text_format.MessageToString(self.jobconf.cluster))
#self.display()
#--- run singa ---
return SingaRun(jobproto=self.jobconf,
argv=self.argv, execpath=execpath, testmode=is_testonly)
#return SingaRun_script(filename=filename, execpath=execpath)
def display(self):
''' print out job proto
'''
print text_format.MessageToString(self.jobconf)
def set_cudnn_layer_type(self, net):
''' convert LayerType to CudnnLayerType
'''
for i in range(len(net.layer)):
ly_type = net.layer[i].type
cudnn_ly_type = ly_type
if ly_type == kCConvolution: cudnn_ly_type = kCudnnConv
elif ly_type == kCPooling: cudnn_ly_type = kCudnnPool
elif ly_type == kLRN: cudnn_ly_type = kCudnnLRN
elif ly_type == kSoftmax: cudnn_ly_type = kCudnnSoftmax
elif ly_type == kSoftmaxLoss: cudnn_ly_type = kCudnnSoftmaxLoss
elif ly_type == kActivation:
cudnn_ly_type = kCudnnActivation
elif ly_type == kSTanh:
print 'Error report: STanh layer is not supported for GPU'
'''
elif ly_type == kReLU:
cudnn_ly_type = kCudnnActivation
net.layer[i].activation_conf.type = RELU
elif ly_type == kSigmoid:
cudnn_ly_type = kCudnnActivation
net.layer[i].activation_conf.type = SIGMOID
elif ly_type == kTanh:
cudnn_ly_type = kCudnnActivation
net.layer[i].activation_conf.type = TANH
'''
#elif ly_type == kSTanh:
# print 'Error report: STanh layer is not supported for GPU'
#cudnn_ly_type = kCudnnActivation
#net.layer[i].activation_conf.type = STANH
net.layer[i].type = cudnn_ly_type
def show(self):
for ly in self.jobconf.neuralnet.layer:
print ly.name
def layer_by_id(self, k):
return self.jobconf.neuralnet.layer[k]
def layer_by_name(self, name):
    # look the layer up by name in the constructed NetProto
    for ly in self.jobconf.neuralnet.layer:
        if ly.name == name:
            return ly
    return None
def size(self):
return len(self.jobconf.neuralnet.layer)
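# Illustrative end-to-end sketch of the intended workflow (not executed here;
# layer arguments and the construction of the Data objects are assumptions,
# see the Sequential subclass and the SGD/Cluster helpers defined below):
#
#   m = Sequential('mlp-example', argv=sys.argv)
#   m.add(Dense(100, activation='sigmoid'))         # hypothetical hidden layer
#   m.add(Dense(10))                                # hypothetical output layer
#   sgd = SGD(lr=0.01, decay=0.0005, momentum=0.9)
#   topo = Cluster(workspace='/tmp/mlp-workspace')  # hypothetical workspace path
#   m.compile(optimizer=sgd, cluster=topo, loss='categorical_crossentropy')
#   m.fit(data=train_data, nb_epoch=1000, batch_size=64)   # train_data: a Data layer built elsewhere
#   result = m.evaluate(data=test_data, test_steps=10, show_acc=True)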
class Energy(Model):
''' energy model
'''
def __init__(self, name='my model', argv=[], label=False):
super(Energy, self).__init__(name=name, argv=argv, label=label)
def add(self, layer):
if hasattr(layer, 'layer_type'):
if layer.layer_type == kRBMVis:
dim = 0
for i in range(1, len(layer.out_dim)):
parw = Parameter(name='w', init='none', level=i)
parb = Parameter(name='b', init='none', level=i)
dim = layer.out_dim[i-1]
self.layers.append(Dense(dim, w_param=parw, b_param=parb,
activation='sigmoid'))
self.layers.append(layer)
class Sequential(Model):
''' sequential model
'''
def __init__(self, name='my model', argv=[], label=False):
super(Sequential, self).__init__(name=name, argv=argv, label=label)
def add(self, layer):
if hasattr(layer, 'layer_type'):
if layer.layer_type == 'AutoEncoder':
dim = 0
if layer.param_share == True:
# Encoding
for i in range(1, len(layer.hid_dim)+1):
parw = Parameter(name='w',
init='none', level=i)
parb = Parameter(name='b',
init='none', level=i)
dim = layer.hid_dim[i-1]
if i == len(layer.hid_dim): activation = None
else: activation = layer.activation
self.layers.append(Dense(dim,
w_param=parw, b_param=parb,
activation=activation))
# Decoding
for i in range(len(layer.hid_dim), 0, -1):
parw = Parameter(name=generate_name('w', 2),
init='none')
parb = Parameter(name=generate_name('b', 2),
init='none')
setval(parw.param, share_from='w'+str(i))
setval(parb.param, name='b'+str(i))
if i == 1: dim = layer.out_dim
else: dim = layer.hid_dim[i-2]
self.layers.append(Dense(dim,
w_param=parw, b_param=parb,
activation=layer.activation,
transpose=True))
else:
# MLP
for i in range(1, len(layer.hid_dim)+2):
parw = Parameter(name='w',
init='none', level=i)
parb = Parameter(name='b',
init='none', level=i)
if i == len(layer.hid_dim)+1: dim = layer.out_dim
else: dim = layer.hid_dim[i-1]
self.layers.append(Dense(dim,
w_param=parw, b_param=parb,
activation=layer.activation))
else:
self.layers.append(layer)
else:
self.layers.append(layer)
class Store(object):
def __init__(self, **kwargs):
'''
**kwargs
path = (string) // path to dataset
backend = (string) //
batch_size = (int) // batch size of dataset
shape = (int) //
'''
self.proto = Message('Store', **kwargs).proto
class Algorithm(object):
def __init__(self, type=enumAlgType('bp'), **kwargs):
'''
type = (string) // type of algorithm, bp at default
'''
alg = Message('Alg', alg=type, **kwargs).proto
if type == enumAlgType('cd'):
setval(alg.cd_conf, **kwargs)
self.proto = alg
class Updater(object):
def __init__(self, upd_type, lr, lr_type,
decay, momentum,
step, step_lr, **fields):
'''
required
upd_type = (enum) // enum type of updater
lr = (float) // base learning rate
optional
lr_type = (string) // type of the learning rate (Fixed at default)
'''
upd = Message('Updater', type=upd_type, **fields).proto
setval(upd.learning_rate, base_lr=lr)
if decay > 0:
setval(upd, weight_decay=decay)
if momentum > 0:
setval(upd, momentum=momentum)
if lr_type == None or lr_type == "fixed":
setval(upd.learning_rate, type=kFixed)
elif lr_type == 'step':
cp = Message('Step', change_freq=60, gamma=0.997)
setval(upd.learning_rate, type=kStep, step_conf=cp.proto)
elif lr_type == 'manual':
cp = Message('FixedStep', step=step, step_lr=step_lr)
setval(upd.learning_rate, type=kFixedStep, fixedstep_conf=cp.proto)
elif lr_type == 'linear':
cp = Message('Linear', change_freq=10, final_lr=0.1)
setval(upd.learning_rate, type=kLinear, linear_conf=cp.proto)
self.proto = upd
self.singaupdater = None
def Update(self, step, layer):
''' This method updates parameters of layer
step = (int) // training step, i.e., param version
'''
if self.singaupdater == None:
self.singaupdater = SingaUpdater.CreateUpdater(
self.proto.SerializeToString())
# update parameters
singaParams = layer.singalayer.GetParams()
for par in singaParams:
self.singaupdater.Update(step, par, 1.0)
class SGD(Updater):
def __init__(self, lr=0.01, lr_type=None,
decay=0, momentum=0,
step=(0), step_lr=(0.01), **fields):
'''
required
lr = (float) // base learning rate
optional
lr_type = (string) // type of learning rate, 'Fixed' at default
decay = (float) // weight decay
momentum = (float) // momentum
step = (int/list) // steps
step_lr = (float/list) // learning rate after the steps
**fields (KEY=VALUE)
'''
assert lr
super(SGD, self).__init__(upd_type=kSGD,
lr=lr, lr_type=lr_type,
decay=decay, momentum=momentum,
step=step, step_lr=step_lr, **fields)
class AdaGrad(Updater):
def __init__(self, lr=0.01, lr_type=None,
decay=0, momentum=0,
step=(0), step_lr=(0.01), **fields):
'''
required
lr = (float) // base learning rate
optional
lr_type = (string) // type of learning rate, 'Fixed' at default
decay = (float) // weight decay
momentum = (float) // momentum
step = (int/list) // steps
step_lr = (float/list) // learning rate after the steps
**fields (KEY=VALUE)
'''
assert lr
super(AdaGrad, self).__init__(upd_type=kAdaGrad,
lr=lr, lr_type=lr_type,
decay=decay, momentum=momentum,
step=step, step_lr=step_lr, **fields)
class Cluster(object):
""" Specify the cluster topology, e.g., number of workers/servers.
Currently we need to create this object in the .py file and also provide a
cluster configuration file to the command line. TODO(wangwei) update SINGA
code to eliminate the requirement of the cluster configuration file for
training on a single node or the cluster object in the pyfile for training
in a cluster.
"""
def __init__(self, workspace=None,
nworker_groups=1, nserver_groups=1,
nworkers_per_group=1, nservers_per_group=1,
nworkers_per_procs=1, nservers_per_procs=1,
**fields):
'''
required
workspace = (string) // workspace path
optional
nworker_groups = (int)
nserver_groups = (int)
nworkers_per_group = (int)
nservers_per_group = (int)
nworkers_per_procs = (int)
nservers_per_procs = (int)
**fields
server_worker_separate = (bool)
'''
assert workspace != None, 'need to set workspace'
self.proto = Message('Cluster', workspace=workspace).proto
# optional
self.proto.nworker_groups = nworker_groups
self.proto.nserver_groups = nserver_groups
self.proto.nworkers_per_group = nworkers_per_group
self.proto.nservers_per_group = nservers_per_group
self.proto.nworkers_per_procs = nworkers_per_procs
self.proto.nservers_per_procs = nservers_per_procs
# other fields
setval(self.proto, **fields)
def StoreResults(lines):
""" Parsing metrics from each line in the log file.
TODO(wangwei) format the log string to make them uniform for easy parsing
Another approach is creating a protobuf message for metrics, which can be
used for dumping metrics to string and loading perf string back to messages.
"""
resultDic = {}
for line in lines:
line = re.findall(r'[\w|*.*]+', line)
if 'Train' in line:
step = line[line.index('step')+1]
if 'accuracy' in line:
resultDic.setdefault(step, {})['acc'] \
= line[line.index('accuracy')+1]
if 'loss' in line:
resultDic.setdefault(step, {})['loss'] \
= line[line.index('loss')+1]
if 'ppl' in line:
resultDic.setdefault(step, {})['ppl'] \
= line[line.index('ppl')+1]
if 'Squared' in line:
resultDic.setdefault(step, {})['se'] \
= line[line.index('Squared')+2]
return resultDic
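# Illustrative example (the exact SINGA log wording may differ): a line such as
#   "Train step 100 accuracy = 0.82 loss = 0.52"
# is tokenized to ['Train', 'step', '100', 'accuracy', '0.82', 'loss', '0.52'],
# so StoreResults would return {'100': {'acc': '0.82', 'loss': '0.52'}}.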
def SingaRun(jobproto='', argv=None, execpath='', testmode=False):
"""
Run Singa and receive the training/test results.
"""
import singa.driver as driver
d = driver.Driver()
d.InitLog(argv[0])
d.Init(argv)
if testmode == True:
d.Test(jobproto.SerializeToString())
else:
d.Train(False, jobproto.SerializeToString())
# Get the performance from the latest log file.
# TODO(wangwei) the log file would be overwritten by other running instance
# of the same program, e.g., lt-singa
logfile = '/tmp/singa-log/{0}.ERROR'.format(argv[0].split('/')[-1])
fin = open(logfile, 'r')
result = StoreResults(fin.readlines())
return result
def SingaRun_script(filename='', execpath=''):
"""
Deprecated.
Generate the job conf file and run the shell command.
"""
SINGAROOT = '../../../'
conf = 'examples/' + filename
if execpath == '':
cmd = SINGAROOT+'bin/singa-run.sh ' \
+ '-conf %s ' % conf
else:
cmd = SINGAROOT+'bin/singa-run.sh ' \
+ '-conf %s ' % conf \
+ '-exec %s ' % execpath
procs = subprocess.Popen(cmd.strip().split(' '),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
resultDic = {}
outputlines = iter(procs.stdout.readline, '')
resultDic = StoreResults(outputlines)
#TODO better format to store the result??
return resultDic
def load_model_parameter(fin, neuralnet, batchsize=1, data_shape=None):
"""
this method loads model parameter
"""
hly_idx = 0
for i in range(len(neuralnet)):
if neuralnet[i].is_datalayer:
if data_shape == None:
shape = neuralnet[i].shape
shape[0] = batchsize
neuralnet[i].setup(shape)
else:
neuralnet[i].setup(data_shape)
else:
hly_idx = i
break
net = layerVector(len(neuralnet)-hly_idx)
for i in range(hly_idx, len(neuralnet)):
if neuralnet[i].src==None:
neuralnet[i].setup(neuralnet[i-1])
else:
neuralnet[i].setup(neuralnet[i].src)
net[i-hly_idx] = neuralnet[i].singalayer
from singa.driver import Worker
alg = Algorithm(type=enumAlgType('bp')).proto
w = Worker.CreateWorker(alg.SerializeToString())
w.InitNetParams(fin, net)
def save_model_parameter(step, fout, neuralnet):
"""
this method saves model parameter
"""
hly_idx = 0
for i in range(len(neuralnet)):
if not neuralnet[i].is_datalayer:
hly_idx = i
break
from singa.driver import Worker
net = layerVector(len(neuralnet)-hly_idx)
for i in range(hly_idx, len(neuralnet)):
net[i-hly_idx] = neuralnet[i].singalayer
alg = Algorithm(type=enumAlgType('bp')).proto
w = Worker.CreateWorker(alg.SerializeToString())
w.Checkpoint(step, fout, net)
|
|
# -*- coding: utf-8 -*-
import json
import pytest
from hyper.cli import KeyValue
from hyper.cli import get_content_type_and_charset, main, parse_argument
from hyper.cli import set_request_data, set_url_info
from hyper.common.headers import HTTPHeaderMap
# mock for testing
class DummyUrlInfo(object):
def __init__(self):
self.path = '/'
class DummyNamespace(object):
def __init__(self, attrs):
self.body = {}
self.headers = HTTPHeaderMap()
self.items = []
self.method = None
self._url = ''
self.url = DummyUrlInfo()
for key, value in attrs.items():
setattr(self, key, value)
class DummyResponse(object):
def __init__(self, headers):
self.headers = HTTPHeaderMap(headers.items())
def read(self):
ctype = self.headers.get('content-type')
if ctype is not None:
if 'json' in ctype[0].decode('utf-8'):
return b'{"data": "dummy"}'
return b'<html>dummy</html>'
def getheader(self, name):
return self.headers.get(name)
def getheaders(self):
return self.headers
class DummyConnection(object):
def __init__(self, host, port, secure=False):
self.host = host
self.port = port
self.response = DummyResponse({'content-type': 'application/json'})
self.secure = secure
def request(self, method, path, body, headers):
return method, path, body, headers
def get_response(self):
return self.response
def _get_value(obj, key):
if '.' in key:
attr1, attr2 = key.split('.')
return _get_value(getattr(obj, attr1), attr2)
else:
return getattr(obj, key)
@pytest.mark.parametrize('argv', [
['example.com'],
['example.com/'],
['http://example.com'],
['https://example.com'],
['https://example.com/'],
['https://example.com/httpbin/get'],
], ids=[
'specified host only',
'specified host and path',
'specified host with url scheme http://',
'specified host with url scheme https://',
'specified host with url scheme https:// and root',
'specified host with url scheme https:// and path',
])
def test_cli_normal(monkeypatch, argv):
monkeypatch.setattr('hyper.cli.HTTPConnection', DummyConnection)
main(argv)
assert True
@pytest.mark.parametrize('argv', [
[],
['-h'],
['--version'],
], ids=[
'specified no argument',
'specified "-h" option',
'specified "--version" option',
])
def test_cli_with_system_exit(argv):
with pytest.raises(SystemExit):
main(argv)
@pytest.mark.parametrize(('argv', 'expected'), [
(['--debug', 'example.com'], {'debug': True}),
(['GET', 'example.com', 'x-test:header'],
{'method': 'GET', 'headers': {'x-test': 'header'}}),
(['GET', 'example.com', 'param==test'],
{'method': 'GET', 'url.path': '/?param=test'}),
(['POST', 'example.com', 'data=test'],
{'method': 'POST', 'body': '{"data": "test"}'}),
], ids=[
'specified "--debug" option',
'specified host and additional header',
'specified host and get parameter',
'specified host and post data',
])
def test_parse_argument(argv, expected):
args = parse_argument(argv)
for key, value in expected.items():
assert value == _get_value(args, key)
@pytest.mark.parametrize(('response', 'expected'), [
(DummyResponse({}), ('unknown', 'utf-8')),
(DummyResponse({'content-type': 'text/html; charset=latin-1'}),
('text/html', 'latin-1')),
(DummyResponse({'content-type': 'application/json'}),
('application/json', 'utf-8')),
], ids=[
'unknown content type and default charset',
'text/html and charset=latin-1',
'application/json and default charset',
])
def test_get_content_type_and_charset(response, expected):
ctype, charset = get_content_type_and_charset(response)
assert expected == (ctype, charset)
@pytest.mark.parametrize(('args', 'expected'), [
(DummyNamespace({}), {'headers': {}, 'method': 'GET'}),
(
DummyNamespace(
{'items': [
KeyValue('x-header', 'header', ':', ''),
KeyValue('param', 'test', '==', ''),
]}
),
{'headers': {'x-header': 'header'},
'method': 'GET',
'url.path': '/?param=test',
}
),
(
DummyNamespace(
{'items': [
KeyValue('data1', 'test1', '=', ''),
KeyValue('data2', 'test2', '=', ''),
]}
),
{'headers': {'content-type': 'application/json'},
'method': 'POST',
'body': json.dumps({'data1': 'test1', 'data2': 'test2'}),
}
),
], ids=[
'set no request data',
'set header and GET parameters',
'set header and POST data',
])
def test_set_request_data(args, expected):
set_request_data(args)
for key, value in expected.items():
assert value == _get_value(args, key)
@pytest.mark.parametrize(('args', 'expected'), [
(DummyNamespace({'_url': ''}),
{'query': None, 'host': 'localhost', 'fragment': None,
'port': 443, 'netloc': None, 'scheme': 'https', 'path': '/',
'secure': True}),
(DummyNamespace({'_url': 'example.com'}),
{'host': 'example.com', 'port': 443, 'path': '/', 'secure': True}),
(DummyNamespace({'_url': 'example.com/httpbin/get'}),
{'host': 'example.com', 'port': 443, 'path': '/httpbin/get',
'secure': True}),
(DummyNamespace({'_url': 'example.com:80'}),
{'host': 'example.com', 'port': 80, 'path': '/', 'secure': True}),
(DummyNamespace({'_url': 'http://example.com'}),
{'host': 'example.com', 'port': 80, 'path': '/', 'scheme': 'http',
'secure': False}),
(DummyNamespace({'_url': 'http://example.com/'}),
{'host': 'example.com', 'port': 80, 'path': '/', 'scheme': 'http',
'secure': False}),
(DummyNamespace({'_url': 'http://example.com:8080'}),
{'host': 'example.com', 'port': 8080, 'path': '/', 'scheme': 'http',
'secure': False}),
(DummyNamespace({'_url': 'https://example.com'}),
{'host': 'example.com', 'port': 443, 'path': '/', 'scheme': 'https',
'secure': True}),
(DummyNamespace({'_url': 'https://example.com/httpbin/get'}),
{'host': 'example.com', 'port': 443, 'path': '/httpbin/get',
'scheme': 'https', 'secure': True}),
(DummyNamespace({'_url': 'https://example.com:8443/httpbin/get'}),
{'host': 'example.com', 'port': 8443, 'path': '/httpbin/get',
'scheme': 'https', 'secure': True}),
], ids=[
'set no url (it means default settings)',
'set only hostname',
'set hostname with path',
'set hostname with port number',
'set url with http://',
'set url + "/" with http://',
'set url with http:// and port number',
'set url with https://',
'set url with path',
'set url with port number and path',
])
def test_set_url_info(args, expected):
set_url_info(args)
for key, value in expected.items():
assert value == getattr(args.url, key)
|
|
"""
pywow wdb/dbc field types
"""
from structures.fields import *
##
# Core custom types for WDB/DBC files
#
class IDField(IntegerField):
"""
Integer field containing the row's ID
"""
def __init__(self, name="_id"):
IntegerField.__init__(self, name=name, primary_key=True)
class RecLenField(IntegerField):
"""
Integer field containing the length of the row from itself
"""
def __init__(self, name="_reclen"):
IntegerField.__init__(self, name=name)
class LocalizedField(Field):
"""
Localized StringField.
Structure handled at wdbc.structures.LocalizedStringField
"""
pass
##
# Dynamic types
#
class DynamicFieldsBase(list):
def get_fields(self):
return self
def delete_field(self, name):
"""
Delete a field, by name or by instance
"""
if isinstance(name, basestring):
for index, field in enumerate(self):
if field.name == name:
del self[index]
break
else:
for index, field in enumerate(self):
if isinstance(field, name.__class__):
del self[index]
break
class DynamicMaster(IntegerField):
"""
Master field for dynamic columns, determining how many will be present.
"""
pass
class DynamicFields(DynamicFieldsBase):
"""
A dynamic column master, followed by the full list of dynamic columns.
Used in itemcache.wdb
DynamicFields("name", [((Field, "x"), (Field, "y"), ...), 10])
"""
def __init__(self, name, columns):
self.name = name
self.master = DynamicMaster(name, group=self)
self.append(self.master)
cols, amt = columns
for i in xrange(amt):
self.append([v[0](name="%s_%s_%i" % (name, v[1], i+1), dynamic=i+1, group=self) for v in cols])
def get_fields(self):
yield self.master
for v in self[1:]:
for f in v:
yield f
class SubRow(object):
"""
Used in Unions as a fake DBRow
"""
def __init__(self, field, row, structure):
self.__field = field
self.__row = row
self._structure = structure(row._parent.build, row._parent)
def __dir__(self):
result = self.__dict__.keys()
result.extend(self._structure.column_names)
return result
def __getattr__(self, name):
if name in self._structure:
index = self._structure.index(name)
value = self._raw(name)
return self._structure[index].to_python(value, self.__row)
return super(SubRow, self).__getattribute__(name)
def _raw(self, name):
index = self._structure.index(name)
real_name = self.__field.column_names[index]
return getattr(self.__row, real_name)
class Union(DynamicFieldsBase):
"""
Imitates a C++ union.
Takes a name argument and field_1, ... field_n fields to
populate the default union.
Required get_structure(x, row) callable argument that
returns the structure corresponding to a specific row.
"""
def __init__(self, name, fields, get_structure):
DynamicFieldsBase.__init__(self, fields)
self.name = name
if not callable(get_structure):
raise StructureError("%s._get_structure must be a callable type" % (self.__class__.__name__))
self._get_structure = get_structure
self.column_names = [k.name for k in fields]
def __build_list(self, field, row):
"Builds a fake DBRow to allow deep attribute seeking"
return SubRow(field, row, self._get_structure(row))
def get_abstraction(self):
return self.name, self.__build_list
def get_structure(self, row):
return self._get_structure(row)
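# Illustrative sketch of a Union definition (the structures and field names below
# are hypothetical, not part of this module):
#
#   def _effect_structure(row):
#       # choose a per-row structure based on some discriminator column
#       return SpellEffectStructure if row.effect_type == 1 else ItemEffectStructure
#
#   Union("effect",
#       fields = (IntegerField("raw_a"), IntegerField("raw_b")),
#       get_structure = _effect_structure,
#   )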
class MultiField(DynamicFieldsBase):
"""
Expands a list of fields to a specific amount
"""
def __init__(self, name, fields, amount):
    super(MultiField, self).__init__(fields)
    self.name = name
    self.amount = amount
def __build_list(self):
pass
def get_abstraction(self):
return self.name, self.__build_list
##
# Relations
#
class UnresolvedObjectRef(int):
def __repr__(self):
return "<%s: %d>" % (self.__class__.__name__, int(self))
class RelationError(Exception):
pass
class UnresolvedTable(RelationError):
pass
class UnresolvedKey(RelationError):
pass
class ForeignKeyBase(IntegerField):
"""
Base class for ForeignKeys
"""
def from_python(self, value): # FIXME use isinstance(DBFile) instead
if isinstance(value, int) or isinstance(value, long):
return value
pk = value.structure.primary_keys[0] # TODO: what about multiple primary keys ?
index = value.structure.index(pk.name)
return value[index]
def to_python(self, value, row):
if isinstance(value, int):
self.raw_value = value
f = self.relationTable(value)
key = self.relationKey(value, row)
try:
value = f[key]
except KeyError:
# If the key is 0 and is not in the target table, we assume it's meant to be empty
if key == 0:
value = None
else:
raise UnresolvedKey("Key %r does not exist in %s" % (key, f.structure.name()))
return self.get_final_value(value, row)
return value
def relationTable(self, value):
"""
Return the forward relation "table" (file) in the Environment
"""
environment = self.parent.parent.environment
relation = self.relation(value)
try:
return environment.dbFile(relation)
except KeyError:
raise UnresolvedTable("Table %r does not exist in the current environment" % (relation))
def get_final_value(self, value, row):
return value
def relation(self, value):
raise NotImplementedError("Subclasses must implement this method")
def relationKey(self, value, row):
raise NotImplementedError("Subclasses must implement this method")
class ForeignKey(ForeignKeyBase):
"""
Integer link to another table's primary key.
Relation required.
"""
def __init__(self, name, relation):
IntegerField.__init__(self, name)
self._relation = relation
def relation(self, value):
return self._relation
def relationKey(self, value, row):
return value
class ForeignMask(BitMaskField):
"""
Integer field containing a bitmask relation to
multiple rows in another file.
"""
def __init__(self, name, relation, **kwargs):
super(ForeignMask, self).__init__(name=name, **kwargs)
self._relation = relation
self.flags = {}
def __init_flags(self):
env = self.parent.parent.environment
try:
f = env.dbFile(self._relation)
except KeyError:
raise UnresolvedTable("Relation %r does not exist in the current environment" % (self._relation), value)
for k in f:
self.flags[2 ** (k-1)] = f[k]
def from_python(self, value):
assert isinstance(value, BitFlags)
return int(value)
def to_python(self, value, row):
if isinstance(value, BitFlags):
return value
if not self.flags:
self.__init_flags()
return BitMask(value, self.flags)
class ForeignByte(ForeignKey):
"""
This is a HACK
"""
char = "b"
size = 1
class GenericForeignKey(ForeignKeyBase):
def __init__ (self, name="", get_relation=None, get_value=lambda x, value: value):
IntegerField.__init__(self, name)
if not callable(get_relation):
raise FieldError("%s._get_relation must be a callable type" % (self.__class__.__name__))
self._get_relation = get_relation
self._get_value = get_value
def relation(self, value):
return self._get_relation(self, value)
def relationKey(self, value, row):
return self._get_value(self, value)
class ForeignCell(ForeignKeyBase):
"""
Like a ForeignKey, but returns a specific cell
from the relation. Requires both a get_column
and a get_row method.
"""
def __init__(self, name, relation, get_column, get_row):
IntegerField.__init__(self, name)
self._relation = relation
self.get_column = get_column
self.get_row = get_row
def get_final_value(self, value, row):
column = self.get_column(row, self.raw_value)
if column:
return getattr(value, column)
return self.raw_value
def relationKey(self, value, row):
return self.get_row(row, self.raw_value)
def relation(self, value):
return self._relation
##
# Misc. types
#
class UnknownField(IntegerField):
pass
class ColorField(UnsignedIntegerField):
pass
class MoneyField(UnsignedIntegerField):
pass
class FilePathField(StringField):
pass
class GUIDField(BigIntegerField):
pass
class HashField(Field):
char = "16s"
size = 16
class DataField(Field):
char = "s"
def __init__(self, name, master):
Field.__init__(self, name=name)
self.master = master
|
|
# Copyright 2021 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Vertex AI Batch Prediction Job Remote Runner Client module."""
import json
from logging import raiseExceptions
import os
import time
import unittest
from unittest import mock
from google.cloud import aiplatform
from google.cloud.aiplatform.explain import ExplanationMetadata
from google.cloud.aiplatform.compat.types import job_state as gca_job_state
from google.protobuf import json_format
from google_cloud_pipeline_components.container.utils.execution_context import ExecutionContext
from google_cloud_pipeline_components.proto.gcp_resources_pb2 import GcpResources
from google_cloud_pipeline_components.container.v1.gcp_launcher import batch_prediction_job_remote_runner
from google_cloud_pipeline_components.container.v1.gcp_launcher import job_remote_runner
from google_cloud_pipeline_components.container.v1.gcp_launcher.utils import json_util
import requests
import google.auth
import google.auth.transport.requests
class BatchPredictionJobRemoteRunnerUtilsTests(unittest.TestCase):
def setUp(self):
super(BatchPredictionJobRemoteRunnerUtilsTests, self).setUp()
self._payload = (
'{"batchPredictionJob": {"displayName": '
'"BatchPredictionComponentName", "model": '
'"projects/test/locations/test/models/test-model","inputConfig":'
' {"instancesFormat": "CSV","gcsSource": {"uris": '
'["test_gcs_source"]}}, "outputConfig": {"predictionsFormat": '
'"CSV", "gcsDestination": {"outputUriPrefix": '
'"test_gcs_destination"}}}}')
self._job_type = 'BatchPredictionJob'
self._project = 'test_project'
self._location = 'test_region'
self._batch_prediction_job_name = '/projects/{self._project}/locations/{self._location}/jobs/test_job_id'
self._gcp_resources = os.path.join(os.getenv('TEST_UNDECLARED_OUTPUTS_DIR'), "gcp_resources")
self._batch_prediction_job_uri_prefix = f'https://{self._location}-aiplatform.googleapis.com/v1/'
self._output_file_path = os.path.join(os.getenv('TEST_UNDECLARED_OUTPUTS_DIR'), "localpath/foo")
self._executor_input = '{"outputs":{"artifacts":{"batchpredictionjob":{"artifacts":[{"metadata":{},"name":"foobar","type":{"schemaTitle":"google.VertexBatchPredictionJob"},"uri":"gs://abc"}]}},"outputFile":"'+self._output_file_path+'"}}'
def tearDown(self):
if os.path.exists(self._gcp_resources):
os.remove(self._gcp_resources)
@mock.patch.object(aiplatform.gapic, 'JobServiceClient', autospec=True)
@mock.patch.object(os.path, 'exists', autospec=True)
def test_batch_prediction_job_remote_runner_succeeded(
self, mock_path_exists, mock_job_service_client):
job_client = mock.Mock()
mock_job_service_client.return_value = job_client
create_batch_prediction_job_response = mock.Mock()
job_client.create_batch_prediction_job.return_value = create_batch_prediction_job_response
create_batch_prediction_job_response.name = self._batch_prediction_job_name
get_batch_prediction_job_response = mock.Mock()
job_client.get_batch_prediction_job.return_value = get_batch_prediction_job_response
get_batch_prediction_job_response.state = gca_job_state.JobState.JOB_STATE_SUCCEEDED
get_batch_prediction_job_response.name = 'job1'
get_batch_prediction_job_response.output_info.bigquery_output_table = 'bigquery_output_table'
get_batch_prediction_job_response.output_info.bigquery_output_dataset = 'bigquery_output_dataset'
get_batch_prediction_job_response.output_info.gcs_output_directory = 'gcs_output_directory'
mock_path_exists.return_value = False
batch_prediction_job_remote_runner.create_batch_prediction_job(
self._job_type, self._project, self._location, self._payload,
self._gcp_resources,self._executor_input)
mock_job_service_client.assert_called_once_with(
client_options={
'api_endpoint': 'test_region-aiplatform.googleapis.com'
},
client_info=mock.ANY)
expected_parent = f'projects/{self._project}/locations/{self._location}'
expected_job_spec = json.loads(self._payload, strict=False)
job_client.create_batch_prediction_job.assert_called_once_with(
parent=expected_parent, batch_prediction_job=expected_job_spec)
with open(self._gcp_resources) as f:
serialized_gcp_resources = f.read()
# Instantiate GCPResources Proto
batch_prediction_job_resources = json_format.Parse(
serialized_gcp_resources, GcpResources())
self.assertEqual(len(batch_prediction_job_resources.resources), 1)
batch_prediction_job_name = batch_prediction_job_resources.resources[
0].resource_uri[len(self._batch_prediction_job_uri_prefix):]
self.assertEqual(batch_prediction_job_name,
self._batch_prediction_job_name)
with open(self._output_file_path) as f:
executor_output = json.load(f, strict=False)
self.assertEqual(
executor_output,
json.loads(
'{"artifacts": {"batchpredictionjob": {"artifacts": [{"metadata": {"resourceName": "job1", "bigqueryOutputDataset": "bigquery_output_dataset","bigqueryOutputTable": "bigquery_output_table","gcsOutputDirectory": "gcs_output_directory"}, "name": "foobar", "type": {"schemaTitle": "google.VertexBatchPredictionJob"}, "uri": "https://test_region-aiplatform.googleapis.com/v1/job1"}]}}}'
))
@mock.patch.object(aiplatform.gapic, 'JobServiceClient', autospec=True)
@mock.patch.object(os.path, 'exists', autospec=True)
def test_batch_prediction_job_remote_runner_raises_exception_on_error(
self, mock_path_exists, mock_job_service_client):
job_client = mock.Mock()
mock_job_service_client.return_value = job_client
create_batch_prediction_job_response = mock.Mock()
job_client.create_batch_prediction_job.return_value = create_batch_prediction_job_response
create_batch_prediction_job_response.name = self._batch_prediction_job_name
get_batch_prediction_job_response = mock.Mock()
job_client.get_batch_prediction_job.return_value = get_batch_prediction_job_response
get_batch_prediction_job_response.state = gca_job_state.JobState.JOB_STATE_FAILED
mock_path_exists.return_value = False
with self.assertRaises(RuntimeError):
batch_prediction_job_remote_runner.create_batch_prediction_job(
self._job_type, self._project, self._location, self._payload,
self._gcp_resources,self._executor_input)
@mock.patch.object(aiplatform.gapic, 'JobServiceClient', autospec=True)
@mock.patch.object(os.path, 'exists', autospec=True)
@mock.patch.object(time, 'sleep', autospec=True)
def test_batch_prediction_job_remote_runner_retries_to_get_status_on_non_completed_job(
self, mock_time_sleep, mock_path_exists, mock_job_service_client):
job_client = mock.Mock()
mock_job_service_client.return_value = job_client
create_batch_prediction_job_response = mock.Mock()
job_client.create_batch_prediction_job.return_value = create_batch_prediction_job_response
create_batch_prediction_job_response.name = self._batch_prediction_job_name
get_batch_prediction_job_response_success = mock.Mock()
get_batch_prediction_job_response_success.name = 'job1'
get_batch_prediction_job_response_success.state = gca_job_state.JobState.JOB_STATE_SUCCEEDED
get_batch_prediction_job_response_success.output_info.bigquery_output_table = 'bigquery_output_table'
get_batch_prediction_job_response_success.output_info.bigquery_output_dataset = 'bigquery_output_dataset'
get_batch_prediction_job_response_success.output_info.gcs_output_directory = 'gcs_output_directory'
get_batch_prediction_job_response_running = mock.Mock()
get_batch_prediction_job_response_running.state = gca_job_state.JobState.JOB_STATE_RUNNING
job_client.get_batch_prediction_job.side_effect = [
get_batch_prediction_job_response_running,
get_batch_prediction_job_response_success
]
mock_path_exists.return_value = False
batch_prediction_job_remote_runner.create_batch_prediction_job(
self._job_type, self._project, self._location, self._payload,
self._gcp_resources,self._executor_input)
mock_time_sleep.assert_called_once_with(
job_remote_runner._POLLING_INTERVAL_IN_SECONDS)
self.assertEqual(job_client.get_batch_prediction_job.call_count, 2)
@mock.patch.object(aiplatform.gapic, 'JobServiceClient', autospec=True)
@mock.patch.object(os.path, 'exists', autospec=True)
@mock.patch.object(google.auth, 'default', autospec=True)
@mock.patch.object(google.auth.transport.requests, 'Request', autospec=True)
@mock.patch.object(requests, 'post', autospec=True)
@mock.patch.object(ExecutionContext, '__init__', autospec=True)
def test_batch_prediction_job_remote_runner_cancel(self,
mock_execution_context,
mock_post_requests,
_, mock_auth,
mock_path_exists,
mock_job_service_client):
creds = mock.Mock()
creds.token = 'fake_token'
mock_auth.return_value = [creds, "project"]
job_client = mock.Mock()
mock_job_service_client.return_value = job_client
create_batch_prediction_job_response = mock.Mock()
job_client.create_batch_prediction_job.return_value = create_batch_prediction_job_response
create_batch_prediction_job_response.name = self._batch_prediction_job_name
get_batch_prediction_job_response = mock.Mock()
job_client.get_batch_prediction_job.return_value = get_batch_prediction_job_response
get_batch_prediction_job_response.state = gca_job_state.JobState.JOB_STATE_SUCCEEDED
get_batch_prediction_job_response.name = 'job1'
get_batch_prediction_job_response.output_info.bigquery_output_table = 'bigquery_output_table'
get_batch_prediction_job_response.output_info.bigquery_output_dataset = 'bigquery_output_dataset'
get_batch_prediction_job_response.output_info.gcs_output_directory = 'gcs_output_directory'
mock_path_exists.return_value = False
mock_execution_context.return_value = None
batch_prediction_job_remote_runner.create_batch_prediction_job(
self._job_type, self._project, self._location, self._payload,
self._gcp_resources,self._executor_input)
# Call cancellation handler
mock_execution_context.call_args[1]['on_cancel']()
mock_post_requests.assert_called_once_with(
url=f'{self._batch_prediction_job_uri_prefix}{self._batch_prediction_job_name}:cancel',
data='',
headers={
'Content-type': 'application/json',
'Authorization': 'Bearer fake_token',
})
def test_batch_prediction_job_remote_runner_sanitize_job_spec(self):
explanation_payload = (
'{"explanation_spec": '
'{"metadata": {"inputs": { "test_input_1": '
'{"input_baselines": ["0"]}, "test_input_2": '
'{"input_baselines": ["1"], "input_tensor_name": "test_name"} }'
'}, "parameters": {"sampled_shapley_attribution": '
'{"path_count": 1}, "top_k": 0 } } }')
job_spec = batch_prediction_job_remote_runner.sanitize_job_spec(
json_util.recursive_remove_empty(json.loads(explanation_payload, strict=False)))
expected_metadata = ExplanationMetadata(
inputs={
'test_input_1': ExplanationMetadata.InputMetadata.from_json(
'{"input_baselines": ["0"]}'
),
'test_input_2': ExplanationMetadata.InputMetadata.from_json(
'{"input_baselines": ["1"], "input_tensor_name": "test_name"}'
)
}
)
self.assertEqual(
expected_metadata,
job_spec['explanation_spec']['metadata']
)
|
|
from socket import socket
from socket import timeout
import threading
import Queue
import struct, sys, time
EA_SZ = 4
CMD_REQ = 0
CMD_RES = 1
BPHIT=0
GETBPS=1
SETBPS = 2
GETREGS = 3
SETREGS = 4
READMEM = 5
WRITEMEM = 6
IDA_CMD = 7
IDA_GETNAME=0
IDA_MAKECOMMENT=1
IDA_MAKEBYTE=2
IDA_MAKEWORD=3
IDA_MAKEDWORD=4
IDA_MAKEQWORD=5
IDA_MAKEOWORD=6
IDA_MAKEALIGN=7
IDA_MAKEFLOAT=8
IDA_MAKESTRING=9
IDA_MAKEDOUBLE=10
IDA_MAKECODE=11
IDA_MAKENAME=12
IDA_JUMPTO=13
IDA_SCREENEA=14
IDA_AAREA=15
IDA_CMD_ARGS = {
IDA_GETNAME:["ea"],
IDA_MAKECOMMENT:["ea","str"],
IDA_MAKEBYTE:["ea"],
IDA_MAKEWORD:["ea"],
IDA_MAKEDWORD:["ea"],
IDA_MAKEQWORD:["ea"],
IDA_MAKEOWORD:["ea"],
IDA_MAKEALIGN:["ea","int","int"],
IDA_MAKEFLOAT:["ea"],
IDA_MAKEOWORD:["ea"],
IDA_MAKEOWORD:["ea"],
IDA_MAKENAME:["ea","str"],
IDA_SCREENEA:["ea"],
IDA_JUMPTO:["ea"],
IDA_AAREA:["ea","ea"],
IDA_MAKEFLOAT:["ea","ea"]
}
class RecvThread(threading.Thread):
    def __init__(self, cmd_class, recvQ):
        threading.Thread.__init__(self)
        self.queue = recvQ
        self.cmd_class = cmd_class
        self.handle_queue = True
def run(self):
while self.handle_queue:
if self.queue.empty():
time.sleep(.1)
continue
# process data as buffer
d = self.queue.get()
data = "" # buffer
cmd = "" # self.parse_data(data)
if cmd and cmd == "req":
self.cmd_class.handle_reqdbg_cmd(cmd)
elif cmd and cmd == "rsp":
self.cmd_class.handle_rspdbg_cmd(cmd)
else:
print "Invalid Command or Type"
class ServerThread(threading.Thread):
def __init__(self, cmd_class, address="127.0.0.1", port=8080):
threading.Thread.__init__(self)
self.server = (address, port)
self.client = None
self.cmd_class = cmd_class
self.listen = False
self.sock = socket()
self.sock.settimeout(2)
self.sock.bind(self.server)
self.client = None
self.connected = False
def run(self):
self.listen = True
self.sock.listen(1)
self.client = None
while self.listen:
try:
self.client, self.addr = self.sock.accept()
self.connected = True
self.recv_traffic()
except:
print "Client Error :("
self.client = None
def send_traffic(self, data):
if self.client:
try:
self.client.send(data)
return True
except:
print "Exception when trying to send data"
return False
def recv_traffic(self):
print "Receiving client: ",str(addr)
while self.listen and self.client:
try:
# need to write a buffer class
data = self.client.recv(65535)
self.handle_recv(data)
except timeout:
pass
def handle_recv(self, data):
self.cmd_class.add_cmd(data)
def stop_listening(self):
self.listen = False
class Dlistener:
def __init__(self, vdb, address="127.0.0.1", port=8080):
self.server = (address, port)
self.server_thread = None
self.recvQ = Queue.Queue()
self.recv_thread = None
self.vdb = vdb
def handle_local_cmd(self, cmd_str):
if not self.server_thread:
print "Not listening for clients"
return False
elif not self.server_thread.connected:
print "Not connected to any clients"
return False
# process cmd str
# create a buffer
data = "" # buffer assigned here
return self.server_thread.send_traffic(data)
def handle_remote_request(self, data):
# add data to Queue
self.recvQ.put(data)
def start_listener(self, server=None, port=None):
if server and port:
self.server = (server, port)
# NOTE: pass this Dlistener as the command handler; ServerThread expects (cmd_class, address, port)
self.server_thread = ServerThread(self, address=self.server[0], port=self.server[1])
self.server_thread.start()
class Buffer:
def __init__(self):
self.data = ""
self.rptr = 0
self.wptr = 0
def append(self, data):
self.data += data
self.wptr += len(data)
def read(self, length):
    # read `length` bytes from the internal buffer, advancing the read pointer
    s = None
    if self.rptr + length <= self.wptr:
        s = self.data[self.rptr:self.rptr+length]
        self.rptr += length
    return s
def read_long(self):
    long_val = None
    if self.rptr + 8 <= self.wptr:
        long_val = struct.unpack(">Q", self.data[self.rptr:self.rptr+8])[0]
        self.rptr += 8
    return long_val
def read_int(self):
    int_val = None
    if self.rptr + 4 <= self.wptr:
        int_val = struct.unpack(">I", self.data[self.rptr:self.rptr+4])[0]
        self.rptr += 4
    return int_val
def read_short(self):
    short_val = None
    if self.rptr + 2 <= self.wptr:
        short_val = struct.unpack(">H", self.data[self.rptr:self.rptr+2])[0]
        self.rptr += 2
    return short_val
def read_byte(self):
    byte_val = None
    if self.rptr + 1 <= self.wptr:
        byte_val = struct.unpack(">B", self.data[self.rptr:self.rptr+1])[0]
        self.rptr += 1
    return byte_val
def rewind(self, rew):
# todo make sure this
# matches up
if rew <= self.rptr:
self.rptr -= rew
return True
return False
def reset(self):
self.data = ""
self.wptr = 0
self.rptr = 0
def write(self, data, length):
if length <= len(data):
self.data += data[0:length]
self.wptr += length
return True
return False
def write_long(self, data):
self.data += struct.pack(">Q",data)
self.wptr += len(struct.pack(">Q",data))
return True
def write_int(self, data):
self.data += struct.pack(">I",data)
self.wptr += len(struct.pack(">I",data))
return True
def write_short(self, data):
self.data += struct.pack(">H",data)
self.wptr += len(struct.pack(">H",data))
return True
def write_byte(self, data):
self.data += struct.pack(">B",data)
self.wptr += len(struct.pack(">B",data))
return True
def get_buf(self):
    return self.data
def get_size(self):
    return len(self.data)
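# Minimal usage sketch for Buffer (illustrative only): values are packed
# big-endian on write and unpacked in the same order on read.
#
#   buf = Buffer()
#   buf.write_int(0xdeadbeef)    # wptr advances by 4
#   buf.write_short(0x1234)      # wptr advances by 2
#   assert buf.read_int() == 0xdeadbeef
#   assert buf.read_short() == 0x1234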
pack_ea = None
unpack_ea =None
if EA_SZ == 8:
pack_ea = lambda x: struct.pack(">Q",x)
unpack_ea = lambda x: struct.unpack(">Q",x)[0]
else:
pack_ea = lambda x: struct.pack(">I",x)
unpack_ea = lambda x: struct.unpack_from(">I",x)[0]
pack_dword = lambda x: struct.pack(">I",x)
unpack_dword = lambda x: struct.unpack_from(">I",x)[0]
byte_x = lambda b,p: chr((b>>(p*8))& 0xff)
get_dword = lambda x: byte_x(x,3)+byte_x(x,2)+byte_x(x,1)+byte_x(x,0)
def build_pkt(typ, cmd, len_data,data):
msg = pack_ea(typ) + pack_ea(cmd) + pack_ea(len_data)+data
return pack_ea(len(msg)+EA_SZ)+msg
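# Wire format produced by build_pkt (each header field is EA_SZ bytes, big-endian):
#   [total_len][type][cmd][data_len][data]
# e.g. with EA_SZ == 4, build_pkt(CMD_REQ, GETREGS, 0, "") yields
#   pack_ea(16) + pack_ea(0) + pack_ea(3) + pack_ea(0)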
def parse_ida_msg(data):
cmd_buffer = data
cmd = unpack_dword(cmd_buffer)
cmd_buffer = cmd_buffer[4:]  # advance past the command dword before parsing its arguments
if cmd not in IDA_CMD_ARGS:
return None
template = IDA_CMD_ARGS[cmd]
cnt = 0
cmd_args = []
while cnt < len(template):
if template[cnt] == "int":
cmd_args.append(unpack_dword(cmd_buffer))
cmd_buffer = cmd_buffer[4:]
elif template[cnt] == "ea":
cmd_args.append(unpack_dword(cmd_buffer))
cmd_buffer = cmd_buffer[EA_SZ:]
elif template[cnt] == "str":
cmd_args.append(cmd_buffer)
cnt += 1
return cmd_args
def create_ida_pkt(cmd_type, *args):
if not cmd_type in IDA_CMD_ARGS:
return None
template = IDA_CMD_ARGS[cmd_type]
if len(template) != len(args):
return None
cnt = 0
msg = pack_dword(IDA_CMD) + pack_dword(cmd_type)
while cnt < len(template):
if template[cnt] == "int" and isinstance(args[cnt],int):
msg+=pack_dword(args[cnt])
elif template[cnt] == "ea":
if isinstance(args[cnt],int) or isinstance(args[cnt],long):
msg+=pack_ea(args[cnt])
else:
print "Arg %d is not of type '%s':%s"%(cnt, template[cnt],str(arg[cnt]))
return None
elif template[cnt] == "str":
msg+=str(args[cnt])
else:
print "Arg %d is not of type '%s':%s"%(cnt, template[cnt],str(arg[cnt]))
return None
cnt += 1
return msg
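# Illustrative round trip (the address and comment text are made up):
#   create_ida_pkt(IDA_MAKECOMMENT, 0x401000, "decrypt loop")
# builds pack_dword(IDA_CMD) + pack_dword(IDA_MAKECOMMENT) + pack_ea(0x401000) + "decrypt loop".
# Feeding the portion that starts at the IDA_MAKECOMMENT dword to parse_ida_msg()
# yields [0x401000, "decrypt loop"], following the ["ea", "str"] template.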
def parse_header(data):
typ,cmd = struct.unpack_from(">II",data)
return typ,cmd,data[2*EA_SZ:]
def parse_response(data, s=None):
typ,cmd,rest = parse_header(data)
print "type: %d cmd: %d rest: %s"%(typ,cmd,repr(rest))
if typ == 1 and cmd == IDA_CMD:
return typ,cmd,parse_ida_msg(rest)
elif cmd == GETREGS and typ == 0:
if s:
f = recv_get_regs()
msg = build_pkt(typ+1, cmd, len(f),f)
s.send(msg)
print repr(msg)
return typ,cmd,recv_get_regs()
elif cmd == SETREGS and typ == 0:
return typ,cmd,recv_set_regs(rest)
elif cmd == GETBPS and typ == 0:
if s:
f = recv_get_bps()
msg = build_pkt(typ+1, cmd, len(f),f)
s.send(msg)
print repr(msg)
return typ,cmd,recv_get_bps()
elif cmd == SETBPS and typ == 0:
return typ,cmd,recv_set_bps(rest)
return typ,cmd,"Could not pares the rest:"+rest
def recv_get_regs():
return "eax:0x1222,ebx:0x28198,ecx:0x89898,edx:0x1222,ebp:0x28198,esp:0x89898,esi:0x1222,edi:0x28198,eflags:0x89898,ei:0x89898"
def create_regs_rsp():
    regs = recv_get_regs()
    l = len(regs)
    data = pack_dword(CMD_RES) + pack_dword(GETREGS) + pack_dword(l) + regs
    return data
def recv_get_bps(data=''):
return "0x1234,0x234,0x56678,0x5678"
def create_bps_rsp():
    bps = recv_get_bps()
    l = len(bps)
    data = pack_dword(CMD_RES) + pack_dword(GETBPS) + pack_dword(l) + bps
    return data
def set_bps(bps):
print "Recv'd the following regs"
for bp in bps:
print "breakpoint: %s"%(bp)
def recv_set_bps(data):
cmd_len = unpack_ea(data)
print "Recv'd len:", cmd_len
data = data[EA_SZ:]
addrs = data.split(",")
#print addrs
set_bps(addrs)
return addrs
def set_regs(regs):
print "Recv'd the following regs"
#print regs
for r,v in regs:
print "register: %s value %s"%(r,v)
def recv_set_regs(data):
cmd_len = unpack_ea(data)
print "Recv'd len:", cmd_len
data = data[EA_SZ:]
regs = []
reg_data = data.split(",")
for i in reg_data:
#print i
r = i.split(":")
regs.append(r)
set_regs(regs)
return regs
s = socket()
s.bind(("127.0.0.1",8088))
s.listen(10)
def handle_client(s):
while (1):
t = s.recv(4)
le = unpack_dword(t)
print "Expecting %d bytes of data"%(le)
t = s.recv(le)
#print "Recv'd: ", repr(t)
print parse_response(t,s)
def run_server(s):
while (1):
try:
c,a = s.accept()
print "Client connected",a
handle_client(c)
except KeyboardInterrupt:
return
except:
print sys.exc_info()
run_server(s)
|
|
#-------------------------------------------------------------------------------
# Name: SCE_Python
# Purpose: find smallest value of criterium
#
# Author: VHOEYS
#
# Created: 11/10/2011
# Copyright: (c) VHOEYS 2011
#-------------------------------------------------------------------------------
#!/usr/bin/env python
import random as rd
import numpy as np
from SCE_cceua import *
#####################################################################
def cceua(s, sf, bl, bu, icall, maxn,
testcase=True,testnr=1, extra=[]):
"""
This is the subroutine for generating a new point in a simplex
s(.,.) = the sorted simplex in order of increasing function values
    sf(.) = function values in increasing order
Attributes
-----------
sb(.) :
the best point of the simplex
sw(.) :
the worst point of the simplex
w2(.) :
the second worst point of the simplex
fw :
function value of the worst point
ce(.) :
the centroid of the simplex excluding wo
snew(.) :
new point generated from the simplex
iviol = flag indicating if constraints are violated
= 1 , yes
= 0 , no
"""
nps,nopt=s.shape
n = nps
m = nopt
alpha = 1.0
beta = 0.5
# Assign the best and worst points:
sb=s[0,:]
fb=sf[0]
sw=s[-1,:]
fw=sf[-1]
# Compute the centroid of the simplex excluding the worst point:
ce= np.mean(s[:-1,:],axis=0)
# Attempt a reflection point
snew = ce + alpha*(ce-sw)
    # Check if it is outside the bounds:
ibound=0
s1=snew-bl
idx=(s1<0).nonzero()
    if idx[0].size != 0:
ibound=1
s1=bu-snew
idx=(s1<0).nonzero()
    if idx[0].size != 0:
ibound=2
if ibound >= 1:
        snew = SampleInputMatrix(1, nopt, bu, bl, distname='randomUniform')[0]  # check this!!
## fnew = functn(nopt,snew);
fnew = EvalObjF(nopt,snew,testcase=testcase,testnr=testnr,extra=extra)
icall += 1
# Reflection failed; now attempt a contraction point:
if fnew > fw:
snew = sw + beta*(ce-sw)
fnew = EvalObjF(nopt,snew,testcase=testcase,testnr=testnr,extra=extra)
icall += 1
# Both reflection and contraction have failed, attempt a random point;
if fnew > fw:
        snew = SampleInputMatrix(1,nopt,bu,bl,distname='randomUniform')[0]  # check this!!
fnew = EvalObjF(nopt,snew,testcase=testcase,testnr=testnr,extra=extra)
icall += 1
# END OF CCE
return snew,fnew,icall
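# Worked micro-example (illustrative): with a 1-D simplex s = [[0.0], [2.0]],
# sf = [1.0, 5.0] and bounds bl = [-10], bu = [10], the centroid of all but the
# worst point is ce = [0.0], so the reflection trial is
# snew = ce + 1.0*(ce - sw) = [-2.0]; if its objective value is still worse
# than fw = 5.0, the contraction trial sw + 0.5*(ce - sw) = [1.0] is evaluated.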
def sceua(x0, bl, bu, maxn, kstop, pcento, peps, ngs, iseed,
iniflg, testcase=True, testnr=1, extra=[]):
"""
This is the subroutine implementing the SCE algorithm,
written by Q.Duan, 9/2004
Parameters
-----------
x0 : np.array
the initial parameter array at the start;
= the optimized parameter array at the end
f0 : float
the objective function value corresponding to the initial parameters
= the objective function value corresponding to the optimized parameters
bl : np.array
the lower bound of the parameters;
bu : np.array
the upper bound of the parameters;
iseed : int
        the random seed number (for repetitive testing purposes)
iniflg :
        flag for the initial parameter array (=1: include it in the initial
        population; otherwise it is not included)
ngs : int
number of complexes (sub-populations)
npg : int
number of members in a complex
nps : int
number of members in a simplex
nspl : int
number of evolution steps for each complex before shuffling
mings : int
minimum number of complexes required during the optimization process
maxn : int
maximum number of function evaluations allowed during optimization
kstop : int
        maximum number of evolution loops before convergence
    pcento : float
        the percentage change allowed in kstop loops before convergence
Attributes
-----------
x(.,.) : coordinates of points in the population
xf(.) : function values of x(.,.)
xx(.) : coordinates of a single point in x
cx(.,.) : coordinates of points in a complex
cf(.) : function values of cx(.,.)
s(.,.) : coordinates of points in the current simplex
sf(.) : function values of s(.,.)
bestx(.) : best point at current shuffling loop
bestf : function value of bestx(.)
worstx(.) : worst point at current shuffling loop
worstf : function value of worstx(.)
xnstd(.) : standard deviation of parameters in the population
gnrng : normalized geometric mean of parameter ranges
lcs(.) : indices locating position of s(.,.) in x(.,.)
bound(.) : bound on ith variable being optimized
ngs1 : number of complexes in current population
ngs2 : number of complexes in last population
iseed1 : current random seed
criter(.) : vector containing the best criterion values of the last
10 shuffling loops
"""
# Initialize SCE parameters:
nopt=x0.size
npg=2*nopt+1
nps=nopt+1
nspl=npg
mings=ngs
npt=npg*ngs
bound = bu-bl #np.array
# Create an initial population to fill array x(npt,nopt):
x = SampleInputMatrix(npt,nopt,bu,bl,distname='randomUniform')
if iniflg==1:
x[0,:]=x0
nloop=0
icall=0
xf=np.zeros(npt)
for i in range (npt):
xf[i] = EvalObjF(nopt,x[i,:],testcase=testcase,testnr=testnr,extra=extra)
icall += 1
f0=xf[0]
# Sort the population in order of increasing function values;
idx = np.argsort(xf)
xf = np.sort(xf)
x=x[idx,:]
# Record the best and worst points;
bestx=x[0,:]
bestf=xf[0]
worstx=x[-1,:]
worstf=xf[-1]
BESTF=bestf
BESTX=bestx
ICALL=icall
# Compute the standard deviation for each parameter
xnstd=np.std(x,axis=0)
# Computes the normalized geometric range of the parameters
gnrng=np.exp(np.mean(np.log((np.max(x,axis=0)-np.min(x,axis=0))/bound)))
print 'The Initial Loop: 0'
print ' BESTF: %f ' %bestf
print ' BESTX: '
print bestx
print ' WORSTF: %f ' %worstf
print ' WORSTX: '
print worstx
print ' '
    # Check for convergence;
if icall >= maxn:
print '*** OPTIMIZATION SEARCH TERMINATED BECAUSE THE LIMIT'
print 'ON THE MAXIMUM NUMBER OF TRIALS '
print maxn
print 'HAS BEEN EXCEEDED. SEARCH WAS STOPPED AT TRIAL NUMBER:'
print icall
print 'OF THE INITIAL LOOP!'
if gnrng < peps:
print 'THE POPULATION HAS CONVERGED TO A PRESPECIFIED SMALL PARAMETER SPACE'
# Begin evolution loops:
nloop = 0
criter=[]
criter_change=1e+5
while icall<maxn and gnrng>peps and criter_change>pcento:
nloop+=1
# Loop on complexes (sub-populations);
for igs in range(ngs):
# Partition the population into complexes (sub-populations);
cx=np.zeros((npg,nopt))
cf=np.zeros((npg))
k1=np.array(range(npg))
k2=k1*ngs+igs
cx[k1,:] = x[k2,:]
cf[k1] = xf[k2]
# Evolve sub-population igs for nspl steps:
for loop in range(nspl):
# Select simplex by sampling the complex according to a linear
# probability distribution
lcs=np.array([0]*nps)
lcs[0] = 1
for k3 in range(1,nps):
for i in range(1000):
## lpos = 1 + int(np.floor(npg+0.5-np.sqrt((npg+0.5)**2 - npg*(npg+1)*random.random())))
lpos = int(np.floor(npg+0.5-np.sqrt((npg+0.5)**2 - npg*(npg+1) * rd.random())))
## idx=find(lcs(1:k3-1)==lpos)
                    idx=(lcs[0:k3]==lpos).nonzero()  # check whether this element was already chosen
if idx[0].size == 0:
break
lcs[k3] = lpos
lcs.sort()
# Construct the simplex:
s = np.zeros((nps,nopt))
s=cx[lcs,:]
sf = cf[lcs]
snew,fnew,icall=cceua(s,sf,bl,bu,icall,maxn,testcase=testcase,testnr=testnr,extra=extra)
# Replace the worst point in Simplex with the new point:
s[-1,:] = snew
sf[-1] = fnew
# Replace the simplex into the complex;
cx[lcs,:] = s
cf[lcs] = sf
# Sort the complex;
idx = np.argsort(cf)
cf = np.sort(cf)
cx=cx[idx,:]
# End of Inner Loop for Competitive Evolution of Simplexes
#end of Evolve sub-population igs for nspl steps:
# Replace the complex back into the population;
x[k2,:] = cx[k1,:]
xf[k2] = cf[k1]
# End of Loop on Complex Evolution;
        # Shuffle the complexes;
idx = np.argsort(xf)
xf = np.sort(xf)
x=x[idx,:]
PX=x
PF=xf
# Record the best and worst points;
bestx=x[0,:]
bestf=xf[0]
worstx=x[-1,:]
worstf=xf[-1]
        BESTX = np.append(BESTX,bestx, axis=0)  # append and reshape at the end!!
BESTF = np.append(BESTF,bestf)
ICALL = np.append(ICALL,icall)
# Compute the standard deviation for each parameter
xnstd=np.std(x,axis=0)
# Computes the normalized geometric range of the parameters
gnrng=np.exp(np.mean(np.log((np.max(x,axis=0)-np.min(x,axis=0))/bound)))
print 'Evolution Loop: %d - Trial - %d' %(nloop,icall)
print ' BESTF: %f ' %bestf
print ' BESTX: '
print bestx
print ' WORSTF: %f ' %worstf
print ' WORSTX: '
print worstx
print ' '
        # Check for convergence;
if icall >= maxn:
print '*** OPTIMIZATION SEARCH TERMINATED BECAUSE THE LIMIT'
print 'ON THE MAXIMUM NUMBER OF TRIALS '
print maxn
print 'HAS BEEN EXCEEDED.'
if gnrng < peps:
print 'THE POPULATION HAS CONVERGED TO A PRESPECIFIED SMALL PARAMETER SPACE'
criter=np.append(criter,bestf)
        if nloop >= kstop:  # needed so that at least this many loops are run
criter_change= np.abs(criter[nloop-1]-criter[nloop-kstop])*100
criter_change= criter_change/np.mean(np.abs(criter[nloop-kstop:nloop]))
if criter_change < pcento:
print 'THE BEST POINT HAS IMPROVED IN LAST %d LOOPS BY LESS THAN THE THRESHOLD %f' %(kstop,pcento)
                print 'CONVERGENCE HAS BEEN ACHIEVED BASED ON OBJECTIVE FUNCTION CRITERIA!!!'
# End of the Outer Loops
print 'SEARCH WAS STOPPED AT TRIAL NUMBER: %d' %icall
print 'NORMALIZED GEOMETRIC RANGE = %f' %gnrng
print 'THE BEST POINT HAS IMPROVED IN LAST %d LOOPS BY %f' %(kstop,criter_change)
#reshape BESTX
BESTX=BESTX.reshape(BESTX.size/nopt,nopt)
# END of Subroutine sceua
return bestx,bestf,BESTX,BESTF,ICALL
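# Minimal usage sketch (illustrative values; assumes EvalObjF/SampleInputMatrix
# from SCE_cceua and that testnr=1 selects one of its built-in test functions):
#
#   bl = np.array([-5.0, -5.0])            # lower bounds
#   bu = np.array([ 5.0,  5.0])            # upper bounds
#   x0 = np.array([ 1.0,  1.0])            # initial guess
#   bestx, bestf, BESTX, BESTF, ICALL = sceua(
#       x0, bl, bu, maxn=3000, kstop=10, pcento=0.1, peps=0.001,
#       ngs=2, iseed=42, iniflg=1, testcase=True, testnr=1)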
|
|
#!/usr/bin/python
import os
import numpy as np
import matplotlib.pyplot as pp
import time
def load_gains(cal_table):
dtype=[('time', 'f8'), ('a1', 'i4'), ('a2', 'i4'), ('cparam', 'c16', 2),
('paramerr', 'f8', 2), ('flag', 'i4',2), ('snr', 'f8', 2)]
tb.open(cal_table, nomodify=True)
gains = np.zeros((tb.nrows(),), dtype=dtype)
gains['time'] = tb.getcol('TIME')
gains['a1'] = tb.getcol('ANTENNA1')
gains['a2'] = tb.getcol('ANTENNA2')
gains_cparam = tb.getcol('CPARAM')
gains['cparam'][:, 0] = gains_cparam[0, 0, :]
gains['cparam'][:, 1] = gains_cparam[1, 0, :]
gains_paramerr = tb.getcol('PARAMERR')
gains['paramerr'][:, 0] = gains_paramerr[0, 0, :]
gains['paramerr'][:, 1] = gains_paramerr[1, 0, :]
gains_flag = tb.getcol('FLAG')
gains['flag'][:, 0] = gains_flag[0, 0, :]
gains['flag'][:, 1] = gains_flag[1, 0, :]
gains_snr = tb.getcol('SNR')
gains['snr'][:, 0] = gains_snr[0, 0, :]
gains['snr'][:, 1] = gains_snr[1, 0, :]
tb.close()
tb.open(cal_table+'/ANTENNA')
num_antennas = tb.nrows()
tb.close()
return gains, num_antennas
try:
gains_cal
except NameError:
# ----------------------------------------------------------------------
cal_table_cal_ave = os.path.join('bench_02', 'calibrated_ave.gains')
cal_table_cal = os.path.join('bench_02', 'calibrated.gains')
cal_table_cor = os.path.join('bench_02', 'corrupted.gains')
# ----------------------------------------------------------------------
t0 = time.time()
gains_cal_ave, num_antennas = load_gains(cal_table_cal_ave)
print '+ Load of gain table complete in %.3f s.' % (time.time()-t0)
print '+ No. antennas %i' % (num_antennas)
t0 = time.time()
gains_cal, num_antennas = load_gains(cal_table_cal)
print '+ Load of gain table complete in %.3f s.' % (time.time()-t0)
print '+ No. antennas %i' % (num_antennas)
t0 = time.time()
gains_cor, num_antennas = load_gains(cal_table_cor)
print '+ Load of gain table complete in %.3f s.' % (time.time()-t0)
print '+ No. antennas %i' % (num_antennas)
# Obtain gains for antenna a1 with phase reference of antenna a2
a1 = 100
a2 = 0
gains_cal_ave_a = gains_cal_ave[gains_cal_ave['a1']==a1]
gains_cal_ave_a = gains_cal_ave_a[gains_cal_ave_a['a2']==a2]
gains_cal_a = gains_cal[gains_cal['a1']==a1]
gains_cal_a = gains_cal_a[gains_cal_a['a2']==a2]
a2 = -1
gains_cor_a = gains_cor[gains_cor['a1']==a1]
gains_cor_a = gains_cor_a[gains_cor_a['a2']==a2]
# Remove flagged antenna
gains_cal_ave_a = gains_cal_ave_a[gains_cal_ave_a['flag'][:,0]==0]
gains_cal_a = gains_cal_a[gains_cal_a['flag'][:,0]==0]
gain_check = gains_cal_a['cparam'][:,0] * gains_cor_a['cparam'][:,0]
print 'gain amp check: amp %.3e+/-%.3e' % (np.mean(np.abs(gain_check))-1.0,
np.std(np.abs(gain_check)))
print 'gain amp check: phase %.3e+/-%.3e deg' % (np.mean(np.angle(gain_check))*(180./np.pi),
np.std(np.angle(gain_check))*(180./np.pi))
# ave_ref = np.unique(gains_cal_ave['a2'])
# for a in ave_ref:
# print a, np.sum(gains_cal_ave['a2']==a)/float(num_antennas)
#
# print '====='
# ave_ref = np.unique(gains_cal['a2'])
# for a in ave_ref:
# print a, np.sum(gains_cal['a2']==a)/num_antennas
make_plot = True
make_plot_1 = False
if make_plot:
if make_plot_1:
fig, axes = pp.subplots(6, 1, sharex=True, sharey=False, figsize=(12,10))
# -------------------
ax = axes[0]
ax.set_title('Gains for antenna %i' % a1)
x = gains_cor_a['time']-gains_cor_a['time'][0]
y = np.angle(gains_cor_a['cparam'][:, 0])*(180./np.pi)
y = -y
ax.plot(x, y, 'ro-', markerfacecolor='None', markeredgecolor='r', ms=3, lw=2,
label='corruption')
x = gains_cal_a['time']-gains_cal_a['time'][0]
y = np.angle(gains_cal_a['cparam'][:, 0])*(180./np.pi)
ax.plot(x, y, 'b+--', markerfacecolor='None', markeredgecolor='b', ms=3, lw=2,
label='calibrated')
# x = gains_cal_ave_a['time']-gains_cal_ave_a['time'][0]
# y = np.angle(gains_cal_ave_a['cparam'][:, 0])*(180./np.pi)
# ax.plot(x, y, 'g.', markerfacecolor='None', markeredgecolor='g', ms=3, lw=2,
# label='BDA calibrated')
ax.set_ylabel('gain phase [deg]')
ax.legend(ncol=2, prop={'size':8})
ax.grid()
# -------------------
ax = axes[1]
x = gains_cor_a['time']-gains_cor_a['time'][0]
y = 1./np.abs(gains_cor_a['cparam'][:, 0])
ax.plot(x, y, 'ro-', markerfacecolor='None', markeredgecolor='r', ms=3, lw=2,
label='corruption')
x = gains_cal_a['time']-gains_cal_a['time'][0]
y = np.abs(gains_cal_a['cparam'][:, 0])
ax.plot(x, y, 'b+--', markerfacecolor='None', markeredgecolor='b', ms=3, lw=2,
label='calibrated')
# x = gains_cal_ave_a['time']-gains_cal_ave_a['time'][0]
# y = np.abs(gains_cal_ave_a['cparam'][:, 0])
# ax.plot(x, y, 'g.', markerfacecolor='None', markeredgecolor='g', ms=3, lw=2,
# label='BDA calibrated')
ax.set_ylabel('gain amplitude')
ax.legend(ncol=2, prop={'size':8})
ax.grid()
# -------------------
ax = axes[2]
x = gains_cal_a['time']-gains_cal_a['time'][0]
y = np.abs(gains_cal_a['snr'][:, 0])
ax.plot(x, y, 'bo', markerfacecolor='None', markeredgecolor='b', ms=3, lw=1,
label='calibrated')
# x = gains_cal_ave_a['time']-gains_cal_ave_a['time'][0]
# y = np.abs(gains_cal_ave_a['snr'][:, 0])
# ax.plot(x, y, 'g.', markerfacecolor='None', markeredgecolor='g', ms=3, lw=2,
# label='BDA calibrated')
ax.set_ylabel('gain snr')
ax.legend(ncol=2, prop={'size':8})
ax.grid()
# -------------------
ax = axes[3]
x = gains_cal_a['time']-gains_cal_a['time'][0]
y = np.abs(gains_cal_a['paramerr'][:, 0])
ax.plot(x, y, 'bo', markerfacecolor='None', markeredgecolor='b', ms=3, lw=2,
label='calibrated')
# x = gains_cal_ave_a['time']-gains_cal_ave_a['time'][0]
# y = np.abs(gains_cal_ave_a['paramerr'][:, 0])
# ax.plot(x, y, 'g.', markerfacecolor='None', markeredgecolor='g', ms=3, lw=2,
# label='BDA calibrated')
ax.set_ylabel('gain paramerr')
ax.legend(ncol=2, prop={'size':8})
ax.grid()
# -------------------
ax = axes[4]
x = gains_cal_a['time']-gains_cal_a['time'][0]
y = np.abs(gain_check)
ax.plot(x, y, 'bo', markerfacecolor='None', markeredgecolor='b', ms=3, lw=2,
label='check')
# x = gains_cal_ave_a['time']-gains_cal_ave_a['time'][0]
# y = np.abs(gains_cal_ave_a['paramerr'][:, 0])
# ax.plot(x, y, 'g.', markerfacecolor='None', markeredgecolor='g', ms=3, lw=2,
# label='BDA calibrated')
ax.set_ylabel('gain amp check')
ax.grid()
# -------------------
ax = axes[5]
x = gains_cal_a['time']-gains_cal_a['time'][0]
y = np.angle(gain_check)*(180./np.pi)
ax.plot(x, y, 'bo', markerfacecolor='None', markeredgecolor='b', ms=3, lw=2,
label='check')
# x = gains_cal_ave_a['time']-gains_cal_ave_a['time'][0]
# y = np.abs(gains_cal_ave_a['paramerr'][:, 0])
# ax.plot(x, y, 'g.', markerfacecolor='None', markeredgecolor='g', ms=3, lw=2,
# label='BDA calibrated')
ax.set_ylabel('gain phase check [deg]')
ax.grid()
pp.draw()
pp.savefig('check_gains_01.png')
fig = pp.figure()
for i in range(50, 100):
        # Obtain gains for antenna a1 with phase reference of antenna a2
a1 = i
a2 = 0
gains_cal_ave_a = gains_cal_ave[gains_cal_ave['a1']==a1]
gains_cal_ave_a = gains_cal_ave_a[gains_cal_ave_a['a2']==a2]
a2 = 0
gains_cal_a = gains_cal[gains_cal['a1']==a1]
gains_cal_a = gains_cal_a[gains_cal_a['a2']==a2]
a2 = -1
gains_cor_a = gains_cor[gains_cor['a1']==a1]
gains_cor_a = gains_cor_a[gains_cor_a['a2']==a2]
# Remove flagged antenna
gains_cal_ave_a = gains_cal_ave_a[gains_cal_ave_a['flag'][:,0]==0]
gains_cal_a = gains_cal_a[gains_cal_a['flag'][:,0]==0]
pp.clf()
ax = fig.add_subplot(111)
ax.set_title('Gains antenna %i' % a1)
x = gains_cal_ave_a['time']-gains_cal_ave_a['time'][0]
y = np.abs(gains_cal_ave_a['cparam'][:, 0])
ax.plot(x, y, 'g+', markerfacecolor='None', markeredgecolor='g', ms=8, lw=3,
mew=2, label='BDA calibrated')
x = gains_cal_a['time']-gains_cal_a['time'][0]
y = np.abs(gains_cal_a['cparam'][:, 0])
ax.plot(x, y, 'bo', markerfacecolor='None', markeredgecolor='b', ms=3, lw=3,
label='calibrated')
ax.legend(ncol=2, prop={'size':8})
ax.grid()
pp.draw()
pp.show(block=False)
pp.savefig('check_gains_ave_%03i.png' % a1)
time.sleep(0.2)
pp.show(block=True)
|
|
# Copyright 2019 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common Serializers that can be used by APIs.
This file contains the following serializers (and corresponding deserializers)
- SINGLE_QUBIT_SERIALIZERS: A list of GateOpSerializer for single qubit
rotations using cirq Gates.
- MEASUREMENT_SERIALIZER: Single GateOpSerializer for the measurement gate
- SINGLE_QUBIT_HALF_PI_SERIALIZERS: A list of GateOpSerializer for single qubit
rotations confined to half-pi increments using cirq Gates.
"""
from typing import cast, List, Union
import numpy as np
import sympy
import cirq
from cirq_google.api import v2
from cirq_google.experimental.ops import CouplerPulse
from cirq_google.ops import PhysicalZTag, SYC, fsim_gate_family
from cirq_google.serialization import op_deserializer, op_serializer
# Type strings used in serialization for the two types of Z operations
PHYSICAL_Z = 'physical'
VIRTUAL_Z = 'virtual_propagates_forward'
# Strings used for phase matching args
PHASE_MATCH_PHYS_Z = 'phys_z'
def _near_mod_n(e, t, n, atol=fsim_gate_family.DEFAULT_ATOL):
"""Returns whether a value, e, translated by t, is equal to 0 mod n."""
if isinstance(e, sympy.Symbol):
return False
return abs((e - t + 1) % n - 1) <= atol
def _near_mod_2pi(e, t, atol=fsim_gate_family.DEFAULT_ATOL):
"""Returns whether a value, e, translated by t, is equal to 0 mod 2 * pi."""
return _near_mod_n(e, t, n=2 * np.pi, atol=atol)
def _near_mod_2(e, t, atol=fsim_gate_family.DEFAULT_ATOL):
"""Returns whether a value, e, translated by t, is equal to 0 mod 2."""
return _near_mod_n(e, t, n=2, atol=atol)
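# Illustrative behaviour of the helpers above (values chosen for clarity):
#   _near_mod_2pi(np.pi / 2, np.pi / 2)   -> True   (difference is 0 mod 2*pi)
#   _near_mod_2(3.5, 1.5)                 -> True   (difference is 2, i.e. 0 mod 2)
#   _near_mod_n(sympy.Symbol('t'), 1, 2)  -> False  (symbolic values never match)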
def _convert_physical_z(op: cirq.Operation, proto: v2.program_pb2.Operation):
if 'type' in proto.args:
if proto.args['type'].arg_value.string_value == PHYSICAL_Z:
return op.with_tags(PhysicalZTag())
return op
#############################################
#
# Single qubit serializers and deserializers
#
#############################################
#
# Single qubit serializers for arbitrary rotations
#
SINGLE_QUBIT_SERIALIZERS = [
op_serializer.GateOpSerializer(
gate_type=cirq.PhasedXPowGate,
serialized_gate_id='xy',
args=[
op_serializer.SerializingArg(
serialized_name='axis_half_turns',
serialized_type=float,
op_getter='phase_exponent',
),
op_serializer.SerializingArg(
serialized_name='half_turns',
serialized_type=float,
op_getter='exponent',
),
],
),
op_serializer.GateOpSerializer(
gate_type=cirq.XPowGate,
serialized_gate_id='xy',
args=[
op_serializer.SerializingArg(
serialized_name='axis_half_turns',
serialized_type=float,
op_getter=lambda op: 0.0,
),
op_serializer.SerializingArg(
serialized_name='half_turns',
serialized_type=float,
op_getter='exponent',
),
],
),
op_serializer.GateOpSerializer(
gate_type=cirq.YPowGate,
serialized_gate_id='xy',
args=[
op_serializer.SerializingArg(
serialized_name='axis_half_turns',
serialized_type=float,
op_getter=lambda op: 0.5,
),
op_serializer.SerializingArg(
serialized_name='half_turns',
serialized_type=float,
op_getter='exponent',
),
],
),
op_serializer.GateOpSerializer(
gate_type=cirq.ZPowGate,
serialized_gate_id='z',
args=[
op_serializer.SerializingArg(
serialized_name='half_turns',
serialized_type=float,
op_getter='exponent',
),
op_serializer.SerializingArg(
serialized_name='type',
serialized_type=str,
op_getter=lambda op: PHYSICAL_Z if PhysicalZTag() in op.tags else VIRTUAL_Z,
),
],
),
op_serializer.GateOpSerializer(
gate_type=cirq.PhasedXZGate,
serialized_gate_id='xyz',
args=[
op_serializer.SerializingArg(
serialized_name='x_exponent',
serialized_type=float,
op_getter='x_exponent',
),
op_serializer.SerializingArg(
serialized_name='z_exponent',
serialized_type=float,
op_getter='z_exponent',
),
op_serializer.SerializingArg(
serialized_name='axis_phase_exponent',
serialized_type=float,
op_getter='axis_phase_exponent',
),
],
),
]
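# For illustration only, operations like the following are handled by the
# serializers above (q is any cirq qubit, e.g. cirq.GridQubit(0, 0)):
#   (cirq.X ** 0.25)(q)                                   -> 'xy'
#   cirq.PhasedXPowGate(phase_exponent=0.1).on(q)         -> 'xy'
#   (cirq.Z ** 0.5)(q).with_tags(PhysicalZTag())          -> 'z' (physical)
#   cirq.PhasedXZGate(x_exponent=0.5, z_exponent=0.25,
#                     axis_phase_exponent=0.125).on(q)    -> 'xyz'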
#
# Single qubit deserializers for arbitrary rotations
#
SINGLE_QUBIT_DESERIALIZERS = [
op_deserializer.GateOpDeserializer(
serialized_gate_id='xy',
gate_constructor=cirq.PhasedXPowGate,
args=[
op_deserializer.DeserializingArg(
serialized_name='axis_half_turns',
constructor_arg_name='phase_exponent',
default=0.0,
),
op_deserializer.DeserializingArg(
serialized_name='half_turns',
constructor_arg_name='exponent',
default=1.0,
),
],
),
op_deserializer.GateOpDeserializer(
serialized_gate_id='z',
gate_constructor=cirq.ZPowGate,
args=[
op_deserializer.DeserializingArg(
serialized_name='half_turns',
constructor_arg_name='exponent',
default=1.0,
),
],
op_wrapper=lambda op, proto: _convert_physical_z(op, proto),
),
op_deserializer.GateOpDeserializer(
serialized_gate_id='xyz',
gate_constructor=cirq.PhasedXZGate,
args=[
op_deserializer.DeserializingArg(
serialized_name='x_exponent',
constructor_arg_name='x_exponent',
default=0.0,
),
op_deserializer.DeserializingArg(
serialized_name='z_exponent',
constructor_arg_name='z_exponent',
default=0.0,
),
op_deserializer.DeserializingArg(
serialized_name='axis_phase_exponent',
constructor_arg_name='axis_phase_exponent',
default=0.0,
),
],
),
]
#
# Measurement Serializer and Deserializer
#
MEASUREMENT_SERIALIZER = op_serializer.GateOpSerializer(
gate_type=cirq.MeasurementGate,
serialized_gate_id='meas',
args=[
op_serializer.SerializingArg(
serialized_name='key', serialized_type=str, op_getter=cirq.measurement_key_name
),
op_serializer.SerializingArg(
serialized_name='invert_mask', serialized_type=List[bool], op_getter='invert_mask'
),
],
)
MEASUREMENT_DESERIALIZER = op_deserializer.GateOpDeserializer(
serialized_gate_id='meas',
gate_constructor=cirq.MeasurementGate,
args=[
op_deserializer.DeserializingArg(serialized_name='key', constructor_arg_name='key'),
op_deserializer.DeserializingArg(
serialized_name='invert_mask',
constructor_arg_name='invert_mask',
value_func=lambda x: tuple(cast(list, x)),
),
],
num_qubits_param='num_qubits',
)
#
# Serializers for single qubit rotations confined to half-pi increments
#
SINGLE_QUBIT_HALF_PI_SERIALIZERS = [
op_serializer.GateOpSerializer(
gate_type=cirq.PhasedXPowGate,
serialized_gate_id='xy_pi',
args=[
op_serializer.SerializingArg(
serialized_name='axis_half_turns', serialized_type=float, op_getter='phase_exponent'
),
],
can_serialize_predicate=lambda op: _near_mod_2(
cast(cirq.PhasedXPowGate, op.gate).exponent, 1
),
),
op_serializer.GateOpSerializer(
gate_type=cirq.XPowGate,
serialized_gate_id='xy_pi',
args=[
op_serializer.SerializingArg(
serialized_name='axis_half_turns',
serialized_type=float,
op_getter=lambda op: (cast(cirq.XPowGate, op.gate).exponent - 1) / 2,
)
],
can_serialize_predicate=lambda op: _near_mod_2(cast(cirq.XPowGate, op.gate).exponent, 1),
),
op_serializer.GateOpSerializer(
gate_type=cirq.YPowGate,
serialized_gate_id='xy_pi',
args=[
op_serializer.SerializingArg(
serialized_name='axis_half_turns',
serialized_type=float,
op_getter=lambda op: cast(cirq.YPowGate, op.gate).exponent / 2,
)
],
can_serialize_predicate=lambda op: _near_mod_2(cast(cirq.YPowGate, op.gate).exponent, 1),
),
op_serializer.GateOpSerializer(
gate_type=cirq.XPowGate,
serialized_gate_id='xy_half_pi',
args=[
op_serializer.SerializingArg(
serialized_name='axis_half_turns',
serialized_type=float,
op_getter=lambda op: cast(cirq.XPowGate, op.gate).exponent - 0.5,
)
],
can_serialize_predicate=lambda op: _near_mod_2(cast(cirq.XPowGate, op.gate).exponent, 0.5),
),
op_serializer.GateOpSerializer(
gate_type=cirq.YPowGate,
serialized_gate_id='xy_half_pi',
args=[
op_serializer.SerializingArg(
serialized_name='axis_half_turns',
serialized_type=float,
op_getter=lambda op: cast(cirq.YPowGate, op.gate).exponent,
)
],
can_serialize_predicate=lambda op: _near_mod_2(cast(cirq.YPowGate, op.gate).exponent, 0.5),
),
op_serializer.GateOpSerializer(
gate_type=cirq.PhasedXPowGate,
serialized_gate_id='xy_half_pi',
args=[
op_serializer.SerializingArg(
serialized_name='axis_half_turns', serialized_type=float, op_getter='phase_exponent'
),
],
can_serialize_predicate=lambda op: _near_mod_2(
cast(cirq.PhasedXPowGate, op.gate).exponent, 0.5
),
),
]
#
# Deserializers for single qubit rotations confined to half-pi increments
#
SINGLE_QUBIT_HALF_PI_DESERIALIZERS = [
op_deserializer.GateOpDeserializer(
serialized_gate_id='xy_pi',
gate_constructor=cirq.PhasedXPowGate,
args=[
op_deserializer.DeserializingArg(
serialized_name='axis_half_turns',
constructor_arg_name='phase_exponent',
),
op_deserializer.DeserializingArg(
serialized_name='axis_half_turns',
constructor_arg_name='exponent',
value_func=lambda _: 1,
),
],
),
op_deserializer.GateOpDeserializer(
serialized_gate_id='xy_half_pi',
gate_constructor=cirq.PhasedXPowGate,
args=[
op_deserializer.DeserializingArg(
serialized_name='axis_half_turns', constructor_arg_name='phase_exponent'
),
op_deserializer.DeserializingArg(
serialized_name='axis_half_turns',
constructor_arg_name='exponent',
value_func=lambda _: 0.5,
),
],
),
]
#############################################
#
# Two qubit serializers and deserializers
#
#############################################
_phase_match_arg = op_serializer.SerializingArg(
serialized_name='phase_match',
serialized_type=str,
op_getter=lambda op: PHASE_MATCH_PHYS_Z if PhysicalZTag() in op.tags else None,
required=False,
)
def _add_phase_match(op: cirq.Operation, proto: v2.program_pb2.Operation):
if 'phase_match' in proto.args:
if proto.args['phase_match'].arg_value.string_value == PHASE_MATCH_PHYS_Z:
return op.with_tags(PhysicalZTag())
return op
#
# CZ Serializer and deserializer
#
# Only CZ
CZ_SERIALIZER = op_serializer.GateOpSerializer(
gate_type=cirq.CZPowGate,
serialized_gate_id='cz',
args=[
op_serializer.SerializingArg(
serialized_name='half_turns', serialized_type=float, op_getter='exponent'
),
_phase_match_arg,
],
can_serialize_predicate=lambda op: _near_mod_2(cast(cirq.CZPowGate, op.gate).exponent, 1.0),
)
# CZ to any power
CZ_POW_SERIALIZER = op_serializer.GateOpSerializer(
gate_type=cirq.CZPowGate,
serialized_gate_id='cz',
args=[
op_serializer.SerializingArg(
serialized_name='half_turns', serialized_type=float, op_getter='exponent'
),
_phase_match_arg,
],
)
CZ_POW_DESERIALIZER = op_deserializer.GateOpDeserializer(
serialized_gate_id='cz',
gate_constructor=cirq.CZPowGate,
args=[
op_deserializer.DeserializingArg(
serialized_name='half_turns',
constructor_arg_name='exponent',
default=1.0,
),
],
op_wrapper=lambda op, proto: _add_phase_match(op, proto),
)
#
# Sycamore Gate Serializer and deserializer
#
SYC_SERIALIZER = op_serializer.GateOpSerializer(
gate_type=cirq.FSimGate,
serialized_gate_id='syc',
args=[_phase_match_arg],
can_serialize_predicate=(
lambda op: _near_mod_2pi(cast(cirq.FSimGate, op.gate).theta, np.pi / 2)
and _near_mod_2pi(cast(cirq.FSimGate, op.gate).phi, np.pi / 6)
),
)
SYC_DESERIALIZER = op_deserializer.GateOpDeserializer(
serialized_gate_id='syc',
gate_constructor=lambda: cirq.FSimGate(theta=np.pi / 2, phi=np.pi / 6),
args=[],
op_wrapper=lambda op, proto: _add_phase_match(op, proto),
)
#
# sqrt(ISWAP) serializer and deserializer
# (e.g. ISWAP ** 0.5)
#
SQRT_ISWAP_SERIALIZERS = [
op_serializer.GateOpSerializer(
gate_type=cirq.FSimGate,
serialized_gate_id='fsim_pi_4',
args=[_phase_match_arg],
can_serialize_predicate=(
lambda op: _near_mod_2pi(cast(cirq.FSimGate, op.gate).theta, np.pi / 4)
and _near_mod_2pi(cast(cirq.FSimGate, op.gate).phi, 0)
),
),
op_serializer.GateOpSerializer(
gate_type=cirq.ISwapPowGate,
serialized_gate_id='fsim_pi_4',
args=[_phase_match_arg],
can_serialize_predicate=(
lambda op: _near_mod_n(cast(cirq.ISwapPowGate, op.gate).exponent, -0.5, 4)
),
),
op_serializer.GateOpSerializer(
gate_type=cirq.FSimGate,
serialized_gate_id='inv_fsim_pi_4',
args=[_phase_match_arg],
can_serialize_predicate=(
lambda op: _near_mod_2pi(cast(cirq.FSimGate, op.gate).theta, -np.pi / 4)
and _near_mod_2pi(cast(cirq.FSimGate, op.gate).phi, 0)
),
),
op_serializer.GateOpSerializer(
gate_type=cirq.ISwapPowGate,
serialized_gate_id='inv_fsim_pi_4',
args=[_phase_match_arg],
can_serialize_predicate=(
lambda op: _near_mod_n(cast(cirq.ISwapPowGate, op.gate).exponent, +0.5, 4)
),
),
]
SQRT_ISWAP_DESERIALIZERS = [
op_deserializer.GateOpDeserializer(
serialized_gate_id='fsim_pi_4',
gate_constructor=lambda: cirq.FSimGate(theta=np.pi / 4, phi=0),
args=[],
op_wrapper=lambda op, proto: _add_phase_match(op, proto),
),
op_deserializer.GateOpDeserializer(
serialized_gate_id='inv_fsim_pi_4',
gate_constructor=lambda: cirq.FSimGate(theta=-np.pi / 4, phi=0),
args=[],
op_wrapper=lambda op, proto: _add_phase_match(op, proto),
),
]
_LIMITED_FSIM_GATE_FAMILY = fsim_gate_family.FSimGateFamily(
gates_to_accept=[
cirq.IdentityGate(2),
cirq.SQRT_ISWAP_INV,
cirq.SQRT_ISWAP,
cirq.ISWAP,
cirq.ISWAP ** -1, # type: ignore
SYC,
cirq.CZ,
],
gate_types_to_check=[cirq.FSimGate],
allow_symbols=True,
)
_LIMITED_ISWAP_GATE_FAMILY = fsim_gate_family.FSimGateFamily(
gates_to_accept=[
cirq.IdentityGate(2),
cirq.SQRT_ISWAP_INV,
cirq.SQRT_ISWAP,
cirq.ISWAP,
cirq.ISWAP ** -1, # type: ignore
],
gate_types_to_check=[cirq.ISwapPowGate],
allow_symbols=True,
)
LIMITED_FSIM_SERIALIZERS = [
op_serializer.GateOpSerializer(
gate_type=cirq.FSimGate,
serialized_gate_id='fsim',
args=[
op_serializer.SerializingArg(
serialized_name='theta', serialized_type=float, op_getter='theta'
),
op_serializer.SerializingArg(
serialized_name='phi', serialized_type=float, op_getter='phi'
),
_phase_match_arg,
],
can_serialize_predicate=(lambda op: op in _LIMITED_FSIM_GATE_FAMILY),
),
op_serializer.GateOpSerializer(
gate_type=cirq.ISwapPowGate,
serialized_gate_id='fsim',
args=[
op_serializer.SerializingArg(
serialized_name='theta',
serialized_type=float,
# Note that ISWAP ** 0.5 is Fsim(-pi/4,0)
op_getter=(lambda op: cast(cirq.ISwapPowGate, op.gate).exponent * -np.pi / 2),
),
op_serializer.SerializingArg(
serialized_name='phi', serialized_type=float, op_getter=lambda e: 0
),
_phase_match_arg,
],
can_serialize_predicate=(lambda op: op in _LIMITED_ISWAP_GATE_FAMILY),
),
op_serializer.GateOpSerializer(
gate_type=cirq.CZPowGate,
serialized_gate_id='fsim',
args=[
op_serializer.SerializingArg(
serialized_name='theta', serialized_type=float, op_getter=lambda e: 0
),
op_serializer.SerializingArg(
serialized_name='phi', serialized_type=float, op_getter=lambda e: np.pi
),
_phase_match_arg,
],
can_serialize_predicate=lambda op: _near_mod_2(cast(cirq.CZPowGate, op.gate).exponent, 1.0),
),
]
LIMITED_FSIM_DESERIALIZER = op_deserializer.GateOpDeserializer(
serialized_gate_id='fsim',
gate_constructor=cirq.FSimGate,
args=[
op_deserializer.DeserializingArg(
serialized_name='theta',
constructor_arg_name='theta',
default=0.0,
),
op_deserializer.DeserializingArg(
serialized_name='phi',
constructor_arg_name='phi',
default=0.0,
),
],
op_wrapper=lambda op, proto: _add_phase_match(op, proto),
)
#############################################
#
# Miscellaneous serializers and deserializers
#
#############################################
#
# Coupler Pulse serializer and deserializer
#
COUPLER_PULSE_SERIALIZER = op_serializer.GateOpSerializer(
gate_type=CouplerPulse,
serialized_gate_id='coupler_pulse',
args=[
op_serializer.SerializingArg(
serialized_name='coupling_mhz', serialized_type=float, op_getter='coupling_mhz'
),
op_serializer.SerializingArg(
serialized_name='hold_time_ns',
serialized_type=float,
op_getter=lambda op: cast(CouplerPulse, op.gate).hold_time.total_nanos(),
),
op_serializer.SerializingArg(
serialized_name='rise_time_ns',
serialized_type=float,
op_getter=lambda op: cast(CouplerPulse, op.gate).rise_time.total_nanos(),
),
op_serializer.SerializingArg(
serialized_name='padding_time_ns',
serialized_type=float,
op_getter=lambda op: cast(CouplerPulse, op.gate).padding_time.total_nanos(),
),
],
)
COUPLER_PULSE_DESERIALIZER = op_deserializer.GateOpDeserializer(
serialized_gate_id='coupler_pulse',
gate_constructor=CouplerPulse,
args=[
op_deserializer.DeserializingArg(
serialized_name='coupling_mhz',
constructor_arg_name='coupling_mhz',
),
op_deserializer.DeserializingArg(
serialized_name='hold_time_ns',
constructor_arg_name='hold_time',
value_func=lambda nanos: cirq.Duration(
nanos=cast(Union[int, float, sympy.Basic], nanos)
),
),
op_deserializer.DeserializingArg(
serialized_name='rise_time_ns',
constructor_arg_name='rise_time',
value_func=lambda nanos: cirq.Duration(
nanos=cast(Union[int, float, sympy.Basic], nanos)
),
),
op_deserializer.DeserializingArg(
serialized_name='padding_time_ns',
constructor_arg_name='padding_time',
value_func=lambda nanos: cirq.Duration(
nanos=cast(Union[int, float, sympy.Basic], nanos)
),
),
],
)
#
# WaitGate serializer and deserializer
#
WAIT_GATE_SERIALIZER = op_serializer.GateOpSerializer(
gate_type=cirq.WaitGate,
serialized_gate_id='wait',
args=[
op_serializer.SerializingArg(
serialized_name='nanos',
serialized_type=float,
op_getter=lambda op: cast(cirq.WaitGate, op.gate).duration.total_nanos(),
),
],
)
WAIT_GATE_DESERIALIZER = op_deserializer.GateOpDeserializer(
serialized_gate_id='wait',
gate_constructor=cirq.WaitGate,
args=[
op_deserializer.DeserializingArg(
serialized_name='nanos',
constructor_arg_name='duration',
value_func=lambda nanos: cirq.Duration(
nanos=cast(Union[int, float, sympy.Basic], nanos)
),
)
],
num_qubits_param='num_qubits',
)
#
# CircuitOperation serializer and deserializer
#
CIRCUIT_OP_SERIALIZER = op_serializer.CircuitOpSerializer()
CIRCUIT_OP_DESERIALIZER = op_deserializer.CircuitOpDeserializer()
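# Illustrative sketch (not part of the module API): a circuit built only from
# gates covered above, which these serializers could translate to and from
# protos when combined into a gate set.
#
#   q0, q1 = cirq.GridQubit.rect(1, 2)
#   circuit = cirq.Circuit(
#       cirq.X(q0) ** 0.5,
#       cirq.ISWAP(q0, q1) ** 0.5,
#       cirq.measure(q0, q1, key='m'),
#   )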
|
|
import unittest
from test import support
from random import random
from math import atan2, isnan, copysign
import operator
INF = float("inf")
NAN = float("nan")
# These tests ensure that complex math does the right thing
class ComplexTest(unittest.TestCase):
def assertAlmostEqual(self, a, b):
if isinstance(a, complex):
if isinstance(b, complex):
unittest.TestCase.assertAlmostEqual(self, a.real, b.real)
unittest.TestCase.assertAlmostEqual(self, a.imag, b.imag)
else:
unittest.TestCase.assertAlmostEqual(self, a.real, b)
unittest.TestCase.assertAlmostEqual(self, a.imag, 0.)
else:
if isinstance(b, complex):
unittest.TestCase.assertAlmostEqual(self, a, b.real)
unittest.TestCase.assertAlmostEqual(self, 0., b.imag)
else:
unittest.TestCase.assertAlmostEqual(self, a, b)
def assertCloseAbs(self, x, y, eps=1e-9):
"""Return true iff floats x and y "are close\""""
# put the one with larger magnitude second
if abs(x) > abs(y):
x, y = y, x
if y == 0:
return abs(x) < eps
if x == 0:
return abs(y) < eps
# check that relative difference < eps
self.assertTrue(abs((x-y)/y) < eps)
def assertFloatsAreIdentical(self, x, y):
"""assert that floats x and y are identical, in the sense that:
(1) both x and y are nans, or
(2) both x and y are infinities, with the same sign, or
(3) both x and y are zeros, with the same sign, or
(4) x and y are both finite and nonzero, and x == y
"""
msg = 'floats {!r} and {!r} are not identical'
if isnan(x) or isnan(y):
if isnan(x) and isnan(y):
return
elif x == y:
if x != 0.0:
return
# both zero; check that signs match
elif copysign(1.0, x) == copysign(1.0, y):
return
else:
msg += ': zeros have different signs'
self.fail(msg.format(x, y))
def assertClose(self, x, y, eps=1e-9):
"""Return true iff complexes x and y "are close\""""
self.assertCloseAbs(x.real, y.real, eps)
self.assertCloseAbs(x.imag, y.imag, eps)
def check_div(self, x, y):
"""Compute complex z=x*y, and check that z/x==y and z/y==x."""
z = x * y
if x != 0:
q = z / x
self.assertClose(q, y)
q = z.__truediv__(x)
self.assertClose(q, y)
if y != 0:
q = z / y
self.assertClose(q, x)
q = z.__truediv__(y)
self.assertClose(q, x)
def test_truediv(self):
simple_real = [float(i) for i in range(-5, 6)]
simple_complex = [complex(x, y) for x in simple_real for y in simple_real]
for x in simple_complex:
for y in simple_complex:
self.check_div(x, y)
# A naive complex division algorithm (such as in 2.0) is very prone to
# nonsense errors for these (overflows and underflows).
self.check_div(complex(1e200, 1e200), 1+0j)
self.check_div(complex(1e-200, 1e-200), 1+0j)
# Just for fun.
for i in range(100):
self.check_div(complex(random(), random()),
complex(random(), random()))
self.assertRaises(ZeroDivisionError, complex.__truediv__, 1+1j, 0+0j)
# FIXME: The following currently crashes on Alpha
# self.assertRaises(OverflowError, pow, 1e200+1j, 1e200+1j)
self.assertAlmostEqual(complex.__truediv__(2+0j, 1+1j), 1-1j)
self.assertRaises(ZeroDivisionError, complex.__truediv__, 1+1j, 0+0j)
def test_floordiv(self):
self.assertRaises(TypeError, complex.__floordiv__, 3+0j, 1.5+0j)
self.assertRaises(TypeError, complex.__floordiv__, 3+0j, 0+0j)
def test_richcompare(self):
self.assertIs(complex.__eq__(1+1j, 1<<10000), False)
self.assertIs(complex.__lt__(1+1j, None), NotImplemented)
self.assertIs(complex.__eq__(1+1j, 1+1j), True)
self.assertIs(complex.__eq__(1+1j, 2+2j), False)
self.assertIs(complex.__ne__(1+1j, 1+1j), False)
self.assertIs(complex.__ne__(1+1j, 2+2j), True)
for i in range(1, 100):
f = i / 100.0
self.assertIs(complex.__eq__(f+0j, f), True)
self.assertIs(complex.__ne__(f+0j, f), False)
self.assertIs(complex.__eq__(complex(f, f), f), False)
self.assertIs(complex.__ne__(complex(f, f), f), True)
self.assertIs(complex.__lt__(1+1j, 2+2j), NotImplemented)
self.assertIs(complex.__le__(1+1j, 2+2j), NotImplemented)
self.assertIs(complex.__gt__(1+1j, 2+2j), NotImplemented)
self.assertIs(complex.__ge__(1+1j, 2+2j), NotImplemented)
self.assertRaises(TypeError, operator.lt, 1+1j, 2+2j)
self.assertRaises(TypeError, operator.le, 1+1j, 2+2j)
self.assertRaises(TypeError, operator.gt, 1+1j, 2+2j)
self.assertRaises(TypeError, operator.ge, 1+1j, 2+2j)
self.assertIs(operator.eq(1+1j, 1+1j), True)
self.assertIs(operator.eq(1+1j, 2+2j), False)
self.assertIs(operator.ne(1+1j, 1+1j), False)
self.assertIs(operator.ne(1+1j, 2+2j), True)
def test_richcompare_boundaries(self):
def check(n, deltas, is_equal, imag = 0.0):
for delta in deltas:
i = n + delta
z = complex(i, imag)
self.assertIs(complex.__eq__(z, i), is_equal(delta))
self.assertIs(complex.__ne__(z, i), not is_equal(delta))
# For IEEE-754 doubles the following should hold:
# x in [2 ** (52 + i), 2 ** (53 + i + 1)] -> x mod 2 ** i == 0
# where the interval is representable, of course.
for i in range(1, 10):
pow = 52 + i
mult = 2 ** i
check(2 ** pow, range(1, 101), lambda delta: delta % mult == 0)
check(2 ** pow, range(1, 101), lambda delta: False, float(i))
check(2 ** 53, range(-100, 0), lambda delta: True)
def test_mod(self):
# % is no longer supported on complex numbers
self.assertRaises(TypeError, (1+1j).__mod__, 0+0j)
self.assertRaises(TypeError, lambda: (3.33+4.43j) % 0)
self.assertRaises(TypeError, (1+1j).__mod__, 4.3j)
def test_divmod(self):
self.assertRaises(TypeError, divmod, 1+1j, 1+0j)
self.assertRaises(TypeError, divmod, 1+1j, 0+0j)
def test_pow(self):
self.assertAlmostEqual(pow(1+1j, 0+0j), 1.0)
self.assertAlmostEqual(pow(0+0j, 2+0j), 0.0)
self.assertRaises(ZeroDivisionError, pow, 0+0j, 1j)
self.assertAlmostEqual(pow(1j, -1), 1/1j)
self.assertAlmostEqual(pow(1j, 200), 1)
self.assertRaises(ValueError, pow, 1+1j, 1+1j, 1+1j)
a = 3.33+4.43j
self.assertEqual(a ** 0j, 1)
self.assertEqual(a ** 0.+0.j, 1)
self.assertEqual(3j ** 0j, 1)
self.assertEqual(3j ** 0, 1)
try:
0j ** a
except ZeroDivisionError:
pass
else:
self.fail("should fail 0.0 to negative or complex power")
try:
0j ** (3-2j)
except ZeroDivisionError:
pass
else:
self.fail("should fail 0.0 to negative or complex power")
# The following is used to exercise certain code paths
self.assertEqual(a ** 105, a ** 105)
self.assertEqual(a ** -105, a ** -105)
self.assertEqual(a ** -30, a ** -30)
self.assertEqual(0.0j ** 0, 1)
b = 5.1+2.3j
self.assertRaises(ValueError, pow, a, b, 0)
def test_boolcontext(self):
for i in range(100):
self.assertTrue(complex(random() + 1e-6, random() + 1e-6))
self.assertTrue(not complex(0.0, 0.0))
def test_conjugate(self):
self.assertClose(complex(5.3, 9.8).conjugate(), 5.3-9.8j)
def test_constructor(self):
class OS:
def __init__(self, value): self.value = value
def __complex__(self): return self.value
class NS(object):
def __init__(self, value): self.value = value
def __complex__(self): return self.value
self.assertEqual(complex(OS(1+10j)), 1+10j)
self.assertEqual(complex(NS(1+10j)), 1+10j)
self.assertRaises(TypeError, complex, OS(None))
self.assertRaises(TypeError, complex, NS(None))
self.assertRaises(TypeError, complex, {})
self.assertAlmostEqual(complex("1+10j"), 1+10j)
self.assertAlmostEqual(complex(10), 10+0j)
self.assertAlmostEqual(complex(10.0), 10+0j)
self.assertAlmostEqual(complex(10), 10+0j)
self.assertAlmostEqual(complex(10+0j), 10+0j)
self.assertAlmostEqual(complex(1,10), 1+10j)
self.assertAlmostEqual(complex(1,10), 1+10j)
self.assertAlmostEqual(complex(1,10.0), 1+10j)
self.assertAlmostEqual(complex(1,10), 1+10j)
self.assertAlmostEqual(complex(1,10), 1+10j)
self.assertAlmostEqual(complex(1,10.0), 1+10j)
self.assertAlmostEqual(complex(1.0,10), 1+10j)
self.assertAlmostEqual(complex(1.0,10), 1+10j)
self.assertAlmostEqual(complex(1.0,10.0), 1+10j)
self.assertAlmostEqual(complex(3.14+0j), 3.14+0j)
self.assertAlmostEqual(complex(3.14), 3.14+0j)
self.assertAlmostEqual(complex(314), 314.0+0j)
self.assertAlmostEqual(complex(314), 314.0+0j)
self.assertAlmostEqual(complex(3.14+0j, 0j), 3.14+0j)
self.assertAlmostEqual(complex(3.14, 0.0), 3.14+0j)
self.assertAlmostEqual(complex(314, 0), 314.0+0j)
self.assertAlmostEqual(complex(314, 0), 314.0+0j)
self.assertAlmostEqual(complex(0j, 3.14j), -3.14+0j)
self.assertAlmostEqual(complex(0.0, 3.14j), -3.14+0j)
self.assertAlmostEqual(complex(0j, 3.14), 3.14j)
self.assertAlmostEqual(complex(0.0, 3.14), 3.14j)
self.assertAlmostEqual(complex("1"), 1+0j)
self.assertAlmostEqual(complex("1j"), 1j)
self.assertAlmostEqual(complex(), 0)
self.assertAlmostEqual(complex("-1"), -1)
self.assertAlmostEqual(complex("+1"), +1)
self.assertAlmostEqual(complex("(1+2j)"), 1+2j)
self.assertAlmostEqual(complex("(1.3+2.2j)"), 1.3+2.2j)
self.assertAlmostEqual(complex("3.14+1J"), 3.14+1j)
self.assertAlmostEqual(complex(" ( +3.14-6J )"), 3.14-6j)
self.assertAlmostEqual(complex(" ( +3.14-J )"), 3.14-1j)
self.assertAlmostEqual(complex(" ( +3.14+j )"), 3.14+1j)
self.assertAlmostEqual(complex("J"), 1j)
self.assertAlmostEqual(complex("( j )"), 1j)
self.assertAlmostEqual(complex("+J"), 1j)
self.assertAlmostEqual(complex("( -j)"), -1j)
self.assertAlmostEqual(complex('1e-500'), 0.0 + 0.0j)
self.assertAlmostEqual(complex('-1e-500j'), 0.0 - 0.0j)
self.assertAlmostEqual(complex('-1e-500+1e-500j'), -0.0 + 0.0j)
class complex2(complex): pass
self.assertAlmostEqual(complex(complex2(1+1j)), 1+1j)
self.assertAlmostEqual(complex(real=17, imag=23), 17+23j)
self.assertAlmostEqual(complex(real=17+23j), 17+23j)
self.assertAlmostEqual(complex(real=17+23j, imag=23), 17+46j)
self.assertAlmostEqual(complex(real=1+2j, imag=3+4j), -3+5j)
# check that the sign of a zero in the real or imaginary part
# is preserved when constructing from two floats. (These checks
# are harmless on systems without support for signed zeros.)
def split_zeros(x):
"""Function that produces different results for 0. and -0."""
return atan2(x, -1.)
self.assertEqual(split_zeros(complex(1., 0.).imag), split_zeros(0.))
self.assertEqual(split_zeros(complex(1., -0.).imag), split_zeros(-0.))
self.assertEqual(split_zeros(complex(0., 1.).real), split_zeros(0.))
self.assertEqual(split_zeros(complex(-0., 1.).real), split_zeros(-0.))
c = 3.14 + 1j
self.assertTrue(complex(c) is c)
del c
self.assertRaises(TypeError, complex, "1", "1")
self.assertRaises(TypeError, complex, 1, "1")
# SF bug 543840: complex(string) accepts strings with \0
# Fixed in 2.3.
self.assertRaises(ValueError, complex, '1+1j\0j')
self.assertRaises(TypeError, int, 5+3j)
self.assertRaises(TypeError, int, 5+3j)
self.assertRaises(TypeError, float, 5+3j)
self.assertRaises(ValueError, complex, "")
self.assertRaises(TypeError, complex, None)
self.assertRaises(ValueError, complex, "\0")
self.assertRaises(ValueError, complex, "3\09")
self.assertRaises(TypeError, complex, "1", "2")
self.assertRaises(TypeError, complex, "1", 42)
self.assertRaises(TypeError, complex, 1, "2")
self.assertRaises(ValueError, complex, "1+")
self.assertRaises(ValueError, complex, "1+1j+1j")
self.assertRaises(ValueError, complex, "--")
self.assertRaises(ValueError, complex, "(1+2j")
self.assertRaises(ValueError, complex, "1+2j)")
self.assertRaises(ValueError, complex, "1+(2j)")
self.assertRaises(ValueError, complex, "(1+2j)123")
self.assertRaises(ValueError, complex, "x")
self.assertRaises(ValueError, complex, "1j+2")
self.assertRaises(ValueError, complex, "1e1ej")
self.assertRaises(ValueError, complex, "1e++1ej")
self.assertRaises(ValueError, complex, ")1+2j(")
# the following three are accepted by Python 2.6
self.assertRaises(ValueError, complex, "1..1j")
self.assertRaises(ValueError, complex, "1.11.1j")
self.assertRaises(ValueError, complex, "1e1.1j")
# check that complex accepts long unicode strings
self.assertEqual(type(complex("1"*500)), complex)
# check whitespace processing
self.assertEqual(complex('\N{EM SPACE}(\N{EN SPACE}1+1j ) '), 1+1j)
class EvilExc(Exception):
pass
class evilcomplex:
def __complex__(self):
raise EvilExc
self.assertRaises(EvilExc, complex, evilcomplex())
class float2:
def __init__(self, value):
self.value = value
def __float__(self):
return self.value
self.assertAlmostEqual(complex(float2(42.)), 42)
self.assertAlmostEqual(complex(real=float2(17.), imag=float2(23.)), 17+23j)
self.assertRaises(TypeError, complex, float2(None))
class complex0(complex):
"""Test usage of __complex__() when inheriting from 'complex'"""
def __complex__(self):
return 42j
class complex1(complex):
"""Test usage of __complex__() with a __new__() method"""
def __new__(self, value=0j):
return complex.__new__(self, 2*value)
def __complex__(self):
return self
class complex2(complex):
"""Make sure that __complex__() calls fail if anything other than a
complex is returned"""
def __complex__(self):
return None
self.assertAlmostEqual(complex(complex0(1j)), 42j)
self.assertAlmostEqual(complex(complex1(1j)), 2j)
self.assertRaises(TypeError, complex, complex2(1j))
def test_hash(self):
for x in range(-30, 30):
self.assertEqual(hash(x), hash(complex(x, 0)))
x /= 3.0 # now check against floating point
self.assertEqual(hash(x), hash(complex(x, 0.)))
def test_abs(self):
nums = [complex(x/3., y/7.) for x in range(-9,9) for y in range(-9,9)]
for num in nums:
self.assertAlmostEqual((num.real**2 + num.imag**2) ** 0.5, abs(num))
def test_repr_str(self):
def test(v, expected, test_fn=self.assertEqual):
test_fn(repr(v), expected)
test_fn(str(v), expected)
test(1+6j, '(1+6j)')
test(1-6j, '(1-6j)')
test(-(1+0j), '(-1+-0j)', test_fn=self.assertNotEqual)
test(complex(1., INF), "(1+infj)")
test(complex(1., -INF), "(1-infj)")
test(complex(INF, 1), "(inf+1j)")
test(complex(-INF, INF), "(-inf+infj)")
test(complex(NAN, 1), "(nan+1j)")
test(complex(1, NAN), "(1+nanj)")
test(complex(NAN, NAN), "(nan+nanj)")
test(complex(0, INF), "infj")
test(complex(0, -INF), "-infj")
test(complex(0, NAN), "nanj")
self.assertEqual(1-6j,complex(repr(1-6j)))
self.assertEqual(1+6j,complex(repr(1+6j)))
self.assertEqual(-6j,complex(repr(-6j)))
self.assertEqual(6j,complex(repr(6j)))
@support.requires_IEEE_754
def test_negative_zero_repr_str(self):
def test(v, expected, test_fn=self.assertEqual):
test_fn(repr(v), expected)
test_fn(str(v), expected)
test(complex(0., 1.), "1j")
test(complex(-0., 1.), "(-0+1j)")
test(complex(0., -1.), "-1j")
test(complex(-0., -1.), "(-0-1j)")
test(complex(0., 0.), "0j")
test(complex(0., -0.), "-0j")
test(complex(-0., 0.), "(-0+0j)")
test(complex(-0., -0.), "(-0-0j)")
def test_neg(self):
self.assertEqual(-(1+6j), -1-6j)
def test_file(self):
a = 3.33+4.43j
b = 5.1+2.3j
fo = None
try:
fo = open(support.TESTFN, "w")
print(a, b, file=fo)
fo.close()
fo = open(support.TESTFN, "r")
self.assertEqual(fo.read(), ("%s %s\n" % (a, b)))
finally:
if (fo is not None) and (not fo.closed):
fo.close()
support.unlink(support.TESTFN)
def test_getnewargs(self):
self.assertEqual((1+2j).__getnewargs__(), (1.0, 2.0))
self.assertEqual((1-2j).__getnewargs__(), (1.0, -2.0))
self.assertEqual((2j).__getnewargs__(), (0.0, 2.0))
self.assertEqual((-0j).__getnewargs__(), (0.0, -0.0))
self.assertEqual(complex(0, INF).__getnewargs__(), (0.0, INF))
self.assertEqual(complex(INF, 0).__getnewargs__(), (INF, 0.0))
@support.requires_IEEE_754
def test_plus_minus_0j(self):
# test that -0j and 0j literals are not identified
z1, z2 = 0j, -0j
self.assertEqual(atan2(z1.imag, -1.), atan2(0., -1.))
self.assertEqual(atan2(z2.imag, -1.), atan2(-0., -1.))
@support.requires_IEEE_754
def test_negated_imaginary_literal(self):
z0 = -0j
z1 = -7j
z2 = -1e1000j
# Note: In versions of Python < 3.2, a negated imaginary literal
# accidentally ended up with real part 0.0 instead of -0.0, thanks to a
# modification during CST -> AST translation (see issue #9011). That's
# fixed in Python 3.2.
self.assertFloatsAreIdentical(z0.real, -0.0)
self.assertFloatsAreIdentical(z0.imag, -0.0)
self.assertFloatsAreIdentical(z1.real, -0.0)
self.assertFloatsAreIdentical(z1.imag, -7.0)
self.assertFloatsAreIdentical(z2.real, -0.0)
self.assertFloatsAreIdentical(z2.imag, -INF)
@support.requires_IEEE_754
def test_overflow(self):
self.assertEqual(complex("1e500"), complex(INF, 0.0))
self.assertEqual(complex("-1e500j"), complex(0.0, -INF))
self.assertEqual(complex("-1e500+1.8e308j"), complex(-INF, INF))
@support.requires_IEEE_754
def test_repr_roundtrip(self):
vals = [0.0, 1e-500, 1e-315, 1e-200, 0.0123, 3.1415, 1e50, INF, NAN]
vals += [-v for v in vals]
# complex(repr(z)) should recover z exactly, even for complex
# numbers involving an infinity, nan, or negative zero
for x in vals:
for y in vals:
z = complex(x, y)
roundtrip = complex(repr(z))
self.assertFloatsAreIdentical(z.real, roundtrip.real)
self.assertFloatsAreIdentical(z.imag, roundtrip.imag)
# if we predefine some constants, then eval(repr(z)) should
# also work, except that it might change the sign of zeros
inf, nan = float('inf'), float('nan')
infj, nanj = complex(0.0, inf), complex(0.0, nan)
for x in vals:
for y in vals:
z = complex(x, y)
roundtrip = eval(repr(z))
# adding 0.0 has no effect beside changing -0.0 to 0.0
self.assertFloatsAreIdentical(0.0 + z.real,
0.0 + roundtrip.real)
self.assertFloatsAreIdentical(0.0 + z.imag,
0.0 + roundtrip.imag)
def test_format(self):
# empty format string is same as str()
self.assertEqual(format(1+3j, ''), str(1+3j))
self.assertEqual(format(1.5+3.5j, ''), str(1.5+3.5j))
self.assertEqual(format(3j, ''), str(3j))
self.assertEqual(format(3.2j, ''), str(3.2j))
self.assertEqual(format(3+0j, ''), str(3+0j))
self.assertEqual(format(3.2+0j, ''), str(3.2+0j))
# empty presentation type should still be analogous to str,
# even when format string is nonempty (issue #5920).
self.assertEqual(format(3.2+0j, '-'), str(3.2+0j))
self.assertEqual(format(3.2+0j, '<'), str(3.2+0j))
z = 4/7. - 100j/7.
self.assertEqual(format(z, ''), str(z))
self.assertEqual(format(z, '-'), str(z))
self.assertEqual(format(z, '<'), str(z))
self.assertEqual(format(z, '10'), str(z))
z = complex(0.0, 3.0)
self.assertEqual(format(z, ''), str(z))
self.assertEqual(format(z, '-'), str(z))
self.assertEqual(format(z, '<'), str(z))
self.assertEqual(format(z, '2'), str(z))
z = complex(-0.0, 2.0)
self.assertEqual(format(z, ''), str(z))
self.assertEqual(format(z, '-'), str(z))
self.assertEqual(format(z, '<'), str(z))
self.assertEqual(format(z, '3'), str(z))
self.assertEqual(format(1+3j, 'g'), '1+3j')
self.assertEqual(format(3j, 'g'), '0+3j')
self.assertEqual(format(1.5+3.5j, 'g'), '1.5+3.5j')
self.assertEqual(format(1.5+3.5j, '+g'), '+1.5+3.5j')
self.assertEqual(format(1.5-3.5j, '+g'), '+1.5-3.5j')
self.assertEqual(format(1.5-3.5j, '-g'), '1.5-3.5j')
self.assertEqual(format(1.5+3.5j, ' g'), ' 1.5+3.5j')
self.assertEqual(format(1.5-3.5j, ' g'), ' 1.5-3.5j')
self.assertEqual(format(-1.5+3.5j, ' g'), '-1.5+3.5j')
self.assertEqual(format(-1.5-3.5j, ' g'), '-1.5-3.5j')
self.assertEqual(format(-1.5-3.5e-20j, 'g'), '-1.5-3.5e-20j')
self.assertEqual(format(-1.5-3.5j, 'f'), '-1.500000-3.500000j')
self.assertEqual(format(-1.5-3.5j, 'F'), '-1.500000-3.500000j')
self.assertEqual(format(-1.5-3.5j, 'e'), '-1.500000e+00-3.500000e+00j')
self.assertEqual(format(-1.5-3.5j, '.2e'), '-1.50e+00-3.50e+00j')
self.assertEqual(format(-1.5-3.5j, '.2E'), '-1.50E+00-3.50E+00j')
self.assertEqual(format(-1.5e10-3.5e5j, '.2G'), '-1.5E+10-3.5E+05j')
self.assertEqual(format(1.5+3j, '<20g'), '1.5+3j ')
self.assertEqual(format(1.5+3j, '*<20g'), '1.5+3j**************')
self.assertEqual(format(1.5+3j, '>20g'), ' 1.5+3j')
self.assertEqual(format(1.5+3j, '^20g'), ' 1.5+3j ')
self.assertEqual(format(1.5+3j, '<20'), '(1.5+3j) ')
self.assertEqual(format(1.5+3j, '>20'), ' (1.5+3j)')
self.assertEqual(format(1.5+3j, '^20'), ' (1.5+3j) ')
self.assertEqual(format(1.123-3.123j, '^20.2'), ' (1.1-3.1j) ')
self.assertEqual(format(1.5+3j, '20.2f'), ' 1.50+3.00j')
self.assertEqual(format(1.5+3j, '>20.2f'), ' 1.50+3.00j')
self.assertEqual(format(1.5+3j, '<20.2f'), '1.50+3.00j ')
self.assertEqual(format(1.5e20+3j, '<20.2f'), '150000000000000000000.00+3.00j')
self.assertEqual(format(1.5e20+3j, '>40.2f'), ' 150000000000000000000.00+3.00j')
self.assertEqual(format(1.5e20+3j, '^40,.2f'), ' 150,000,000,000,000,000,000.00+3.00j ')
self.assertEqual(format(1.5e21+3j, '^40,.2f'), ' 1,500,000,000,000,000,000,000.00+3.00j ')
self.assertEqual(format(1.5e21+3000j, ',.2f'), '1,500,000,000,000,000,000,000.00+3,000.00j')
# Issue 7094: Alternate formatting (specified by #)
self.assertEqual(format(1+1j, '.0e'), '1e+00+1e+00j')
self.assertEqual(format(1+1j, '#.0e'), '1.e+00+1.e+00j')
self.assertEqual(format(1+1j, '.0f'), '1+1j')
self.assertEqual(format(1+1j, '#.0f'), '1.+1.j')
self.assertEqual(format(1.1+1.1j, 'g'), '1.1+1.1j')
self.assertEqual(format(1.1+1.1j, '#g'), '1.10000+1.10000j')
# Alternate doesn't make a difference for these, they format the same with or without it
self.assertEqual(format(1+1j, '.1e'), '1.0e+00+1.0e+00j')
self.assertEqual(format(1+1j, '#.1e'), '1.0e+00+1.0e+00j')
self.assertEqual(format(1+1j, '.1f'), '1.0+1.0j')
self.assertEqual(format(1+1j, '#.1f'), '1.0+1.0j')
# Misc. other alternate tests
self.assertEqual(format((-1.5+0.5j), '#f'), '-1.500000+0.500000j')
self.assertEqual(format((-1.5+0.5j), '#.0f'), '-2.+0.j')
self.assertEqual(format((-1.5+0.5j), '#e'), '-1.500000e+00+5.000000e-01j')
self.assertEqual(format((-1.5+0.5j), '#.0e'), '-2.e+00+5.e-01j')
self.assertEqual(format((-1.5+0.5j), '#g'), '-1.50000+0.500000j')
self.assertEqual(format((-1.5+0.5j), '.0g'), '-2+0.5j')
self.assertEqual(format((-1.5+0.5j), '#.0g'), '-2.+0.5j')
# zero padding is invalid
self.assertRaises(ValueError, (1.5+0.5j).__format__, '010f')
# '=' alignment is invalid
self.assertRaises(ValueError, (1.5+3j).__format__, '=20')
# integer presentation types are an error
for t in 'bcdoxX':
self.assertRaises(ValueError, (1.5+0.5j).__format__, t)
# make sure everything works in ''.format()
self.assertEqual('*{0:.3f}*'.format(3.14159+2.71828j), '*3.142+2.718j*')
# issue 3382
self.assertEqual(format(complex(NAN, NAN), 'f'), 'nan+nanj')
self.assertEqual(format(complex(1, NAN), 'f'), '1.000000+nanj')
self.assertEqual(format(complex(NAN, 1), 'f'), 'nan+1.000000j')
self.assertEqual(format(complex(NAN, -1), 'f'), 'nan-1.000000j')
self.assertEqual(format(complex(NAN, NAN), 'F'), 'NAN+NANj')
self.assertEqual(format(complex(1, NAN), 'F'), '1.000000+NANj')
self.assertEqual(format(complex(NAN, 1), 'F'), 'NAN+1.000000j')
self.assertEqual(format(complex(NAN, -1), 'F'), 'NAN-1.000000j')
self.assertEqual(format(complex(INF, INF), 'f'), 'inf+infj')
self.assertEqual(format(complex(1, INF), 'f'), '1.000000+infj')
self.assertEqual(format(complex(INF, 1), 'f'), 'inf+1.000000j')
self.assertEqual(format(complex(INF, -1), 'f'), 'inf-1.000000j')
self.assertEqual(format(complex(INF, INF), 'F'), 'INF+INFj')
self.assertEqual(format(complex(1, INF), 'F'), '1.000000+INFj')
self.assertEqual(format(complex(INF, 1), 'F'), 'INF+1.000000j')
self.assertEqual(format(complex(INF, -1), 'F'), 'INF-1.000000j')
def test_main():
support.run_unittest(ComplexTest)
if __name__ == "__main__":
test_main()
|
|
"""wrapper for some git commands"""
import re
import subprocess
from mod import log
name = 'git'
platforms = ['linux', 'osx', 'win']
optional = False
not_found = "git not found in path, can't happen(?)"
# default git clone depth
clone_depth = 10
#-------------------------------------------------------------------------------
def check_exists(fips_dir=None) :
"""test if git is in the path
:returns: True if git is in the path
"""
try :
subprocess.check_output(['git', '--version'])
return True
except (OSError, subprocess.CalledProcessError) :
return False
#-------------------------------------------------------------------------------
def clone(url, branch, depth, name, cwd) :
"""git clone a remote git repo
:param url: the git url to clone from
:param branch: branch name (can be None)
:param depth: how deep to clone
:param name: the directory name to clone into
:param cwd: the directory where to run git
    :returns: True if git returned successfully
"""
if check_exists() :
cmd = 'git clone --recursive'
if branch :
cmd += ' --branch {} --single-branch'.format(branch)
if depth :
cmd += ' --depth {}'.format(depth)
cmd += ' {} {}'.format(url, name)
res = subprocess.call(cmd, cwd=cwd, shell=True)
return res == 0
else :
log.error("git not found, please run and fix './fips diag tools'")
return False
#-------------------------------------------------------------------------------
def get_branches(proj_dir) :
"""get a dictionary with all local branch names of a git repo as keys,
and their remote branch names as value
:param proj_dir: a git repo dir
:returns: dictionary of all local and remote branches
"""
branches = {}
try:
output = subprocess.check_output('git branch -vv', cwd=proj_dir, shell=True)
lines = output.splitlines()
for line in lines :
tokens = line[2:].split()
local_branch = tokens[0]
            if re.compile(r"^\[.*(:|\])$").match(tokens[2]) :
remote_branch = tokens[2][1:-1]
branches[local_branch] = remote_branch
except subprocess.CalledProcessError :
log.error("failed to call 'git branch -vv'")
    return branches
#-------------------------------------------------------------------------------
def checkout(proj_dir, revision) :
"""checkout a specific revision hash of a repository
:param proj_dir: a git repo dir
:param revision: SHA1 hash of the commit
    :returns: True if git returned successfully
"""
try :
output = subprocess.check_output('git checkout {}'.format(revision), cwd=proj_dir, shell=True)
return output.split(':')[0] != 'error'
except subprocess.CalledProcessError :
log.error("failed to call 'git checkout'")
return None
#-------------------------------------------------------------------------------
def has_uncommitted_files(proj_dir) :
"""check whether a git repo has uncommitted files
:param proj_dir: a git repo dir
:returns: True/False and output string
"""
try :
output = subprocess.check_output('git status -s', cwd=proj_dir, shell=True)
if len(output) > 0 :
return True, output
else :
return False, output
except subprocess.CalledProcessError :
log.error("failed to call 'git status -s'")
return False, ''
#-------------------------------------------------------------------------------
def get_remote_rev(proj_dir, remote_branch) :
"""get the head rev of a remote branch
:param proj_dir: a git repo dir
:param remote_branch: remote branch (e.g. origin/master)
:returns: the revision string of the remote branch head or None
"""
tokens = remote_branch.split('/')
try :
output = subprocess.check_output('git ls-remote {} {}'.format(tokens[0], tokens[1]), cwd=proj_dir, shell=True)
# can return an empty string if the remote branch doesn't exist
if output != '':
return output.split()[0]
else :
return None
except subprocess.CalledProcessError :
log.error("failed to call 'git ls-remote'")
return None
#-------------------------------------------------------------------------------
def get_local_rev(proj_dir, local_branch) :
"""get the head rev of a local branch
:param proj_dir: a git repo dir
:param local_branch: local branch name (e.g. master)
:returns: the revision string of the local branch head or None
"""
try :
output = subprocess.check_output('git rev-parse {}'.format(local_branch), cwd=proj_dir, shell=True)
return output.rstrip()
except subprocess.CalledProcessError :
log.error("failed to call 'git rev-parse'")
return None
#-------------------------------------------------------------------------------
def check_out_of_sync(proj_dir) :
"""check through all branches of the git repo in proj_dir and
returns an array of all branches that are out-of-sync with their
remote branches (either have unpushed local changes, or un-pulled
remote changes)
:param proj_dir: a git repo directory
:returns: array with branch names that are out-of-sync
"""
if not check_exists() :
log.error("git not found, please run and fix './fips diag tools'")
return False
out_of_sync = False
# first check whether there are uncommitted changes
status, status_output = has_uncommitted_files(proj_dir)
if status :
out_of_sync = True
log.warn("'{}' has uncommitted changes:".format(proj_dir))
log.info(status_output)
# check whether local and remote branch are out of sync
branches_out_of_sync = False
branches = get_branches(proj_dir)
if not branches :
log.warn("'{}' no remote branches found".format(proj_dir))
for local_branch in branches :
remote_branch = branches[local_branch]
remote_rev = get_remote_rev(proj_dir, remote_branch)
# remote_rev can be None if the remote branch doesn't exists,
# this is not an error
if remote_rev :
local_rev = get_local_rev(proj_dir, local_branch)
if remote_rev != local_rev :
out_of_sync = True
if not branches_out_of_sync:
# only show this once
log.warn("'{}' branches out of sync:".format(proj_dir))
branches_out_of_sync = True
log.info(" {}: {}".format(local_branch, local_rev))
log.info(" {}: {}".format(remote_branch, remote_rev))
return out_of_sync
#-------------------------------------------------------------------------------
def check_branch_out_of_sync(proj_dir, branch) :
"""check if a single branch is out of sync with remote repo"""
if not check_exists() :
log.error("git not found, please run and fix './fips diag tools'")
return False
out_of_sync = False
remote_branches = get_branches(proj_dir)
local_rev = get_local_rev(proj_dir, branch)
if branch in remote_branches :
remote_rev = get_remote_rev(proj_dir, remote_branches[branch])
out_of_sync = remote_rev != local_rev
else :
log.warn("'{}' no remote branch found for '{}'".format(proj_dir, branch))
return out_of_sync
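#-------------------------------------------------------------------------------
# A minimal usage sketch (not part of the original module): it shows how the
# helpers above are typically composed. The directory argument is a
# hypothetical placeholder.
def _example_sync_check(proj_dir='.') :
    """return True when git is available and the repo at proj_dir is fully
    in sync (no uncommitted files, no diverging branches)
    """
    if not check_exists() :
        return False
    return not check_out_of_sync(proj_dir)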
|
|
# Copyright (c) 2014 Baidu.com, Inc. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
"""
Unit tests for cdn client.
"""
import os
import random
import string
import unittest
import cdn_test_config
from baidubce import compat
from baidubce import exception
from baidubce.exception import BceServerError
from baidubce.services.cdn.cdn_client import CdnClient
import imp
import sys
imp.reload(sys)
if compat.PY2:
sys.setdefaultencoding('utf8')
class TestCdnClient(unittest.TestCase):
"""
Test class for cdn sdk client
"""
def setUp(self):
self.cdn_client = CdnClient(cdn_test_config.config)
"""
create_domain
"""
error = None
try:
origin = [
{'peer': '1.2.3.4'}
]
response = self.cdn_client.create_domain('www.example.com', origin)
print(response)
except BceServerError as e:
error = e
finally:
self.assertIsNone(error)
def tearDown(self):
"""
delete_domain
"""
error = None
try:
response = self.cdn_client.delete_domain('www.example.com')
print(response)
except BceServerError as e:
error = e
finally:
self.assertIsNone(error)
def test_list_domains(self):
"""
test_list_domains
"""
error = None
try:
response = self.cdn_client.list_domains()
print(response)
except BceServerError as e:
error = e
finally:
self.assertIsNone(error)
def test_enable_domain(self):
"""
test_enable_domain
"""
error = None
try:
response = self.cdn_client.enable_domain('www.example.com')
print(response)
except BceServerError as e:
error = e
finally:
self.assertIsNone(error)
def test_disable_domain(self):
"""
test_disable_domain
"""
error = None
try:
response = self.cdn_client.disable_domain('www.example.com')
print(response)
except BceServerError as e:
error = e
finally:
self.assertIsNone(error)
def test_get_domain_config(self):
"""
test_get_domain_config
"""
error = None
try:
response = self.cdn_client.get_domain_config('www.example.com')
print(response)
except BceServerError as e:
error = e
finally:
self.assertIsNone(error)
def test_set_domain_origin(self):
"""
test_set_domain_origin
"""
error = None
try:
origin = [
{'peer': '1.2.3.4', 'host': 'www.origin_host.com'},
{'peer': '1.2.3.5', 'host': 'www.origin_host.com'}
]
response = self.cdn_client.set_domain_origin('www.example.com', origin)
print(response)
except BceServerError as e:
error = e
finally:
self.assertIsNone(error)
def test_set_domain_https(self):
"""
test_set_domain_https
"""
error = None
try:
https = {
'enabled': True,
'certId': 'cert-rm45x46isit4',
}
response = self.cdn_client.set_domain_https('www.example.com', https)
print(response)
except BceServerError as e:
error = e
finally:
self.assertIsNone(error)
def test_set_domain_cache_ttl(self):
"""
test_set_domain_cache_ttl
"""
error = None
try:
rules = []
rules.append({'type':'suffix', 'value': '.jpg', 'ttl': 3600, 'weight': 30})
rules.append({'type':'path', 'value': '/a/b/c', 'ttl': 1800, 'weight': 15})
response = self.cdn_client.set_domain_cache_ttl('www.example.com', rules)
print(response)
except BceServerError as e:
error = e
finally:
self.assertIsNone(error)
def test_get_domain_cache_ttl(self):
"""
test_get_domain_cache_ttl
"""
error = None
try:
response = self.cdn_client.get_domain_cache_ttl('www.example.com')
print(response)
except BceServerError as e:
error = e
finally:
self.assertIsNone(error)
def test_set_domain_cache_full_url(self):
"""
test_set_domain_cache_full_url
"""
error = None
try:
response = self.cdn_client.set_domain_cache_full_url('www.example.com', True)
print(response)
except BceServerError as e:
error = e
finally:
self.assertIsNone(error)
def test_set_domain_referer_acl(self):
"""
test_set_domain_referer_acl
"""
error = None
try:
blackList = ["http://a/b/c/", "http://c/d/e/"]
response = self.cdn_client.set_domain_referer_acl(
domain = 'www.example.com',
blackList = blackList,
allowEmpty = True)
print(response)
except BceServerError as e:
error = e
finally:
self.assertIsNone(error)
def test_set_domain_ip_acl(self):
"""
test_set_domain_ip_acl
"""
error = None
try:
blackList = ['1.1.1.2', '1.1.1.3']
response = self.cdn_client.set_domain_ip_acl(
domain = 'www.example.com',
blackList = blackList)
print(response)
except BceServerError as e:
error = e
finally:
self.assertIsNone(error)
def test_set_domain_limit_rate(self):
"""
test_set_domain_limit_rate
"""
error = None
try:
limitRate = 1024
response = self.cdn_client.set_domain_limit_rate('www.example.com', limitRate)
print(response)
except BceServerError as e:
error = e
finally:
self.assertIsNone(error)
def test_set_request_auth(self):
"""
test_set_request_auth
"""
error = None
try:
request_auth = {
"type": "c",
"key1": "secretekey1",
"key2": "secretekey2",
"timeout": 300,
"whiteList": ["/crossdomain.xml"],
"signArg": "sign",
"timeArg": "t"
}
self.cdn_client.set_domain_request_auth('www.example.com', request_auth)
except BceServerError as e:
error = e
finally:
self.assertIsNone(error)
def test_get_domain_pv_stat(self):
"""
use new stat api
        params are optional
no domain->all domains by uid
no endTime->time by now
no startTime->24hour before endTime
no period->3600
no withRegion->false
"""
error = None
try:
response = self.cdn_client.get_domain_pv_stat(
domain = 'www.example.com',
startTime = '2019-03-05T12:00:00Z',
endTime = '2019-03-06T13:00:00Z',
period = 3600, withRegion = '')
print(response)
except BceServerError as e:
error = e
finally:
self.assertIsNone(error)
def test_get_domain_flow_stat(self):
"""
use new stat api
        params are optional
no domain->all domains by uid
no endTime->time by now
no startTime->24hour before endTime
no period->3600
no withRegion->false
"""
error = None
try:
response = self.cdn_client.get_domain_flow_stat(
domain = 'www.example.com',
startTime = '2019-03-05T12:00:00Z',
endTime = '2019-03-06T13:00:00Z',
period = 3600, withRegion = '')
print(response)
except BceServerError as e:
error = e
finally:
self.assertIsNone(error)
def test_get_domain_src_flow_stat(self):
"""
use new stat api
        params are optional
no domain->all domains by uid
no endTime->time by now
no startTime->24hour before endTime
no period->3600
"""
error = None
try:
response = self.cdn_client.get_domain_src_flow_stat(
domain = 'www.example.com',
startTime = '2019-03-05T12:00:00Z',
endTime = '2019-03-06T13:00:00Z',
period = 3600)
print(response)
except BceServerError as e:
error = e
finally:
self.assertIsNone(error)
def test_get_domain_hitrate_stat(self):
"""
use new stat api
        params are optional
"""
error = None
try:
response = self.cdn_client.get_domain_hitrate_stat(
domain = 'www.example.com',
startTime = '2019-03-05T12:00:00Z',
endTime = '2019-03-06T13:00:00Z',
period = 3600)
print(response)
except BceServerError as e:
error = e
finally:
self.assertIsNone(error)
def test_get_domain_httpcode_stat(self):
"""
use new stat api
        params are optional
"""
error = None
try:
response = self.cdn_client.get_domain_httpcode_stat(
domain = 'www.example.com',
startTime = '2019-03-05T12:00:00Z',
endTime = '2019-03-06T13:00:00Z',
period = 3600)
print(response)
except BceServerError as e:
error = e
finally:
self.assertIsNone(error)
    def test_get_domain_topn_url_stat(self):
"""
use new stat api
        params are optional
"""
error = None
try:
response = self.cdn_client.get_domain_topn_url_stat(
domain = 'www.example.com',
startTime = '2019-03-05T12:00:00Z',
endTime = '2019-03-06T13:00:00Z',
period = 3600)
print(response)
except BceServerError as e:
error = e
finally:
self.assertIsNone(error)
def test_get_domain_topn_referer_stat(self):
"""
use new stat api
        params are optional
"""
error = None
try:
response = self.cdn_client.get_domain_topn_referer_stat(
domain = 'www.example.com',
startTime = '2019-03-05T12:00:00Z',
endTime = '2019-03-06T13:00:00Z',
period = 3600)
print(response)
except BceServerError as e:
error = e
finally:
self.assertIsNone(error)
def test_get_domain_uv_stat(self):
"""
use new stat api
        params are optional
"""
error = None
try:
response = self.cdn_client.get_domain_uv_stat(
domain = 'www.example.com',
startTime = '2019-03-05T12:00:00Z',
endTime = '2019-03-06T13:00:00Z',
period = 3600)
print(response)
except BceServerError as e:
error = e
finally:
self.assertIsNone(error)
def test_get_domain_avg_speed_stat(self):
"""
use new stat api
        params are optional
no domain->all domains by uid
no endTime->time by now
no startTime->24hour before endTime
no period->3600
no withDistribution->false
"""
error = None
try:
response = self.cdn_client.get_domain_avg_speed_stat(
domain = 'www.example.com',
startTime = '2019-03-05T12:00:00Z',
endTime = '2019-03-06T13:00:00Z',
period = 3600)
print(response)
except BceServerError as e:
error = e
finally:
self.assertIsNone(error)
def test_purge(self):
"""
test_purge
"""
error = None
try:
tasks = []
tasks.append({'url': 'http://www.example.com/1.jpg'})
tasks.append({'url': 'http://www.example.com/', "type":"directory"})
response = self.cdn_client.purge(tasks)
print(response)
except BceServerError as e:
error = e
finally:
self.assertIsNone(error)
def test_list_purge_tasks(self):
"""
test_list_purge_tasks
"""
error = None
try:
response = self.cdn_client.list_purge_tasks(
# id = 'eJwztjA3swQAAy4BEg==',
url = 'http://www.example.com/1.jpg',
startTime = '2019-03-05T12:00:00Z',
endTime = '2019-03-06T13:00:00Z')
print(response)
except BceServerError as e:
error = e
finally:
self.assertIsNone(error)
def test_prefetch(self):
"""
test_prefetch
"""
error = None
try:
tasks = []
tasks.append({'url': 'http://www.example.com/1.jpg'})
tasks.append({'url': 'http://www.example.com/2.jpg'})
response = self.cdn_client.prefetch(tasks)
print(response)
except BceServerError as e:
error = e
finally:
self.assertIsNone(error)
def test_list_prefetch_tasks(self):
"""
test_list_prefetch_tasks
"""
error = None
try:
response = self.cdn_client.list_prefetch_tasks(
# id = 'c942f806-1246-5870-e724-1d579b56d438',
startTime = '2019-03-05T12:00:00Z',
endTime = '2019-03-06T13:00:00Z',)
print(response)
except BceServerError as e:
error = e
finally:
self.assertIsNone(error)
def test_get_quota(self):
"""
test_get_quota
"""
error = None
try:
response = self.cdn_client.list_quota()
print(response)
except BceServerError as e:
error = e
finally:
self.assertIsNone(error)
def test_get_domain_log(self):
"""
test_get_domain_log
"""
error = None
try:
response = self.cdn_client.get_domain_log(
domain = 'www.example.com',
startTime = '2019-03-05T12:00:00Z',
endTime = '2019-03-06T13:00:00Z')
print(response)
except BceServerError as e:
error = e
finally:
self.assertIsNone(error)
def test_ip_query(self):
"""
test_ip_query
"""
error = None
try:
response = self.cdn_client.ip_query(action = 'describeIp', ip = '112.67.254.34')
print(response)
except BceServerError as e:
error = e
finally:
self.assertIsNone(error)
def test_set_seo(self):
"""
test_set_seo
"""
error = None
try:
self.cdn_client.set_seo(domain='www.example.com', push_record=True, directory_origin=True)
except BceServerError as e:
error = e
finally:
self.assertIsNone(error)
def test_get_seo(self):
"""
test_get_seo
"""
error = None
try:
response = self.cdn_client.get_seo(domain='www.example.com')
print(response)
except BceServerError as e:
error = e
finally:
self.assertIsNone(error)
def test_set_follow_protocol(self):
"""
test_set_follow_protocol
"""
error = None
try:
response = self.cdn_client.set_follow_protocol(domain='www.example.com', follow=True)
print(response)
except BceServerError as e:
error = e
finally:
self.assertIsNone(error)
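# A minimal sketch, not part of the original suite: build a reduced suite of the
# read-only calls for a quick smoke run. The chosen test names come from the
# class above; credentials are still taken from cdn_test_config.
def _smoke_suite():
    suite = unittest.TestSuite()
    for name in ('test_list_domains', 'test_get_domain_config'):
        suite.addTest(TestCdnClient(name))
    return suite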
if __name__ == "__main__":
unittest.main()
|
|
#!/usr/bin/python
#
# Copyright 2015 John Kendrick
#
# This file is part of PDielec
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# You should have received a copy of the MIT License
# along with this program, if not see https://opensource.org/licenses/MIT
#
"""Read the contents of a directory containg VASP input and output files"""
import re
import numpy as np
from PDielec.UnitCell import UnitCell
from PDielec.GenericOutputReader import GenericOutputReader
from PDielec.Constants import atomic_number_to_element
def myfloat(string):
'''A replacement for float() which will return a large number if it finds a * in the string '''
if '*' in string:
return 9999.999
else:
return float(string)
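# Illustrative only, not part of the original reader: columns that VASP fills
# with '*' on overflow map to the 9999.999 sentinel, everything else parses as
# a normal float.
def _myfloat_examples():
    assert myfloat('12.5') == 12.5
    assert myfloat('*******') == 9999.999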
class VaspOutputReader(GenericOutputReader):
"""Read the contents of a directory containg VASP input and output files"""
def __init__(self, names):
GenericOutputReader.__init__(self, names)
self.type = 'Vasp output'
self._pspots = {}
self._ediff = 0.0
self._pulay = None
self._ibrion = 0
self._potim = 0.0
return
def _read_output_files(self):
"""Read the vasp files in the directory"""
self.manage = {} # Empty the dictionary matching phrases
self.manage['ionspertype'] = (re.compile(' ions per type ='), self._read_ionspertype)
# self.manage['masses_skip'] = (re.compile(' Mass of Ions in am'), self._read_skip4)
self.manage['pspots'] = (re.compile(' POTCAR:'), self._read_pspot)
self.manage['arrays'] = (re.compile(' Dimension of arrays:'), self._read_array_dimensions)
#self.manage['masses'] = (re.compile(' POMASS ='), self._read_masses)
self.manage['spin'] = (re.compile(' ISPIN = '), self._read_spin)
self.manage['encut'] = (re.compile(' ENCUT = '), self._read_encut)
self.manage['ediff'] = (re.compile(' EDIFF = '), self._read_ediff)
self.manage['ibrion'] = (re.compile(' IBRION = '), self._read_ibrion)
self.manage['potim'] = (re.compile(' POTIM = '), self._read_potim)
self.manage['nelect'] = (re.compile(' NELECT = '), self._read_nelect)
self.manage['lattice'] = (re.compile(' volume of cell :'), self._read_lattice_vectors)
self.manage['fractional'] = (re.compile(' position of ions in fractional coordinates'), self._read_fractional_coordinates)
self.manage['forces'] = (re.compile(' POSITION *TOTAL-FORCE'), self._read_forces)
self.manage['energy'] = (re.compile(' FREE ENERGIE OF THE ION'), self._read_energy)
self.manage['magnet'] = (re.compile(' number of electron '), self._read_magnet)
self.manage['pressure'] = (re.compile(' external pressure ='), self._read_external_pressure)
self.manage['skip1'] = (re.compile(' old parameters found'), self._read_skip4)
self.manage['staticDielectric'] = (re.compile(' MACROSCOPIC STATIC DIELECTRIC TENSOR .including'), self._read_static_dielectric)
self.manage['staticIonic'] = (re.compile(' MACROSCOPIC STATIC DIELECTRIC TENSOR IONIC CONT'), self._read_ionic_dielectric)
self.manage['bornCharges'] = (re.compile(' BORN EFFECTIVE CHARGES'), self._read_born_charges)
self.manage['eigenvectors'] = (re.compile(' Eigenvectors and eige'), self._read_eigenvectors)
self.manage['eigenskip'] = (re.compile(' Eigenvectors after division'), self._read_skip4)
self.manage['elastic'] = (re.compile(' TOTAL ELASTIC MODULI'), self._read_elastic_constants)
self.manage['kpoint'] = (re.compile('^Gamma'), self._read_kpoint_grid)
self.manage['species'] = (re.compile('^ *Atomic configuration'), self._read_species)
self.manage['newmasses'] = (re.compile(' Mass of Ions in am'), self._read_newmasses)
for f in self._outputfiles:
self._read_output_file(f)
return
def _read_forces(self, line):
line = self.file_descriptor.readline()
maxf = 0.0
rmsf = 0.0
for i in range(self.nions):
line = self.file_descriptor.readline()
forces = [ float(f) for f in line.split()[3:6] ]
for f in forces:
rmsf += f*f
if abs(f) > maxf:
maxf = abs(f)
# end if
# end for f
#end for i
if not "max_force" in self.iterations:
self.iterations["max_force"] = []
self.iterations["max_force"].append(maxf)
if not "rms_force" in self.iterations:
self.iterations["rms_force"] = []
self.iterations["rms_force"].append(rmsf)
return
def _read_species(self, line):
line = self.file_descriptor.readline()
nlines = int(line.split()[0])
line = self.file_descriptor.readline()
zcharge = 0.0
for i in range(nlines):
line = self.file_descriptor.readline()
zcharge = zcharge + float(line.split()[4])
self.species.append(atomic_number_to_element[int(zcharge+0.001)])
self.nspecies = len(self.species)
return
def _read_kpoint_grid(self, line):
line = self.file_descriptor.readline()
self.kpoint_grid = [int(f) for f in line.split()[0:3] ]
return
def _read_ionspertype(self, line):
self.ions_per_type = [int(i) for i in line.split()[4:]]
self.nspecies = len(self.ions_per_type)
return
def _read_newmasses(self, line):
self.masses_per_type = []
line = self.file_descriptor.readline()
mass_string = line[12:]
start = 0
increment = 6
for i in range(self.nspecies):
mass = mass_string[start:start+increment]
self.masses_per_type.append(float(mass))
start = start + increment
self.masses = []
self.atom_type_list = []
self.species_list = []
for k, mass in enumerate(self.masses_per_type):
n = self.ions_per_type[k]
for i in range(n):
self.atom_type_list.append(k)
self.masses.append(mass)
self.species_list.append(self.species[k])
# end loop over i
# end look over current know types
return
def _read_masses(self, line):
mass_string = line.split()[2]
mass_string = mass_string.replace(";", "")
self.masses_per_type.append(float(mass_string))
return
def _read_eigenvectors(self, line):
line = self.file_descriptor.readline()
line = self.file_descriptor.readline()
self.frequencies = []
self.mass_weighted_normal_modes = []
n = 3 * self.nions
for i in range(n):
line = self.file_descriptor.readline()
line = self.file_descriptor.readline()
imaginary = (line.split()[1] == "f/i=")
if imaginary:
# represent imaginary by negative real
freq = -float(line.split()[6])
else:
freq = float(line.split()[7])
# end if
self.frequencies.append(freq)
line = self.file_descriptor.readline()
a = []
for j in range(self.nions):
line = self.file_descriptor.readline()
a.append([float(line.split()[3]), float(line.split()[4]), float(line.split()[5])])
# end for j
self.mass_weighted_normal_modes.append(a)
# end of for i in range(n)
return
def _read_born_charges(self, line):
"""Read the born charges from the OUTCAR file.
Each row of the output refers to a given field direction
Each column in the row refers the atomic displacement
so the output is arranged [[a1x a1y a1z]
[ a2x a2y a2z]
[ a3x a3y a3z]]
where 1,2,3 are the field directions and x, y, z are the atomic displacements"""
line = self.file_descriptor.readline()
line = self.file_descriptor.readline()
self.born_charges = []
for i in range(self.nions):
line = self.file_descriptor.readline()
b = []
b.append([float(line.split()[1]), float(line.split()[2]), float(line.split()[3])])
line = self.file_descriptor.readline()
b.append([float(line.split()[1]), float(line.split()[2]), float(line.split()[3])])
line = self.file_descriptor.readline()
b.append([float(line.split()[1]), float(line.split()[2]), float(line.split()[3])])
line = self.file_descriptor.readline()
self.born_charges.append(b)
return
def _read_elastic_constants(self, line):
# Read the total elastic constants
elastic_constants = []
line = self.file_descriptor.readline()
line = self.file_descriptor.readline()
line = self.file_descriptor.readline()
elastic_constants.append([myfloat(f) for f in line.split()[1:7]])
line = self.file_descriptor.readline()
elastic_constants.append([myfloat(f) for f in line.split()[1:7]])
line = self.file_descriptor.readline()
elastic_constants.append([myfloat(f) for f in line.split()[1:7]])
line = self.file_descriptor.readline()
elastic_constants.append([myfloat(f) for f in line.split()[1:7]])
line = self.file_descriptor.readline()
elastic_constants.append([myfloat(f) for f in line.split()[1:7]])
line = self.file_descriptor.readline()
elastic_constants.append([myfloat(f) for f in line.split()[1:7]])
econs = np.array(elastic_constants)
# convert from kBar to GPa
econs = econs / 10.0
self.elastic_constants = econs.tolist()
return
def _read_ionic_dielectric(self, line):
        # Read the ionic contribution to the static dielectric and use it to compute
# the full static dielectric constant
line = self.file_descriptor.readline()
line = self.file_descriptor.readline()
        # this is the zero-frequency ionic contribution to the static dielectric
ionic_dielectric = []
if '*' in line or len(line.split()) < 3:
line = "99999.999 99999.999 99999.999"
ionic_dielectric.append([myfloat(f) for f in line.split()[0:3]])
line = self.file_descriptor.readline()
if '*' in line or len(line.split()) < 3:
line = "99999.999 99999.999 99999.999"
ionic_dielectric.append([myfloat(f) for f in line.split()[0:3]])
line = self.file_descriptor.readline()
if '*' in line or len(line.split()) < 3:
line = "99999.999 99999.999 99999.999"
ionic_dielectric.append([myfloat(f) for f in line.split()[0:3]])
array1 = np.array(self.zerof_optical_dielectric)
array2 = np.array(ionic_dielectric)
array3 = array1 + array2
self.zerof_static_dielectric = array3.tolist()
return
def _read_static_dielectric(self, line):
line = self.file_descriptor.readline()
line = self.file_descriptor.readline()
        # this is epsilon infinity
self.zerof_optical_dielectric = []
if '*' in line or len(line.split()) < 3:
line = "99999.999 99999.999 99999.999"
self.zerof_optical_dielectric.append([myfloat(f) for f in line.split()[0:3]])
line = self.file_descriptor.readline()
if '*' in line or len(line.split()) < 3:
line = "99999.999 99999.999 99999.999"
self.zerof_optical_dielectric.append([myfloat(f) for f in line.split()[0:3]])
line = self.file_descriptor.readline()
if '*' in line or len(line.split()) < 3:
line = "99999.999 99999.999 99999.999"
self.zerof_optical_dielectric.append([myfloat(f) for f in line.split()[0:3]])
return
def _read_skip4(self, line):
self.file_descriptor.readline()
self.file_descriptor.readline()
self.file_descriptor.readline()
self.file_descriptor.readline()
return
def _read_external_pressure(self, line):
# Vasp writes out kbar so convert to GPa
self.pressure = float(line.split()[3])/10.0
self.pressures.append(float(line.split()[3])/10.0)
self._pulay = float(line.split()[8])/10.0
return
def _read_pspot(self, line):
self._pspots[line.split()[2]] = line.split()[1]
return
def _read_array_dimensions(self, line):
line = self.file_descriptor.readline()
self.kpoints = int(line.split()[3])
self.nbands = int(line.split()[14])
line = self.file_descriptor.readline()
self.nions = int(line.split()[11])
return
def _read_lattice_vectors(self, line):
self.volume = float(line.split()[4])
self.volumes.append(float(line.split()[4]))
line = self.file_descriptor.readline()
line = self.file_descriptor.readline()
avector = [float(line.split()[0]), float(line.split()[1]), float(line.split()[2])]
line = self.file_descriptor.readline()
bvector = [float(line.split()[0]), float(line.split()[1]), float(line.split()[2])]
line = self.file_descriptor.readline()
cvector = [float(line.split()[0]), float(line.split()[1]), float(line.split()[2])]
cell = UnitCell(avector, bvector, cvector)
if self.ncells > 0:
cell.set_fractional_coordinates(self.unit_cells[-1].fractional_coordinates)
cell.set_element_names(self.species_list)
self.unit_cells.append(cell)
self.ncells = len(self.unit_cells)
return
def _read_fractional_coordinates(self, line):
n = 0
ions = []
for n in range(self.nions):
line = self.file_descriptor.readline()
ions.append([float(f) for f in line.split()[0:3]])
self.unit_cells[-1].set_fractional_coordinates(ions)
self.unit_cells[-1].set_element_names(self.species_list)
return
def _read_spin(self, line):
self.spin = int(line.split()[2])
return
def _read_encut(self, line):
self.energy_cutoff = float(line.split()[2])
return
def _read_ediff(self, line):
self._ediff = float(line.split()[2])
return
def _read_ibrion(self, line):
self._ibrion = int(line.split()[2])
return
def _read_potim(self, line):
self._potim = float(line.split()[2])
return
def _read_nelect(self, line):
self.electrons = int(float(line.split()[2]))
return
def _read_magnet(self, line):
self.electrons = float(line.split()[3])
if len(line.split()) > 5:
self.magnetization = float(line.split()[5])
else:
self.magnetization = 0.0
def _read_energy(self, line):
line = self.file_descriptor.readline()
line = self.file_descriptor.readline()
self.final_free_energy = float(line.split()[4])
self.final_free_energies.append(float(line.split()[4]))
line = self.file_descriptor.readline()
line = self.file_descriptor.readline()
self.final_energy_without_entropy = float(line.split()[3])
self.final_energies_without_entropy.append(float(line.split()[3]))
return
|
|
# Copyright (C) 2013 Jaedyn K. Draper
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Contains a plugin class for creating android NDK projects
"""
import glob
import platform
import os
import shutil
import subprocess
import sys
import shlex
import re
from . import toolchain_gcc
from . import log
from . import _shared_globals
import csbuild
if platform.system() == "Windows":
__CSL = None
import ctypes
def symlink(source, link_name):
'''symlink(source, link_name)
Creates a symbolic link pointing to source named link_name'''
global __CSL
if __CSL is None:
csl = ctypes.windll.kernel32.CreateSymbolicLinkW
csl.argtypes = (ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_uint32)
csl.restype = ctypes.c_ubyte
__CSL = csl
flags = 0
if source is not None and os.path.isdir(source):
flags = 1
if __CSL(link_name, source, flags) == 0:
raise ctypes.WinError()
else:
symlink = os.symlink
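# Illustrative only, not part of the original plugin: after the shim above the
# same call works on every platform. The paths below are hypothetical
# placeholders.
def _symlink_example(source="libfoo.so", link_name="build/libfoo.so"):
    """Create link_name pointing at source; on Windows this goes through
    CreateSymbolicLinkW, elsewhere through os.symlink."""
    symlink(source, link_name)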
class AndroidBase( object ):
def __init__(self):
#TODO: Figure out a way to share some of this data between compiler and linker
self._ndkHome = os.getenv("NDK_HOME")
self._sdkHome = os.getenv("ANDROID_HOME")
self._antHome = os.getenv("ANT_HOME")
self._javaHome = os.getenv("JAVA_HOME")
#self._maxSdkVersion = 19
#TODO: Determine this from highest number in the filesystem.
self._targetSdkVersion = 19
self._minSdkVersion = 1
self._packageName = "csbuild.autopackage"
self._activityName = None
self._usedFeatures = []
self._sysRootDir = ""
self._keystoreLocation = ""
self._keystorePwFile = ""
self._keyPwFile = ""
self._keystoreAlias = ""
self._stlVersion = "GNU"
self._addNativeAppGlue = True
def _copyTo(self, other):
other._ndkHome = self._ndkHome
other._sdkHome = self._sdkHome
other._antHome = self._antHome
other._javaHome = self._javaHome
#other._maxSdkVersion = self._maxSdkVersion
other._targetSdkVersion = self._targetSdkVersion
other._minSdkVersion = self._minSdkVersion
other._packageName = self._packageName
other._activityName = self._activityName
other._usedFeatures = list(self._usedFeatures)
other._sysRootDir = self._sysRootDir
other._keystoreLocation = self._keystoreLocation
other._keystorePwFile = self._keystorePwFile
other._keyPwFile = self._keyPwFile
other._keystoreAlias = self._keystoreAlias
other._stlVersion = self._stlVersion
other._addNativeAppGlue = self._addNativeAppGlue
def SetNdkHome(self, pathToNdk):
self._ndkHome = os.path.abspath(pathToNdk)
def SetSdkHome(self, pathToSdk):
self._sdkHome = os.path.abspath(pathToSdk)
def SetAntHome(self, pathToAnt):
self._antHome = os.path.abspath(pathToAnt)
def SetJavaHome(self, pathToJava):
self._javaHome = os.path.abspath(pathToJava)
def SetKeystoreLocation(self, pathToKeystore):
self._keystoreLocation = os.path.abspath(pathToKeystore)
if not self._keystorePwFile:
self._keystorePwFile = os.path.join(csbuild.mainfileDir, os.path.basename(pathToKeystore+".pass"))
def SetKeystorePasswordFile(self, pathToPwFile):
self._keystorePwFile = os.path.abspath(pathToPwFile)
def SetKeyPasswordFile(self, pathToPwFile):
self._keyPwFile = os.path.abspath(pathToPwFile)
def SetKeystoreAlias(self, alias):
self._keystoreAlias = alias
def SetMinSdkVersion(self, version):
self._minSdkVersion = version
#def SetMaxSdkVersion(self, version):
# self._maxSdkVersion = version
def SetTargetSdkVersion(self, version):
self._targetSdkVersion = version
def SetPackageName(self, name):
self._packageName = name
def SetActivityName(self, name):
self._activityName = name
def AddUsedFeatures(self, *args):
self._usedFeatures += list(args)
def SetNativeAppGlue(self, addGlue):
self._addNativeAppGlue = addGlue
def GetValidArchitectures(self):
return ['x86', 'armeabi', 'armeabi-v7a', 'armeabi-v7a-hard', 'mips']
def _getTargetTriple(self, project):
if self.isClang:
if project.outputArchitecture == "x86":
return "-target i686-linux-android"
elif project.outputArchitecture == "mips":
return "-target mipsel-linux-android"
elif project.outputArchitecture == "armeabi":
return "-target armv7-linux-androideabi"
else:
return "-target armv7a-linux-androideabi"
else:
return ""
def _getSimplifiedArch(self, project):
if project.outputArchitecture.startswith("arm"):
return "arm"
return project.outputArchitecture
def _setSysRootDir(self, project):
toolchainsDir = os.path.join(self._ndkHome, "toolchains")
arch = self._getSimplifiedArch(project)
dirs = glob.glob(os.path.join(toolchainsDir, "{}*".format(arch)))
bestCompilerVersion = ""
for dirname in dirs:
prebuilt = os.path.join(toolchainsDir, dirname, "prebuilt")
if not os.access(prebuilt, os.F_OK):
continue
if dirname > bestCompilerVersion:
bestCompilerVersion = dirname
if not bestCompilerVersion:
log.LOG_ERROR("Couldn't find compiler for architecture {}.".format(project.outputArchitecture))
csbuild.Exit(1)
if platform.system() == "Windows":
platformName = "windows"
else:
platformName = "linux"
sysRootDir = os.path.join(toolchainsDir, bestCompilerVersion, "prebuilt", platformName)
dirs = list(glob.glob("{}*".format(sysRootDir)))
self._sysRootDir = dirs[0]
def _getCommands(self, project, cmd1, cmd2, searchInLlvmPath = False):
toolchainsDir = os.path.join(self._ndkHome, "toolchains")
arch = self._getSimplifiedArch(project)
dirs = glob.glob(os.path.join(toolchainsDir, "{}*".format("llvm" if searchInLlvmPath else arch)))
bestCompilerVersion = ""
for dirname in dirs:
prebuilt = os.path.join(toolchainsDir, dirname, "prebuilt")
if not os.access(prebuilt, os.F_OK):
continue
if dirname > bestCompilerVersion:
bestCompilerVersion = dirname
if not bestCompilerVersion:
log.LOG_ERROR("Couldn't find compiler for architecture {}.".format(project.outputArchitecture))
csbuild.Exit(1)
if platform.system() == "Windows":
platformName = "windows"
ext = ".exe"
else:
platformName = "linux"
ext = ""
cmd1Name = cmd1 + ext
cmd2Name = cmd2 + ext
binDir = os.path.join(toolchainsDir, bestCompilerVersion, "prebuilt", platformName)
dirs = list(glob.glob("{}*".format(binDir)))
binDir = os.path.join(dirs[0], "bin")
maybeCmd1 = os.path.join(binDir, cmd1Name)
if os.access(maybeCmd1, os.F_OK):
cmd1Result = maybeCmd1
cmd2Result = os.path.join(binDir, cmd2Name)
else:
dirs = list(glob.glob(os.path.join(binDir, "*-{}".format(cmd1Name))))
prefix = dirs[0].rsplit('-', 1)[0]
cmd1Result = dirs[0]
cmd2Result = "{}-{}".format(prefix, cmd2Name)
return cmd1Result, cmd2Result
class AndroidCompiler(AndroidBase, toolchain_gcc.compiler_gcc):
def __init__(self):
AndroidBase.__init__(self)
toolchain_gcc.compiler_gcc.__init__(self)
self._toolchainPath = ""
self._setupCompleted = False
def copy(self):
ret = toolchain_gcc.compiler_gcc.copy(self)
AndroidBase._copyTo(self, ret)
ret._toolchainPath = self._toolchainPath
ret._setupCompleted = self._setupCompleted
return ret
def postPrepareBuildStep(self, project):
if project.metaType == csbuild.ProjectType.Application and self._addNativeAppGlue:
appGlueDir = os.path.join( self._ndkHome, "sources", "android", "native_app_glue" )
project.includeDirs.append(appGlueDir)
project.extraDirs.append(appGlueDir)
project.RediscoverFiles()
def GetDefaultArchitecture(self):
return "armeabi-v7a"
def _setupCompiler(self, project):
#TODO: Let user choose which compiler version to use; for now, using the highest numbered version.
if self.isClang:
ccName = "clang"
cxxName = "clang++"
else:
ccName = "gcc"
cxxName = "g++"
self._settingsOverrides["cc"], self._settingsOverrides["cxx"] = self._getCommands(project, ccName, cxxName, self.isClang)
def _setupForProject( self, project ):
#toolchain_gcc.compiler_gcc.SetupForProject(self, project)
if not self._setupCompleted:
if "clang" in project.cc or "clang" in project.cxx:
self.isClang = True
self._setupCompiler(project)
self._setSysRootDir(project)
self._setupCompleted = True
def prePrepareBuildStep(self, project):
self._setupForProject(project)
def _getSystemDirectories(self, project, isCpp):
ret = ""
if isCpp:
if self._stlVersion == "GNU":
ret += "-isystem \"{}\" ".format(os.path.join(
self._ndkHome,
"sources",
"cxx-stl",
"gnu-libstdc++",
"4.8",
"libs",
project.outputArchitecture,
"include")
)
ret += "-isystem \"{}\" ".format(os.path.join( self._ndkHome, "sources", "cxx-stl", "gnu-libstdc++", "4.8", "include"))
elif self._stlVersion == "stlport":
ret += "-isystem \"{}\" ".format(os.path.join( self._ndkHome, "sources", "cxx-stl", "system", "include"))
ret += "-isystem \"{}\" ".format(os.path.join( self._ndkHome, "sources", "cxx-stl", "stlport", "stlport"))
elif self._stlVersion == "libc++":
ret += "-isystem \"{}\" ".format(os.path.join( self._ndkHome, "sources", "cxx-stl", "llvm-libc++", "libcxx", "include"))
ret += "--sysroot \"{}\" ".format(self._sysRootDir)
ret += "-isystem \"{}\" ".format(
os.path.join(
self._ndkHome,
"platforms",
"android-{}".format(self._targetSdkVersion),
"arch-{}".format(self._getSimplifiedArch(project)),
"usr",
"include"
)
)
ret += "-I {} ".format(self._ndkHome)
return ret
def _getBaseCommand( self, compiler, project, isCpp ):
self._setupForProject(project)
if not self.isClang:
exitcodes = "-pass-exit-codes"
else:
exitcodes = ""
if isCpp:
standard = self.cppStandard
else:
standard = self.cStandard
return "\"{}\" {} -Winvalid-pch -c {}-g{} -O{} {}{}{} {} {} {}".format(
compiler,
exitcodes,
self._getDefines( project.defines, project.undefines ),
project.debugLevel,
project.optLevel,
"-fPIC " if project.type == csbuild.ProjectType.SharedLibrary else "",
"-pg " if project.profile else "",
"--std={0}".format( standard ) if standard != "" else "",
" ".join( project.cxxCompilerFlags ) if isCpp else " ".join( project.ccCompilerFlags ),
self._getSystemDirectories(project, isCpp),
self._getTargetTriple(project)
)
def _getIncludeDirs( self, includeDirs ):
"""Returns a string containing all of the passed include directories, formatted to be passed to gcc/g++."""
ret = ""
for inc in includeDirs:
ret += "-I{} ".format( os.path.abspath( inc ) )
return ret
class AndroidLinker(AndroidBase, toolchain_gcc.linker_gcc):
def __init__(self):
AndroidBase.__init__(self)
toolchain_gcc.linker_gcc.__init__(self)
self._setupCompleted = False
def copy(self):
ret = toolchain_gcc.linker_gcc.copy(self)
AndroidBase._copyTo(self, ret)
ret._setupCompleted = self._setupCompleted
return ret
@staticmethod
def AdditionalArgs( parser ):
parser.add_argument("--ndk-home", help="Location of android NDK directory")
parser.add_argument("--sdk-home", help="Location of android SDK directory")
parser.add_argument("--ant-home", help="Location of apache ant")
parser.add_argument("--java-home", help="Location of java")
parser.add_argument("--keystore", help="Location of keystore to sign release apks (default is {makefile location}/{project name}.keystore")
parser.add_argument("--keystore-pwfile", help="Location of password file for loading keystore (default is {makefile location}/{keystore_filename}.pass)")
parser.add_argument("--alias", help="Alias to use inside the keystore (default is project name)")
parser.add_argument("--key-pwfile", help="Location of password file for signing release apks (default is {makefile location}/{keystore_filename}.{alias}.pass)")
parser.add_argument("--zipalign-location", help="Location of zipalign")
def _setupLinker(self, project):
#TODO: Let user choose which compiler version to use; for now, using the highest numbered version.
self._ld, self._ar = self._getCommands(project, "ld", "ar")
def _setupForProject( self, project ):
toolchain_gcc.linker_gcc._setupForProject(self, project)
if not self._setupCompleted:
if "clang" in project.cc or "clang" in project.cxx:
self.isClang = True
self._setupLinker(project)
self._setSysRootDir(project)
self._setupCompleted = True
if not self._keystoreLocation:
self._keystoreLocation = os.path.join(csbuild.mainfileDir, project.name+".keystore")
if not self._keystoreAlias:
self._keystoreAlias = project.name
alias = csbuild.GetOption("alias")
if alias:
self._keystoreAlias = alias
if not self._keystorePwFile:
self._keystorePwFile = os.path.join(csbuild.mainfileDir, self._keystoreLocation+".pass")
if not self._keyPwFile:
self._keyPwFile = os.path.join(csbuild.mainfileDir, self._keystoreAlias + ".keystore." + project.name + ".pass")
ndkHome = csbuild.GetOption("ndk_home")
sdkHome = csbuild.GetOption("sdk_home")
antHome = csbuild.GetOption("ant_home")
javaHome = csbuild.GetOption("java_home")
keystore = csbuild.GetOption("keystore")
keystorePwFile = csbuild.GetOption("keystore_pwfile")
keyPwFile = csbuild.GetOption("key_pwfile")
if ndkHome:
self._ndkHome = ndkHome
if sdkHome:
self._sdkHome = sdkHome
if antHome:
self._antHome = antHome
if javaHome:
self._javaHome = javaHome
if keystore:
self._keystoreLocation = keystore
if keystorePwFile:
self._keystorePwFile = keystorePwFile
if keyPwFile:
self._keyPwFile = keyPwFile
def _getSystemLibDirs(self, project):
ret = ""
if project.hasCppFiles:
if self._stlVersion == "GNU":
ret += "-L\"{}\" ".format(os.path.join(
self._ndkHome,
"sources",
"cxx-stl",
"gnu-libstdc++",
"4.8",
"libs",
project.outputArchitecture)
)
if project.useStaticRuntime:
ret += "-lgnustl_static "
else:
ret += "-lgnustl_shared "
elif self._stlVersion == "stlport":
ret += "-L\"{}\" ".format(os.path.join(
self._ndkHome,
"sources",
"cxx-stl",
"stlport",
"libs",
project.outputArchitecture)
)
if project.useStaticRuntime:
ret += "-lstlport_static "
else:
ret += "-lstlport_shared "
elif self._stlVersion == "libc++":
ret += "-L\"{}\" ".format(os.path.join(
self._ndkHome,
"sources",
"cxx-stl",
"llvm-libc++",
"libs",
project.outputArchitecture)
)
if project.useStaticRuntime:
ret += "-lc++_static "
else:
ret += "-lc++_shared "
ret += "--sysroot \"{}\"".format(self._sysRootDir)
return ret
def GetLinkCommand( self, project, outputFile, objList ):
self._setupForProject( project )
linkFile = os.path.join(self._project_settings.csbuildDir, "{}.cmd".format(self._project_settings.name))
data = " ".join( objList )
if sys.version_info >= (3, 0):
data = data.encode("utf-8")
file_mode = 438 # Octal 0666
flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
if platform.system() == "Windows":
flags |= os.O_NOINHERIT
fd = os.open(linkFile, flags, file_mode)
os.write(fd, data)
os.fsync(fd)
os.close(fd)
if project.type == csbuild.ProjectType.StaticLibrary:
return "\"{}\" rcs {} {}".format( self._ar, outputFile, " ".join( objList ) )
else:
if project.hasCppFiles:
cmd = project.activeToolchain.Compiler()._settingsOverrides["cxx"]
else:
cmd = project.activeToolchain.Compiler()._settingsOverrides["cc"]
libDir = os.path.join( self._ndkHome, "platforms", "android-{}".format(self._targetSdkVersion), "arch-{}".format(self._getSimplifiedArch(project)), "usr", "lib")
if self.isClang:
crtbegin = os.path.join(project.objDir, "crtbegin_so.o")
if not os.access(crtbegin, os.F_OK):
symlink(os.path.join(libDir, "crtbegin_so.o"), crtbegin)
crtend = os.path.join(project.objDir, "crtend_so.o")
if not os.access(crtend, os.F_OK):
symlink(os.path.join(libDir, "crtend_so.o"), crtend)
return "\"{}\" {}-o{} {} {} {}{}{} {} {}-g{} -O{} {} {} {} {} -L\"{}\"".format(
cmd,
"-pg " if project.profile else "",
outputFile,
"@{}".format(linkFile),
"-Wl,--no-as-needed -Wl,--start-group" if not self.strictOrdering else "",
self._getLibraries( project.libraries ),
self._getStaticLibraries( project.staticLibraries ),
self._getSharedLibraries( project.sharedLibraries ),
"-Wl,--end-group" if not self.strictOrdering else "",
self._getLibraryDirs( project.libraryDirs, True ),
project.debugLevel,
project.optLevel,
"-shared" if project.type == csbuild.ProjectType.SharedLibrary else "",
" ".join( project.linkerFlags ),
self._getSystemLibDirs(project),
self._getTargetTriple(project),
libDir
)
def FindLibrary( self, project, library, libraryDirs, force_static, force_shared ):
success = True
out = ""
self._setupForProject( project )
nullOut = os.path.join(project.csbuildDir, "null")
try:
cmd = [self._ld, "-o", nullOut, "--verbose",
"-static" if force_static else "-shared" if force_shared else "", "-l{}".format( library ),
"-L", os.path.join( self._ndkHome, "platforms", "android-{}".format(self._targetSdkVersion), "arch-{}".format(self._getSimplifiedArch(project)), "usr", "lib")]
cmd += shlex.split( self._getLibraryDirs( libraryDirs, False ), posix=(platform.system() != "Windows") )
if _shared_globals.show_commands:
print(" ".join(cmd))
out = subprocess.check_output( cmd, stderr = subprocess.STDOUT )
except subprocess.CalledProcessError as e:
out = e.output
success = False
finally:
if os.access(nullOut, os.F_OK):
os.remove(nullOut)
if sys.version_info >= (3, 0):
RMatch = re.search( "attempt to open (.*) succeeded".encode( 'utf-8' ), out, re.I )
else:
RMatch = re.search( "attempt to open (.*) succeeded", out, re.I )
#Some libraries (such as -liberty) will return successful but don't have a file (internal to ld maybe?)
#In those cases we can probably assume they haven't been modified.
#Set the mtime to 0 and return success as long as ld didn't return an error code.
if RMatch is not None:
lib = RMatch.group( 1 )
if sys.version_info >= (3, 0):
self._actual_library_names[library] = os.path.basename(lib).decode('utf-8')
else:
self._actual_library_names[library] = os.path.basename(lib)
return lib
elif not success:
try:
cmd = [self._ld, "-o", nullOut, "--verbose",
"-static" if force_static else "-shared" if force_shared else "", "-l:{}".format( library ),
"-L", os.path.join( self._ndkHome, "platforms", "android-{}".format(self._targetSdkVersion), "arch-{}".format(self._getSimplifiedArch(project)), "usr", "lib")]
cmd += shlex.split( self._getLibraryDirs( libraryDirs, False ), posix=(platform.system() != "Windows") )
if _shared_globals.show_commands:
print(" ".join(cmd))
out = subprocess.check_output( cmd, stderr = subprocess.STDOUT )
except subprocess.CalledProcessError as e:
out = e.output
success = False
finally:
if os.access(nullOut, os.F_OK):
os.remove(nullOut)
if sys.version_info >= (3, 0):
RMatch = re.search( "attempt to open (.*) succeeded".encode( 'utf-8' ), out, re.I )
else:
RMatch = re.search( "attempt to open (.*) succeeded", out, re.I )
#Some libraries (such as -liberty) will return successful but don't have a file (internal to ld maybe?)
#In those cases we can probably assume they haven't been modified.
#Set the mtime to 0 and return success as long as ld didn't return an error code.
if RMatch is not None:
lib = RMatch.group( 1 )
if sys.version_info >= (3, 0):
self._actual_library_names[library] = os.path.basename(lib).decode('utf-8')
else:
self._actual_library_names[library] = os.path.basename(lib)
return lib
elif not success:
return None
def prePrepareBuildStep(self, project):
#Everything on Android has to build as a shared library
project.metaType = project.type
if project.type == csbuild.ProjectType.Application:
project.type = csbuild.ProjectType.SharedLibrary
if not project.outputName.startswith("lib"):
project.outputName = "lib{}".format(project.outputName)
def postBuildStep(self, project):
log.LOG_BUILD("Generating APK for {} ({} {}/{})".format(project.outputName, project.targetName, project.outputArchitecture, project.activeToolchainName))
if project.metaType != csbuild.ProjectType.Application:
return
appDir = os.path.join(project.csbuildDir, "apk", project.name)
if os.access(appDir, os.F_OK):
shutil.rmtree(appDir)
androidTool = os.path.join(self._sdkHome, "tools", "android.bat" if platform.system() == "Windows" else "android.sh")
fd = subprocess.Popen(
[
androidTool, "create", "project",
"--path", appDir,
"--target", "android-{}".format(self._targetSdkVersion),
"--name", project.name,
"--package", "com.{}.{}".format(self._packageName, project.name),
"--activity", project.name if self._activityName is None else self._activityName
],
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE
)
output, errors = fd.communicate()
if fd.returncode != 0:
log.LOG_ERROR("Android tool failed to generate project skeleton!\n{}".format(output))
return
libDir = ""
if project.outputArchitecture == "x86":
libDir = "x86"
elif project.outputArchitecture == "mips":
libDir = "mips"
elif project.outputArchitecture == "armeabi":
libDir = "armeabi"
elif project.outputArchitecture == "armeabi-v7a-hard":
libDir = "armeabi-v7a-hard"
else:
libDir = "armeabi-v7a"
libDir = os.path.join(appDir, "libs", libDir)
if not os.access(libDir, os.F_OK):
os.makedirs(libDir)
for library in project.libraryLocations:
#don't copy android system libraries
if library.startswith(self._ndkHome):
continue
shutil.copyfile(library, os.path.join(libDir, os.path.basename(library)))
for dep in project.linkDepends:
depProj = _shared_globals.projects[dep]
libFile = os.path.join(depProj.outputDir, depProj.outputName)
shutil.copyfile(libFile, os.path.join(libDir, os.path.basename(libFile)))
shutil.copyfile(os.path.join(project.outputDir, project.outputName), os.path.join(libDir, os.path.basename(project.outputName)))
with open(os.path.join(appDir, "AndroidManifest.xml"), "w") as f:
f.write("<manifest xmlns:android=\"http://schemas.android.com/apk/res/android\"\n")
f.write(" package=\"com.csbuild.autopackage.{}\"\n".format(project.name))
f.write(" android:versionCode=\"1\"\n")
f.write(" android:versionName=\"1.0\">\n")
f.write(" <uses-sdk android:minSdkVersion=\"{}\" android:targetSdkVersion=\"{}\"/>\n".format(self._minSdkVersion, self._targetSdkVersion))
for feature in self._usedFeatures:
#example: android:glEsVersion=\"0x00020000\"
f.write(" <uses-feature {}></uses-feature>".format(feature))
f.write(" <application android:label=\"{}\" android:hasCode=\"false\">\n".format(project.name))
f.write(" <activity android:name=\"android.app.NativeActivity\"\n")
f.write(" android:label=\"{}\">\n".format(project.name))
f.write(" android:configChanges=\"orientation|keyboardHidden\">\n")
f.write(" <meta-data android:name=\"android.app.lib_name\" android:value=\"{}\"/>\n".format(project.outputName[3:-3]))
f.write(" <intent-filter>\n")
f.write(" <action android:name=\"android.intent.action.MAIN\"/>\n")
f.write(" <category android:name=\"android.intent.category.LAUNCHER\"/>\n")
f.write(" </intent-filter>\n")
f.write(" </activity>\n")
f.write(" </application>\n")
f.write("</manifest>\n")
if project.optLevel != csbuild.OptimizationLevel.Max:
antBuildType = "debug"
else:
antBuildType = "release"
fd = subprocess.Popen(
[
os.path.join(self._antHome, "bin", "ant.bat" if platform.system() == "Windows" else "ant.sh"),
antBuildType
],
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE,
cwd=appDir
)
output, errors = fd.communicate()
if fd.returncode != 0:
log.LOG_ERROR("Ant build failed!\n{}".format(output))
return
appNameBase = "{}-{}".format(project.outputName[3:-3], antBuildType)
appName = appNameBase + ".apk"
appStartLoc = os.path.join(appDir, "bin", appName)
if antBuildType == "release":
appNameUnsigned = appNameBase + "-unsigned.apk"
appUnsignedLoc = os.path.join(appDir, "bin", appNameUnsigned)
with open(self._keystorePwFile, "r") as f:
storePass = f.read().strip()
if os.access(self._keyPwFile, os.F_OK):
with open(self._keyPwFile, "r") as f:
keyPass = f.read().strip()
else:
keyPass = storePass
log.LOG_BUILD("Signing {} with key {}...".format(appName, self._keystoreLocation))
jarsigner = os.path.join(self._javaHome, "bin", "jarsigner{}".format(".exe" if platform.system() == "Windows" else ""))
fd = subprocess.Popen(
[
jarsigner,
"-sigalg", "SHA1withRSA",
"-digestalg", "SHA1",
"-keystore", self._keystoreLocation,
"-storepass", storePass,
"-keypass", keyPass,
appUnsignedLoc,
self._keystoreAlias
],
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE,
cwd=appDir
)
output, errors = fd.communicate()
if fd.returncode != 0:
log.LOG_ERROR("Signing failed!\n{}".format(output))
return
log.LOG_BUILD("Zip-Aligning {}...".format(appName, self._keystoreLocation))
zipalign = os.path.join(self._sdkHome, "tools", "zipalign{}".format(".exe" if platform.system() == "Windows" else ""))
fd = subprocess.Popen(
[
zipalign,
"-v", "4",
appUnsignedLoc,
appStartLoc
],
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE,
cwd=appDir
)
output, errors = fd.communicate()
if fd.returncode != 0:
log.LOG_ERROR("Zipalign failed!\n{}".format(output))
return
appEndLoc = os.path.join(project.outputDir, appName)
if os.access(appEndLoc, os.F_OK):
os.remove(appEndLoc)
shutil.move(appStartLoc, project.outputDir)
log.LOG_BUILD("Finished generating APK for {} ({} {}/{})".format(project.outputName, project.targetName, project.outputArchitecture, project.activeToolchainName))
|
|
# -*- coding: utf-8 -*-
# this file is released under public domain and you can use without limitations
#########################################################################
## This is a sample controller
## - index is the default action of any application
## - user is required for authentication and authorization
## - download is for downloading files uploaded in the db (does streaming)
#########################################################################
def readable_id(id):
return id.replace('_', ' ').title()
class Bijection(dict):
def __init__(self, mapping=None):
super(Bijection, self).__init__()
if mapping:
for key in mapping:
self[key] = mapping[key]
def __setitem__(self, key, value):
super(Bijection, self).__setitem__(key, value)
super(Bijection, self).__setitem__(value, key)
def __delitem__(self, key):
value = self[key]
super(Bijection, self).__delitem__(key)
super(Bijection, self).__delitem__(value)
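# Illustrative sketch (values here are made up): every write to a Bijection keeps
# the reverse mapping in sync, and deleting either side removes both entries.
#
#     colors = Bijection({'red': 1})
#     colors['red']   # -> 1
#     colors[1]       # -> 'red'
#     del colors[1]   # drops both 1 and 'red'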
class a62:
mapping = Bijection({j: i for j, i in zip(
range(62),
'0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
)})
base = 62
@classmethod
def encode(cls, value, length):
return ''.join([
cls.mapping[x] for x in (
(value // cls.base**i) % cls.base
for i in range(length - 1, -1, -1)
)
])
@classmethod
def decode(cls, text):
return sum(
cls.mapping[c] * cls.base**(len(text)-i-1)
for i, c in enumerate(text)
)
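# Illustrative sketch (example values): a62 turns an integer into a fixed-width
# base-62 string and back, keeping URL arguments short and alphanumeric.
#
#     a62.encode(4097, 4)   # -> '0145'  (1*62**2 + 4*62 + 5 == 4097)
#     a62.decode('0145')    # -> 4097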
class ListView:
def __init__(self, table, names, query=None, orderby=None, title=None,
controller=None, function=None):
self.table = table
self.names = names
self.query = query or (self.table.id > 0)
self.orderby = orderby or self.table.id
self.title = title or readable_id(table._id.tablename)
self.controller = controller or request.controller
self.function = function or request.function
def headers(self):
for name in self.names:
yield readable_id(name) if name != 'id' else XML(' ')
def columns(self):
for name in self.names:
yield self.table[name]
def rows(self):
properties = dict(
orderby=self.orderby,
)
return db(self.query).iterselect(*self.columns(), **properties)
def view_url(self, id):
return URL(self.controller, self.function, args=[id])
def edit_url(self, id):
return URL(self.controller, self.function, args=[id, 'edit'],
vars={'next': request.env.path_info})
def delete_url(self, id):
return URL(self.controller, self.function, args=[id, 'delete'],
vars={'next': request.env.path_info})
def new_url(self):
return URL(self.controller, self.function, args=['new'])
class Form:
def __init__(self, table, record=None, default_redirect=None):
self.form = SQLFORM(
table, record,
fields=[field.name for field in table if field.name not in
{'created', 'created_by', 'modified', 'modified_by'}],
)
self.default_redirect = default_redirect
def process(self):
if self.form.process().accepted:
redirect(request.get_vars.next or
self.default_redirect(self.form.vars))
return self.form
class Itemview:
def __init__(self, table, record):
self.table = table
self.record = record
def related(self):
for field in self.table._extra['related']:
table = field.table
names = table._extra['columns']
query = (field == self.record.id)
yield ListView(table, names, query)
class Delegate(dict):
def __init__(self, function, reference, verb=None):
self.function = function
self.table = mb.handlers[self.function]
self.list_orderby = self.table._extra['primary']
self.list_columns = self.table._extra['columns']
dict.__init__(self,
display=self.display,
url=self.url,
function=self.function,
)
record = self.table(reference)
if record and verb is None:
response.view = 'itemview.html'
self['itemview'] = self.build_itemview(record)
elif record is None and verb is None:
response.view = 'listview.html'
self['listview'] = self.build_listview()
elif (record is None and verb == 'new') or verb == 'edit':
response.view = 'form.html'
self['form'] = self.build_form(record)
elif record and verb == 'delete':
response.view = 'delete.html'
self['form'] = self.build_delete()
else:
raise HTTP(404)
def display(self, field, row, primary_reference=True):
text = row[field]
link = ''
type, is_reference, table_name = field.type.partition(' ')
if type == 'reference' and text is not None:
table = db[table_name]
reference = text
text = (table._format(table[text]) if callable(table._format)
else table._format % table[text].as_dict())
if 'urls' in table._extra:
link = self.url(table._extra['function'], reference)
elif field.represent is not None:
text = field.represent(text, row)
if text is None and hasattr(field, 'extra') and 'null_value' in field.extra:
text = field.extra['null_value']
if primary_reference:
if hasattr(field, 'extra') and field.extra.get('primary'):
link = self.url(field.table._extra['function'], row.id)
if link:
return A(text, _title=text, _href=link, _class=type)
else:
return SPAN(text, _title=text, _class=type)
def default_redirect(self, vars):
return self.url(self.function, vars.id)
def build_itemview(self, record):
return Itemview(self.table, record)
def build_listview(self):
return ListView(self.table, self.list_columns, orderby=self.list_orderby)
def build_form(self, record):
return Form(self.table, record, default_redirect=self.default_redirect).process()
def build_delete(self):
return
@classmethod
def url(cls, table, reference=None, verb=None):
args = [a62.encode(mb.handlers[table]._extra['index'], 4)]
if reference is not None:
args[0] += a62.encode(reference, 10)
if verb:
args.append(verb)
return URL(r=request, args=args)
@auth.requires_login()
def index():
tables = Bijection({table._extra['index']: key
for key, table in mb.handlers.items()})
first = request.args(0)
if first is None:
redirect(Delegate.url('object'))
if len(first) not in (4, 14):
raise HTTP(404)
function = tables.get(a62.decode(first[:4]), 'not found')
reference = a62.decode(first[4:]) if first[4:] else None
verb = {
None: None,
'e': 'edit',
'd': 'delete',
'n': 'new',
}[request.args(1)]
response.flash = CAT(
P('function: ', function),
P('reference: ', reference),
P('verb: ', verb),
)
return Delegate(function, reference, verb)
def user():
"""
exposes:
http://..../[app]/default/user/login
http://..../[app]/default/user/logout
http://..../[app]/default/user/register
http://..../[app]/default/user/profile
http://..../[app]/default/user/retrieve_password
http://..../[app]/default/user/change_password
http://..../[app]/default/user/bulk_register
use @auth.requires_login()
@auth.requires_membership('group name')
@auth.requires_permission('read','table name',record_id)
to decorate functions that need access control
also notice there is http://..../[app]/appadmin/manage/auth to allow administrator to manage users
"""
return dict(form=auth())
@cache.action()
def download():
"""
allows downloading of uploaded files
http://..../[app]/default/download/[filename]
"""
return response.download(request, db)
'''
def call():
"""
exposes services. for example:
http://..../[app]/default/call/jsonrpc
decorate with @services.jsonrpc the functions to expose
supports xml, json, xmlrpc, jsonrpc, amfrpc, rss, csv
"""
return service()
'''
def debug():
return dict(debug=dict(
user=auth.user,
))
|
|
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
import urllib
from oslo_config import cfg
import six.moves.urllib.parse as urlparse
import webob
from cinder.api.openstack import wsgi
from cinder.api import xmlutil
from cinder.i18n import _
from cinder.openstack.common import log as logging
from cinder import utils
api_common_opts = [
cfg.IntOpt('osapi_max_limit',
default=1000,
help='The maximum number of items that a collection '
'resource returns in a single response'),
cfg.StrOpt('osapi_volume_base_URL',
default=None,
help='Base URL that will be presented to users in links '
'to the OpenStack Volume API',
deprecated_name='osapi_compute_link_prefix'),
]
CONF = cfg.CONF
CONF.register_opts(api_common_opts)
LOG = logging.getLogger(__name__)
XML_NS_V1 = 'http://docs.openstack.org/api/openstack-block-storage/1.0/content'
XML_NS_V2 = 'http://docs.openstack.org/api/openstack-block-storage/2.0/content'
# Regex that matches alphanumeric characters, periods, hyphens,
# colons and underscores:
# ^ assert position at start of the string
# [\w\.\-\:\_] match expression
# $ assert position at end of the string
VALID_KEY_NAME_REGEX = re.compile(r"^[\w\.\-\:\_]+$", re.UNICODE)
def validate_key_names(key_names_list):
"""Validate each item of the list to match key name regex."""
for key_name in key_names_list:
if not VALID_KEY_NAME_REGEX.match(key_name):
return False
return True
def get_pagination_params(request):
"""Return marker, limit tuple from request.
:param request: `wsgi.Request` possibly containing 'marker' and 'limit'
GET variables. 'marker' is the id of the last element
the client has seen, and 'limit' is the maximum number
of items to return. If 'limit' is not specified, 0, or
> max_limit, we default to max_limit. Negative values
for either marker or limit will cause
exc.HTTPBadRequest() exceptions to be raised.
"""
params = {}
if 'limit' in request.GET:
params['limit'] = _get_limit_param(request)
if 'marker' in request.GET:
params['marker'] = _get_marker_param(request)
return params
def _get_limit_param(request):
"""Extract integer limit from request or fail."""
try:
limit = int(request.GET['limit'])
except ValueError:
msg = _('limit param must be an integer')
raise webob.exc.HTTPBadRequest(explanation=msg)
if limit < 0:
msg = _('limit param must be positive')
raise webob.exc.HTTPBadRequest(explanation=msg)
return limit
def _get_marker_param(request):
"""Extract marker id from request or fail."""
return request.GET['marker']
def limited(items, request, max_limit=CONF.osapi_max_limit):
"""Return a slice of items according to requested offset and limit.
:param items: A sliceable entity
:param request: ``wsgi.Request`` possibly containing 'offset' and 'limit'
GET variables. 'offset' is where to start in the list,
and 'limit' is the maximum number of items to return. If
'limit' is not specified, 0, or > max_limit, we default
to max_limit. Negative values for either offset or limit
will cause exc.HTTPBadRequest() exceptions to be raised.
:kwarg max_limit: The maximum number of items to return from 'items'
"""
try:
offset = int(request.GET.get('offset', 0))
except ValueError:
msg = _('offset param must be an integer')
raise webob.exc.HTTPBadRequest(explanation=msg)
try:
limit = int(request.GET.get('limit', max_limit))
except ValueError:
msg = _('limit param must be an integer')
raise webob.exc.HTTPBadRequest(explanation=msg)
if limit < 0:
msg = _('limit param must be positive')
raise webob.exc.HTTPBadRequest(explanation=msg)
if offset < 0:
msg = _('offset param must be positive')
raise webob.exc.HTTPBadRequest(explanation=msg)
limit = min(max_limit, limit or max_limit)
range_end = offset + limit
return items[offset:range_end]
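# Worked example (hypothetical request values): with max_limit=1000, a request
# carrying offset=10 and limit=5 returns items[10:15]; a missing limit or
# limit=0 falls back to max_limit because of `limit or max_limit`.
#
#     limited(list(range(100)), request)   # offset=10, limit=5 -> [10, 11, 12, 13, 14]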
def limited_by_marker(items, request, max_limit=CONF.osapi_max_limit):
"""Return a slice of items according to the requested marker and limit."""
params = get_pagination_params(request)
limit = params.get('limit', max_limit)
marker = params.get('marker')
limit = min(max_limit, limit)
start_index = 0
if marker:
start_index = -1
for i, item in enumerate(items):
if 'flavorid' in item:
if item['flavorid'] == marker:
start_index = i + 1
break
elif item['id'] == marker or item.get('uuid') == marker:
start_index = i + 1
break
if start_index < 0:
msg = _('marker [%s] not found') % marker
raise webob.exc.HTTPBadRequest(explanation=msg)
range_end = start_index + limit
return items[start_index:range_end]
def remove_version_from_href(href):
"""Removes the first api version from the href.
Given: 'http://www.cinder.com/v1.1/123'
Returns: 'http://www.cinder.com/123'
Given: 'http://www.cinder.com/v1.1'
Returns: 'http://www.cinder.com'
"""
parsed_url = urlparse.urlsplit(href)
url_parts = parsed_url.path.split('/', 2)
# NOTE: this should match vX.X or vX
expression = re.compile(r'^v([0-9]+|[0-9]+\.[0-9]+)(/.*|$)')
if expression.match(url_parts[1]):
del url_parts[1]
new_path = '/'.join(url_parts)
if new_path == parsed_url.path:
msg = 'href %s does not contain version' % href
LOG.debug(msg)
raise ValueError(msg)
parsed_url = list(parsed_url)
parsed_url[2] = new_path
return urlparse.urlunsplit(parsed_url)
class ViewBuilder(object):
"""Model API responses as dictionaries."""
_collection_name = None
def _get_links(self, request, identifier):
return [{"rel": "self",
"href": self._get_href_link(request, identifier), },
{"rel": "bookmark",
"href": self._get_bookmark_link(request, identifier), }]
def _get_next_link(self, request, identifier, collection_name):
"""Return href string with proper limit and marker params."""
params = request.params.copy()
params["marker"] = identifier
prefix = self._update_link_prefix(request.application_url,
CONF.osapi_volume_base_URL)
url = os.path.join(prefix,
request.environ["cinder.context"].project_id,
collection_name)
return "%s?%s" % (url, urllib.urlencode(params))
def _get_href_link(self, request, identifier):
"""Return an href string pointing to this object."""
prefix = self._update_link_prefix(request.application_url,
CONF.osapi_volume_base_URL)
return os.path.join(prefix,
request.environ["cinder.context"].project_id,
self._collection_name,
str(identifier))
def _get_bookmark_link(self, request, identifier):
"""Create a URL that refers to a specific resource."""
base_url = remove_version_from_href(request.application_url)
base_url = self._update_link_prefix(base_url,
CONF.osapi_volume_base_URL)
return os.path.join(base_url,
request.environ["cinder.context"].project_id,
self._collection_name,
str(identifier))
def _get_collection_links(self, request, items, collection_name,
id_key="uuid"):
"""Retrieve 'next' link, if applicable.
The next link is included if:
1) 'limit' param is specified and equals the number of volumes.
2) 'limit' param is specified but it exceeds CONF.osapi_max_limit,
in this case the number of volumes is CONF.osapi_max_limit.
3) 'limit' param is NOT specified but the number of volumes is
CONF.osapi_max_limit.
:param request: API request
:param items: List of collection items
:param collection_name: Name of collection, used to generate the
next link for a pagination query
:param id_key: Attribute key used to retrieve the unique ID, used
to generate the next link marker for a pagination query
:returns: links
"""
links = []
max_items = min(
int(request.params.get("limit", CONF.osapi_max_limit)),
CONF.osapi_max_limit)
if max_items and max_items == len(items):
last_item = items[-1]
if id_key in last_item:
last_item_id = last_item[id_key]
else:
last_item_id = last_item["id"]
links.append({
"rel": "next",
"href": self._get_next_link(request, last_item_id,
collection_name),
})
return links
def _update_link_prefix(self, orig_url, prefix):
if not prefix:
return orig_url
url_parts = list(urlparse.urlsplit(orig_url))
prefix_parts = list(urlparse.urlsplit(prefix))
url_parts[0:2] = prefix_parts[0:2]
return urlparse.urlunsplit(url_parts)
class MetadataDeserializer(wsgi.MetadataXMLDeserializer):
def deserialize(self, text):
dom = utils.safe_minidom_parse_string(text)
metadata_node = self.find_first_child_named(dom, "metadata")
metadata = self.extract_metadata(metadata_node)
return {'body': {'metadata': metadata}}
class MetaItemDeserializer(wsgi.MetadataXMLDeserializer):
def deserialize(self, text):
dom = utils.safe_minidom_parse_string(text)
metadata_item = self.extract_metadata(dom)
return {'body': {'meta': metadata_item}}
class MetadataXMLDeserializer(wsgi.XMLDeserializer):
def extract_metadata(self, metadata_node):
"""Marshal the metadata attribute of a parsed request."""
if metadata_node is None:
return {}
metadata = {}
for meta_node in self.find_children_named(metadata_node, "meta"):
key = meta_node.getAttribute("key")
metadata[key] = self.extract_text(meta_node)
return metadata
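# Illustrative sketch (hypothetical payload): given
#     <metadata><meta key="purpose">backup</meta></metadata>
# extract_metadata returns {'purpose': 'backup'}; a missing <metadata> node
# yields an empty dict.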
def _extract_metadata_container(self, datastring):
dom = utils.safe_minidom_parse_string(datastring)
metadata_node = self.find_first_child_named(dom, "metadata")
metadata = self.extract_metadata(metadata_node)
return {'body': {'metadata': metadata}}
def create(self, datastring):
return self._extract_metadata_container(datastring)
def update_all(self, datastring):
return self._extract_metadata_container(datastring)
def update(self, datastring):
dom = utils.safe_minidom_parse_string(datastring)
metadata_item = self.extract_metadata(dom)
return {'body': {'meta': metadata_item}}
metadata_nsmap = {None: xmlutil.XMLNS_V11}
class MetaItemTemplate(xmlutil.TemplateBuilder):
def construct(self):
sel = xmlutil.Selector('meta', xmlutil.get_items, 0)
root = xmlutil.TemplateElement('meta', selector=sel)
root.set('key', 0)
root.text = 1
return xmlutil.MasterTemplate(root, 1, nsmap=metadata_nsmap)
class MetadataTemplateElement(xmlutil.TemplateElement):
def will_render(self, datum):
return True
class MetadataTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = MetadataTemplateElement('metadata', selector='metadata')
elem = xmlutil.SubTemplateElement(root, 'meta',
selector=xmlutil.get_items)
elem.set('key', 0)
elem.text = 1
return xmlutil.MasterTemplate(root, 1, nsmap=metadata_nsmap)
|
|
# -*- encoding: UTF-8 -*-
"""Mardown Preview main."""
import sublime
import sublime_plugin
import os
import sys
import traceback
import tempfile
import re
import json
import time
import codecs
import cgi
import yaml
import textwrap
from collections import OrderedDict
from urllib.request import urlopen
from urllib.error import HTTPError, URLError
from pygments.formatters import get_formatter_by_name
from . import desktop
from .markdown_settings import Settings
from .markdown_wrapper import StMarkdown as Markdown
_CANNOT_CONVERT = 'cannot convert markdown'
_EXT_CONFIG = "Packages/Markdown Preview/markdown_preview.yml"
PYGMENTS_LOCAL = {
'github': 'css/pygments/github.css',
'github2014': 'css/pygments/github2014.css'
}
RELOAD_JS = """<script async>
document.write(
'<script src="http://' +
(location.host || 'localhost').split(':')[0] +
':%d/livereload.js?snipver=1"></' +
'script>')
</script>
"""
def yaml_load(stream, loader=yaml.Loader, object_pairs_hook=OrderedDict):
"""
Custom yaml loader.
Make all YAML dictionaries load as ordered Dicts.
http://stackoverflow.com/a/21912744/3609487
Load all strings as unicode.
http://stackoverflow.com/a/2967461/3609487
"""
def construct_mapping(loader, node):
"""Convert to ordered dict."""
loader.flatten_mapping(node)
return object_pairs_hook(loader.construct_pairs(node))
def construct_yaml_str(self, node):
"""Override the default string handling function to always return unicode objects."""
return self.construct_scalar(node)
class Loader(loader):
"""Custom Loader."""
Loader.add_constructor(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
construct_mapping
)
Loader.add_constructor(
'tag:yaml.org,2002:str',
construct_yaml_str
)
return yaml.load(stream, Loader)
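# Illustrative sketch (hypothetical document): insertion order is preserved
# instead of being sorted, because mappings are built with OrderedDict.
#
#     yaml_load("zebra: 1\napple: 2")
#     # -> OrderedDict([('zebra', 1), ('apple', 2)])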
def request_url(url, data, headers):
"""Request URL."""
import urllib.request
return urllib.request.Request(url, data=data, headers=headers, method='POST')
def get_temp_preview_path(view):
"""Return a permanent full path of the temp markdown preview file."""
settings = sublime.load_settings('MarkdownPreview.sublime-settings')
tmp_filename = '%s.html' % view.id()
if settings.get('path_tempfile'):
if os.path.isabs(settings.get('path_tempfile')): # absolute path or not
tmp_dir = settings.get('path_tempfile')
else:
tmp_dir = os.path.join(os.path.dirname(view.file_name()), settings.get('path_tempfile'))
else:
tmp_dir = tempfile.gettempdir()
if not os.path.isdir(tmp_dir): # create dir if not exists
os.makedirs(tmp_dir)
tmp_fullpath = os.path.join(tmp_dir, tmp_filename)
return tmp_fullpath
def save_utf8(filename, text):
"""Save to UTF8 file."""
with codecs.open(filename, 'w', encoding='utf-8')as f:
f.write(text)
def load_utf8(filename):
"""Load UTF8 file."""
with codecs.open(filename, 'r', encoding='utf-8') as f:
return f.read()
def load_resource(name):
"""Return file contents for files within the package root folder."""
try:
return sublime.load_resource('Packages/Markdown Preview/{0}'.format(name))
except Exception:
print("Error while load_resource('%s')" % name)
traceback.print_exc()
return ''
def exists_resource(resource_file_path):
"""Check if resource exists."""
filename = os.path.join(os.path.dirname(sublime.packages_path()), resource_file_path)
return os.path.isfile(filename)
def new_view(window, text, scratch=False):
"""
Create a new view and paste text content.
Return the new view, which can optionally be set as scratch.
"""
new_view = window.new_file()
if scratch:
new_view.set_scratch(True)
new_view.run_command('append', {
'characters': text,
})
return new_view
def get_references(file_name, encoding="utf-8"):
"""Get footnote and general references from outside source."""
text = ''
if file_name is not None:
if os.path.exists(file_name):
try:
with codecs.open(file_name, "r", encoding=encoding) as f:
text = f.read()
except Exception:
print(traceback.format_exc())
else:
print("Could not find reference file %s!", file_name)
return text
class MarkdownPreviewListener(sublime_plugin.EventListener):
"""Auto update the output html if markdown file has already been converted once."""
def on_post_save(self, view):
"""Handle auto-reload on save."""
settings = sublime.load_settings('MarkdownPreview.sublime-settings')
if settings.get('enable_autoreload', True):
filetypes = settings.get('markdown_filetypes')
file_name = view.file_name()
if filetypes and file_name is not None and file_name.endswith(tuple(filetypes)):
temp_file = get_temp_preview_path(view)
if os.path.isfile(temp_file):
# reexec markdown conversion
# todo : check if browser still opened and reopen it if needed
view.run_command('markdown_preview', {
'target': 'disk',
'parser': view.settings().get('parser')
})
sublime.status_message('Markdown preview file updated')
class MarkdownCheatsheetCommand(sublime_plugin.TextCommand):
"""Open our markdown cheat sheet."""
def run(self, edit):
"""Execute command."""
lines = '\n'.join(load_resource('samples/sample.md').splitlines())
view = new_view(self.view.window(), lines, scratch=True)
view.set_name("Markdown Cheatsheet")
# Set syntax file
syntax_files = [
"Packages/Markdown Extended/Syntaxes/Markdown Extended.tmLanguage",
"Packages/Markdown/Markdown.tmLanguage"
]
for file in syntax_files:
if exists_resource(file):
view.set_syntax_file(file)
break # Done if any syntax is set.
sublime.status_message('Markdown cheat sheet opened')
class Compiler(object):
"""Base compiler that does the markdown converting."""
default_css = "css/markdown.css"
def isurl(self, css_name):
"""Check if URL."""
match = re.match(r'https?://', css_name)
if match:
return True
return False
def get_default_css(self):
"""Locate the correct CSS with the 'css' setting."""
css_list = self.settings.get('css', ['default'])
if not isinstance(css_list, list):
css_list = [css_list]
css_text = []
for css_name in css_list:
if css_name.startswith('res://'):
internal_file = os.path.join(sublime.packages_path(), os.path.normpath(css_name[6:]))
if os.path.exists(internal_file):
css_text.append("<style>%s</style>" % load_utf8(internal_file))
else:
css_text.append("<style>%s</style>" % sublime.load_resource('Packages/' + css_name[6:]))
elif self.isurl(css_name):
# link to remote URL
css_text.append("<link href='%s' rel='stylesheet' type='text/css'>" % css_name)
elif os.path.isfile(os.path.expanduser(css_name)):
# use custom CSS file
css_text.append("<style>%s</style>" % load_utf8(os.path.expanduser(css_name)))
elif css_name == 'default':
# use parser CSS file
css_text.append("<style>%s</style>" % load_resource(self.default_css))
return '\n'.join(css_text)
def get_override_css(self):
"""Handles allow_css_overrides setting."""
if self.settings.get('allow_css_overrides'):
filename = self.view.file_name()
filetypes = self.settings.get('markdown_filetypes')
if filename and filetypes:
for filetype in filetypes:
if filename.endswith(filetype):
css_filename = filename.rpartition(filetype)[0] + '.css'
if (os.path.isfile(css_filename)):
return "<style>%s</style>" % load_utf8(css_filename)
return ''
def get_stylesheet(self):
"""Return the correct CSS file based on parser and settings."""
return self.get_default_css() + self.get_override_css()
def get_javascript(self):
"""Return JavaScript."""
js_files = self.settings.get('js')
scripts = ''
if js_files is not None:
# Ensure string values become a list.
if isinstance(js_files, str):
js_files = [js_files]
# Only load scripts if we have a list.
if isinstance(js_files, list):
for js_file in js_files:
if js_file.startswith('res://'):
internal_file = os.path.join(sublime.packages_path(), os.path.normpath(js_file[6:]))
if os.path.exists(internal_file):
scripts += "<script>%s</script>" % load_utf8(internal_file)
else:
scripts += "<script>%s</script>" % sublime.load_resource('Packages/' + js_file[6:])
elif os.path.isabs(js_file):
# Load the script inline to avoid cross-origin.
scripts += "<script>%s</script>" % load_utf8(js_file)
else:
scripts += "<script type='text/javascript' src='%s'></script>" % js_file
return scripts
def get_highlight(self):
"""Base highlight method."""
return ''
def get_contents(self, wholefile=False):
"""Get contents or selection from view and optionally strip the YAML front matter."""
region = sublime.Region(0, self.view.size())
contents = self.view.substr(region)
if not wholefile:
# use selection if any
selection = self.view.substr(self.view.sel()[0])
if selection.strip() != '':
contents = selection
# Remove yaml front matter
if self.settings.get('strip_yaml_front_matter'):
frontmatter, contents = self.preprocessor_yaml_frontmatter(contents)
self.settings.apply_frontmatter(frontmatter)
references = self.settings.get('builtin').get('references', [])
for ref in references:
contents += get_references(ref)
# Strip CriticMarkup
if self.settings.get("strip_critic_marks", "accept") in ("accept", "reject"):
contents = self.preprocessor_criticmarkup(
contents, self.settings.get("strip_critic_marks", "accept") == "accept"
)
contents = self.parser_specific_preprocess(contents)
return contents
def parser_specific_preprocess(self, text):
"""Base parser specific preprocess method."""
return text
def preprocessor_yaml_frontmatter(self, text):
"""Get frontmatter from string."""
frontmatter = OrderedDict()
if text.startswith("---"):
m = re.search(r'^(-{3}\r?\n(?!\r?\n)(.*?)(?<=\n)(?:-{3}|\.{3})\r?\n)', text, re.DOTALL)
if m:
yaml_okay = True
try:
frontmatter = yaml_load(m.group(2))
if frontmatter is None:
frontmatter = OrderedDict()
# If we didn't get a dictionary, we don't want this as it isn't frontmatter.
assert isinstance(frontmatter, (dict, OrderedDict)), TypeError
except Exception:
# We had a parsing error. This is not the YAML we are looking for.
yaml_okay = False
frontmatter = OrderedDict()
traceback.print_exc()
if yaml_okay:
text = text[m.end(1):]
return frontmatter, text
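# Illustrative sketch (hypothetical buffer): for text starting with
#     ---
#     title: Demo
#     ---
#     # Heading
# the method returns (OrderedDict([('title', 'Demo')]), '# Heading\n'),
# i.e. the parsed front matter plus the markdown with the block stripped.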
def parser_specific_postprocess(self, text):
"""
Parser specific post process.
Override this to add parser specific post processing.
"""
return text
def postprocessor_pathconverter(self, source, image_convert, file_convert, absolute=False):
"""Convert paths to absolute or relative paths."""
from pymdownx.pathconverter import PathConverterPostprocessor
relative_path = ''
if not absolute:
if self.preview:
relative_path = get_temp_preview_path(self.view)
else:
relative_path = self.settings.get('builtin').get("destination")
if not relative_path:
mdfile = self.view.file_name()
if mdfile is not None and os.path.exists(mdfile):
relative_path = os.path.splitext(mdfile)[0] + '.html'
if relative_path:
relative_path = os.path.dirname(relative_path)
tags = []
if file_convert:
tags.extend(["script", "a", "link"])
if image_convert:
tags.append('img')
pathconv = PathConverterPostprocessor()
pathconv.config = {
"base_path": self.settings.get('builtin').get("basepath"),
"relative_path": relative_path,
"absolute": absolute,
"tags": ' '.join(tags)
}
return pathconv.run(source)
def postprocessor_base64(self, source):
"""Convert resources (currently images only) to base64."""
from pymdownx.b64 import B64Postprocessor
b64proc = B64Postprocessor()
b64proc.config = {'base_path': self.settings.get('builtin').get("basepath")}
return b64proc.run(source)
def postprocessor_simple(self, source):
"""Strip out ids and classes for a simplified HTML output."""
from pymdownx.striphtml import StripHtmlPostprocessor
strip_comments = True
strip_js_on_attributes = True
strip_attributes = ["id", "class", "style"]
striphtml = StripHtmlPostprocessor(strip_comments, strip_js_on_attributes, strip_attributes, None)
return striphtml.run(source)
def preprocessor_criticmarkup(self, source, accept):
"""Stip out multi-markdown critic marks. Accept changes by default."""
from pymdownx.critic import CriticViewPreprocessor, CriticStash, CRITIC_KEY
text = ''
mode = 'accept' if accept else 'reject'
critic_stash = CriticStash(CRITIC_KEY)
critic = CriticViewPreprocessor(critic_stash)
critic.config = {'mode': mode}
text = '\n'.join(critic.run(source.split('\n')))
return text
def convert_markdown(self, markdown_text):
"""Convert input markdown to HTML, with github or builtin parser."""
markdown_html = self.parser_specific_convert(markdown_text)
image_convert = self.settings.get("image_path_conversion", "absolute")
file_convert = self.settings.get("file_path_conversions", "absolute")
markdown_html = self.parser_specific_postprocess(markdown_html)
if "absolute" in (image_convert, file_convert):
markdown_html = self.postprocessor_pathconverter(
markdown_html,
image_convert == 'absolute',
file_convert == 'absolute',
True
)
if "relative" in (image_convert, file_convert):
markdown_html = self.postprocessor_pathconverter(
markdown_html,
image_convert == 'relative',
file_convert == 'relative',
False
)
if image_convert == "base64":
markdown_html = self.postprocessor_base64(markdown_html)
if self.settings.get("html_simple", False):
markdown_html = self.postprocessor_simple(markdown_html)
return markdown_html
def get_title(self):
"""Get HTML title."""
if self.meta_title is not None:
title = self.meta_title
else:
title = self.view.name()
if not title:
fn = self.view.file_name()
title = 'untitled' if not fn else os.path.splitext(os.path.basename(fn))[0]
return '<title>%s</title>' % cgi.escape(title)
def get_meta(self):
"""Get meta data."""
self.meta_title = None
meta = []
for k, v in self.settings.get("meta", {}).items():
if k == "title":
if isinstance(v, list):
if len(v) == 0:
v = ""
else:
v = v[0]
self.meta_title = str(v)
continue
if isinstance(v, list):
v = ','.join(v)
if v is not None:
meta.append(
'<meta name="%s" content="%s">' % (cgi.escape(k, True), cgi.escape(v, True))
)
return '\n'.join(meta)
def run(self, view, wholefile=False, preview=False):
"""Return full HTML and body HTML for view."""
self.settings = Settings('MarkdownPreview.sublime-settings', view.file_name())
self.preview = preview
self.view = view
contents = self.get_contents(wholefile)
body = self.convert_markdown(contents)
html_template = self.settings.get('html_template')
if html_template:
html_template = os.path.abspath(os.path.expanduser(html_template))
# use customized html template if given
if self.settings.get('html_simple', False):
html = body
elif html_template and os.path.exists(html_template):
head = ''
head += self.get_meta()
head += self.get_stylesheet()
head += self.get_javascript()
head += self.get_highlight()
head += self.get_title()
html = load_utf8(html_template)
html = html.replace('{{ HEAD }}', head, 1)
html = html.replace('{{ BODY }}', body, 1)
else:
html = '<!DOCTYPE html>'
html += '<html><head><meta charset="utf-8">'
html += self.get_meta()
html += self.get_stylesheet()
html += self.get_javascript()
html += self.get_highlight()
html += self.get_title()
html += '</head><body>'
html += '<article class="markdown-body">'
html += body
html += '</article>'
html += '</body>'
html += '</html>'
return html, body
class GithubCompiler(Compiler):
"""GitHub compiler."""
default_css = "css/github.css"
def curl_convert(self, data):
"""Use curl to send Markdown content through GitHub API."""
try:
import subprocess
# It looks like the text does NOT need to be escaped and
# surrounded with double quotes.
# Tested in ubuntu 13.10, python 2.7.5+
shell_safe_json = data.decode('utf-8')
curl_args = [
'curl',
'-H',
'Content-Type: application/json',
'-d',
shell_safe_json,
'https://api.github.com/markdown'
]
github_oauth_token = self.settings.get('github_oauth_token')
if github_oauth_token:
curl_args[1:1] = [
'-u',
github_oauth_token
]
markdown_html = subprocess.Popen(curl_args, stdout=subprocess.PIPE).communicate()[0].decode('utf-8')
return markdown_html
except subprocess.CalledProcessError:
sublime.error_message(
textwrap.dedent(
"""\
Cannot use github API to convert markdown. SSL is not included in your Python installation. \
And using curl didn't work either
"""
)
)
return None
def parser_specific_postprocess(self, html):
"""Run GitHub specific postprocesses."""
if self.settings.get("github_inject_header_ids", False):
html = self.postprocess_inject_header_id(html)
return html
def postprocess_inject_header_id(self, html):
"""Insert header ids when no anchors are present."""
from pymdownx.slugs import uslugify
unique = {}
re_header = re.compile(r'(?P<open><h([1-6])>)(?P<text>.*?)(?P<close></h\2>)', re.DOTALL)
def inject_id(m):
id = uslugify(m.group('text'), '-')
if id == '':
return m.group(0)
# Append a dash and number for uniqueness if needed
value = unique.get(id, None)
if value is None:
unique[id] = 1
else:
unique[id] += 1
id += "-%d" % value
return m.group('open')[:-1] + (' id="%s">' % id) + m.group('text') + m.group('close')
return re_header.sub(inject_id, html)
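# Illustrative sketch (assuming uslugify lowercases and hyphenates):
#     '<h2>Getting Started</h2>'  ->  '<h2 id="getting-started">Getting Started</h2>'
# A second 'Getting Started' header would receive id="getting-started-1".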
def get_github_response_from_exception(self, e):
"""Convert GitHub Response."""
body = json.loads(e.read().decode('utf-8'))
return 'GitHub\'s original response: (HTTP Status Code %s) "%s"' % (e.code, body['message'])
def parser_specific_convert(self, markdown_text):
"""Convert input markdown to HTML with github parser."""
markdown_html = _CANNOT_CONVERT
github_oauth_token = self.settings.get('github_oauth_token')
# use the github API
sublime.status_message('converting markdown with github API...')
github_mode = self.settings.get('github_mode', 'gfm')
data = {
"text": markdown_text,
"mode": github_mode
}
data = json.dumps(data).encode('utf-8')
try:
headers = {
'Content-Type': 'application/json'
}
if github_oauth_token:
headers['Authorization'] = "token %s" % github_oauth_token
url = "https://api.github.com/markdown"
sublime.status_message(url)
request = request_url(url, data, headers)
markdown_html = urlopen(request).read().decode('utf-8')
except HTTPError as e:
if e.code == 401:
sublime.error_message(
"GitHub API authentication failed. Please check your OAuth token.\n\n" +
self.get_github_response_from_exception(e)
)
elif e.code == 403: # Forbidden
sublime.error_message(
textwrap.dedent(
"""\
It seems like you have exceeded GitHub's API rate limit.
To continue using GitHub's markdown format with this package, log in to \
GitHub, then go to Settings > Personal access tokens > Generate new token, \
copy the token's value, and paste it in this package's user settings under the key \
'github_oauth_token'. Example:
{
"github_oauth_token": "xxxx...."
}
"""
) + self.get_github_response_from_exception(e)
)
else:
sublime.error_message(
"GitHub API responded in an unfriendly way!\n\n" +
self.get_github_response_from_exception(e)
)
except URLError:
# Maybe this is a Linux-install of ST which doesn't bundle with SSL support
# So let's try wrapping curl instead
markdown_html = self.curl_convert(data)
except Exception:
e = sys.exc_info()[1]
print(e)
traceback.print_exc()
sublime.error_message(
"Cannot use GitHub's API to convert Markdown. Please check your settings.\n\n" +
self.get_github_response_from_exception(e)
)
else:
sublime.status_message('converted markdown with github API successfully')
return markdown_html
class ExternalMarkdownCompiler(Compiler):
"""Compiler for other, external Markdown parsers."""
default_css = "css/markdown.css"
def __init__(self, parser):
"""Initialize."""
self.parser = parser
super(ExternalMarkdownCompiler, self).__init__()
def parser_specific_convert(self, markdown_text):
"""Convert Markdown with external parser."""
import subprocess
settings = sublime.load_settings("MarkdownPreview.sublime-settings")
binary = settings.get('markdown_binary_map', {})[self.parser]
if len(binary) and os.path.exists(binary[0]):
cmd = binary
sublime.status_message('converting markdown with %s...' % self.parser)
if sublime.platform() == "windows":
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
p = subprocess.Popen(
cmd, startupinfo=startupinfo,
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
else:
p = subprocess.Popen(
cmd,
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
for line in markdown_text.split('\n'):
p.stdin.write((line + '\n').encode('utf-8'))
markdown_html = p.communicate()[0].decode("utf-8")
if p.returncode:
# Log info to console
sublime.error_message("Could not convert file! See console for more info.")
print(markdown_html)
markdown_html = _CANNOT_CONVERT
else:
sublime.error_message("Cannot find % binary!" % self.binary)
markdown_html = _CANNOT_CONVERT
return markdown_html
class MarkdownCompiler(Compiler):
"""Python Markdown compiler."""
default_css = "css/markdown.css"
def set_highlight(self, pygments_style, css_class):
"""Set the Pygments css."""
if pygments_style:
style = None
if pygments_style not in PYGMENTS_LOCAL:
try:
style = get_formatter_by_name('html', style=pygments_style).get_style_defs(
''.join(['.' + x for x in css_class.split(' ') if x])
)
except Exception:
traceback.print_exc()
pygments_style = 'github'
if style is None:
style = load_resource(PYGMENTS_LOCAL[pygments_style]) % {
'css_class': ''.join(['.' + x for x in css_class.split(' ') if x])
}
self.pygments_style = '<style>%s</style>' % style
return pygments_style
def get_highlight(self):
"""Return the Pygments css if enabled."""
return self.pygments_style if self.pygments_style else ''
def preprocessor_critic(self, source):
"""
Strip out multi-markdown critic marks.
Accept changes by default.
"""
from pymdownx.critic import CriticViewPreprocessor, CriticStash, CRITIC_KEY
text = ''
mode = 'accept' if self.settings.get("strip_critic_marks", "accept") == "accept" else 'reject'
critic_stash = CriticStash(CRITIC_KEY)
critic = CriticViewPreprocessor(critic_stash)
critic.config = {'mode': mode}
text = '\n'.join(critic.run(source.split('\n')))
return text
def process_extensions(self, extensions):
"""Process extensions and related settings."""
# See if we need to inject CSS for pygments.
self.pygments_style = None
style = self.settings.get('pygments_style', 'github')
if self.settings.get('pygments_inject_css', True):
# Check if the desired style exists internally
self.set_highlight(style, self.settings.get('pygments_css_class', 'codehilite'))
# Get the base path of source file if available
base_path = self.settings.get('builtin').get("basepath")
if base_path is None:
base_path = ""
names = []
settings = {}
for e in extensions:
# Ensure extension is in correct format and separate config from extension
if isinstance(e, str):
ext = e
config = OrderedDict()
elif isinstance(e, (dict, OrderedDict)):
ext = list(e.keys())[0]
config = list(e.values())[0]
if config is None:
config = OrderedDict()
else:
continue
names.append(ext)
settings[ext] = config
for k, v in config.items():
if isinstance(v, str):
config[k] = v.replace("${BASE_PATH}", base_path)
return names, settings
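# Illustrative sketch (hypothetical settings value): a mixed extension list such as
#     ["markdown.extensions.toc", {"pymdownx.pathconverter": {"base_path": "${BASE_PATH}"}}]
# becomes names ["markdown.extensions.toc", "pymdownx.pathconverter"] plus a
# settings dict whose "${BASE_PATH}" placeholders are replaced with the source
# file's base path.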
def get_config_extensions(self):
"""Get the extensions to include from the settings."""
ext_config = self.settings.get('markdown_extensions')
return self.process_extensions(ext_config)
def parser_specific_convert(self, markdown_text):
"""Parse Markdown with Python Markdown."""
sublime.status_message('converting markdown with Python markdown...')
extensions, extension_configs = self.get_config_extensions()
md = Markdown(extensions=extensions, extension_configs=extension_configs)
html_text = md.convert(markdown_text)
# Retrieve the meta data returned from the "meta" extension
self.settings.add_meta(md.Meta)
return html_text
class MarkdownPreviewSelectCommand(sublime_plugin.TextCommand):
"""Allow selection of parser to use."""
selected = 0
def run(self, edit, target='browser'):
"""Show menu of parsers to select from."""
settings = sublime.load_settings("MarkdownPreview.sublime-settings")
md_map = settings.get('markdown_binary_map', {})
parsers = [
"markdown",
"github"
]
# Add external markdown binaries.
for k in md_map.keys():
parsers.append(k)
self.target = target
enabled_parsers = set()
for p in settings.get("enabled_parsers", ["markdown", "github"]):
if p in parsers:
enabled_parsers.add(p)
self.user_parsers = list(enabled_parsers)
self.user_parsers.sort()
window = self.view.window()
length = len(self.user_parsers)
if window is not None and length:
if length == 1:
self.view.run_command(
"markdown_preview",
{
"parser": self.user_parsers[0],
"target": self.target
}
)
else:
window.show_quick_panel(
self.user_parsers, self.run_command, 0, self.selected
)
def run_command(self, value):
"""Run the selected parser."""
if value > -1:
self.selected = value
self.view.run_command(
"markdown_preview",
{
"parser": self.user_parsers[value],
"target": self.target
}
)
class MarkdownPreviewCommand(sublime_plugin.TextCommand):
"""Initiate a Markdown preview/conversion."""
def run(self, edit, parser='markdown', target='browser'):
"""Run the conversion with the specified parser and output to the specified target."""
self.settings = sublime.load_settings('MarkdownPreview.sublime-settings')
# backup parser+target for later saves
self.view.settings().set('parser', parser)
self.view.settings().set('target', target)
self.parser = parser
self.target = target
if parser == "github":
compiler = GithubCompiler()
elif parser == 'markdown':
compiler = MarkdownCompiler()
elif parser in self.settings.get("enabled_parsers", ("markdown", "github")):
compiler = ExternalMarkdownCompiler(parser)
else:
# Fallback to Python Markdown
compiler = MarkdownCompiler()
html, body = compiler.run(self.view, preview=(target in ['disk', 'browser']))
temp_target = 'browser' if target == 'disk' else target
if temp_target in self.settings.get('include_head', ['build', 'browser', 'sublime', 'clipboard', 'save']):
content = html
else:
content = body
if target in ['disk', 'browser']:
self.to_disk(content, self.target == 'browser')
elif target == 'sublime':
self.to_sublime(content)
elif target == 'clipboard':
self.to_clipboard(content)
elif target == 'save':
self.save(compiler, content)
def to_disk(self, html, open_in_browser):
"""Save to disk and open in browser if desired."""
# do not use LiveReload unless autoreload is enabled
github_auth_provided = self.settings.get('github_oauth_token') is not None
if self.settings.get('enable_autoreload', True) and (self.parser != 'github' or github_auth_provided):
# check if LiveReload ST2 extension installed and add its script to the resulting HTML
if 'LiveReload' in os.listdir(sublime.packages_path()):
port = sublime.load_settings('LiveReload.sublime-settings').get('port', 35729)
html += RELOAD_JS % port
# update output html file
tmp_fullpath = get_temp_preview_path(self.view)
save_utf8(tmp_fullpath, html)
# now opens in browser if needed
if open_in_browser:
self.__class__.open_in_browser(tmp_fullpath, self.settings.get('browser', 'default'))
def to_sublime(self, html):
"""Output to Sublime view."""
# create a new buffer and paste the output HTML
new_view(self.view.window(), html, scratch=True)
sublime.status_message('Markdown preview launched in sublime')
def to_clipboard(self, html):
"""Save to clipboard."""
# clipboard copy the full HTML
sublime.set_clipboard(html)
sublime.status_message('Markdown export copied to clipboard')
def save(self, compiler, html):
"""Save output."""
save_location = compiler.settings.get('builtin').get('destination', None)
if save_location is None:
save_location = self.view.file_name()
if save_location is None or not os.path.exists(save_location):
# Save as...
v = new_view(self.view.window(), html)
if v is not None:
v.run_command('save')
else:
# Save
htmlfile = os.path.splitext(save_location)[0] + '.html'
save_utf8(htmlfile, html)
else:
save_utf8(save_location, html)
@classmethod
def open_in_browser(cls, path, browser='default'):
"""Open in browser for the appropriate platform."""
if browser == 'default':
if sys.platform == 'darwin':
# On Mac OS, the `open` command opens HTML files with the
# application associated with .html. For many developers this is
# Sublime, not the default browser. Getting the right value is
# embarrassingly difficult.
import shlex
import subprocess
env = {'VERSIONER_PERL_PREFER_32_BIT': 'true'}
raw = """perl -MMac::InternetConfig -le 'print +(GetICHelper "http")[1]'"""
process = subprocess.Popen(shlex.split(raw), env=env, stdout=subprocess.PIPE)
out, err = process.communicate()
default_browser = out.strip().decode('utf-8')
cmd = "open -a '%s' %s" % (default_browser, path)
os.system(cmd)
else:
desktop.open(path)
sublime.status_message('Markdown preview launched in default browser')
else:
cmd = '"%s" %s' % (browser, path)
if sys.platform == 'darwin':
cmd = "open -a %s" % cmd
elif sys.platform == 'linux2':
cmd += ' &'
elif sys.platform == 'win32':
cmd = 'start "" %s' % cmd
result = os.system(cmd)
if result != 0:
sublime.error_message('cannot execute "%s" Please check your Markdown Preview settings' % browser)
else:
sublime.status_message('Markdown preview launched in %s' % browser)
class MarkdownBuildCommand(sublime_plugin.WindowCommand):
"""Build command for Markdown."""
def init_panel(self):
"""Initialize the output panel."""
if not hasattr(self, 'output_view'):
self.output_view = self.window.create_output_panel("markdown")
def puts(self, message):
"""Output to panel."""
message = message + '\n'
self.output_view.run_command('append', {'characters': message, 'force': True, 'scroll_to_end': True})
def run(self):
"""Run the build and convert the Markdown."""
view = self.window.active_view()
if not view:
return
start_time = time.time()
self.init_panel()
settings = sublime.load_settings('MarkdownPreview.sublime-settings')
parser = settings.get('parser', 'markdown')
if parser == 'default':
print(
'Markdown Preview: The use of "default" as a parser is now deprecated,'
' please specify a valid parser name.'
)
parser = 'markdown'
target = settings.get('build_action', 'build')
if target in ('browser', 'sublime', 'clipboard', 'save'):
view.run_command("markdown_preview", {"parser": parser, "target": target})
return
show_panel_on_build = settings.get("show_panel_on_build", True)
if show_panel_on_build:
self.window.run_command("show_panel", {"panel": "output.markdown"})
mdfile = view.file_name()
if mdfile is None or not os.path.exists(mdfile):
self.puts("Can't build an unsaved markdown file.")
return
self.puts("Compiling %s..." % mdfile)
if parser == "github":
compiler = GithubCompiler()
elif parser == 'markdown':
compiler = MarkdownCompiler()
elif parser in settings.get("enabled_parsers", ("markdown", "github")):
compiler = ExternalMarkdownCompiler(parser)
else:
compiler = MarkdownCompiler()
html, body = compiler.run(view, True, preview=False)
if 'build' in settings.get('include_head', ['build', 'browser', 'sublime', 'clipboard', 'save']):
content = html
else:
content = body
htmlfile = compiler.settings.get('builtin').get('destination', None)
if htmlfile is None:
htmlfile = os.path.splitext(mdfile)[0] + '.html'
self.puts(" ->" + htmlfile)
save_utf8(htmlfile, content)
elapsed = time.time() - start_time
if body == _CANNOT_CONVERT:
self.puts(_CANNOT_CONVERT)
self.puts("[Finished in %.1fs]" % (elapsed))
sublime.status_message("Build finished")
|
|
# -*- coding: utf-8 -*-
'''Fabric tasks for the setup of some services.'''
import re
import tempfile
from fabric.api import env, hide, sudo, warn_only
from fabric.contrib.files import exists
from fabsetup.fabutils import checkup_git_repo_legacy, install_packages
from fabsetup.fabutils import needs_packages, task, run, suggest_localhost, put
from fabsetup.fabutils import FABFILE_DATA_DIR
from fabsetup.utils import flo, query_yes_no
from fabsetup.utils import query_input, blue, cyan, magenta, filled_out_template
from selfoss import selfoss
from trac import trac
@task
@needs_packages('nginx')
def owncloud():
'''Set up owncloud.
Package 'owncloud' pulls package 'mysql' which asks for a password.
'''
hostname = re.sub(r'^[^@]+@', '', env.host) # without username if any
sitename = query_input(
question='\nEnter site-name of Your Owncloud web service',
default=flo('owncloud.{hostname}'), color=cyan)
username = env.user
fabfile_data_dir = FABFILE_DATA_DIR
print(magenta(' install owncloud'))
repository = ''.join([
'http://download.opensuse.org/repositories/',
'isv:/ownCloud:/community/Debian_7.0/',
])
with hide('output'):
sudo(flo('wget -O - {repository}Release.key | apt-key add -'))
filename = '/etc/apt/sources.list.d/owncloud.list'
sudo(flo("echo 'deb {repository} /' > {filename}"))
sudo('apt-get update')
install_packages([
'owncloud',
'php5-fpm',
'php-apc',
'memcached',
'php5-memcache',
])
# This server uses nginx. owncloud pulls apache2 => Disable apache2
print(magenta(' disable apache'))
with hide('output'):
sudo('service apache2 stop')
sudo('update-rc.d apache2 disable')
print(magenta(' nginx setup for owncloud'))
filename = 'owncloud_site_config.template'
path = flo('{fabfile_data_dir}/files/etc/nginx/sites-available/{filename}')
from_str = filled_out_template(path, username=username, sitename=sitename,
hostname=hostname)
with tempfile.NamedTemporaryFile(prefix=filename) as tmp_file:
with open(tmp_file.name, 'w') as fp:
fp.write(from_str)
put(tmp_file.name, flo('/tmp/{filename}'))
to = flo('/etc/nginx/sites-available/{sitename}')
sudo(flo('mv /tmp/{filename} {to}'))
sudo(flo('chown root.root {to}'))
sudo(flo('chmod 644 {to}'))
sudo(flo(' '.join([
'ln -snf ../sites-available/{sitename}',
'/etc/nginx/sites-enabled/{sitename}',
])))
# php5 fpm fast-cgi config
template = 'www.conf'
to = flo('/etc/php5/fpm/pool.d/{template}')
from_ = flo('{fabfile_data_dir}/files{to}')
put(from_, '/tmp/')
sudo(flo('mv /tmp/{template} {to}'))
sudo(flo('chown root.root {to}'))
sudo(flo('chmod 644 {to}'))
template = 'php.ini'
to = flo('/etc/php5/fpm/{template}')
from_ = flo('{fabfile_data_dir}/files{to}')
put(from_, '/tmp/')
sudo(flo('mv /tmp/{template} {to}'))
sudo(flo('chown root.root {to}'))
sudo(flo('chmod 644 {to}'))
sudo('service php5-fpm restart')
sudo('service nginx reload')
@task
@needs_packages('nginx')
@needs_packages('software-properties-common') # for cmd 'add-apt-repository'
def fdroid():
'''Set up an F-Droid App Repo.
More infos:
* https://f-droid.org/wiki/page/Setup_an_FDroid_App_Repo
* https://f-droid.org/wiki/page/Installing_the_Server_and_Repo_Tools
'''
hostname = re.sub(r'^[^@]+@', '', env.host) # without username if any
sitename = query_input(
question='\nEnter site-name of Your F-Droid web service',
default=flo('fdroid.{hostname}'))
username = env.user
fabfile_data_dir = FABFILE_DATA_DIR
print(magenta(' install fdroidserver'))
res = run('dpkg --get-selections | '
'grep -q "^fdroidserver[[:space:]]*install$" >/dev/null',
warn_only=True)
package_installed = res.return_code == 0
question = 'package fdroidserver already installed, update? ' \
'(needs some time)'
if package_installed and not query_yes_no(question, default='no'):
print('skip update')
else:
with hide('output'):
sudo('yes "" | add-apt-repository ppa:guardianproject/ppa')
sudo('apt-get update')
# why 'android-libhost-dev' (avoid "Failed to get apk information"
# on 'fdroid update --create-metadata'):
# https://f-droid.org/forums/topic/failed-to-get-apk-information-2/#post-15777
install_packages(['fdroidserver', 'android-libhost-dev'])
sudo('yes "" | add-apt-repository --remove '
'ppa:guardianproject/ppa')
sudo('apt-get update')
site_dir = flo('/home/{username}/sites/{sitename}')
apks_dir = flo('{site_dir}/apks')
fdroid_dir = flo('{site_dir}/fdroid')
repo_dir = flo('{site_dir}/fdroid/repo')
print(magenta(' init f-droid repo'))
question = ' '.join(['already initialized, initialize again?',
'(creates a new repo key)'])
if exists(repo_dir) and not query_yes_no(question, default='no'):
print('skip initialization')
else:
with warn_only():
run(flo('rm -rf {fdroid_dir}'))
run(flo('mkdir -p {repo_dir}'))
run(flo('cd {fdroid_dir} && fdroid init'))
run(flo('cd {site_dir} && tree'))
print(magenta(' update apk files of the fdroid repo'))
run(flo('mkdir -p {apks_dir}'))
run(flo('rm -rf {repo_dir}/*.apk'))
run(flo("find {apks_dir} -type f | rename 's/ /_/g'"))
run(flo("find {apks_dir} -type f | rename 's/[^[:ascii:]]//g'"))
run(flo('chmod 644 {apks_dir}/*.apk'))
run(flo('cp -v {apks_dir}/*.apk {repo_dir}'), warn_only=True)
run(flo('cd {fdroid_dir} && fdroid update --create-metadata'))
print(magenta(' setup nginx for F-Droid'))
run(flo('echo -e "User-agent: *\\nDisallow: /" > {fdroid_dir}/robots.txt'))
filename = 'fdroid_site_config.template'
path = flo('{fabfile_data_dir}/files/etc/nginx/sites-available/{filename}')
from_str = filled_out_template(path, username=username, sitename=sitename,
hostname=hostname)
with tempfile.NamedTemporaryFile(prefix=filename) as tmp_file:
with open(tmp_file.name, 'w') as fp:
fp.write(from_str)
put(tmp_file.name, flo('/tmp/{filename}'))
to = flo('/etc/nginx/sites-available/{sitename}')
sudo(flo('mv /tmp/{filename} {to}'))
sudo(flo('chown root.root {to}'))
sudo(flo('chmod 644 {to}'))
sudo(flo(' '.join([
'ln -snf ../sites-available/{sitename}',
'/etc/nginx/sites-enabled/{sitename}',
])))
sudo('service nginx reload')
@task
def vnc_raspi_osmc():
'''Install and configure dispmanx_vnc server on osmc (raspberry pi).
More Infos:
* https://github.com/patrikolausson/dispmanx_vnc
* https://discourse.osmc.tv/t/howto-install-a-vnc-server-on-the-raspberry-pi/1517
* tightvnc:
* http://raspberry.tips/raspberrypi-einsteiger/raspberry-pi-einsteiger-guide-vnc-einrichten-teil-4/
* http://jankarres.de/2012/08/raspberry-pi-vnc-server-installieren/
'''
print(blue('Install dependencies'))
install_packages([
'git',
'build-essential',
'rbp-userland-dev-osmc',
'libvncserver-dev',
'libconfig++-dev',
])
print(blue('Build vnc server for raspberry pi using dispmanx '
'(dispmanx_vnc)'))
checkup_git_repo_legacy(
url='https://github.com/patrikolausson/dispmanx_vnc.git')
run('mkdir -p ~/repos')
run('cd ~/repos/dispmanx_vnc && make')
print(blue('set up dispmanx_vnc as a service'))
with warn_only():
run('sudo systemctl stop dispmanx_vncserver.service')
username = env.user
builddir = flo('/home/{username}/repos/dispmanx_vnc')
run(flo('sudo cp {builddir}/dispmanx_vncserver /usr/bin'))
run('sudo chmod +x /usr/bin/dispmanx_vncserver')
fabfile_data_dir = FABFILE_DATA_DIR
put(flo('{fabfile_data_dir}/files/etc/dispmanx_vncserver.conf'), '/tmp/')
run('sudo mv /tmp/dispmanx_vncserver.conf /etc/dispmanx_vncserver.conf')
put(flo('{fabfile_data_dir}/files/etc/systemd/system/dispmanx_vncserver.service'),
'/tmp/')
run('sudo mv /tmp/dispmanx_vncserver.service '
'/etc/systemd/system/dispmanx_vncserver.service')
run('sudo systemctl start dispmanx_vncserver.service')
run('sudo systemctl enable dispmanx_vncserver.service')
run('sudo systemctl daemon-reload')
@task
def lms():
'''Install and start a Logitech Media Server (lms).
More infos:
* http://wiki.slimdevices.com/index.php/Logitech_Media_Server
* http://wiki.slimdevices.com/index.php/DebianPackage
* http://www.mysqueezebox.com/download
* XSqueeze on Kodi:
* http://kodi.wiki/view/Add-on:XSqueeze
* http://forum.kodi.tv/showthread.php?tid=122199
'''
# cf. http://wiki.slimdevices.com/index.php/DebianPackage#installing_7.9.0
cmds = '''\
url="http://www.mysqueezebox.com/update/?version=7.9.0&revision=1&geturl=1&os=deb"
latest_lms=$(wget -q -O - "$url")
mkdir -p ~/.logitech_media_server_sources
cd ~/.logitech_media_server_sources
wget $latest_lms
lms_deb=${latest_lms##*/}
sudo dpkg -i $lms_deb
'''
run(cmds)
run('sudo usermod -aG audio squeezeboxserver')
with warn_only():
run('sudo addgroup lms')
run('sudo usermod -aG lms squeezeboxserver')
username = env.user
run(flo('sudo usermod -aG audio {username}'))
print('\n Set correct folder permissions manually, eg:')
print(' > ' + cyan(flo('chown -R {username}.lms <path/to/your/media>')))
hostname = env.host
print(flo('\n lms frontend available at http://{hostname}:9000'))
@task
@suggest_localhost
def samba():
'''Install smb server samba and create a share (common read-write-access).
More infos:
* https://wiki.ubuntuusers.de/Samba%20Server/
'''
username = env.user
install_packages(['samba'])
run(flo('sudo smbpasswd -a {username}'))
path = '$HOME/shared'
sharename = 'shared'
comment = '"smb share; everyone has full access (read/write)"'
acl = flo('Everyone:F,{username}:F guest_ok=y')
with warn_only():
run(flo('mkdir {path}'))
run(flo('sudo net usershare add {sharename} {path} {comment} {acl}'))
run(flo('sudo net usershare info {sharename}'))
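# For reference, with the defaults above the usershare call expands to roughly
# the following shell command (the username value is illustrative):
#   sudo net usershare add shared $HOME/shared \
#       "smb share; everyone has full access (read/write)" \
#       Everyone:F,<username>:F guest_ok=y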
|
|
__author__ = 'Peter Shipley <[email protected]>'
__copyright__ = "Copyright (C) 2013 Peter Shipley"
__license__ = "BSD"
# from xml.dom.minidom import parse, parseString
#from StringIO import StringIO
import xml.etree.ElementTree as ET
from xml.etree.ElementTree import iselement
from ISY.IsyExceptionClass import IsyPropertyError, IsyValueError, IsySoapError
# import base64
import sys
if sys.hexversion < 0x3000000:
import urllib2 as URL
from urllib import quote as URLQuote
from urllib2 import Request as URLRequest
from urllib2 import URLError, HTTPError
else:
import urllib as URL
from urllib.parse import quote as URLQuote
from urllib.request import Request as URLRequest
from urllib.error import URLError, HTTPError
# import re
from pprint import pprint
#__all__ = ['IsyUtil', 'IsySubClass' ]
__all__ = ['format_node_addr']
def val2bool(en):
if isinstance(en, str):
rval = (str(en).strip().lower() in ("yes", "y", "true", "t", "1"))
else: # if isinstance(en, (long, int, float)):
# Punt
rval = bool(en)
return(rval)
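# Illustrative behaviour of val2bool (not part of the original module):
#   val2bool("Yes") -> True, val2bool("0") -> False, val2bool(1) -> True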
def et2d(et):
""" Etree to Dict
converts an ETree to a Dict Tree
lists are created for duplicate tag
if there are multiple XML of the same name
an list array is used
attrib tags are converted to "tag_name" + "attrib_name"
if an invalid arg is passed a empty dict is retrurned
arg: ETree Element obj
returns: a dict obj
"""
d = dict()
if not isinstance(et, ET.Element):
return d
children = list(et)
if et.attrib:
for k, v in list(et.items()):
d[et.tag + "-" + k] = v
if children:
for child in children:
if child.tag in d:
if type(d[child.tag]) != list:
t = d[child.tag]
d[child.tag] = [t]
if list(child) or child.attrib:
if child.tag in d:
d[child.tag].append(et2d(child))
else:
d[child.tag] = et2d(child)
else:
if child.tag in d:
d[child.tag].append(child.text)
else:
d[child.tag] = child.text
return d
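# A minimal sketch (not part of the original module) showing the shape et2d()
# produces; the XML snippet below is a made-up example.
def _et2d_example():
    sample = ET.fromstring(
        '<node flag="128"><name>Front Light</name>'
        '<property id="ST" value="0"/></node>')
    # Expected result:
    # {'node-flag': '128', 'name': 'Front Light',
    #  'property': {'property-id': 'ST', 'property-value': '0'}}
    return et2d(sample)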
def format_node_addr(naddr):
if not isinstance(naddr, str):
raise IsyValueError("{0} arg not string".format(__name__))
addr_el = naddr.upper().split()
a = "{0:0>2}' '{1:0>2}' '{2:0>2}' ".format(*addr_el)
return a
#
# Simple Base class for ISY Class
#
class IsyUtil(object):
def __init__(self):
self.debug = 0
self.baseurl = "" # never used
# self.pp = pprint.PrettyPrinter(indent=4)
def _printXML(self, xml):
""" Pretty Print XML, for internal debug"""
print("_printXML start")
ET.dump(xml)
def _set_prop(self, *arg):
pass
def _getXMLetree(self, xmlpath, noquote=0, timeout=10):
""" take a URL path, download XLM and return parsed Etree """
if noquote:
xurl = self.baseurl + xmlpath
else:
xurl = self.baseurl + URLQuote(xmlpath)
if self.debug & 0x02:
print("_getXMLetree: " + xurl)
# print("_getXMLetree: URLRequest")
req = URLRequest(xurl)
# print("_getXMLetree: self._opener.open ")
# HTTPError
try:
res = self._opener.open(req, None, timeout)
data = res.read()
# print("res.getcode() ", res.getcode(), len(data))
res.close()
except HTTPError as e:
self.error_str = str("Reponse Code: {0} : {1}").format(e.code, xurl)
return None
if len(self.error_str):
self.error_str = ""
if self.debug & 0x200:
print(res.info())
print(data)
et = None
if len(data):
try:
et = ET.fromstring(data)
except ET.ParseError as e:
print("Etree ParseError ")
print("data = ", data)
print("e.message = ", e.message)
# raise
finally:
return et
else:
return None
def _gensoap(self, cmd, **kwargs):
if self.debug & 0x200:
print("len kwargs = ", len(kwargs), kwargs)
if len(kwargs) == 0:
cmdsoap = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>" \
+ "<e:Envelope><s:Body>" \
+ "<u:{0!s} ".format(cmd) \
+ "xmlns:u=\"urn:udi-com:service:X_Insteon_Lighting_Service:1\" />" \
+ "</s:Body></e:Envelope>"
else:
cmdsoap = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>" \
+ "<s:Envelope><s:Body>" \
+ "<u:{0!s} ".format(cmd) \
+ "xmlns:u=\"urn:udi-com:service:X_Insteon_Lighting_Service:1\">"
for k, v in kwargs.items():
cmdsoap += "<{0}>{1!s}</{0}>".format(k, v)
cmdsoap += "</u:{0!s}>".format(cmd) + "</s:Body></s:Envelope>"
# print "cmdsoap = \n", cmdsoap
return cmdsoap
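    # For reference, _gensoap("GetNodesConfig") with no kwargs yields an
    # envelope of this form (whitespace added here for readability;
    # "GetNodesConfig" is only an illustrative command name):
    #   <?xml version="1.0" encoding="UTF-8"?>
    #   <e:Envelope><s:Body>
    #     <u:GetNodesConfig xmlns:u="urn:udi-com:service:X_Insteon_Lighting_Service:1" />
    #   </s:Body></e:Envelope>
    # With kwargs, the envelope opens with <s:Envelope> and each key/value
    # pair becomes a child element inside the <u:...> element.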
# http://wiki.universal-devices.com/index.php?title=ISY-99i/ISY-26_INSTEON:Errors_And_Error_Messages
def soapcomm(self, cmd, **kwargs):
"""
takes a command name and a list of keyword arguments.
each keyword is converted into a xml element
"""
if not isinstance(cmd, str) or not len(cmd):
raise IsyValueError("SOAP Method name missing")
if self.debug & 0x02:
print("sendcomm: ", cmd)
soap_cmd = self._gensoap(cmd, **kwargs)
xurl = self.baseurl + "/services"
if self.debug & 0x02:
print("xurl = ", xurl)
print("soap_cmd = ", soap_cmd)
req = URLRequest(xurl, soap_cmd, {'Content-Type': 'application/xml; charset="utf-8"'})
data = ""
try:
res = self._opener.open(req, None)
data = res.read()
if self.debug & 0x200:
print("res.getcode() ", res.getcode(), len(data))
print("data ", data)
res.close()
except HTTPError as e:
self.error_str = str("Reponse Code: {0} : {1} {2}").format(e.code, xurl, cmd)
if ((cmd == "DiscoverNodes" and e.code == 803) or
(cmd == "CancelNodesDiscovery" and e.code == 501)
# or (cmd == "RemoveNode" and e.code == 501)
):
if self.debug & 0x02:
print("spacial case: {0} : {1}".format(cmd, e.code))
print("e.code = ", e.code)
print("e.msg = ", e.msg)
print("e.hdrs = ", e.hdrs)
print("e.filename = ", e.filename)
print("e.code = ", type(e.code), e.code)
print("\n")
return e.read()
if self.debug & 0x202:
print("e.code = ", type(e.code), e.code)
# print "e.read = ", e.read()
print("e = ", e)
print("data = ", data)
mess = "{!s}: {!s}: {!s}".format(cmd, kwargs, e.code)
            # This is messy and should change
raise IsySoapError(mess, httperr=e)
else:
if len(self.error_str):
self.error_str = ""
if self.debug & 0x200:
print(data)
return data
def sendfile(self, src=None, filename="", data=None):
"""
upload file
args:
            data       content of the file to upload
            src        file to read the upload content from (if data is None)
            filename   name for the remote file
"""
if self.debug & 0x01:
print("sendfile: ", self.__class__.__name__)
if filename[0] != '/':
filename = "/USER/WEB/" + filename
elif not str(filename).upper().startswith("/USER/WEB/"):
raise IsyValueError("sendfile: invalid dst filename: {!s}".format(filename))
        if not data:
if not src:
src = filename
if self.debug & 0x20:
print("using file {!s} as data src".format(src))
with open(src, 'r') as content_file:
data = content_file.read()
else:
if self.debug & 0x20:
print("using provided data as data src")
self._sendfile(filename=filename, data=data, load="n")
def _sendfile(self, filename="", data="", load="n"):
if filename.startswith('/'):
xurl = self.baseurl + "/file/upload" + filename + "?load=" + load
else:
xurl = self.baseurl + "/file/upload/" + filename + "?load=" + load
if self.debug & 0x02:
print("{0} xurl: {1}".format(__name__, xurl))
req = URLRequest(xurl, data, {'Content-Type': 'application/xml; charset="utf-8"'})
try:
res = self._opener.open(req, None)
            response = res.read()
# print("res.getcode() ", res.getcode(), len(responce))
res.close()
except HTTPError as e:
# print "e.read: ", e.read()
mess = "{!s}: {!s}: {!s}".format("/file/upload", filename, e.code)
raise IsySoapError(mess, httperr=e)
else:
            return response
def _printdict(self, d):
""" Pretty Print dictionary, for internal debug"""
print("===START===")
pprint(d)
print("===END===")
def _printinfo(self, uobj, ulabel="\t"):
""" Debug util """
print("%s: tag=%s text=%s attr=%s: atype=%s: type=%s" % (ulabel, uobj.tag, uobj.text, uobj.attrib, type(uobj.attrib), type(uobj)))
# Var: '1:1': { 'id': '1:1', 'init': '0', 'name': 'enter_light',
# 'ts': '20130114 14:33:35', 'type': '1', 'val': '0'}
#
# Node: '15 4B 53 1': { 'ELK_ID': 'A01', 'address': '15 4B 53 1', 'flag': '128',
# 'enabled': 'true', 'name': 'Front Light 2', 'pnode': '15 4B 53 1',
# 'property': { 'ST': { 'formatted': 'Off', 'id': 'ST',
# 'uom': '%/on/off', 'value': '0'}}
#
# Scene: '20326': { 'ELK_ID': 'C11', 'address': '20326', 'deviceGroup': '25',
# 'flag': '132', 'name': 'Garage Lights'
# 'members': { '16 3F E5 1': '32', '16 D3 73 1': '32' }, },
#
# NodeFolder: '12743': { 'address': '12743', 'name': 'Garage Scenes'},
#
# Prog '0003': { 'enabled': 'true', 'folder': 'false', 'id': '0003',
# 'name': 'AM Off', 'parentId': '0001', 'runAtStartup': 'false',
# 'running': 'idle', 'status': 'false',
# 'lastFinishTime': '2013/03/27 09:41:28',
# 'lastRunTime': '2013/03/27 09:41:28',
# 'nextScheduledRunTime': '2013/03/27 10:05:00', },
#
# Prog '000A': { 'folder': 'true', 'id': '000A', 'lastFinishTime': None,
# 'lastRunTime': None, 'name': 'garage',
# 'parentId': '0001', 'status': 'true'},
class IsySubClass(IsyUtil):
""" Sub Class for ISY
This is a Sub Class for Node, Scene, Folder, Var, and Program Objects
This Class is not intended for direct use
attributes/properties:
type: object dependent flag
value: current value
id/address: unique for object used by ISY
name: name of object
        functions:
            no public functions
"""
_getlist = ["name", "id", "value", "address", "type", "enabled"]
_setlist = []
_propalias = {}
_boollist = ["enabled"]
def __init__(self, isy, objdict):
""" INIT """
if isinstance(objdict, dict):
self._mydict = objdict
else:
raise IsyValueError("{!s}: called without objdict".format(self.__class__.__name__))
if isinstance(isy, IsyUtil):
self.isy = isy
self.debug = isy.debug
else:
# print("error: class " + self.__class__.__name__ + " called without Isy")
raise TypeError("IsySubClass: isy arg is not a ISY family class")
if self.debug & 0x04:
print("IsySubClass: ",)
self._printdict(self._mydict)
#_objtype = (0, "unknown")
_objtype = "unknown"
def objType(self):
return self._objtype
#return self._objtype[0]
def _get_prop(self, prop):
""" Internal funtion call """
# print("U _get_prop =", prop)
if prop in self._propalias:
prop = self._propalias[prop]
if prop in self._getlist:
if prop in self._mydict:
if prop in self._boollist:
return val2bool(self._mydict[prop])
else:
return self._mydict[prop]
return None
# def _set_prop(self, prop, val):
    #     """ Internal function call """
# if prop in self._propalias:
# prop = self._propalias[prop]
#
# if not prop in self._setlist:
# raise IsyPropertyError("_set_prop: "
# "no property Attribute " + prop)
# pass
# def get_prop_list(self, l):
# """ Get a list of properties
#
# args:
# prop_list: a list of property names
#
# returns
# a list of property values
#
# if a property does not exist a value of None is used
# ( instead of raising a Attribute error)
#
# """
# pass
#
def _getaddr(self):
""" Address or ID of Node (readonly) """
return self._get_prop("address")
address = property(_getaddr)
def _getname(self):
""" Name of Node (readonly) """
return self._get_prop("name")
name = property(_getname)
def _gettype(self):
""" Type of Node (readonly) """
# self._get_prop("type")
# return self._objtype[1]
return self._objtype
objtype = property(_gettype)
def __getitem__(self, prop):
""" Internal method
allows Objects properties to be accessed in a dict style
"""
return self._get_prop(prop)
def __setitem__(self, prop, val):
""" Internal method
allows Objects properties to be accessed/set in a dict style
"""
return self._set_prop(prop, val)
def __delitem__(self, prop):
raise IsyPropertyError("__delitem__: can't delete propery: " + str(prop))
def __del__(self):
if self.debug & 0x80:
print("__del__ ", self.__repr__())
if hasattr(self, "_mydict"):
self._mydict.clear()
def __iter__(self):
""" Internal method
allows Objects properties to be access through iteration
"""
if self.debug & 0x80:
print("IsyUtil __iter__")
for p in self._getlist:
if p in self._mydict:
yield (p, self._get_prop(p))
else:
yield (p, None)
def __repr__(self):
return "<%s %s @ %s at 0x%x>" % (self.__class__.__name__, self._get_prop("id"), self.isy.addr, id(self))
# def __hash__(self):
# #print("_hash__ called")
# return str.__hash__(self._get_prop("id]").myval)
# def __compare__(self, other):
# print("__compare__ called")
# if isinstance(other, str):
# if not hasattr(other, "myval"):
# return -1
# return ( str.__cmp__(self.myval, other.myval) )
def __getattr__(self, attr):
# print("U attr =", attr)
attr_v = self._get_prop(attr)
if attr_v is not None:
return attr_v
else:
# print("attr =", attr)
# print("address =", self.address)
# print("self type = ", type(self))
# pprint(self._mydict)
raise AttributeError(attr)
# This allows for
def __eq__(self, other):
""" smarter test for compairing Obj value """
#print("IsyUtil __eq__")
#print("self", self)
#print("other", other)
if isinstance(other, str):
return self._get_prop("id") == other
if type(self) != type(other):
return False
# NotImplemented
if hasattr(other._mydict, "id"):
return self._get_prop("id") == other._get_prop("id")
return False
#
# Do nothing
# (syntax check)
#
if __name__ == "__main__":
import __main__
print(__main__.__file__)
print("syntax ok")
exit(0)
|
|
from __future__ import unicode_literals, print_function, division
import feedparser
import dataset
from twisted.internet.reactor import callLater
from threading import Thread
import twisted.internet.error
import logging
logger = logging.getLogger('module_rss')
DATABASE = None
updater = None
botref = None
config = {}
def init(bot, testing=False):
''' Initialize updater '''
global DATABASE
global config
global botref
global updater
global logger
if testing:
DATABASE = dataset.connect('sqlite:///:memory:')
else:
DATABASE = dataset.connect('sqlite:///databases/rss.db')
logger.info('RSS module initialized')
botref = bot
config = bot.config.get('rss', {})
finalize()
# As there's no signal if this is a rehash or restart
# update feeds in 30 seconds
updater = callLater(30, update_feeds)
def finalize():
''' Finalize updater (rehash etc) so we don't leave an updater running '''
global updater
global logger
logger.info('RSS module finalized')
if updater:
try:
updater.cancel()
except twisted.internet.error.AlreadyCalled:
pass
updater = None
def get_feeds(**kwargs):
''' Get feeds from database '''
return [
Feed(f['network'], f['channel'], f['id'])
for f in list(DATABASE['feeds'].find(**kwargs))
]
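# Example (sketch): fetch all feeds configured for one network/channel pair.
#   feeds = get_feeds(network='ircnet', channel='#pyfibot')
#   for feed in feeds:
#       print(feed.name, feed.url)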
def find_feed(network, channel, **kwargs):
''' Find specific feed from database '''
f = DATABASE['feeds'].find_one(network=network, channel=channel, **kwargs)
if not f:
return
return Feed(f['network'], f['channel'], f['id'])
def add_feed(network, channel, url):
''' Add feed to database '''
f = Feed(network=network, channel=channel, url=url)
return (f.initialized, f.read())
def remove_feed(network, channel, id):
''' Remove feed from database '''
f = find_feed(network=network, channel=channel, id=int(id))
if not f:
return
DATABASE['feeds'].delete(id=f.id)
DATABASE['items_%i' % (f.id)].drop()
return f
def update_feeds(cancel=True, **kwargs):
# from time import sleep
''' Update all feeds in the DB '''
global config
global updater
global logger
logger.info('Updating RSS feeds started')
for f in get_feeds(**kwargs):
Thread(target=f.update).start()
# If we get a cancel, cancel the existing updater
# and start a new one
# NOTE: Not sure if needed, as atm cancel isn't used in any command...
if cancel:
try:
updater.cancel()
except twisted.internet.error.AlreadyCalled:
pass
updater = callLater(5 * 60, update_feeds)
def command_rss(bot, user, channel, args):
commands = ['list', 'add', 'remove', 'latest', 'update']
args = args.split()
if not args or args[0] not in commands:
return bot.say(channel, 'rss: valid arguments are [%s]' % (', '.join(commands)))
command = args[0]
network = bot.network.alias
# Get latest feed item from database
# Not needed? mainly for debugging
# Possibly useful for checking if feed still exists?
if command == 'latest':
if len(args) < 2:
return bot.say(channel, 'syntax: ".rss latest <id from list>"')
feed = find_feed(network=network, channel=channel, id=int(args[1]))
if not feed:
return bot.say(channel, 'feed not found, no action taken')
item = feed.get_latest()
if not item:
return bot.say(channel, 'no items in feed')
return bot.say(channel, feed.get_item_str(item))
# List all feeds for current network && channel
if command == 'list':
feeds = get_feeds(network=network, channel=channel)
if not feeds:
return bot.say(channel, 'no feeds set up')
for f in feeds:
bot.say(channel, '%02i: %s <%s>' % (f.id, f.name, f.url))
return
# Rest of the commands are only for admins
if not bot.factory.isAdmin(user):
return bot.say(channel, 'only "latest" and "list" available for non-admins')
# Add new feed for channel
if command == 'add':
if len(args) < 2:
return bot.say(channel, 'syntax: ".rss add url"')
init, items = add_feed(network, channel, url=args[1])
if not init:
return bot.say(channel, 'feed already added')
return bot.say(channel, 'feed added with %i items' % len(items))
# remove feed from channel
if command == 'remove':
if len(args) < 2:
return bot.say(channel, 'syntax: ".rss remove <id from list>"')
feed = remove_feed(network, channel, id=args[1])
if not feed:
return bot.say(channel, 'feed not found, no action taken')
return bot.say(channel, 'feed "%s" <%s> removed' % (feed.name, feed.url))
# If there's no args, update all feeds (even for other networks)
# If arg exists, try to update the feed...
if command == 'update':
if len(args) < 2:
bot.say(channel, 'feeds updating')
update_feeds()
return
feed = find_feed(network, channel, id=int(args[1]))
if not feed:
return bot.say(channel, 'feed not found, no action taken')
feed.update()
return
class Feed(object):
''' Feed object to simplify feed handling '''
def __init__(self, network, channel, id=None, url=None):
# Not sure if (this complex) init is needed...
self.id = id
self.network = network
self.channel = channel
self.url = url
if url:
self.url = url
self.initialized = False
# load feed details from database
self._get_feed_from_db()
def __repr__(self):
return '(%s, %s, %s)' % (self.url, self.channel, self.network)
def __unicode__(self):
return '%i - %s' % (self.id, self.url)
def __init_feed(self):
''' Initialize databases for feed '''
DATABASE['feeds'].insert({
'network': self.network,
'channel': self.channel,
'url': self.url,
'name': '',
})
# Update feed to match the created
feed = self._get_feed_from_db()
# Initialize item-database for feed
self.__save_item({
'title': 'PLACEHOLDER',
'link': 'https://github.com/lepinkainen/pyfibot/',
'printed': True,
})
self.initialized = True
return feed
def __get_items_tbl(self):
''' Get table for feeds items '''
return DATABASE[('items_%i' % (self.id))]
def __parse_feed(self):
''' Parse items from feed '''
f = feedparser.parse(self.url)
if self.initialized:
self.update_feed_info({'name': f['channel']['title']})
items = [{
'title': i['title'],
'link': i['link'],
} for i in f['items']]
return (f, items)
def __save_item(self, item, table=None):
''' Save item to feeds database '''
if table is None:
table = self.__get_items_tbl()
        # If the item cannot be found, it's a new one
if not table.find_one(title=item['title'], link=item['link']):
# If printed isn't set, set it to the value in self.initialized (True, if initializing, else False)
# This is to prevent flooding when adding a new feed...
if 'printed' not in item:
item['printed'] = self.initialized
table.insert(item)
def __mark_printed(self, item, table=None):
''' Mark item as printed '''
if table is None:
table = self.__get_items_tbl()
table.update({'id': item['id'], 'printed': True}, ['id'])
def _get_feed_from_db(self):
''' Get self from database '''
feed = None
if self.url and not self.id:
feed = DATABASE['feeds'].find_one(network=self.network, channel=self.channel, url=self.url)
if self.id:
feed = DATABASE['feeds'].find_one(network=self.network, channel=self.channel, id=self.id)
if not feed:
feed = self.__init_feed()
self.id = feed['id']
self.network = feed['network']
self.channel = feed['channel']
self.url = feed['url']
# TODO: Name could just be the domain part of url?
self.name = feed['name']
return feed
def get_item_str(self, item):
        return '[%s] %s <%s>' % (self.name[:18], item['title'], item['link'])
def get_latest(self):
tbl = self.__get_items_tbl()
items = [i for i in list(tbl.find(order_by='id'))]
if not items:
return
return items[-1]
def update_feed_info(self, data):
''' Update feed information '''
data['id'] = self.id
if 'url' in data:
self.url = data['url']
DATABASE['feeds'].update(data, ['id'])
# Update self to match new...
self._get_feed_from_db()
def read(self):
''' Read new items from feed '''
f, items = self.__parse_feed()
# Get table -reference to speed up stuff...
tbl = self.__get_items_tbl()
# Save items in DB, saving takes care of duplicate checks
for i in reversed(items):
self.__save_item(i, tbl)
# Set initialized to False, as we have read everything...
self.initialized = False
return items
def get_new_items(self, mark_printed=False):
''' Get all items which are not marked as printed, if mark_printed is set, update printed also. '''
tbl = self.__get_items_tbl()
items = [i for i in list(tbl.find(printed=False))]
if mark_printed:
for i in items:
self.__mark_printed(i, tbl)
return items
def update(self):
global logger
global botref
# If botref isn't defined, bot isn't running, no need to run
# (used for tests?)
if not botref:
return
# Read all items for feed
logger.debug('Feed "%s" updating' % (self.name))
self.read()
# Get number of unprinted items (and don't mark as printed)
items = self.get_new_items(False)
if len(items) == 0:
logger.debug('Feed "%s" containes no new items, doing nothing.' % (self.name))
return
logger.debug('Feed "%s" updated with %i new items' % (self.name, len(items)))
# If bot instance isn't found, don't print anything
bot_instance = botref.find_bot_for_network(self.network)
if not bot_instance:
logger.error('Bot instance for "%s" not found, not printing' % (self.name))
return
logger.debug('Printing new items for "%s"' % (self.name))
# Get all new (not printed) items and print them
items = self.get_new_items(True)
for i in items:
bot_instance.say(self.channel, self.get_item_str(i))
if __name__ == '__main__':
f = Feed('ircnet', '#pyfibot', 'http://feeds.feedburner.com/ampparit-kaikki?format=xml')
f.read()
for i in f.get_new_items(True):
print(i)
|
|
"""
Example manage.py script, which is responsible for providing a command-line
interface to application-specific tasks, such as managing databases.
"""
import os
import sys
PROJECT_HOME = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(PROJECT_HOME)
from dateutil import parser
import requests
from flask.ext.script import Manager, Command, Option
from flask.ext.migrate import Migrate, MigrateCommand
from flask import current_app
from mc.models import db, Build, Commit
from mc.app import create_app
from mc.tasks import build_docker, register_task_revision, update_service, \
start_test_environment, stop_test_environment, run_ci_test, run_task
from mc.builders import ECSBuilder
from sqlalchemy import or_
from sqlalchemy.orm.exc import NoResultFound
app = create_app()
migrate = Migrate(app, db)
manager = Manager(app)
class ManageTestCluster(Command):
"""
Script to allow the management of the test cluster
"""
option_list = (
Option('--command', '-c', dest='command', choices=['start', 'stop', 'run'], required=True),
Option('--id', '-i', dest='test_id', required=False, default=None),
)
def run(self, command, test_id):
"""
Run command
:param command: command to pass to the cluster environment
:param test_id: test id of the environment
"""
command_look_up = {
'start': start_test_environment,
'stop': stop_test_environment,
'run': run_ci_test
}
command_look_up[command](test_id=test_id)
class CreateDatabase(Command):
"""
Creates the database based on models.py
"""
def run(self):
with app.app_context():
db.create_all()
class BuildDockerImage(Command):
"""
Generates a build based on a repo name and commit hash
"""
option_list = (
Option('--repo', '-r', dest='repo'),
Option('--commit', '-c', dest='commit_hash'),
Option('--tag', '-t', dest='tag')
)
def run(self, repo, commit_hash=None, tag=None, app=app):
with app.app_context():
if tag:
# Using the tag, obtain the sha for the relevant tag
url = current_app.config['GITHUB_TAG_FIND_API'].format(
repo=repo,
tag=tag
)
r = requests.get(url)
r.raise_for_status()
payload_find_tag = r.json()
try:
tag_commit_hash = payload_find_tag['object']['sha']
except KeyError:
raise KeyError(
'tag supplied does not exist: {0}'.format(tag)
)
# Obtain the commit hash for this tag
url = current_app.config['GITHUB_TAG_GET_API'].format(
repo=repo,
hash=tag_commit_hash
)
r = requests.get(url)
r.raise_for_status()
payload_get_tag = r.json()
commit_hash = payload_get_tag['object']['sha']
current_app.logger.info(
'user supplied a tag: {0}, sha: {1}'
.format(tag, commit_hash)
)
url = current_app.config['GITHUB_COMMIT_API'].format(
repo=repo,
hash=commit_hash
)
r = requests.get(url)
r.raise_for_status()
payload = r.json()
try:
c = Commit.query.filter_by(
commit_hash=commit_hash,
repository=repo
).one()
if not c.tag and tag:
c.tag = tag
except NoResultFound:
c = Commit(
commit_hash=commit_hash,
timestamp=parser.parse(payload['author']['date']),
author=payload['author']['name'],
repository=repo,
message=payload['message'],
tag=tag if tag else None
)
db.session.add(c)
db.session.commit()
build_docker.delay(c.id)
current_app.logger.info(
"user-received: {}@{}".format(c.repository, c.commit_hash)
)
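# Example invocation (illustrative values) using the 'dockerbuild' command
# registered with the manager at the bottom of this file:
#   python manage.py dockerbuild --repo adsws --tag v1.0.0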
class MakeDockerrunTemplate(Command):
"""
Prints a `Dockerrun.aws.json` to stdout
Usage: manage.py print_task_def -c adsws:2cfd... staging 100 -c biblib:j03b... staging 300
"""
option_list = (
Option(
'--containers',
'-c',
dest='containers',
nargs=3,
action='append'
),
Option(
'--family',
'-f',
dest='family',
)
)
def run(self, containers, family, app=app):
apps = []
with app.app_context():
for container in containers:
try:
repo, commit_hash = container[0].split(':')
except ValueError:
raise ValueError(
'"{}" should look like repo:id'.format(container[0])
)
build = Build.query.join(Commit).filter(
Commit.repository == repo,
or_(Commit.commit_hash == commit_hash, Commit.tag == commit_hash),
).first()
if build is None:
raise NoResultFound("No row found for {}/{}".format(
repo, commit_hash)
)
apps.append(ECSBuilder.DockerContainer(
build=build,
environment=container[1],
memory=container[2],
portmappings=[
{"hostPort": 8080, "containerPort": 80}
] if repo == "adsws" else None,
)
)
tmpl = ECSBuilder(apps, family=family).render_template()
print(tmpl)
return tmpl
class RegisterTaskRevision(Command):
"""
Calls tasks.register_task_definition to update
an ECS task revision
"""
option_list = (
Option('--task', '-t', dest='task_definition'),
)
def run(self, task_definition, app=app):
with app.app_context():
register_task_revision(task_definition)
class UpdateService(Command):
"""
Calls tasks.update_service to update an ECS service
"""
option_list = (
Option('--cluster', '-c', dest='cluster'),
Option('--service', '-s', dest='service'),
Option('--desiredCount', dest='desiredCount', type=int),
Option('--taskDefinition', '-t', dest='taskDefinition'),
)
def run(self, cluster, service, desiredCount, taskDefinition, app=app):
with app.app_context():
update_service(cluster=cluster,
service=service,
desiredCount=desiredCount,
taskDefinition=taskDefinition,
)
class RunTask(Command):
"""
Calls tasks.run_task to run a specific task on an ECS cluster
"""
option_list = (
Option('--cluster', '-c', dest='cluster'),
Option('--count', dest='count', type=int),
Option('--taskDefinition', '-t', dest='taskDefinition'),
)
def run(self, cluster, count, taskDefinition, app=app):
with app.app_context():
run_task(
cluster=cluster,
count=count,
taskDefinition=taskDefinition
)
manager.add_command('run_task', RunTask)
manager.add_command('update_service', UpdateService)
manager.add_command('register_task_def', RegisterTaskRevision)
manager.add_command('db', MigrateCommand)
manager.add_command('createdb', CreateDatabase())
manager.add_command('dockerbuild', BuildDockerImage)
manager.add_command('print_task_def', MakeDockerrunTemplate)
manager.add_command('test_cluster', ManageTestCluster)
if __name__ == '__main__':
manager.run()
|
|
#!/usr/bin/env python
#
# Copyright 2017 Brocade Communications Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Matthew Geldert ([email protected]), Brocade Communications Systems,Inc.
#
import base64
from brocade_neutron_lbaas_tenant_customizations_db import helper \
as customization_helper
import json
from neutronclient.neutron import client as neutron_client
from oslo_config import cfg
from oslo_log import log as logging
import re
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
import socket
from struct import pack
from time import sleep
import yaml
LOG = logging.getLogger(__name__)
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
class OpenStackInterface(object):
def __init__(self):
self.admin_username = cfg.CONF.lbaas_settings.openstack_username
self.admin_password = cfg.CONF.lbaas_settings.openstack_password
# Get Neutron and Nova API endpoints...
keystone = self.get_keystone_client()
neutron_service = keystone.services.find(name="neutron")
nova_service = keystone.services.find(name="nova")
if cfg.CONF.lbaas_settings.keystone_version == "2":
self.neutron_endpoint = keystone.endpoints.find(
service_id=neutron_service.id
).adminurl
self.nova_endpoint = keystone.endpoints.find(
service_id=nova_service.id
).adminurl
else:
self.neutron_endpoint = keystone.endpoints.find(
interface="admin", service_id=neutron_service.id
).url
self.nova_endpoint = keystone.endpoints.find(
interface="admin", service_id=nova_service.id
).url
# Get connector to tenant customizations database if enabled...
if cfg.CONF.lbaas_settings.allow_tenant_customizations is True:
self.customizations_db = customization_helper.\
BrocadeLbaasTenantCustomizationsDatabaseHelper(
cfg.CONF.lbaas_settings.tenant_customizations_db
)
else:
self.customizations_db = None
def create_vtm(self, hostname, lb, password, ports, cluster=None, avoid=None):
"""
Creates a vTM instance as a Nova VM.
"""
user_data = self._generate_user_data(
lb, hostname, password, ports['data'], ports['mgmt'], cluster
)
nics = [{"port": ports['data']['id']}]
if ports['mgmt'] is not None:
nics.insert(0, {"port": ports['mgmt']['id']})
instance = self.create_server(
tenant_id=lb.tenant_id,
hostname=hostname,
user_data=self._generate_cloud_init_file(lb, user_data),
nics=nics,
password=password,
avoid_host_of=avoid
)
self.set_server_lock(lb.tenant_id, instance['id'], lock=True)
self._await_build_complete(lb.tenant_id, instance['id'])
return instance
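    # Note on create_vtm's `ports` argument (inferred from its usage above):
    # it is expected to be a dict with 'data' and 'mgmt' keys, each holding a
    # Neutron port dict (as produced by create_port), with 'mgmt' set to None
    # when no management network is used.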
def destroy_vtm(self, hostname, lb):
port_list = []
sec_grp_list = []
floatingip_list = []
server_id = self.get_server_id_from_hostname(lb.tenant_id, hostname)
neutron = self.get_neutron_client()
# Build lists of ports, floating IPs and security groups to delete
ports = neutron.list_ports(device_id=server_id)
for port in ports['ports']:
port_list.append(port['id'])
sec_grp_list += port['security_groups']
floatingip_list += [
floatingip['id']
for floatingip in neutron.list_floatingips(
port_id=port['id']
)['floatingips']
]
# Delete the instance
self.delete_server(lb.tenant_id, server_id)
# Delete floating IPs
for flip in floatingip_list:
try:
neutron.delete_floatingip(flip)
except Exception as e:
LOG.error(
_("\nError deleting floating IP {}: {}".format(flip, e))
)
# Delete ports
for port in port_list:
try:
neutron.delete_port(port)
except Exception as e:
LOG.error(_("\nError deleting port {}: {}".format(port, e)))
# Delete security groups
for sec_grp in sec_grp_list:
try:
neutron.delete_security_group(sec_grp)
except Exception:
# Might legitimately fail in HA deployments
pass
def clean_up(self, tenant_id, ports=None, security_groups=None,
instances=None, floating_ips=None):
if instances:
for instance in instances:
self.delete_server(tenant_id, instance)
neutron = self.get_neutron_client()
if floating_ips:
for flip in floating_ips:
neutron.delete_floatingip(flip)
if ports:
for port in ports:
neutron.delete_port(port)
if security_groups:
for sec_grp in security_groups:
neutron.delete_security_group(sec_grp)
def vtm_exists(self, tenant_id, hostname):
"""
        Tests whether a vTM instance with the specified hostname exists.
"""
hostname = hostname[0] if isinstance(hostname, tuple) else hostname
try:
self.get_server_id_from_hostname(tenant_id, hostname)
return True
except:
return False
def add_ip_to_ports(self, ip, ports):
"""
Adds IP address to the allowed_address_pairs field of ports.
"""
neutron = self.get_neutron_client()
# Loop through all the ports, typically one per vTM cluster member
for port_id in ports:
port = neutron.show_port(port_id)['port']
port_ips = [
addr['ip_address'] for addr in port['allowed_address_pairs']
]
# Add the IP if it isn't already in allowed_address_pairs
if ip not in port_ips:
port_ips.append(ip)
allowed_pairs = []
for addr in port_ips:
allowed_pairs.append({"ip_address": addr})
neutron.update_port(
port_id, {"port": {"allowed_address_pairs": allowed_pairs}}
)
def delete_ip_from_ports(self, ip, ports):
"""
Deletes IP address from the allowed_address_pairs field of ports.
"""
neutron = self.get_neutron_client()
# Loop through all the ports, typically one per vTM cluster member
for port_id in ports:
port = neutron.show_port(port_id)['port']
port_ips = [
addr['ip_address'] for addr in port['allowed_address_pairs']
]
# Delete the IP if it is in allowed_address_pairs
if ip in port_ips:
new_pairs = []
for port_ip in port_ips:
if ip != port_ip:
new_pairs.append({"ip_address": port_ip})
neutron.update_port(
port_id, {"port": {"allowed_address_pairs": new_pairs}}
)
def _await_build_complete(self, tenant_id, instance_id):
"""
Waits for a Nova instance to be built.
"""
instance = self.get_server(tenant_id, instance_id)
status = instance['status']
while status == 'BUILD':
sleep(10)
instance = self.get_server(tenant_id, instance_id)
status = instance['status']
if status == 'ERROR':
self.delete_server(tenant_id, instance_id)
raise Exception("VM build failed")
def create_port(self, lb, hostname, mgmt_port=False, cluster=False,
create_floating_ip=False, security_group=None):
neutron = self.get_neutron_client()
if mgmt_port is False:
subnet = neutron.show_subnet(lb.vip_subnet_id)
network_id = subnet['subnet']['network_id']
else:
network_id = cfg.CONF.lbaas_settings.management_network
port_config = {"port": {
"admin_state_up": True,
"network_id": network_id,
"tenant_id": lb.tenant_id,
"name": "{}-{}".format("mgmt" if mgmt_port else "data", hostname)
}}
if mgmt_port is False:
port_config['port']['fixed_ips'] = [
{'subnet_id': lb.vip_subnet_id}
]
port = neutron.create_port(port_config)['port']
deployment_model = self._get_setting(
lb.tenant_id, "lbaas_settings", "deployment_model"
)
if deployment_model == "PER_LOADBALANCER":
sec_grp_uuid = lb.id
elif deployment_model == "PER_TENANT":
sec_grp_uuid = lb.tenant_id
if create_floating_ip is True:
floatingip = self.create_floatingip(lb.tenant_id, port['id'])
mgmt_ip = floatingip['floatingip']['floating_ip_address']
if security_group is None:
sec_grp = self.create_lb_security_group(
lb.tenant_id, sec_grp_uuid, mgmt_port=True, cluster=cluster
)
security_group = sec_grp['security_group']['id']
else:
if security_group is None:
if mgmt_port is False:
sec_grp = self.create_lb_security_group(
lb.tenant_id, sec_grp_uuid
)
else:
sec_grp = self.create_lb_security_group(
lb.tenant_id, sec_grp_uuid, mgmt_port=True,
mgmt_label=True, cluster=cluster
)
security_group = sec_grp['security_group']['id']
if mgmt_port is False:
mgmt_ip = None
else:
mgmt_ip = port['fixed_ips'][0]['ip_address']
neutron.update_port(
port['id'], {"port": {"security_groups": [security_group]}}
)
return(port, security_group, mgmt_ip)
def vtm_has_subnet_port(self, hostname, lb):
hostname = hostname[0] if isinstance(hostname, tuple) else hostname
ports = self.get_server_ports(lb.tenant_id, hostname)
for port in ports:
for fixed_ip in port['fixed_ips']:
if fixed_ip['subnet_id'] == lb.vip_subnet_id:
return True
return False
def subnet_in_use(self, lb):
neutron = self.get_neutron_client()
loadbalancers = neutron.list_loadbalancers(
tenant_id=lb.tenant_id,
vip_subnet_id=lb.vip_subnet_id
)['loadbalancers']
if len(loadbalancers) > 1:
return True
return False
def attach_port(self, hostname, lb):
server_id = self.get_server_id_from_hostname(lb.tenant_id, hostname)
sec_grp_id = self.get_security_group_id(
"lbaas-{}".format(lb.tenant_id)
)
port, junk, junk = self.create_port(
lb, hostname, security_group=sec_grp_id
)
self.attach_port_to_instance(lb.tenant_id, server_id, port['id'])
return port
def detach_port(self, hostname, lb):
neutron = self.get_neutron_client()
server_id = self.get_server_id_from_hostname(lb.tenant_id, hostname)
ports = neutron.list_ports(device_id=server_id,)['ports']
for port in ports:
if port['fixed_ips'][0]['subnet_id'] == lb.vip_subnet_id:
self.detach_port_from_instance(
lb.tenant_id, server_id, port['id']
)
neutron.delete_port(port['id'])
return port['fixed_ips'][0]['ip_address']
raise Exception(_(
"No port found for subnet {} on device {}".format(
lb.vip_subnet_id, hostname)
))
def get_security_group_id(self, sec_grp_name):
neutron = self.get_neutron_client()
sec_grps = neutron.list_security_groups(name=sec_grp_name)
try:
return sec_grps['security_groups'][0]['id']
except IndexError:
raise Exception(
_("Security group {} not found".format(sec_grp_name))
)
def create_lb_security_group(self, tenant_id, uuid, mgmt_port=False,
mgmt_label=False, cluster=False):
"""
Creates a security group.
"""
neutron = self.get_neutron_client()
sec_grp_data = {"security_group": {
"name": "{}lbaas-{}".format("mgmt-" if mgmt_label else "", uuid),
"tenant_id": tenant_id
}}
sec_grp = neutron.create_security_group(sec_grp_data)
# If GUI access is allowed, open up the GUI port
gui_access = self._get_setting(
tenant_id, "vtm_settings", "gui_access"
)
if gui_access is True and not mgmt_label:
self.create_security_group_rule(
tenant_id,
sec_grp['security_group']['id'],
port=cfg.CONF.vtm_settings.admin_port
)
# If mgmt_port, add the necessary rules to allow management traffic
# i.e. allow each Services Director to access the REST port of the
# instance
if mgmt_port:
# REST access
source_list = (
[cfg.CONF.lbaas_settings.service_endpoint_address] +
cfg.CONF.lbaas_settings.configuration_source_ips
)
for server in source_list:
self.create_security_group_rule(
tenant_id,
sec_grp['security_group']['id'],
port=cfg.CONF.vtm_settings.rest_port,
src_addr=socket.gethostbyname(server)
)
# If cluster, add necessary ports for intra-cluster comms
if cluster is True:
self.create_security_group_rule(
tenant_id,
sec_grp['security_group']['id'],
port=cfg.CONF.vtm_settings.admin_port,
remote_group=sec_grp['security_group']['id']
)
self.create_security_group_rule(
tenant_id,
sec_grp['security_group']['id'],
port=cfg.CONF.vtm_settings.admin_port,
remote_group=sec_grp['security_group']['id'],
protocol='udp'
)
self.create_security_group_rule(
tenant_id,
sec_grp['security_group']['id'],
port=cfg.CONF.vtm_settings.cluster_port,
remote_group=sec_grp['security_group']['id']
)
self.create_security_group_rule(
tenant_id,
sec_grp['security_group']['id'],
port=cfg.CONF.vtm_settings.cluster_port,
remote_group=sec_grp['security_group']['id'],
protocol='udp'
)
self.create_security_group_rule(
tenant_id,
sec_grp['security_group']['id'],
port=cfg.CONF.vtm_settings.rest_port,
remote_group=sec_grp['security_group']['id']
)
return sec_grp
def create_security_group_rule(self, tenant_id, sec_grp_id, port,
src_addr=None, remote_group=None,
direction="ingress", protocol='tcp'):
"""
        Creates the designated rule in a security group.
"""
if isinstance(port, tuple):
port_min = port[0]
port_max = port[1]
else:
port_min = port
port_max = port
neutron = self.get_neutron_client()
new_rule = {"security_group_rule": {
"direction": direction,
"port_range_min": port_min,
"ethertype": "IPv4",
"port_range_max": port_max,
"protocol": protocol,
"security_group_id": sec_grp_id,
"tenant_id": tenant_id
}}
if src_addr:
new_rule['security_group_rule']['remote_ip_prefix'] = src_addr
if remote_group:
new_rule['security_group_rule']['remote_group_id'] = remote_group
try:
neutron.create_security_group_rule(new_rule)
except Exception as e:
if not e.message.startswith("Security group rule already exists"):
raise
def allow_port(self, lb, port, protocol='tcp'):
"""
Adds access to a given port to a security group.
"""
# Get the name of the security group for the "loadbalancer"
deployment_model = self._get_setting(
lb.tenant_id, "lbaas_settings", "deployment_model"
)
if deployment_model == "PER_LOADBALANCER":
sec_grp_name = "lbaas-{}".format(lb.id)
elif deployment_model == "PER_TENANT":
sec_grp_name = "lbaas-{}".format(lb.tenant_id)
# Get the security group
neutron = self.get_neutron_client()
sec_grp = neutron.list_security_groups(
name=sec_grp_name
)['security_groups'][0]
# Create the required rule
self.create_security_group_rule(
lb.tenant_id, sec_grp['id'], port, protocol=protocol
)
def block_port(self, lb, port, identifier, protocol='tcp', force=False):
"""
Removes access to a given port from a security group.
"""
neutron = self.get_neutron_client()
# Only block the port if not in use by another listener hosted on
# the same vTM
if force is False:
# Get all listeners belonging to this tenant that use this port
listeners = neutron.list_listeners(
tenant_id=lb.tenant_id,
protocol_port=port
)['listeners']
# Create a counter of instances of port for each vTM identifier
identifier_port_counter = {}
processed_lbs = [] # Only count each LB once as they don't allow
# duplicate ports
for listener in listeners:
for loadbalancer in listener['loadbalancers']:
if loadbalancer['id'] in processed_lbs:
continue
processed_lbs.append(loadbalancer['id'])
tmp_lb = neutron.show_loadbalancer(loadbalancer['id'])
identifier = self.get_identifier(tmp_lb['loadbalancer'])
try:
identifier_port_counter[identifier] += 1
except KeyError:
identifier_port_counter[identifier] = 1
this_identifier = self.get_identifier(tmp_lb['loadbalancer'])
# If there is more than one listener on this vTM using the
# port, exit the function without removing it from sec group
if identifier_port_counter[this_identifier] > 1:
return False
# Get the name of the security group for the "loadbalancer"
deployment_model = self._get_setting(
lb.tenant_id, "lbaas_settings", "deployment_model"
)
if deployment_model == "PER_LOADBALANCER":
sec_grp_name = "lbaas-{}".format(lb.id)
elif deployment_model == "PER_TENANT":
sec_grp_name = "lbaas-{}".format(lb.tenant_id)
# Get the security group
sec_grp = neutron.list_security_groups(
name=sec_grp_name
)['security_groups'][0]
# Iterate through all rules in the group and delete the matching one
for rule in sec_grp['security_group_rules']:
if rule['port_range_min'] == port \
and rule['port_range_max'] == port \
and rule['direction'] == "ingress" \
and rule['protocol'] == protocol:
neutron.delete_security_group_rule(rule['id'])
break
def get_identifier(self, lb):
if isinstance(lb, dict):
loadbalancer_id = lb['id']
subnet_id = lb['vip_subnet_id']
tenant_id = lb['tenant_id']
else:
loadbalancer_id = lb.id
subnet_id = lb.vip_subnet_id
tenant_id = lb.tenant_id
deployment_model = self._get_setting(
tenant_id, "lbaas_settings", "deployment_model"
)
if deployment_model == "PER_TENANT":
return tenant_id
elif deployment_model == "PER_LOADBALANCER":
return loadbalancer_id
elif deployment_model == "PER_SUBNET":
return subnet_id
def create_floatingip(self, tenant_id, port_id):
neutron = self.get_neutron_client()
network = cfg.CONF.lbaas_settings.management_network
floatingip_data = {"floatingip": {
"floating_network_id": network,
"port_id": port_id,
"tenant_id": tenant_id
}}
return neutron.create_floatingip(floatingip_data)
def get_network_for_subnet(self, subnet_id):
neutron = self.get_neutron_client()
return neutron.show_subnet(subnet_id)['subnet']['network_id']
def create_server(self, tenant_id, hostname, user_data, nics, password,
avoid_host_of=None):
"""
Creates a Nova instance of the vTM image.
"""
image_id = self._get_setting(tenant_id, "lbaas_settings", "image_id")
flavor_id = self._get_setting(tenant_id, "lbaas_settings", "flavor_id")
token, endpoint = self.get_nova_connection_data(tenant_id)
headers = {
"Content-Type": "application/json",
"X-Auth-Token": token
}
body = {"server": {
"imageRef": image_id,
"flavorRef": flavor_id,
"name": hostname,
"user_data": base64.b64encode(user_data),
"adminPass": password,
"networks": nics,
"config_drive": True
}}
specify_az = self._get_setting(tenant_id,"lbaas_settings","specify_az")
if specify_az is True:
if hostname.endswith("-sec"):
body['server']['availability_zone'] = \
self._get_setting(tenant_id,"lbaas_settings","secondary_az")
else:
body['server']['availability_zone'] = \
self._get_setting(tenant_id,"lbaas_settings","primary_az")
if avoid_host_of is not None:
body['os:scheduler_hints'] = {
"different_host": [avoid_host_of]
}
try:
response = requests.post(
"{}/servers".format(endpoint),
data=json.dumps(body),
headers=headers
)
if response.status_code >= 300:
raise Exception("{}: {}".format(
response.status_code, response.text
))
except Exception as e:
LOG.error(_("\nError creating vTM instance: {}".format(e)))
return response.json()['server']
def get_server(self, tenant_id, server_id):
token, endpoint = self.get_nova_connection_data(tenant_id)
response = requests.get(
"{}/servers/{}".format(endpoint, server_id),
headers={"X-Auth-Token": token}
)
if response.status_code != 200:
raise Exception("Server Not found")
return response.json()['server']
def attach_port_to_instance(self, tenant_id, server_id, port_id):
self.set_server_lock(tenant_id, server_id, lock=False)
token, endpoint = self.get_nova_connection_data(tenant_id)
response = requests.post(
"{}/servers/{}/os-interface".format(endpoint, server_id),
data=json.dumps({"interfaceAttachment": { "port_id": port_id}}),
headers={"X-Auth-Token": token, "Content-Type": "application/json"}
)
self.set_server_lock(tenant_id, server_id, lock=True)
if response.status_code != 200:
raise Exception(
"Unable to attach port '{}' to instance '{}': {}".format(
port_id, server_id, response.text
))
def detach_port_from_instance(self, tenant_id, server_id, port_id):
self.set_server_lock(tenant_id, server_id, lock=False)
token, endpoint = self.get_nova_connection_data(tenant_id)
response = requests.delete(
"{}/servers/{}/os-interface/{}".format(
endpoint, server_id, port_id
),
headers={"X-Auth-Token": token}
)
self.set_server_lock(tenant_id, server_id, lock=True)
if response.status_code != 202:
raise Exception(
"Unable to detach port '{}' from instance '{}': {}".format(
port_id, server_id, response.text
))
def get_mgmt_ip(self, tenant_id, hostname):
neutron = self.get_neutron_client()
mgmt_net = neutron.show_network(
cfg.CONF.lbaas_settings.management_network
)['network']['name']
server_id = self.get_server_id_from_hostname(tenant_id, hostname)
server = self.get_server(tenant_id, server_id)
return server['addresses'][mgmt_net][0]['addr']
def set_server_lock(self, tenant_id, server_id, lock=True):
token, endpoint = self.get_nova_connection_data(tenant_id)
response = requests.post(
"{}/servers/{}/action".format(endpoint, server_id),
headers={
"X-Auth-Token": token,
"Content-Type": "application/json"
},
data='{{ "{}": null }}'.format("lock" if lock else "unlock")
)
if response.status_code != 202:
raise Exception("Failed to lock server {}".format(server_id))
def get_server_ports(self, tenant_id, hostname):
"""
        Gets the Neutron data ports of a vTM instance.
"""
neutron = self.get_neutron_client()
server_id = self.get_server_id_from_hostname(tenant_id, hostname)
all_ports = neutron.list_ports(device_id=server_id)['ports']
data_ports = [
port for port in all_ports
if not port['name'].startswith("mgmt")
]
if data_ports:
return data_ports
raise Exception("No data ports found for {}".format(hostname))
def get_server_port_ids(self, tenant_id, hostname):
ports = self.get_server_ports(tenant_id, hostname)
return [port['id'] for port in ports]
def get_server_id_from_hostname(self, tenant_id, hostname):
"""
Gets the Nova ID of a server from its hostname.
"""
token, endpoint = self.get_nova_connection_data(tenant_id)
response = requests.get(
"{}/servers?name={}".format(endpoint, hostname),
headers={"X-Auth-Token": token}
)
try:
return response.json()['servers'][0]['id']
except Exception:
raise Exception("Server not found")
def delete_server(self, tenant_id, server_id):
"""
Deletes a Nova instance.
"""
self.set_server_lock(tenant_id, server_id, lock=False)
token, endpoint = self.get_nova_connection_data(tenant_id)
requests.delete(
"{}/servers/{}".format(endpoint, server_id),
headers={"X-Auth-Token": token}
)
def get_subnet_gateway(self, subnet_id):
neutron = self.get_neutron_client()
subnet = neutron.show_subnet(subnet_id)['subnet']
ports = neutron.list_ports(network_id=subnet['network_id'])['ports']
for port in ports:
for fixed_ip in port['fixed_ips']:
if fixed_ip['ip_address'] == subnet['gateway_ip']:
return (subnet['gateway_ip'], port['mac_address'])
return (None, None)
def get_nova_connection_data(self, tenant_id):
token = self.get_auth_token(tenant_id=tenant_id)
endpoint = self.nova_endpoint.replace("$(tenant_id)s", tenant_id)
endpoint = endpoint.replace("%(tenant_id)s", tenant_id)
return (token, endpoint)
def get_neutron_client(self):
auth_token = self.get_auth_token()
neutron = neutron_client.Client(
'2.0', endpoint_url=self.neutron_endpoint, token=auth_token
)
neutron.format = 'json'
return neutron
def get_keystone_client(self, tenant_id=None, tenant_name=None):
auth_url = re.match(
"^(https?://[^/]+)",
cfg.CONF.keystone_authtoken.auth_uri
).group(1)
if cfg.CONF.lbaas_settings.keystone_version == "2":
from keystoneclient.v2_0 import client as keystone_client
auth_url = "{}/v2.0".format(auth_url)
else:
from keystoneclient.v3 import client as keystone_client
auth_url = "{}/v3".format(auth_url)
param = {}
if tenant_id:
param['tenant_id'] = tenant_id
elif tenant_name:
param['tenant_name'] = tenant_name
else:
param['tenant_name'] = "admin"
return keystone_client.Client(
username=self.admin_username,
password=self.admin_password,
auth_url=auth_url,
**param
)
def get_auth_token(self, tenant_id=None, tenant_name=None):
keystone_client = self.get_keystone_client(tenant_id, tenant_name)
return keystone_client.auth_token
def get_subnet_netmask(self, subnet_id):
neutron = self.get_neutron_client()
subnet = neutron.show_subnet(subnet_id)['subnet']
return self.get_netmask(subnet['cidr'])
def get_netmask(self, cidr):
mask = int(cidr.split("/")[1])
bits = 0xffffffff ^ (1 << 32 - mask) - 1
return socket.inet_ntoa(pack('>I', bits))
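    # Worked example for get_netmask (sketch): for cidr "10.0.0.0/24",
    # mask = 24, so bits = 0xffffffff ^ ((1 << 8) - 1) = 0xffffff00,
    # which pack/inet_ntoa render as "255.255.255.0".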
def _get_setting(self, tenant_id, section, param):
setting = None
if self.customizations_db:
setting = self.customizations_db.get_customization(
tenant_id, section, param
)
if setting is None:
global_section = getattr(cfg.CONF, section)
setting = getattr(global_section, param)
return setting
def _generate_user_data(self, lb, hostname, password, data_port, mgmt_port,
cluster_data=None):
neutron = self.get_neutron_client()
static_routes = {}
return_path_routes = None
data_subnet = neutron.show_subnet(
data_port['fixed_ips'][0]['subnet_id']
)['subnet']
gui_access = self._get_setting(
data_port['tenant_id'], "vtm_settings", "gui_access"
)
deployment_model = self._get_setting(
lb.tenant_id, "lbaas_settings", "deployment_model"
)
# Set return-path routes
if deployment_model == "PER_TENANT":
gateway_ip, gateway_mac = self.get_subnet_gateway(data_subnet['id'])
if gateway_ip is not None and gateway_mac is not None:
return_path_routes = [{"mac": gateway_mac, "ipv4": gateway_ip}]
# Set nameservers
nameservers = self._get_setting(
data_port['tenant_id'], "vtm_settings", "nameservers"
)
try:
nameservers = nameservers.split(",")
except AttributeError:
pass
# Get bind IP for management services
if mgmt_port:
bind_ip = mgmt_port['fixed_ips'][0]['ip_address']
else:
bind_ip = data_port['fixed_ips'][0]['ip_address']
host_entries = {}
access_ips = []
z_initial_config_data = {
"accept_license": "accept",
"dns": " ".join(nameservers),
"hostname": hostname,
"license_key": "",
"nameip": bind_ip,
"net_gateway": data_subnet['gateway_ip'],
"net_management": "" if gui_access else bind_ip,
"password": password,
"rest_enabled": "Y",
"rest_port": cfg.CONF.vtm_settings.rest_port,
"search": "",
"ssh_intrusion": "Y",
"timezone": cfg.CONF.vtm_settings.timezone
}
if mgmt_port:
mgmt_subnet = neutron.show_subnet(
mgmt_port['fixed_ips'][0]['subnet_id']
)['subnet']
static_routes['eth0'] = mgmt_subnet['host_routes']
static_routes['eth1'] = data_subnet['host_routes']
host_entries[hostname] = mgmt_port['fixed_ips'][0]['ip_address']
z_initial_config_data['net_eth0_addr'] = \
mgmt_port['fixed_ips'][0]['ip_address']
z_initial_config_data['net_eth0_mask'] = self.get_netmask(
mgmt_subnet['cidr']
)
z_initial_config_data['net_eth1_addr'] = \
data_port['fixed_ips'][0]['ip_address']
z_initial_config_data['net_eth1_mask'] = self.get_netmask(
data_subnet['cidr']
)
else:
static_routes['eth0'] = data_subnet['host_routes']
host_entries[hostname] = data_port['fixed_ips'][0]['ip_address']
z_initial_config_data['net_eth0_addr'] = \
data_port['fixed_ips'][0]['ip_address']
z_initial_config_data['net_eth0_mask'] = self.get_netmask(
data_subnet['cidr']
)
z_initial_config_text = "\n".join([
"{}={}".format(k, v) for k, v in z_initial_config_data.iteritems()
])
cluster_target = None
if cluster_data:
host_entries[cluster_data['peer_name']] = cluster_data['peer_addr']
if cluster_data['is_primary'] is False:
cluster_target = cluster_data['peer_addr']
if gui_access is not True:
access_ips = (
[cfg.CONF.lbaas_settings.service_endpoint_address] +
cfg.CONF.lbaas_settings.configuration_source_ips
)
if cluster_data:
access_ips.append(cluster_data['peer_addr'])
return {
"z-initial-config": z_initial_config_text,
"cluster_target": cluster_target,
"host_entries": host_entries,
"static_routes": static_routes,
"return_path_routes": return_path_routes,
"access_ips": access_ips,
"password": password,
"hostname": hostname,
"bind_ip": bind_ip,
"clustered": True if cluster_data else False,
"tenant_id": data_port['tenant_id']
}
def _generate_cloud_init_file(self, lb, config_data):
cloud_config = {
"write_files": [
{
"path": "/root/z-initial-config-replay",
"content": config_data['z-initial-config']
},
],
"runcmd": [
"echo regenerate-uuid | zcli",
"export ZEUSHOME=/opt/zeus",
("z-initial-config --replay-from=/root/z-initial-config-replay"
" --noloop --noninteractive")
]
}
# Add static routes
route_table = []
for interface, routes in config_data['static_routes'].iteritems():
route_table.extend([
{
"name": route['destination'].split("/")[0],
"if": interface,
"mask": self.get_netmask(route['destination']),
"gw": route['nexthop']
}
for route in routes
])
cloud_config["write_files"].append({
"path": "/root/routes-data",
"content": json.dumps(
{"properties": {"appliance": {
"routes": route_table
}}})
})
cloud_config["runcmd"].append(
'curl -k -X PUT -H "Content-Type: application/json" '
'--data @/root/routes-data --user "admin:{0}" '
'https://{1}:{2}/api/tm/2.0/config/active/traffic_managers/{1}'
''.format(config_data['password'],
config_data['bind_ip'],
cfg.CONF.vtm_settings.rest_port
))
# If per-tenant deployment model, enable return-path routing
deployment_model = self._get_setting(
lb.tenant_id, "lbaas_settings", "deployment_model"
)
if deployment_model == "PER_TENANT":
cloud_config["write_files"].append({
"path": "/root/return-path-routes-data",
"content": json.dumps(
{"properties": {
"appliance": {
"return_path_routing_enabled": True
},
"ip": {
"appliance_returnpath": config_data['return_path_routes']
}
}})
})
cloud_config["runcmd"].append(
'curl -k -X PUT -H "Content-Type: application/json" '
'--data @/root/return-path-routes-data --user "admin:{0}" '
'https://{1}:{2}/api/tm/2.0/config/active/global_settings'
''.format(config_data['password'],
config_data['bind_ip'],
cfg.CONF.vtm_settings.rest_port)
)
# Add split-brain recovery mechanism to primary cluster member only
if config_data['clustered'] is True \
and not config_data['cluster_target']:
cloud_config['write_files'].append({
"path": "/opt/zeus/zxtm/conf/events/allmachinesok",
"content": (
"type!faulttolerance!event_tags allmachinesok\n"
"actions sync-cluster"
)
})
cloud_config['write_files'].append({
"path": "/opt/zeus/zxtm/conf/actions/sync-cluster",
"content": (
"program sync-cluster.py\n"
"type program"
)
})
cloud_config['write_files'].append({
"path": "/opt/zeus/zxtm/conf/actionprogs/sync-cluster.py",
"permissions": "'0755'",
"content": (
"""#!/usr/bin/env python
import requests
import socket
import subprocess
def get_last_local_update():
with open("/opt/zeus/zxtm/conf/extra/last_update") as f:
last_update = f.readline()
return int(last_update.strip())
def get_last_remote_update():
local_hostname = socket.gethostname()
if local_hostname.endswith("-pri"):
remote_hostname = local_hostname[:-3] + "sec"
else:
remote_hostname = local_hostname[:-3] + "pri"
url = "https://%s:9070/api/tm/3.5/config/active/extra_files/last_update" % (
remote_hostname
)
last_update = requests.get(url, auth=('admin', '{0}'), verify=False).text
return int(last_update.strip())
def main():
if get_last_local_update() > get_last_remote_update():
subprocess.call(["/opt/zeus/zxtm/bin/replicate-config"])
if __name__ == '__main__':
main()
""".format(config_data['password']))
})
# Config for GUI options
gui_access = self._get_setting(
config_data['tenant_id'], "vtm_settings", "gui_access"
)
if gui_access is True \
and config_data['cluster_target'] is None:
cloud_config["write_files"].append({
"path": "/opt/zeus/zxtm/conf/groups/LBaaS",
"content": """
Web_Cache none
Pools!Edit!Load_Balancing none
SSL!SSL_Certs!New none
Virtual_Servers!Edit!Request_Logging none
Java none
Pools!Edit!SSL none
Event_Log full
SSL!DNSSEC_Keys none
Monitors ro
Event_Log!Event_Archive none
Virtual_Servers!Edit!GLB_Services none
Cloud_Credentials none
Virtual_Servers!Edit!Connection_Management none
SSL!SSL_Certs!Edit none
Wizard none
Pools!Edit!Persistence none
Security none
Traffic_IP_Groups!Networking none
Support_Files none
Routing none
AFM none
Shutdown none
Pools!Edit!DNSAutoscaling none
Traffic_Managers none
DateTime none
Log_Viewer none
Bandwidth none
SSL!SSL_Certs!Import none
Request_Logs none
DNS_Server none
Virtual_Servers!Edit!Aptimizer_Settings none
Connections full
SNMP none
Reboot none
Virtual_Servers!Edit!Request_Tracing none
SOAP_API none
Virtual_Servers ro
Map ro
Networking none
Pools ro
Diagnose!Replicate none
Support full
Global_Settings none
description Group for OpenStack LBaaS users
Pools!Edit!Connection_Management none
Appliance_Console none
Catalog none
SLM none
SSL ro
Kerberos none
Locations none
Monitoring full
Fault_Tolerance none
Service_Protection none
Persistence ro
Alerting none
Virtual_Servers!Edit!Content_Compression none
SSL!CAs none
Audit_Log none
Pools!Edit!Bandwidth none
Backup none
Pools!Edit!Monitors none
Virtual_Servers!Edit!Content_Caching none
Extra_Files none
Statd full
Virtual_Servers!Edit!SSL_Decryption none
Rate none
Help none
Pools!Edit!Autoscaling none
GLB_Services none
Restart none
Aptimizer none
Authenticators none
Custom none
Event_Log!Clear none
Virtual_Servers!Edit!Rules none
Virtual_Servers!Edit!Classes none
Traffic_IP_Groups ro
Pools!Edit!Kerberos_Protocol_Transition none
Rules none
License_Keys none
Draining none
Virtual_Servers!Edit!Kerberos_Protocol_Transition none
Traffic_IP_Groups!Edit ro
SSL!Client_Certs none
Sysctl none
Persistence!Edit none
SSL!SSL_Certs ro
Access_Management none
Diagnose ro
Monitors!Edit none
MainIndex ro
Virtual_Servers!Edit!DNS_Server none
Config_Summary ro
"""})
cloud_config["runcmd"].append(
'echo "Users.addUser monitor, password, LBaaS" | /opt/zeus/zxtm/bin/zcli'
)
# Add host entries to configuration
if config_data['host_entries']:
cloud_config["write_files"].append({
"path": "/root/hosts-data",
"content": json.dumps(
{"properties": {"appliance": {
"hosts": [
{"name": name, "ip_address": ip}
for name, ip in \
config_data['host_entries'].iteritems()
]
}}})
})
cloud_config["runcmd"].append(
'curl -k -X PUT -H "Content-Type: application/json" '
'--data @/root/hosts-data --user "admin:{0}" '
'https://{1}:{2}/api/tm/2.0/config/active/traffic_managers/{1}'
''.format(config_data['password'],
config_data['bind_ip'],
cfg.CONF.vtm_settings.rest_port
))
# Join secondary member to cluster
if config_data['cluster_target'] is not None:
cloud_config["write_files"].append({
"path": "/root/join-cluster",
"permissions": "0755",
"content": (
"""#!/opt/zeus/perl/miniperl -w
BEGIN {{ unshift @INC, "/opt/zeus/zxtm/lib/perl";
unshift @INC, "/opt/zeus/zxtmadmin/lib/perl"; }}
use Zeus::ZXTM::Configure;
my %certs = Zeus::ZXTM::Configure::CheckSSLCerts( [ "{0}:9090" ] );
Zeus::ZXTM::Configure::RegisterWithCluster("admin", "{1}", [ "{0}:9090" ],
undef, {{ "{0}:9090" => $certs{{"{0}:9090"}}->{{fp}} }}, "yes", undef, 1);
""".format(config_data['cluster_target'], config_data['password']))
})
cloud_config["runcmd"].append("/root/join-cluster")
# Set admin SSH port
ssh_port = self._get_setting(lb.tenant_id, "vtm_settings", "ssh_port")
if ssh_port != 22:
cloud_config["write_files"].append({
"path": "/root/ssh-port-data",
"content": json.dumps(
{"properties": {"appliance": {
"ssh_port": ssh_port
}}})
})
cloud_config["runcmd"].append(
'curl -k -X PUT -H "Content-Type: application/json" '
'--data @/root/ssh-port-data --user "admin:{0}" '
'https://{1}:{2}/api/tm/2.0/config/active/traffic_managers/{1}'
''.format(config_data['password'],
config_data['bind_ip'],
cfg.CONF.vtm_settings.rest_port
))
# Lock down security if end-user doesn't have GUI access
if gui_access is not True:
cloud_config["write_files"].append({
"path": "/root/access-data",
"content": json.dumps(
{"properties": {"basic": {
"access": config_data['access_ips']
}}})
})
cloud_config["runcmd"].append(
'curl -k -X PUT -H "Content-Type: application/json" '
'--data @/root/access-data --user "admin:{0}" '
'https://{1}:{2}/api/tm/2.0/config/active/security'
''.format(config_data['password'],
config_data['bind_ip'],
cfg.CONF.vtm_settings.rest_port)
)
return "#cloud-config\n\n" + yaml.dump(
cloud_config, default_flow_style=False
)
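# Rough illustration only (hypothetical values; the real output depends on the
# settings and config_data above): the returned user-data is plain cloud-config
# YAML, shaped roughly like
#
#   #cloud-config
#
#   runcmd:
#   - echo regenerate-uuid | zcli
#   - export ZEUSHOME=/opt/zeus
#   - z-initial-config --replay-from=/root/z-initial-config-replay --noloop --noninteractive
#   - curl -k -X PUT ... https://<bind_ip>:<rest_port>/api/tm/2.0/config/active/traffic_managers/<bind_ip>
#   write_files:
#   - content: <z-initial-config replay text>
#     path: /root/z-initial-config-replay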
import MySQLdb
class QueryBuilder():
"""
contiene le utilities per generare le queries da usare in interrogazioni
@note: il risultato del filtro e' ottenuto intersecando i risultati di un massimo di tre query con l'insieme delle anagrafiche derivante dalla selezione geografica:
in particolare viene eseguita una query per ogni tab della gui in cui viene selezionato qualcosa
@note: il criterio per la costruzione dei filtri richiede: che i parametri appartenenti allo stesso tab vengano messi in AND, cioe'
voglio trovare i pv che abbiano tre addetti e 2 vetrine e piu' di quattro vetrine, inoltre iparametri appartenenti alla stessa classe devono essere in OR, cio' significa
cerca i pv che abbiano 1, 2 o 3 addetti, i parametri tra i diversi tab sono in AND cioe'
voglio i pv che abbiano potenziale mmas 80 o 90 e tre o quattro addetti e 2 vetrine, i tab vengono messi in AND tramite l'intersezione tra i risultati delle queries
"""
def __init__(self):
self.columns={'parametri':['tc_clpar_id', 'tc_par_id'], 'marchi':['tc_clmar_id', 'tc_mar_id'], 'potenziali':['tc_clpot_id', 'tc_pot_id']}
self.Query={"parametri":"SELECT pv_id FROM rel_pv_par r where ",
"marchi":"SELECT pv_id FROM rel_pv_mar where ",
"potenziali":"SELECT pv_id FROM rel_pv_pot r where "}
self.d=[[8L, 82L], [10L, 101L], [10L, 102L]]
def groupList(self,l):
"""
produce le liste di parametri separati per class_id
genera un dict le cui chiavi sono il class_id, mentre i valori sono le liste dei parameters_id che afferiscono alla classe
@param l: [[int,int]] con il seguente significato [class_id,parameter_id]
@return: {int:[int]}{class_id:[parameter_id]}: la chiave e' il class_id degli elementi in l,
"""
groups={}
for k in l:
if not (groups.has_key(k[0])):
groups[k[0]]=[]
groups[k[0]].append(k[1])
else:
groups[k[0]].append(k[1])
return groups
#print groupList(d)
def selectFormatter(self,key,d):
"""
genera le clausole select da unificare con union all
@param key: string 'parametri','marchi','potenziali' chiave che identifica il tab
@param {int:string}:{class_id:string} out di orPackager
@return: [string]
"""
query= self.Query[key]
l=[]
for o in d.itervalues():
l.append("( %s)"%(query+o))
return l
def queryUnifier(self,d):
"""
congiunge le select generate da selectFormatter tramite union all
@param [string]:
@return: string
"""
queryHead="select * from ("
queryTail=")as tb1 GROUP BY tb1.pv_id HAVING COUNT(*) = %d"%len(d)
query=""
for s in d:
query=query+s+" union all"
#strip the trailing " union all"
query=query[0:len(query)-9]
query= queryHead+query+queryTail
return query
def coupleMaker(self,d,key):
"""
@param {class_id:[parameter_id]}: prende l'output di groupList e genera le condizioni and che formeranno la query del filtro: coppie del tipo tc_cl_mar_id= key and tc_mar_id=parameter_id
@return:{k:[string]} la chiave e' la stessa del dict in input
@note: l'output e' una lista perche' cosi' e' piu' facile gestire i casi speciali del primo e dell'ultimo elemento, infatti questi saranno combinati con degli or che vanno posizionati solo tra gli elementi interni
"""
l={}
for k in d.iterkeys():
l[k]=[]
for p in d[k]:
l[k].append("(%s=%d and %s=%d)"%(self.columns[key][0],k,self.columns[key][1],p))# i.e. (tc_cl.._id = dict key, tc_.._id = parameter_id)
return l
def orPackager(self,l):
"""
combina le liste in uscita da keyList2keyCouple con clausole 'or'
@param {class_id:[string]} { class_id:[coupleFormatter]}:output di keyList2keyCouple
@return: {class_id:string}
@note: il risultato deve ancora essere lavorato: gli elementi del dict vanno messi in and tramite andPackager
"""
dictOut={}
for k in l.iterkeys():
if not dictOut.has_key(k):
dictOut[k]=[]
preOut="( "
for i in range(0,len(l[k])):
preOut=preOut+str(l[k][i])
if i<len(l[k])-1:
preOut=preOut+" or "
else:
preOut=preOut+")"
dictOut[k]=preOut
return dictOut
def andPackager(self,l):
"""
costituisce l'ultimo step nella preparazione della query riceve l'uscita di orPackager e ritorna la clausola where da aggiungere alla query
@param {class_id:[string]}:out di orPackager
@return: string
"""
q="("
for e in l.iterkeys():
q=q+l[e]+ " and "
#strip the trailing " and " and close the clause
q=q[0:len(q)-4]+")"
return q
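# For example (a sketch, dict iteration order aside): with
#   l = {8: "( (A))", 10: "( (B) or (C))"}
# the method returns roughly "(( (A)) and ( (B) or (C)) )".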
def groupMaker(self,l):
"""
combina assieme le clausole nelle coppie AND con la clausola OR
@param [string] : l'input del metodo e' l'output di coupleMaker
@return: string: l'insieme delle clausole da aggiungere alla query
"""
gm=" "
for i in range(0,len(l)):
gm=gm+l[i]
if i<len(l)-1:# add " or " only between inner elements
gm=gm+" or "
return gm
def coupleFormatter(self,key,class_id,parameter_id):
"""
genera una "coppia and" del tipo '(tc_cl_.._id =class_id and tc_.._id=parameter_id' cioe' mette assieme classe parametro e parametro id
risponde al requisitocerca i pv che classe parametro = addetti e parametro_id= 3 addetti etc., va ripetuto per ogni parametro selezionato
@param class_id: int id della classe
@param parameter_id: id del parametro
@return: string
"""
couple="(%s=%d and %s=%d)"%(self.columns[key][0],int(class_id),self.columns[key][1],int(parameter_id))
return couple
def buildQuery(self,l,key):
"""
produce la query dalla lista dei parametri
@param [[class_id,parameter_id]]:[[int,int]]
@return: string
"""
gl=self.groupList(l)#{8L: [82L], 10L: [102L, 101L]}#
#gl=db.groupList(d)
cplm=self.coupleMaker(gl, key)
op=self.orPackager(cplm)
sf= self.selectFormatter(key,op)
query= self.queryUnifier(sf)
return query
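# Illustrative trace (a sketch, assuming key='parametri' and the sample list
# self.d defined in __init__): buildQuery(self.d, 'parametri') yields a query
# shaped roughly like
#   select * from (
#     ( SELECT pv_id FROM rel_pv_par r where ( (tc_clpar_id=8 and tc_par_id=82)) )
#     union all
#     ( SELECT pv_id FROM rel_pv_par r where ( (tc_clpar_id=10 and tc_par_id=101)
#        or (tc_clpar_id=10 and tc_par_id=102)) )
#   ) as tb1 GROUP BY tb1.pv_id HAVING COUNT(*) = 2
# i.e. the AND between classes is implemented with UNION ALL plus
# HAVING COUNT(*) = <number of classes>.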
class dbQuery():
"""
classe per la gestione del db per conto di interrogazioni
"""
def __init__(self,user,activeDb):
self.user=user
self.activeDb=activeDb
self.maker=QueryBuilder()
# self.key='parametri'
#dictionary mapping each tab to the columns used in the queries
self.columns={'parametri':['tc_clpar_id', 'tc_par_id'], 'marchi':['tc_clmar_id', 'tc_mar_id'], 'potenziali':['tc_clpot_id', 'tc_pot_id']}
#dictionary of the base queries to use
self.Query={"parametri":"SELECT pv_id FROM rel_pv_par where ",
"marchi":"SELECT pv_id FROM rel_pv_mar where ",
"potenziali":"SELECT pv_id FROM rel_pv_pot where "}
# tables to use
self.dictTables={'parametri':'tc_clpar', 'marchi':'tc_clmar', 'potenziali':'tc_clpot'}
#specifies the columns to use in the queries
self.dictValue={'parametri':'tc_par', 'marchi':'tc_mar', 'potenziali':'tc_pot'}
self.dictPolita={'parametri':'tc_clpar', 'marchi':'tc_mar', 'potenziali':'tc_clpot'}
self.query="select %s_id, testo,tc_stato_id from %s where tc_stato_id=1"
# queries used to retrieve the checkbox labels when reloading old (saved) queries
self.dictReverseQuery={}
#populate the reverse queries
self.dictReverseQuery['parametri']="select testo from tc_par where tc_clpar_id=%d and tc_par_id=%d"
self.dictReverseQuery['potenziali']="SELECT testo FROM tc_pot t where tc_clpot_id=%d and tc_pot_id=%d"
self.dictReverseQuery['marchi']="SELECT testo FROM tc_mar t where tc_mar_id=%d"
self.db = self.activeDb.getConnection()#MySQLdb.connect(host="localhost", user="root", passwd="ringil-87",db="parafarmacie",charset="utf8",use_unicode=True)
self.crs=self.db.cursor()
def getMarchiText(self,Id,dummy):
"""
@note recupera il testo relativo alle marche
@param Id: [int] id e' una lista perche' questa funzione deve essere compatibile con getOtherText quindi piu' che di una lista si tratta di una tupla
@param dummy: variabile di comodo per le stesse ragioni di Id
@return: String
"""
#interrogo il db
self.crs.execute(self.dictReverseQuery['marchi']%(Id[0]))# uso le quadre per compatibilita
#ricavo un solo item, poche' fetchall ritorna una lista di tuple
text= self.crs.fetchall()[0][0]
return text
def getOtherText(self,(classe,Id),key):
"""
@note: recupera il testo relativo a parametri e potenziali
@param id: Integer
@param classe: integer
@param key: 'marchi'/'parametri'/'potenziali'
@return: String
"""
query= self.dictReverseQuery[key]%(classe,Id)
self.crs.execute(query)
rs=self.crs.fetchall()
return rs[0][0]
def getAllText(self,tab,*param):
"""
@note usa i giusti metodi per ricavare il testo per tutte le chiavi
@param *param: id/ id,classe
"""
func={}
func['marchi']=self.getMarchiText
func['potenziali']=self.getOtherText
func['parametri']=self.getOtherText
text=func[tab](param,tab)
#print "dbQuery.getAllText",text
return text
def getPvById(self, Id):
"""
interroga il db e ritorna i parametri di un Pv dato il suo Id
@param Id: Integer
@return: ((),())
"""
query="select nome1, indirizzo,pv_id,cliente,cap, cod_cliente,comune,provincia,tel1,tel2,tel3,cf_pi from pv where pv_id= %d"%(Id)
##print query
self.crs.execute(query)
return self.crs.fetchall()
def addOr(self, b):
"""
implementa la logica per introdurre la clausola 'or' nella query
lo or e' usato tra regole che afferiscono a tab diversi della gui interrogazioni
@param b: Boolean
@note: e' usata da interrogazioni
"""
if b:
return 'or '
else:
return ""
def getResultSet(self, query):
"""
esegue la query sul db di parafarmacie tramite semplice mysqldb
@todo: check se realmente serve controllare se query!=[]; if introdotto con il passaggio a sqlalchemy=> forse non serve
"""
if query!=[]:
self.crs.execute(query)
return self.crs.fetchall()
else:
return ""
def coupleMaker(self,d,key):
"""
@param {class_id:[parameter_id]}: riceve l'uscita di groupList e genera le condizioni and che formeranno la query del filtro: coppie del tipo tc_cl_mar_id= key and tc_mar_id=parameter_id
@return:{k:[string]} la chiave e' la stessa del dict in input
@note: l'output e' una lista perche' cosi' e' piu' facile gestire i casi speciali del primo e dell'ultimo elemento, infatti questi saranno combinati con degli or che vanno posizionati solo tra gli elementi interni
"""
l={}
for k in d.iterkeys():
l[k]=[]
for p in d[k]:
l[k].append("(%s=%d and %s=%d)"%(self.columns[key][0],k,self.columns[key][1],p))# i.e. (tc_cl.._id = dict key, tc_.._id = parameter_id)
return l
def groupMaker(self,l):
"""
combina assieme le clausole nelle coppie and con la clausola or
@param [string] : l'input del metodo e' l'output di coupleMaker
@return: string: l'insieme delle clausole da aggiungere alla query
"""
gm=" "
for i in range(0,len(l)):
gm=gm+l[i]
if i<len(l)-1:# add " or " only between inner elements
gm=gm+" or "
return gm
def coupleCombiner(self,di):
"""
combina assieme i gruppi di parametri prodotti da coupleMaker in modo che i parametri di classi diverse siano combinati mediante and mentre i parametri della stessa classe siano collegati tramite or
@param
@return: string
"""
query=" "
first= True
for k in di.iterkeys():
firstOr=True
#if it is not the first element of the dict, join it to the previous ones with AND
if not first:
query=""+query+") and ("
else:
query=query+"("
first=False
for c in di[k]:
if not firstOr:
query=query+" or "
else:
firstOr=False
query=query+str(c)
#each class parenthesis is closed by the ") and (" prepended for the next class
#close the parenthesis of the last class
query=query+")"
return query
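# For example (a sketch, dict iteration order aside): with
#   di = {8: ["(A)"], 10: ["(B)", "(C)"]}
# the method returns something like " ((A)) and ((B) or (C))".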
def groupList(self,l):
"""
produce le liste di parametri separati per class_id
genera un dict le cui chiavi sono il class_id, mentre i valori sono le liste dei parameters_id che afferiscono alla classe
@param l: [[int,int]] con il seguente significato [class_id,parameter_id]
@return: {int:[int]}{class_id:[parameter_id]}: la chiave e' il class_id degli elementi in l,
"""
groups={}
for k in l:
if not (groups.has_key(k[0])):
groups[k[0]]=[k[1]]
else:
groups[k[0]].append(k[1])
return groups
def queryBuilder(self, key, l):
"""
costruisce la query sulla base della key<=> tab
@param key: <'parametri','marchi','potenziali'>
@param l:{'parametri':[(classe,id_parametro)],'marchi':[(classe,id_parametro)],'potenziali':[(classe,id_parametro)]}
"""
#estraggo la lista dei parametri relativi a key
parameters=l[key]
query=self.maker.buildQuery(parameters,key)#self.addwhere(query, parameters, key)
##print "query from queryBuilder: %s"%query
return query
def getValuesBrand(self, idClass):
brands=self.getBrands(self.getIdBrands( idClass))
return brands
def getValuesFunc(self, key, Id):
"""
ritorna
la lista dei valori con il loro id data la chiave del tab key
@param key: 'parametri','marchi','potenziali'
@param id: Integer
@return ((int,string,int))::((class_id,testo,parametro_id))
@note class_id puo essere tc_mar_id, tc_pot_id o tc_par_id in funzione di key che puo'
assumere rispettivamente i valori 'marchi','potenziali','parametri'
"""
query=self.query%(self.dictValue[key],self.dictValue[key])
#for parametri and potenziali the query results must be ordered
order={}
order['parametri']=True
order['potenziali']=True
order['marchi']=False
#polita=self.dictValue[key].replace('_', "_cl")
polita=self.dictPolita[key]
query=query+" and %s_id=%d"%(polita,Id)
if order[key]:
query+=" order by ordine"
self.crs.execute(query)
return self.crs.fetchall()
def getCodMMAS(self, Id):
query="SELECT cod_mmas FROM pv WHERE pv_id=%d"%Id
self.crs.execute(query)
return self.crs.fetchall()[0][0]
def getBrand(self, Id):
"""
recupera il nome del marchio dal db dato lo id
@param id: Integer
@return: String
"""
query="select testo, tc_mar_id from tc_mar where tc_mar_id=%d"%(Id)
#print query
self.crs.execute(query)
return self.crs.fetchall()[0][0]
def getBrands(self, l):
"""
genera la lista di nomi di brand relativa alla lista di id
@param l: [Integer]
@return: [String]
"""
brands=[]
for id in l:
brands.append([ id[0], self.getBrand(id[0])])
return brands
def getIdBrands(self, brand_class_id):
"""
recupera i tc_mar_id relativi a tc_clmar_id
cioe' i marchi che vendono una data classe di prodotti
@param brand_class_id: Integer
@return: (('tc_mar_id,Integer))
@note: non usa sqlalchemy ne' elixir, ma la query e' standard dovrebbe funzionare pure con rdbms diversi da mysql
"""
query="select tc_mar_id from tc_rel_clmar_mar where tc_clmar_id=%s"%(brand_class_id)
self.crs.execute(query)
return self.crs.fetchall()
def getParameters(self, key):
"""
@param key: 'parametri'/'potenziali'
@attention: 'marchi' non va perche' cambia un po' il procedimento per questo si usano getBrand, getBrands e getIdBrands
@return: ((int, string,int))::(id, testo, tc_stato_id)
"""
query=self.query%(self.dictTables[key],self.dictTables[key])
#print "query parametri "+query
self.crs.execute(query)
return self.crs.fetchall()
def getValues(self, key, id):
"""
ritorna i valori delle classi dato lo id
@param id: integer
@return: ((id,text))
@param key:string chiave del tab 'parametri o 'marchi' o 'potenziali'
"""
#popolo un dizionario di funzioni, che usero' per ricavare i valori a seconda della chiave
self.getValuesFunctions={'parametri':self.getValuesFunc(key, id), 'marchi':self.getValuesBrand( id), 'potenziali':self.getValuesFunc(key, id)}
#print " id {0}, key {1}".format(key, id)
return self.getValuesFunctions[key]
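# Minimal usage sketch (hypothetical names: assumes `activeDb` exposes a
# getConnection() method returning a MySQLdb connection, as expected by __init__):
#
#   db = dbQuery(user, activeDb)
#   selection = {'parametri': [(8, 82), (10, 101), (10, 102)],
#                'marchi': [], 'potenziali': []}
#   sql = db.queryBuilder('parametri', selection)
#   rows = db.getResultSet(sql)   # tuples of pv_id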