Dataset columns: repo_name (string, length 5-100), path (string, length 4-299), copies (string, 990 classes), size (string, length 4-7), content (string, length 666-1.03M), license (string, 15 classes), hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B), line_mean (float64, 3.17-100), line_max (int64, 7-1k), alpha_frac (float64, 0.25-0.98), autogenerated (bool, 1 class).

repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---
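Each row below is one flattened source file; its metadata fields appear before the file content and again after it. A minimal sketch of how such a dump is typically consumed, assuming a Hugging Face `datasets`-style export (the dataset identifier below is a placeholder, not stated anywhere in this dump):

```python
from datasets import load_dataset

# Placeholder identifier: the real dataset name/path is not given in this dump.
ds = load_dataset("org/python-code-dump", split="train")

row = ds[0]
print(row["repo_name"], row["path"], row["license"], row["size"])
print(row["content"][:200])          # start of the stored source file
print(row["line_mean"], row["line_max"], row["alpha_frac"], row["autogenerated"])
```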
franky88/emperioanimesta | env/Lib/site-packages/django/db/backends/mysql/schema.py | 37 | 4590 |
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
from django.db.models import NOT_PROVIDED
class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
sql_rename_table = "RENAME TABLE %(old_table)s TO %(new_table)s"
sql_alter_column_null = "MODIFY %(column)s %(type)s NULL"
sql_alter_column_not_null = "MODIFY %(column)s %(type)s NOT NULL"
sql_alter_column_type = "MODIFY %(column)s %(type)s"
sql_rename_column = "ALTER TABLE %(table)s CHANGE %(old_column)s %(new_column)s %(type)s"
sql_delete_unique = "ALTER TABLE %(table)s DROP INDEX %(name)s"
sql_delete_fk = "ALTER TABLE %(table)s DROP FOREIGN KEY %(name)s"
sql_delete_index = "DROP INDEX %(name)s ON %(table)s"
sql_create_pk = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s PRIMARY KEY (%(columns)s)"
sql_delete_pk = "ALTER TABLE %(table)s DROP PRIMARY KEY"
def quote_value(self, value):
# Inner import to allow module to fail to load gracefully
import MySQLdb.converters
return MySQLdb.escape(value, MySQLdb.converters.conversions)
def skip_default(self, field):
"""
MySQL doesn't accept default values for TEXT and BLOB types, and
implicitly treats these columns as nullable.
"""
db_type = field.db_type(self.connection)
return (
db_type is not None and
db_type.lower() in {
'tinyblob', 'blob', 'mediumblob', 'longblob',
'tinytext', 'text', 'mediumtext', 'longtext',
}
)
def add_field(self, model, field):
super(DatabaseSchemaEditor, self).add_field(model, field)
# Simulate the effect of a one-off default.
# field.default may be unhashable, so a set isn't used for "in" check.
if self.skip_default(field) and field.default not in (None, NOT_PROVIDED):
effective_default = self.effective_default(field)
self.execute('UPDATE %(table)s SET %(column)s = %%s' % {
'table': self.quote_name(model._meta.db_table),
'column': self.quote_name(field.column),
}, [effective_default])
def _field_should_be_indexed(self, model, field):
create_index = super(DatabaseSchemaEditor, self)._field_should_be_indexed(model, field)
storage = self.connection.introspection.get_storage_engine(
self.connection.cursor(), model._meta.db_table
)
# No need to create an index for ForeignKey fields except if
# db_constraint=False because the index from that constraint won't be
# created.
if (storage == "InnoDB" and
create_index and
field.get_internal_type() == 'ForeignKey' and
field.db_constraint):
return False
return create_index
def _delete_composed_index(self, model, fields, *args):
"""
MySQL can remove an implicit FK index on a field when that field is
covered by another index like a unique_together. "covered" here means
that the more complex index starts like the simpler one.
http://bugs.mysql.com/bug.php?id=37910 / Django ticket #24757
We check here before removing the [unique|index]_together if we have to
recreate a FK index.
"""
first_field = model._meta.get_field(fields[0])
if first_field.get_internal_type() == 'ForeignKey':
constraint_names = self._constraint_names(model, [first_field.column], index=True)
if not constraint_names:
self.execute(self._create_index_sql(model, [first_field], suffix=""))
return super(DatabaseSchemaEditor, self)._delete_composed_index(model, fields, *args)
def _set_field_new_type_null_status(self, field, new_type):
"""
Keep the null property of the old field. If it has changed, it will be
handled separately.
"""
if field.null:
new_type += " NULL"
else:
new_type += " NOT NULL"
return new_type
def _alter_column_type_sql(self, table, old_field, new_field, new_type):
new_type = self._set_field_new_type_null_status(old_field, new_type)
return super(DatabaseSchemaEditor, self)._alter_column_type_sql(table, old_field, new_field, new_type)
def _rename_field_sql(self, table, old_field, new_field, new_type):
new_type = self._set_field_new_type_null_status(old_field, new_type)
return super(DatabaseSchemaEditor, self)._rename_field_sql(table, old_field, new_field, new_type)
| gpl-3.0 | -7,214,077,675,303,888,000 | 44 | 110 | 0.62963 | false |
ccarvalheira/jangada | apps/core/tests.py | 1 | 8183 |
from django.test import TestCase
from apps.core.models import FieldModel
from apps.core.models import RelationshipFieldModel
from apps.core.models import ClassModel
from apps.core.models import App
from apps.core.models import Pip
class BaseTestSetUp(TestCase):
def setUp(self):
self.app = App(name="myapp",description="asd")
self.app.save()
self.tclass = ClassModel(name="post",app=self.app,register_admin=True,is_stacked=False,is_tabular=False)
self.tclass.save()
self.authorclass = ClassModel(name="author",app=self.app,register_admin=True,is_stacked=False,is_tabular=False)
self.authorclass.save()
self.f1 = FieldModel(name="title",this_class=self.tclass,
field_type="char 50",is_blank=False,is_null=False,is_str=True,list_display=True,
filter_on_this=False,search_on_this=True)
self.f2 = FieldModel(name="body",this_class=self.tclass,
field_type="txt",is_blank=False,is_null=False,is_str=False,list_display=False,
filter_on_this=False,search_on_this=False)
self.f3 = FieldModel(name="num_views",this_class=self.tclass,
field_type="int",is_blank=True,is_null=True,is_str=False,list_display=True,
filter_on_this=False,search_on_this=False)
self.f4 = FieldModel(name="published",this_class=self.tclass,
field_type="bool",is_blank=False,is_null=True,is_str=False,list_display=True,
filter_on_this=True,search_on_this=False)
self.f5 = FieldModel(name="lorem",this_class=self.tclass,
field_type="txt",is_blank=False,is_null=False,is_str=True,list_display=False,
filter_on_this=False,search_on_this=False,editable=False,default="Lorem Ipsum")
self.f6 = RelationshipFieldModel(name="author",this_class=self.tclass,
is_blank=False,is_null=False, target_class=self.authorclass,key_type="fk")
self.pizza_class = ClassModel(name="pizza",app=self.app,register_admin=True,is_stacked=False,is_tabular=False)
self.pizza_class.save()
self.top_class = ClassModel(name="toppings",app=self.app,register_admin=True,is_stacked=False,is_tabular=False)
self.top_class.save()
self.pizzaname = FieldModel(name="name",this_class=self.pizza_class,
field_type="char 50",is_blank=False,is_null=False,is_str=True,list_display=True,
filter_on_this=False,search_on_this=True)
self.pizzaname.save()
self.top_name = FieldModel(name="name",this_class=self.top_class,
field_type="char 50",is_blank=False,is_null=False,is_str=True,list_display=True,
filter_on_this=False,search_on_this=True)
self.top_name.save()
self.pizza_m2m = RelationshipFieldModel(name="toppings",this_class=self.pizza_class,
is_blank=False,is_null=False, target_class=self.top_class,key_type="m2m")
self.space_class = ClassModel(name="spam and eggs",app=self.app,register_admin=True,is_stacked=False,is_tabular=False)
self.space_class.save()
self.stacked_class = ClassModel(name="inlinesta",app=self.app,register_admin=True,is_stacked=True,is_tabular=False)
self.space_class.save()
self.tabular_class = ClassModel(name="inlinetab",app=self.app,register_admin=True,is_stacked=False,is_tabular=True)
self.space_class.save()
self.both_inline_class = ClassModel(name="inlineboth",app=self.app,register_admin=True,is_stacked=True,is_tabular=True)
self.space_class.save()
self.f1.save()
self.f2.save()
self.f3.save()
self.f4.save()
self.f5.save()
self.f6.save()
class ClassAppFieldModelTest(BaseTestSetUp):
def test_field_class(self):
""" Checks whether the class of the field is correct. """
self.assertEqual(self.f1.field_class(), "CharField")
self.assertEqual(self.f2.field_class(), "TextField")
self.assertEqual(self.f3.field_class(), "IntegerField")
self.assertEqual(self.f4.field_class(), "BooleanField")
self.assertEqual(self.f6.field_class(), "ForeignKey")
self.assertEqual(self.pizza_m2m.field_class(), "ManyToManyField")
def test_field_options(self):
""" Checks if field options are being correctly returned. """
self.assertItemsEqual(["max_length=50"], self.f1.field_options())
self.assertItemsEqual([], self.f2.field_options())
self.assertItemsEqual(["blank=True","null=True"], self.f3.field_options())
self.assertItemsEqual(["null=True"], self.f4.field_options())
self.assertItemsEqual(["editable=False","default=\"Lorem Ipsum\""], self.f5.field_options())
self.assertItemsEqual(["Author"], self.f6.field_options())
self.assertItemsEqual(["Toppings"], self.pizza_m2m.field_options())
def test_class_name(self):
""" Checks whether the correct CamelCase class name is returned. """
self.assertEqual(self.pizza_class.class_name(), "Pizza")
self.assertEqual(self.space_class.class_name(), "SpamAndEggs")
def test_str_attribute(self):
""" Checks whether the correct str attribute is detected. """
self.assertEqual(self.tclass.get_str_attribute(), "title")
self.assertEqual(self.top_class.get_str_attribute(), "name")
def test_admin_class(self):
""" Checks whether the correct CamelCase admin class name is returned. """
self.assertEqual(self.tclass.admin_name(), "PostAdmin")
self.assertEqual(self.space_class.admin_name(), "SpamAndEggsAdmin")
self.assertEqual(self.stacked_class.admin_name(), "InlinestaAdminInline")
self.assertEqual(self.tabular_class.admin_name(), "InlinetabAdminInline")
self.assertEqual(self.both_inline_class.admin_name(), "InlinebothAdminInline")
def test_admin_inherit(self):
""" Checks whether the correct admin class is inherited. """
self.assertEqual(self.stacked_class.admin_class_inherit(), "StackedInline")
self.assertEqual(self.tabular_class.admin_class_inherit(), "TabularInline")
self.assertEqual(self.both_inline_class.admin_class_inherit(), "StackedInline")
self.assertEqual(self.tclass.admin_class_inherit(), "ModelAdmin")
class PipTest(TestCase):
def setUp(self):
self.p1 = Pip(name="south", installed_apps_text="south\nthings", requirements_pkg_name="South")
self.p2 = Pip(name="south versioned", installed_apps_text="south\nother_things", requirements_pkg_name="South", requirements_version="0.6.0")
self.p3 = Pip(name="south versioned oneiapp", installed_apps_text="south", requirements_pkg_name="South", requirements_version="0.6.0")
self.p1.save()
self.p2.save()
self.p3.save()
def test_requirements(self):
""" Checks if requirements for pip format are returned correctly """
self.assertEqual(self.p1.get_requirements(), "South")
self.assertEqual(self.p2.get_requirements(), "South==0.6.0")
def test_installed_apps_pip(self):
""" Checks if installed apps list for this pip is correct """
self.assertItemsEqual(["south","things"], self.p1.installed_apps_list())
self.assertItemsEqual(["south","other_things"], self.p2.installed_apps_list())
self.assertItemsEqual(["south"], self.p3.installed_apps_list())
class AppTest(TestCase):
fixtures = ["fixtures/initial_data.json"]
def test_sane_name(self):
""" Test if name is correctly """
self.spaced_app = App.objects.get(name="spaced app with trailing space ")
self.blog = App.objects.get(name="myblog")
self.assertEqual(self.spaced_app.get_sane_name(), "spaced_app_with_trailing_space")
self.assertEqual(self.blog.get_sane_name(), "myblog")
| mit | 3,505,498,035,407,490,000 | 46.028736 | 149 | 0.644751 | false |
MechanisM/ajenti | plugins/squid_sarg/ui_sarg.py | 17 | 1116 |
import os
from ajenti import apis
from ajenti.com import *
from ajenti.ui import *
from ajenti.api import *
from ajenti.utils import wsgi_serve_file, shell
class SquidReports(Plugin, URLHandler):
implements(apis.squid.IPluginPart)
weight = 20
title = 'Reports'
tab = 0
cfg = 0
parent = None
def init(self, parent, cfg, tab):
self.parent = parent
self.cfg = cfg
self.tab = tab
@url('^/sarg_report/.+$')
def process(self, req, start_response):
file = os.path.join('/var/lib/sarg/', req['PATH_INFO'][13:])
return wsgi_serve_file(req, start_response, file)
def get_ui(self):
vc = UI.VContainer(
UI.Button(text='Generate report', id='gen'),
UI.Spacer(height=10),
UI.IFrame(src='/sarg_report/index.html', width="600", height="500")
)
return vc
def on_click(self, event, params, vars=None):
if params[0] == 'gen':
self.parent._tab = self.tab
shell('sarg')
def on_submit(self, event, params, vars=None):
pass
| lgpl-3.0 | -3,175,314,974,869,028,000 | 24.363636 | 83 | 0.575269 | false |
mhbu50/erpnext | erpnext/payroll/doctype/employee_incentive/employee_incentive.py | 3 | 1225 |
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.model.document import Document
class EmployeeIncentive(Document):
def validate(self):
self.validate_salary_structure()
def validate_salary_structure(self):
if not frappe.db.exists('Salary Structure Assignment', {'employee': self.employee}):
frappe.throw(_("There is no Salary Structure assigned to {0}. First assign a Salary Stucture.").format(self.employee))
def on_submit(self):
company = frappe.db.get_value('Employee', self.employee, 'company')
additional_salary = frappe.new_doc('Additional Salary')
additional_salary.employee = self.employee
additional_salary.currency = self.currency
additional_salary.salary_component = self.salary_component
additional_salary.overwrite_salary_structure_amount = 0
additional_salary.amount = self.incentive_amount
additional_salary.payroll_date = self.payroll_date
additional_salary.company = company
additional_salary.ref_doctype = self.doctype
additional_salary.ref_docname = self.name
additional_salary.submit()
| gpl-3.0 | -6,447,332,821,022,435,000 | 38.516129 | 121 | 0.766531 | false |
ruuk/script.module.sharesocial | lib/SSClipboard.py | 1 | 1480 |
# -*- coding: utf-8 -*-
import os, xbmc
from ShareSocial import Share
from ShareSocial import SHARE_TYPE_IMAGE, SHARE_TYPE_AUDIO, SHARE_TYPE_VIDEO, SHARE_TYPE_LINK, SHARE_TYPE_IMAGEFILE, SHARE_TYPE_VIDEOFILE, SHARE_TYPE_AUDIOFILE, SHARE_TYPE_BINARYFILE, SHARE_TYPE_HTML, SHARE_TYPE_TEXT, SHARE_TYPE_STATUS #@UnusedImport
import xbmcaddon
__addon__ = xbmcaddon.Addon(id='script.module.sharesocial')
APILEVEL = 1
MAIN_PATH = xbmc.translatePath(__addon__.getAddonInfo('profile'))
CACHE_PATH = os.path.join(MAIN_PATH,'cache')
if not os.path.exists(CACHE_PATH): os.makedirs(CACHE_PATH)
class Clipboard:
def __init__(self):
self.clipboard = None
self.clipFilePath = os.path.join(CACHE_PATH,'CLIPBOARD')
self.loadCBData()
def hasData(self,types=None):
if not self.clipboard: return None
if types:
if not self.clipboard.shareType in types: return None
return self.clipboard.shareType
def getShare(self,source,sharetype):
return Share(source,sharetype)
def setClipboard(self,share):
self.clipboard = share
self.saveCBData()
def getClipboard(self):
return self.clipboard
def saveCBData(self):
if not self.clipboard: return
data = self.clipboard.toString()
f = open(self.clipFilePath,'w')
f.write(data)
f.close()
def loadCBData(self):
if not os.path.exists(self.clipFilePath): return
f = open(self.clipFilePath,'r')
data = f.read()
f.close()
if not data: return
share = Share().fromString(data)
self.clipboard = share
| gpl-2.0 | -1,315,731,506,257,077,200 | 26.425926 | 250 | 0.728378 | false |
advoretsky/nose-gae | nosegae.py | 1 | 11118 |
import os
import pdb
import logging
import sys
import tempfile
from imp import find_module, acquire_lock, release_lock
from warnings import warn
from nose.importer import Importer, add_path
from nose.plugins.base import Plugin
log = logging.getLogger(__name__)
class NoseGAE(Plugin):
"""
Activate this plugin to run tests in Google App Engine dev
environment. When the plugin is active, Google App Engine dev stubs, such
as the stub datastore, will be available, and application code will run in
a sandbox that restricts module loading in the same way as it is
restricted when running under GAE.
"""
name = 'gae'
def options(self, parser, env=os.environ):
super(NoseGAE, self).options(parser, env)
parser.add_option(
'--gae-lib-root', default='/usr/local/google_appengine',
dest='gae_lib_root',
help='Set the path to the root directory of the Google '
'Application Engine installation')
parser.add_option(
'--gae-application', default=None, action='store', dest='gae_app',
help='Set the path to the GAE application '
'under test. Default is the nose `where` '
'directory (generally the pwd)')
parser.add_option(
'--gae-sqlite', default=False, action='store_true', dest='gae_sqlite',
help='Use the new sqlite datastore stub.')
parser.add_option(
'--gae-datastore', default=None, action='store', dest='gae_data',
help='Set the path to the GAE datastore to use in tests. '
'Note that when using an existing datastore directory, the '
'datastore will not be cleared before testing begins.')
parser.add_option(
'--without-sandbox', default=True, action='store_false', dest='sandbox_enabled',
help='Enable this flag if you want to run your tests without '
'import module sandbox. This is most useful when you have a '
'conflicting nose plugin (such as coverage).')
def configure(self, options, config):
super(NoseGAE, self).configure(options, config)
if not self.enabled:
return
self.config = config
if options.gae_app is not None:
self._path = options.gae_app
else:
self._path = config.workingDir
if options.gae_lib_root is not None:
root = self._gae_path = options.gae_lib_root
sys.path.append(root)
else:
self._gae_path = None
if options.gae_data is not None:
self._data_path = options.gae_data
self._temp_data = False
else:
self._data_path = os.path.join(tempfile.gettempdir(),
'nosegae.datastore')
self._temp_data = True
self.sandbox_enabled = options.sandbox_enabled
self._use_sqlite = options.gae_sqlite
try:
if 'google' in sys.modules:
# make sure an egg (e.g. protobuf) is not cached
# with the wrong path:
del sys.modules['google']
saved_path = [p for p in sys.path]
# import the pseudo dev_appserver (which is really a script)
# and let it add 3rd party libraries:
from dev_appserver import fix_sys_path
fix_sys_path() # wipes out sys.path
sys.path.extend(saved_path) # put back our previous path
from google.appengine.tools import old_dev_appserver as dev_appserver
from google.appengine.tools.dev_appserver_main import \
DEFAULT_ARGS, ARG_CLEAR_DATASTORE, ARG_LOG_LEVEL, \
ARG_DATASTORE_PATH, ARG_HISTORY_PATH, ARG_USE_SQLITE
self._gae = {'dev_appserver': dev_appserver,
'ARG_LOG_LEVEL': ARG_LOG_LEVEL,
'ARG_CLEAR_DATASTORE': ARG_CLEAR_DATASTORE,
'ARG_DATASTORE_PATH': ARG_DATASTORE_PATH,
'ARG_HISTORY_PATH': ARG_HISTORY_PATH,
'DEFAULT_ARGS': DEFAULT_ARGS,
'ARG_USE_SQLITE': ARG_USE_SQLITE}
# prefill these into sys.modules
import webob
import yaml
# (removed since using this causes non-default django version to break)
# import django
try:
import webtest
except ImportError:
pass
except ImportError, e:
self.enabled = False
raise
# warn("Google App Engine not found in %s" % options.gae_lib_root,
# RuntimeWarning)
if sys.version_info[0:2] < (2,5):
raise EnvironmentError(
"Python version must be 2.5 or greater, like the Google App Engine environment. "
"Tests are running with: %s" % sys.version)
# As of SDK 1.2.5 the dev_appserver.py aggressively adds some logging handlers.
# This removes the handlers but note that Nose will still capture logging and
# report it during failures. See Issue 25 for more info.
rootLogger = logging.getLogger()
for handler in rootLogger.handlers:
if isinstance(handler, logging.StreamHandler):
rootLogger.removeHandler(handler)
def begin(self):
args = self._gae['DEFAULT_ARGS']
clear = self._gae['ARG_CLEAR_DATASTORE']
ds_path = self._gae['ARG_DATASTORE_PATH']
hs_path = self._gae['ARG_HISTORY_PATH']
sqlite = self._gae['ARG_USE_SQLITE']
dev_appserver = self._gae['dev_appserver']
gae_opts = args.copy()
gae_opts["root_path"] = self._path
gae_opts[clear] = self._temp_data
gae_opts[sqlite] = self._use_sqlite
gae_opts[ds_path] = self._data_path
gae_opts[hs_path] = os.path.join(tempfile.gettempdir(),
'nosegae.datastore.history')
config, _explicit_matcher, from_cache = dev_appserver.LoadAppConfig(self._path, {})
dev_appserver.SetupStubs(config.application, **gae_opts)
self._install_hook(dev_appserver.HardenedModulesHook, config)
# dev_appserver.HardenedModulesHook.ENABLE_LOGGING = True
def beforeImport(self, filename, module):
if not self.hook.sandbox:
if self.hook.should_sandbox(module, filename):
self.hook.enter_sandbox(module)
def afterImport(self, filename, module):
if self.hook.sandbox == module:
self.hook.exit_sandbox()
def _install_hook(self, cls, config):
dev_appserver = self._gae['dev_appserver']
class Hook(HookMixin, cls):
dev_appserver = self._gae['dev_appserver']
sandbox_root = self._path
testMatch = self.config.testMatch
module_dict = self._setup_shared_modules()
def should_sandbox(hook, *args, **kwargs):
if self.sandbox_enabled:
return super(Hook, hook).should_sandbox(*args, **kwargs)
self.hook = Hook(config, sys.modules, self._path)
sys.meta_path = [self.hook]
# set up allowed file access paths
paths = []
if self._gae_path:
paths.append(self._gae_path)
dev_appserver.FakeFile.SetAllowedPaths(self._path, paths)
def _setup_shared_modules(self):
mods = self._gae['dev_appserver'].SetupSharedModules(sys.modules)
for name in sys.modules:
if name.startswith('nose') or name.startswith('webtest'):
mods[name] = sys.modules[name]
return mods
class HookMixin(object):
"""
Combine this mixin with a meta_path importer (such as
dev_appserver.HardenedModulesHook) to set up a meta_path importer that
enforces the rules of the mixed-in importer only for non-test modules that
fall under a particular path.
The subclass defined by mixing this class with an importer must define the
following attributes:
* dev_appserver: the google.appengine.tools.dev_appserver module
* sandbox_root: the path under which non-test modules should be sandboxed
* testMatch: a regular expression used to distinguish test modules
"""
sandbox = None
def find_module(self, fullname, path=None):
if not self.sandbox:
if path:
mod_path = path[0]
else:
mod_path = self.find_mod_path(fullname)
if mod_path and self.should_sandbox(fullname, mod_path):
self.enter_sandbox(fullname)
if not self.sandbox:
# allow normal loading
self.log("* ALLOW NORMAL LOAD: %s" % fullname)
return None
# sandboxed
return super(HookMixin, self).find_module(fullname, path)
def load_module(self, fullname):
# only called when sandboxed
try:
# FIXME: possible strategy for sandboxing file, open, etc
# if mod.file is <type 'file'> or nothing, set it to
# FakeFile. Same for mod.open.
return super(HookMixin, self).load_module(fullname)
finally:
if fullname == self.sandbox:
self.exit_sandbox()
def enter_sandbox(self, mod_name):
if self.sandbox:
return
self.log(">>> ENTER sandbox %s" % mod_name)
self.sandbox = mod_name
self._old_modules = sys.modules.copy()
self.dev_appserver.ClearAllButEncodingsModules(sys.modules)
# restore shared modules (see issue #2)
sys.modules.update(self.module_dict)
if hasattr(sys, 'path_importer_cache'):
sys.path_importer_cache.clear()
def is_sandboxed(self, mod_name):
return mod_name == self.sandbox
def exit_sandbox(self):
if not self.sandbox:
return
self.log("<<< EXIT sandbox %s" % self.sandbox)
self.sandbox = None
# preserve loaded modules for next entry into sandbox (see issue #7)
self.module_dict.update(sys.modules)
sys.modules.update(self._old_modules)
if hasattr(sys, 'path_importer_cache'):
sys.path_importer_cache.clear()
def find_mod_path(self, fullname):
# we really only need the path to the top
top = fullname.split('.')[0]
try:
_sf, path, _desc= self._imp.find_module(top, None)
except ImportError:
self.log("Could not find path for %s", fullname)
return
self.log("Module path for %s is %s", fullname, path)
return path
def should_sandbox(self, fullname, mod_path):
mp = os.path.realpath(mod_path)
sbp = os.path.realpath(self.sandbox_root)
self.log("%s under %s?", mp, sbp)
return mp.startswith(sbp) and not self.testMatch.search(fullname)
| lgpl-3.0 | 723,085,592,287,426,200 | 40.640449 | 98 | 0.585177 | false |
pattisdr/osf.io | api_tests/preprints/views/test_preprint_actions.py | 11 | 1710 |
import pytest
from api.base.settings.defaults import API_BASE
from osf_tests.factories import (
AuthUserFactory,
)
from osf.utils import permissions as osf_permissions
from api_tests.reviews.mixins.filter_mixins import ReviewActionFilterMixin
from api_tests.reviews.mixins.comment_settings import ReviewActionCommentSettingsMixin
@pytest.mark.enable_quickfiles_creation
class TestPreprintActionFilters(ReviewActionFilterMixin):
@pytest.fixture()
def preprint(self, all_actions):
return all_actions[0].target
@pytest.fixture(params=[True, False], ids=['moderator', 'node_admin'])
def user(self, request, preprint):
user = AuthUserFactory()
if request.param:
user.groups.add(preprint.provider.get_group('moderator'))
else:
preprint.add_contributor(
user,
permissions=osf_permissions.ADMIN)
return user
@pytest.fixture()
def expected_actions(self, preprint, all_actions):
return [r for r in all_actions if r.target_id == preprint.id]
@pytest.fixture()
def url(self, preprint):
return '/{}preprints/{}/review_actions/'.format(API_BASE, preprint._id)
def test_unauthorized_user(self, app, url):
res = app.get(url, expect_errors=True)
assert res.status_code == 401
user = AuthUserFactory()
res = app.get(url, auth=user.auth, expect_errors=True)
assert res.status_code == 403
@pytest.mark.enable_quickfiles_creation
class TestReviewActionSettings(ReviewActionCommentSettingsMixin):
@pytest.fixture()
def url(self, preprint):
return '/{}preprints/{}/review_actions/'.format(API_BASE, preprint._id)
| apache-2.0 | 8,200,623,162,264,172,000 | 31.884615 | 86 | 0.687135 | false |
tsdmgz/ansible | lib/ansible/modules/network/avi/avi_systemconfiguration.py | 27 | 5996 |
#!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi ([email protected])
# Eric Anderson ([email protected])
# module_check: supported
# Avi Version: 17.1.1
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_systemconfiguration
author: Gaurav Rastogi ([email protected])
short_description: Module for setup of SystemConfiguration Avi RESTful Object
description:
- This module is used to configure SystemConfiguration object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
admin_auth_configuration:
description:
- Adminauthconfiguration settings for systemconfiguration.
dns_configuration:
description:
- Dnsconfiguration settings for systemconfiguration.
dns_virtualservice_refs:
description:
- Dns virtualservices hosting fqdn records for applications across avi vantage.
- If no virtualservices are provided, avi vantage will provide dns services for configured applications.
- Switching back to avi vantage from dns virtualservices is not allowed.
- It is a reference to an object of type virtualservice.
docker_mode:
description:
- Boolean flag to set docker_mode.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
email_configuration:
description:
- Emailconfiguration settings for systemconfiguration.
global_tenant_config:
description:
- Tenantconfiguration settings for systemconfiguration.
linux_configuration:
description:
- Linuxconfiguration settings for systemconfiguration.
mgmt_ip_access_control:
description:
- Configure ip access control for controller to restrict open access.
ntp_configuration:
description:
- Ntpconfiguration settings for systemconfiguration.
portal_configuration:
description:
- Portalconfiguration settings for systemconfiguration.
proxy_configuration:
description:
- Proxyconfiguration settings for systemconfiguration.
snmp_configuration:
description:
- Snmpconfiguration settings for systemconfiguration.
ssh_ciphers:
description:
- Allowed ciphers list for ssh to the management interface on the controller and service engines.
- If this is not specified, all the default ciphers are allowed.
- Ssh -q cipher provides the list of default ciphers supported.
ssh_hmacs:
description:
- Allowed hmac list for ssh to the management interface on the controller and service engines.
- If this is not specified, all the default hmacs are allowed.
- Ssh -q mac provides the list of default hmacs supported.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create SystemConfiguration object
avi_systemconfiguration:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_systemconfiguration
"""
RETURN = '''
obj:
description: SystemConfiguration (api/systemconfiguration) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
admin_auth_configuration=dict(type='dict',),
dns_configuration=dict(type='dict',),
dns_virtualservice_refs=dict(type='list',),
docker_mode=dict(type='bool',),
email_configuration=dict(type='dict',),
global_tenant_config=dict(type='dict',),
linux_configuration=dict(type='dict',),
mgmt_ip_access_control=dict(type='dict',),
ntp_configuration=dict(type='dict',),
portal_configuration=dict(type='dict',),
proxy_configuration=dict(type='dict',),
snmp_configuration=dict(type='dict',),
ssh_ciphers=dict(type='list',),
ssh_hmacs=dict(type='list',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'systemconfiguration',
set([]))
if __name__ == '__main__':
main()
| gpl-3.0 | 9,053,495,143,336,564,000 | 35.560976 | 116 | 0.663276 | false |
devdelay/home-assistant | script/get_entities.py | 23 | 3135 |
#! /usr/bin/python
"""
Query the Home Assistant API for available entities.
Output is printed to stdout.
"""
import sys
import getpass
import argparse
try:
from urllib2 import urlopen
PYTHON = 2
except ImportError:
from urllib.request import urlopen
PYTHON = 3
import json
def main(password, askpass, attrs, address, port):
"""Fetch Home Assistant API JSON page and post process."""
# Ask for password
if askpass:
password = getpass.getpass('Home Assistant API Password: ')
# Fetch API result
url = mk_url(address, port, password)
response = urlopen(url).read()
if PYTHON == 3:
response = response.decode('utf-8')
data = json.loads(response)
# Parse data
output = {'entity_id': []}
output.update([(attr, []) for attr in attrs])
for item in data:
output['entity_id'].append(item['entity_id'])
for attr in attrs:
output[attr].append(item['attributes'].get(attr, ''))
# Output data
print_table(output, ['entity_id'] + attrs)
def print_table(data, columns):
"""Format and print a table of data from a dictionary."""
# Get column lengths
lengths = {}
for key, value in data.items():
lengths[key] = max([len(str(val)) for val in value] + [len(key)])
# Print header
for item in columns:
itemup = item.upper()
sys.stdout.write(itemup + ' ' * (lengths[item] - len(item) + 4))
sys.stdout.write('\n')
# print body
for ind in range(len(data[columns[0]])):
for item in columns:
val = str(data[item][ind])
sys.stdout.write(val + ' ' * (lengths[item] - len(val) + 4))
sys.stdout.write("\n")
def mk_url(address, port, password):
"""Construct the URL call for the API states page."""
url = ''
if address.startswith('http://'):
url += address
else:
url += 'http://' + address
url += ':' + port + '/api/states?'
if password is not None:
url += 'api_password=' + password
return url
if __name__ == "__main__":
all_options = {'password': None, 'askpass': False, 'attrs': [],
'address': 'localhost', 'port': '8123'}
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('attrs', metavar='ATTRIBUTE', type=str, nargs='*',
help='an attribute to read from the state')
parser.add_argument('--password', dest='password', default=None,
type=str, help='API password for the HA server')
parser.add_argument('--ask-password', dest='askpass', default=False,
action='store_const', const=True,
help='prompt for HA API password')
parser.add_argument('--addr', dest='address',
default='localhost', type=str,
help='address of the HA server')
parser.add_argument('--port', dest='port', default='8123',
type=str, help='port that HA is hosting on')
args = parser.parse_args()
main(args.password, args.askpass, args.attrs, args.address, args.port)
| mit | 8,536,007,995,154,457,000 | 30.989796 | 74 | 0.584051 | false |
splee/bigdoorkit | src/bigdoorkit/resources/level.py | 1 | 1271 |
from bigdoorkit.resources.base import BDResource
from bigdoorkit.resources.user import EndUser
class NamedLevelCollection(BDResource):
endpoint = "named_level_collection"
def __init__(self, **kw):
self.currency_id = kw.get('currency_id', None)
super(NamedLevelCollection, self).__init__(**kw)
class NamedLevel(BDResource):
endpoint = "named_level"
parent_class = NamedLevelCollection
parent_id_attr = "named_level_collection_id"
def __init__(self, **kw):
self.named_level_collection_id = kw.get('named_level_collection_id', None)
self.threshold = kw.get('threshold', None)
self.collection_resource_uri = kw.get('collection_resource_uri', None)
super(NamedLevel, self).__init__(**kw)
class Level(BDResource):
endpoint = "level"
parent_class = EndUser
parent_id_attr = "end_user_login"
def __init__(self, **kw):
self.end_user_login = kw.get('end_user_login', None)
self.named_level_id = kw.get('named_level_id', None)
self.transaction_group_id = kw.get('transaction_group_id', None)
self.next_level_uri = kw.get('next_level_uri', None)
self.previous_level_uri = kw.get('previous_level_uri', None)
super(Level, self).__init__(**kw)
| mit | -8,697,972,916,895,498,000 | 37.515152 | 82 | 0.652242 | false |
PreludeAndFugue/PySpend | setup.py | 1 | 1179 |
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
import sys
from distutils.core import setup
#from cx_Freeze import setup, Executable
base = None
if sys.platform == "win32":
base = "Win32GUI"
setup(
# py2exe
#windows=['pyspend.py'],
#cx_Freeze
#executables=[Executable('pyspend/pyspend.py', base=base)],
#build_options = {}
name='PySpend',
version='0.1dev',
author='Gary Kerr',
author_email='[email protected]',
packages=['pyspend', 'pyspend.test'],
package_data={'pyspend': ['config.json', 'pyspend.pyw']},
license='LICENSE.txt',
description='Record your expenditure',
long_description=open('README.txt').read(),
requires=['wxPython'],
classifiers=[
'Environment :: Win32 (MS Windows)',
'Environment :: X11 Applications',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: Implementation :: CPython'
]
)
| bsd-3-clause | 8,335,991,523,227,479,000 | 27.071429 | 69 | 0.615776 | false |
botswana-harvard/edc-quota | edc_quota/controller/controller.py | 2 | 8159 |
import json
import requests
from datetime import date
from django.utils import timezone
from django.contrib.auth.models import User
from tastypie.models import ApiKey
from .exceptions import ControllerError
from .models import Client, ControllerQuota, ControllerQuotaHistory
class Controller(object):
"""A class to control or manage quotas between a group of offline clients.
For example:
quota = ControllerQuota.objects.get(...)
controller = Controller(quota)
controller.get_all()
controller.post_all()
from datetime import date, timedelta
from edc_quota.controller.models import Client, ControllerQuota
from edc_quota.controller.controller import Controller
client = Client(hostname='edc4.bhp.org.bw', port=8001, api_name='v1')
controller_quota = ControllerQuota(target=10, start_date=date.today(), expiration_date=date.today() + timedelta(days=1), app_label='bcpp_subject', model_name='PimaVl')
controller_quota.id = 1
controller = Controller(controller_quota, [client], username='edc_quota', api_key='a817fc214f81b0e1467039e2ac61acbf99db8d47')
"""
def __init__(self, quota, clients=None, username=None, api_name=None, api_key=None):
self.api_name = api_name or 'v1'
self.base_url = 'http://{hostname}:{port}/edc_quota/api/{api_name}/quota/'
self.clients = {}
self.auth = {}
self.status_codes = {'get': {}, 'post': {}}
username = username or 'edc_quota'
try:
user = User.objects.get(username=username)
self.auth.update({'username': username})
self.auth.update({'api_key': ApiKey.objects.get(user=user).key})
except (User.DoesNotExist, ApiKey.DoesNotExist):
self.auth.update({'username': username})
self.auth.update({'api_key': api_key})
try:
if quota.is_active and quota.start_date <= date.today() and quota.expiration_date >= date.today():
self.quota = quota
else:
raise ControllerError(
'ControllerQuota {} is not active. '
'Got is_active={}, start date {}, end date {}.'.format(
quota, quota.is_active, quota.start_date, quota.expiration_date))
self.quota_history = ControllerQuotaHistory.objects.create(
quota=self.quota,
start_date=date.today(),
expiration_date=self.quota.expiration_date)
if clients:
for hostname in clients:
try:
client = Client.objects.get(hostname=hostname)
if client.is_active:
self.register(client)
except Client.DoesNotExist as e:
pass
else:
self.register_all()
except (ControllerQuota.DoesNotExist, AttributeError) as e:
raise ControllerQuota.DoesNotExist(
'Quota for model \'{}\' is not active, expired or does not exist. Got {}'.format(
quota, str(e)))
def register_all(self):
for client in Client.objects.filter(
app_label=self.quota.app_label,
model_name=self.quota.model_name,
is_active=True):
self.register(client)
def register(self, client=None, hostname=None):
try:
hostname = client.hostname
except AttributeError:
client = Client.objects.get(
hostname=hostname,
app_label=self.quota.app_label,
model_name=self.quota.model_name,
is_active=True)
self.clients[hostname] = client
def get_all(self):
"""Contacts all registered clients and updates the Quota model."""
contacted = timezone.now()
total_model_count = 0
clients_contacted = []
for hostname, client in self.clients.items():
client.model_count = self.get_client_model_count(client) or 0
client.contacted = contacted
total_model_count += client.model_count
clients_contacted.append(hostname)
client.save()
self.quota_history.model_count = total_model_count
self.quota_history.contacted = contacted
self.quota_history.clients_contacted = ','.join(clients_contacted)
self.quota_history.save()
if self.quota_history.clients_contacted:
self.set_new_targets()
else:
print('Warning: Appears there are no clients online. New targets have not been set.')
def post_all(self):
"""posts the new quota targets on the clients."""
for hostname in self.quota_history.clients_contacted_list:
self.post_client_quota(hostname)
def get_url(self, client):
return '{base}?format=json&app_label={app_label}&model_name={model_name}&{credentials}'.format(
base=self.base_url.format(hostname=client.hostname, port=client.port, api_name=self.api_name),
app_label=self.quota.app_label,
model_name=self.quota.model_name.lower(),
credentials=self.credentials)
def get_request(self, client):
hostname = client.hostname
try:
request = requests.get(self.get_url(client))
self.status_codes['get'].update({hostname: request.status_code})
except ConnectionError:
self.status_codes['get'].update({hostname: None})
request = None
return request
def get_client_model_count(self, client):
"""Fetches one clients model_count over the REST api."""
request = self.get_request(client)
objects = request.json()['objects']
try:
model_count = objects[0].get('model_count', None)
except IndexError:
model_count = None
return model_count
def set_new_targets(self):
"""Calculates new quota targets for all contacted clients."""
allocation = self.quota.target - self.quota_history.model_count
client_count = len(self.quota_history.clients_contacted_list)
remainder = allocation % client_count if allocation > 0 else 0
for name in self.quota_history.clients_contacted_list:
self.clients.get(name).target, remainder = self.target(allocation, client_count, remainder)
self.clients.get(name).start_date = self.quota_history.start_date
self.clients.get(name).expiration_date = self.quota_history.expiration_date
self.clients.get(name).save()
def target(self, allocation, client_count, remainder):
if allocation <= 0 or client_count == 0:
return 0, 0
extra = 0
if remainder > 0:
remainder -= 1
extra = 1
return int(allocation / client_count) + extra, remainder
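    # Worked example (illustrative, not in the original source): with
    # allocation=10 and client_count=3, set_new_targets() starts with
    # remainder = 10 % 3 = 1, so the first client gets int(10/3) + 1 = 4 and the
    # other two get 3 each, distributing the full allocation of 10.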
@property
def credentials(self):
return 'username={username}&api_key={api_key}'.format(
username=self.auth.get('username'),
api_key=self.auth.get('api_key'))
def post_url(self, name, port):
return '{base}/?format=json&{credentials}'.format(
base=self.base_url.format(hostname=name, port=port, api_name=self.api_name),
credentials=self.credentials
)
def post_client_quota(self, hostname):
"""Creates an instance of quota in the client."""
client = self.clients.get(hostname)
data = dict(
app_label=self.quota.app_label,
model_name=self.quota.model_name.lower(),
target=self.clients.get(hostname).target,
start_date=self.clients.get(hostname).start_date.isoformat(),
expiration_date=self.clients.get(hostname).expiration_date.isoformat())
request = requests.post(self.post_url(client.hostname, client.port), data=json.dumps(data))
try:
status_code = request.status_code
except AttributeError:
status_code = None
self.status_codes['post'].update({hostname: status_code})
return request
| gpl-2.0 | -8,579,146,239,434,428,000 | 42.398936 | 171 | 0.608776 | false |
jSherz/lsd-members | test-data/text_messages.py | 1 | 1414 |
#!/usr/bin/env python3
import uuid
import random
import datetime
from hashlib import sha256
from faker import Factory
from postgres import Postgres
fake = Factory.create()
db = Postgres('postgres://luskydive@localhost/luskydive')
print('COPY text_messages (uuid, member_uuid, mass_text_uuid, status, to_number, from_number, message, external_id, created_at, updated_at) FROM stdin;')
mass_texts = db.all('SELECT uuid, template, created_at FROM mass_texts;')
for mass_text in mass_texts:
start = datetime.datetime(mass_text.created_at.year, 8, 1)
end = datetime.datetime(mass_text.created_at.year + 1, 7, 28)
phone_number = '+447' + str(random.randrange(100000000, 999999999, 1))
members = db.all('SELECT uuid, phone_number, name FROM members WHERE phone_number IS NOT NULL AND created_at > %(start)s AND created_at < %(end)s', { 'start': start, 'end': end });
for member in members:
created_at = mass_text.created_at
delta = datetime.timedelta(seconds = random.random() * 7200 + 5)
updated_at = mass_text.created_at + delta
message = mass_text.template.replace('{{ name }}', member.name)
text_uuid = str(uuid.uuid4())
external_id = sha256(text_uuid.encode('utf-8')).hexdigest()
print("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s" % (text_uuid, member.uuid, mass_text.uuid, 1, member.phone_number, phone_number, message, external_id, created_at, updated_at))
print('\\.')
| mit | -4,347,668,284,197,556,000 | 39.4 | 182 | 0.699434 | false |
PalNilsson/pilot2 | pilot/test/test_utils.py | 1 | 1802 |
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Paul Nilsson, [email protected], 2018
import unittest
import os
from pilot.util.workernode import collect_workernode_info, get_disk_space
class TestUtils(unittest.TestCase):
"""
Unit tests for utils functions.
"""
def setUp(self):
# skip tests if running on a Mac -- Macs don't have /proc
self.mac = False
if os.environ.get('MACOSX') == 'true' or not os.path.exists('/proc/meminfo'):
self.mac = True
from pilot.info import infosys
infosys.init("CERN")
def test_collect_workernode_info(self):
"""
Make sure that collect_workernode_info() returns the proper types (float, float, float).
:return: (assertion)
"""
if self.mac:
return True
mem, cpu, disk = collect_workernode_info(path=os.getcwd())
self.assertEqual(type(mem), float)
self.assertEqual(type(cpu), float)
self.assertEqual(type(disk), float)
self.assertNotEqual(mem, 0.0)
self.assertNotEqual(cpu, 0.0)
self.assertNotEqual(disk, 0.0)
def test_get_disk_space(self):
"""
Verify that get_disk_space() returns the proper type (int).
:return: (assertion)
"""
if self.mac:
return True
#queuedata = {'maxwdir': 123456789}
from pilot.info import infosys
diskspace = get_disk_space(infosys.queuedata) ## FIX ME LATER
self.assertEqual(type(diskspace), int)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 1,525,461,993,596,524,000 | 25.115942 | 96 | 0.615427 | false |
Sorsly/subtle | google-cloud-sdk/lib/third_party/monotonic/__init__.py | 6 | 6953 |
# -*- coding: utf-8 -*-
"""
monotonic
~~~~~~~~~
This module provides a ``monotonic()`` function which returns the
value (in fractional seconds) of a clock which never goes backwards.
On Python 3.3 or newer, ``monotonic`` will be an alias of
``time.monotonic`` from the standard library. On older versions,
it will fall back to an equivalent implementation:
+-------------+----------------------------------------+
| Linux, BSD | ``clock_gettime(3)`` |
+-------------+----------------------------------------+
| Windows | ``GetTickCount`` or ``GetTickCount64`` |
+-------------+----------------------------------------+
| OS X | ``mach_absolute_time`` |
+-------------+----------------------------------------+
If no suitable implementation exists for the current platform,
attempting to import this module (or to import from it) will
cause a ``RuntimeError`` exception to be raised.
Copyright 2014, 2015, 2016 Ori Livneh <[email protected]>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import ctypes
import ctypes.util
import os
import sys
import threading
import time
try:
monotonic = time.monotonic
except AttributeError:
try:
if sys.platform == 'darwin': # OS X, iOS
# See Technical Q&A QA1398 of the Mac Developer Library:
# <https://developer.apple.com/library/mac/qa/qa1398/>
libc = ctypes.CDLL('/usr/lib/libc.dylib', use_errno=True)
class mach_timebase_info_data_t(ctypes.Structure):
"""System timebase info. Defined in <mach/mach_time.h>."""
_fields_ = (('numer', ctypes.c_uint32),
('denom', ctypes.c_uint32))
mach_absolute_time = libc.mach_absolute_time
mach_absolute_time.restype = ctypes.c_uint64
timebase = mach_timebase_info_data_t()
libc.mach_timebase_info(ctypes.byref(timebase))
ticks_per_second = timebase.numer / timebase.denom * 1.0e9
def monotonic():
"""Monotonic clock, cannot go backward."""
return mach_absolute_time() / ticks_per_second
elif sys.platform.startswith('win32') or sys.platform.startswith('cygwin'):
if sys.platform.startswith('cygwin'):
# Note: cygwin implements clock_gettime (CLOCK_MONOTONIC = 4) since
# version 1.7.6. Using raw WinAPI for maximum version compatibility.
# Ugly hack using the wrong calling convention (in 32-bit mode)
# because ctypes has no windll under cygwin (and it also seems that
# the code letting you select stdcall in _ctypes doesn't exist under
# the preprocessor definitions relevant to cygwin).
# This is 'safe' because:
# 1. The ABI of GetTickCount and GetTickCount64 is identical for
# both calling conventions because they both have no parameters.
# 2. libffi masks the problem because after making the call it doesn't
# touch anything through esp and epilogue code restores a correct
# esp from ebp afterwards.
try:
kernel32 = ctypes.cdll.kernel32
except OSError: # 'No such file or directory'
kernel32 = ctypes.cdll.LoadLibrary('kernel32.dll')
else:
kernel32 = ctypes.windll.kernel32
GetTickCount64 = getattr(kernel32, 'GetTickCount64', None)
if GetTickCount64:
# Windows Vista / Windows Server 2008 or newer.
GetTickCount64.restype = ctypes.c_ulonglong
def monotonic():
"""Monotonic clock, cannot go backward."""
return GetTickCount64() / 1000.0
else:
# Before Windows Vista.
GetTickCount = kernel32.GetTickCount
GetTickCount.restype = ctypes.c_uint32
get_tick_count_lock = threading.Lock()
get_tick_count_last_sample = 0
get_tick_count_wraparounds = 0
def monotonic():
"""Monotonic clock, cannot go backward."""
global get_tick_count_last_sample
global get_tick_count_wraparounds
with get_tick_count_lock:
current_sample = GetTickCount()
if current_sample < get_tick_count_last_sample:
get_tick_count_wraparounds += 1
get_tick_count_last_sample = current_sample
final_milliseconds = get_tick_count_wraparounds << 32
final_milliseconds += get_tick_count_last_sample
return final_milliseconds / 1000.0
else:
try:
clock_gettime = ctypes.CDLL(ctypes.util.find_library('c'),
use_errno=True).clock_gettime
except AttributeError:
clock_gettime = ctypes.CDLL(ctypes.util.find_library('rt'),
use_errno=True).clock_gettime
class timespec(ctypes.Structure):
"""Time specification, as described in clock_gettime(3)."""
_fields_ = (('tv_sec', ctypes.c_long),
('tv_nsec', ctypes.c_long))
if sys.platform.startswith('linux'):
CLOCK_MONOTONIC = 1
elif sys.platform.startswith('freebsd'):
CLOCK_MONOTONIC = 4
elif sys.platform.startswith('sunos5'):
CLOCK_MONOTONIC = 4
elif 'bsd' in sys.platform:
CLOCK_MONOTONIC = 3
def monotonic():
"""Monotonic clock, cannot go backward."""
ts = timespec()
if clock_gettime(CLOCK_MONOTONIC, ctypes.pointer(ts)):
errno = ctypes.get_errno()
raise OSError(errno, os.strerror(errno))
return ts.tv_sec + ts.tv_nsec / 1.0e9
# Perform a sanity-check.
if monotonic() - monotonic() > 0:
raise ValueError('monotonic() is not monotonic!')
except Exception:
raise RuntimeError('no suitable implementation for this system')
| mit | -2,948,555,834,065,863,700 | 41.396341 | 86 | 0.55228 | false |
cbitterfield/JobCard | archive/old_archive/new2_file.py | 1 | 1723 |
'''
Created on Sep 30, 2017
@author: colin
'''
import yaml
import importlib
import os
import sys
# Import Local Modules
import validate
#===============================================================================
# Setup test Logging
#===============================================================================
import logging
import logging.config
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(asctime)s [%(levelname)s] %(name)s: %(message)s',disable_existing_loggers=False, level=logging.INFO)
component = 'promoimg'
prefix = '/Users/colin/Documents/Appcelerator_Studio_Workspace/JobCard/Assembly/'
source = '/Users/colin/Documents/Appcelerator_Studio_Workspace/JobCard/EdgeSource01'
finish = '/Users/colin/Documents/Appcelerator_Studio_Workspace/JobCard/Finished'
card = '/Users/colin/Documents/Appcelerator_Studio_Workspace/JobCard/example/edge0022.yaml'
job = open('/Users/colin/Documents/Appcelerator_Studio_Workspace/JobCard/example/edge0022.yaml','r')
cfile = open('/Users/colin/Documents/Appcelerator_Studio_Workspace/JobCard/example/config.yaml','r')
noexec = False
jobcard = yaml.load(job)
config = yaml.load(cfile)
jobflag = 'exists'
#destination = prefix + jobcard['clipinfo']['projectno'] + "/" + jobcard['clipinfo']['prime_dubya'] + "/" + jobcard['clipinfo']['edgeid']
#if not os.path.isdir(destination):
# os.makedirs(destination,0777)
logger.info(sys.argv[0] + "[Starting]")
logger.info('Starting Job Processing for ' + jobcard['clipinfo']['edgeid'])
if not validate.produce(source, prefix, component, jobcard, config, noexec):
logger.info("JobCard is valid")
else:
logger.error("Fix JobCard issues; then rerun")
logger.info('[end program]')
| gpl-3.0 | 6,459,327,101,032,425,000 | 30.327273 | 137 | 0.672084 | false |
gooofy/zamia-ai | data-tools/csv/train_model.py | 3 | 1568 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2018 Guenter Bartsch
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# train keras module alignment model
#
import os
import sys
import traceback
import codecs
import logging
import time
import numpy as np
from optparse import OptionParser
from zamiaai import model
from nltools import misc
from nltools.tokenizer import tokenize
from sqlalchemy.orm import sessionmaker
from align_model import AlignModel
#
# init, cmdline
#
misc.init_app('train_model')
parser = OptionParser("usage: %prog [options]")
parser.add_option ("-v", "--verbose", action="store_true", dest="verbose",
help="verbose output")
(options, args) = parser.parse_args()
if options.verbose:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
#
# db
#
Session = sessionmaker(bind=model.engine)
session = Session()
#
# train
#
align_model = AlignModel(session)
align_model.train(100000, False)
| apache-2.0 | 702,990,146,049,444,600 | 21.4 | 74 | 0.705995 | false |
wlonk/warehouse | tests/unit/cli/test_cli.py | 6 | 1646 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import click
import pretend
import warehouse.cli
import warehouse.config
def test_lazy_config_delays(monkeypatch):
config = pretend.stub(foo="bar", another="thing")
configure = pretend.call_recorder(lambda a, settings: config)
monkeypatch.setattr(warehouse.config, "configure", configure)
lconfig = warehouse.cli.LazyConfig("thing", settings={"lol": "wat"})
assert configure.calls == []
assert lconfig.foo == "bar"
assert configure.calls == [pretend.call("thing", settings={"lol": "wat"})]
assert lconfig.another == "thing"
assert configure.calls == [pretend.call("thing", settings={"lol": "wat"})]
def test_cli_no_settings(monkeypatch, cli):
config = pretend.stub()
configure = pretend.call_recorder(lambda: config)
monkeypatch.setattr(warehouse.cli, "LazyConfig", configure)
@warehouse.cli.warehouse.command()
@click.pass_obj
def cli_test_command(obj):
assert obj is config
result = cli.invoke(warehouse.cli.warehouse, ["cli_test_command"])
assert result.exit_code == 0
assert configure.calls == [pretend.call()]
| apache-2.0 | -7,200,680,776,735,931,000 | 34.021277 | 78 | 0.716282 | false |
lixiangning888/whole_project | modules/signatures_orignal/antivm_generic_disk.py | 1 | 3265 |
# Copyright (C) 2012,2014 Claudio "nex" Guarnieri (@botherder), Accuvant, Inc. ([email protected])
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from lib.cuckoo.common.abstracts import Signature
class DiskInformation(Signature):
name = "antivm_generic_disk"
description = "Queries information on disks, possibly for anti-virtualization"
severity = 3
categories = ["anti-vm"]
authors = ["nex", "Accuvant"]
minimum = "1.2"
evented = True
def __init__(self, *args, **kwargs):
Signature.__init__(self, *args, **kwargs)
self.lastprocess = 0
self.handles = dict()
filter_apinames = set(["NtCreateFile", "NtOpenFile", "NtClose", "DeviceIoControl", "NtDuplicateObject", "NtDeviceIoControlFile"])
def on_call(self, call, process):
ioctls = [
0x2d1400, # IOCTL_STORAGE_QUERY_PROPERTY
0x70000, # IOCTL_DISK_GET_DRIVE_GEOMETRY
0x700a0, # IOCTL_DISK_GET_DRIVE_GEOMETRY_EX
0x4d008, # IOCTL_SCSI_MINIPORT
0x7405c, # IOCTL_DISK_GET_LENGTH_INFO
]
if process is not self.lastprocess:
self.handles = dict()
self.lastprocess = process
if call["api"] == "NtDuplicateObject" and call["status"]:
tgtarg = self.get_argument(call, "TargetHandle")
if tgtarg:
srchandle = int(self.get_argument(call, "SourceHandle"), 16)
tgthandle = int(tgtarg, 16)
if srchandle in self.handles:
self.handles[tgthandle] = self.handles[srchandle]
elif call["api"] == "NtClose":
handle = int(self.get_argument(call, "Handle"), 16)
self.handles.pop(handle, None)
elif (call["api"] == "NtCreateFile" or call["api"] == "NtOpenFile") and call["status"]:
filename = self.get_argument(call, "FileName")
handle = int(self.get_argument(call, "FileHandle"), 16)
if filename and (filename.lower() == "\\??\\physicaldrive0" or filename.lower().startswith("\\device\\harddisk") or "scsi0" in filename.lower()):
if handle not in self.handles:
self.handles[handle] = filename
elif call["api"] == "DeviceIoControl" or call["api"] == "NtDeviceIoControlFile":
ioctl = int(self.get_argument(call, "IoControlCode"), 16)
if call["api"] == "DeviceIoControl":
handle = int(self.get_argument(call, "DeviceHandle"), 16)
else:
handle = int(self.get_argument(call, "FileHandle"), 16)
if handle in self.handles and ioctl in ioctls:
return True | lgpl-3.0 | 4,824,502,871,173,900,000 | 45.657143 | 157 | 0.623583 | false |
duyetdev/openerp-6.1.1 | openerp/addons/hr/__openerp__.py | 9 | 2343 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Employee Directory",
"version": "1.1",
"author": "OpenERP SA",
"category": "Human Resources",
"sequence": 12,
'complexity': "easy",
"website": "http://www.openerp.com",
"description": """
Module for human resource management.
=====================================
You can manage:
* Employees and hierarchies : You can define your employee with User and display hierarchies
* HR Departments
* HR Jobs
""",
'images': ['images/hr_department.jpeg', 'images/hr_employee.jpeg','images/hr_job_position.jpeg'],
'depends': ['base_setup','mail', 'resource', 'board'],
'init_xml': [],
'update_xml': [
'security/hr_security.xml',
'security/ir.model.access.csv',
'hr_view.xml',
'hr_department_view.xml',
'process/hr_process.xml',
'hr_installer.xml',
'hr_data.xml',
'hr_board.xml',
],
'demo_xml': [
'hr_demo.xml',
],
'test': [
'test/open2recruit2close_job.yml',
'test/hr_demo.yml',
],
'installable': True,
'application': True,
'auto_install': False,
'certificate': '0086710558965',
"css": [ 'static/src/css/hr.css' ],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -4,381,880,410,070,221,300 | 33.970149 | 101 | 0.57149 | false |
hojel/calibre | src/calibre/ebooks/lrf/html/table.py | 24 | 13938 | __license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
import math, sys, re
from calibre.ebooks.lrf.fonts import get_font
from calibre.ebooks.lrf.pylrs.pylrs import TextBlock, Text, CR, Span, \
CharButton, Plot, Paragraph, \
LrsTextTag
def ceil(num):
return int(math.ceil(num))
def print_xml(elem):
from calibre.ebooks.lrf.pylrs.pylrs import ElementWriter
elem = elem.toElement('utf8')
ew = ElementWriter(elem, sourceEncoding='utf8')
ew.write(sys.stdout)
print
def cattrs(base, extra):
new = base.copy()
new.update(extra)
return new
def tokens(tb):
    '''
    Iterate over the contents of a TextBlock, yielding (token, attrs) pairs.
    A token is one of:
      1. a string -- a block of text that has the same style (attrs)
      2. the integer 1 or 2 with attrs None -- a paragraph or line break
      3. a Plot object with attrs None -- an embedded image
    '''
def process_element(x, attrs):
if isinstance(x, CR):
yield 2, None
elif isinstance(x, Text):
yield x.text, cattrs(attrs, {})
elif isinstance(x, basestring):
yield x, cattrs(attrs, {})
elif isinstance(x, (CharButton, LrsTextTag)):
if x.contents:
if hasattr(x.contents[0], 'text'):
yield x.contents[0].text, cattrs(attrs, {})
elif hasattr(x.contents[0], 'attrs'):
for z in process_element(x.contents[0], x.contents[0].attrs):
yield z
elif isinstance(x, Plot):
yield x, None
elif isinstance(x, Span):
attrs = cattrs(attrs, x.attrs)
for y in x.contents:
for z in process_element(y, attrs):
yield z
for i in tb.contents:
if isinstance(i, CR):
yield 1, None
elif isinstance(i, Paragraph):
for j in i.contents:
attrs = {}
if hasattr(j, 'attrs'):
attrs = j.attrs
for k in process_element(j, attrs):
yield k
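# --- Illustrative sketch (not part of the original calibre module) ----------
# Shows how the (token, attrs) pairs produced by tokens() are typically
# consumed; `tb` is assumed to be a pylrs TextBlock such as the ones stored in
# Cell.text_blocks below. Defined for documentation only and never called here.
def _debug_dump_tokens(tb):
    for token, attrs in tokens(tb):
        if isinstance(token, int):
            # 1 = paragraph break, 2 = line break (see process_element above)
            print 'break', token
        elif isinstance(token, Plot):
            print 'plot', token.xsize, token.ysize
        else:
            print 'text', repr(token), attrs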
class Cell(object):
def __init__(self, conv, tag, css):
self.conv = conv
self.tag = tag
self.css = css
self.text_blocks = []
self.pwidth = -1.
if tag.has_key('width') and '%' in tag['width']:
try:
self.pwidth = float(tag['width'].replace('%', ''))
except ValueError:
pass
if css.has_key('width') and '%' in css['width']:
try:
self.pwidth = float(css['width'].replace('%', ''))
except ValueError:
pass
if self.pwidth > 100:
self.pwidth = -1
self.rowspan = self.colspan = 1
try:
self.colspan = int(tag['colspan']) if tag.has_key('colspan') else 1
self.rowspan = int(tag['rowspan']) if tag.has_key('rowspan') else 1
except:
pass
pp = conv.current_page
conv.book.allow_new_page = False
conv.current_page = conv.book.create_page()
conv.parse_tag(tag, css)
conv.end_current_block()
for item in conv.current_page.contents:
if isinstance(item, TextBlock):
self.text_blocks.append(item)
conv.current_page = pp
conv.book.allow_new_page = True
if not self.text_blocks:
tb = conv.book.create_text_block()
tb.Paragraph(' ')
self.text_blocks.append(tb)
for tb in self.text_blocks:
tb.parent = None
tb.objId = 0
# Needed as we have to eventually change this BlockStyle's width and
# height attributes. This blockstyle may be shared with other
# elements, so doing that causes havoc.
tb.blockStyle = conv.book.create_block_style()
ts = conv.book.create_text_style(**tb.textStyle.attrs)
ts.attrs['parindent'] = 0
tb.textStyle = ts
if ts.attrs['align'] == 'foot':
if isinstance(tb.contents[-1], Paragraph):
tb.contents[-1].append(' ')
def pts_to_pixels(self, pts):
pts = int(pts)
return ceil((float(self.conv.profile.dpi)/72.)*(pts/10.))
def minimum_width(self):
return max([self.minimum_tb_width(tb) for tb in self.text_blocks])
def minimum_tb_width(self, tb):
ts = tb.textStyle.attrs
default_font = get_font(ts['fontfacename'], self.pts_to_pixels(ts['fontsize']))
parindent = self.pts_to_pixels(ts['parindent'])
mwidth = 0
for token, attrs in tokens(tb):
font = default_font
if isinstance(token, int): # Handle para and line breaks
continue
if isinstance(token, Plot):
return self.pts_to_pixels(token.xsize)
ff = attrs.get('fontfacename', ts['fontfacename'])
fs = attrs.get('fontsize', ts['fontsize'])
if (ff, fs) != (ts['fontfacename'], ts['fontsize']):
font = get_font(ff, self.pts_to_pixels(fs))
if not token.strip():
continue
word = token.split()
word = word[0] if word else ""
width = font.getsize(word)[0]
if width > mwidth:
mwidth = width
return parindent + mwidth + 2
def text_block_size(self, tb, maxwidth=sys.maxint, debug=False):
ts = tb.textStyle.attrs
default_font = get_font(ts['fontfacename'], self.pts_to_pixels(ts['fontsize']))
parindent = self.pts_to_pixels(ts['parindent'])
top, bottom, left, right = 0, 0, parindent, parindent
def add_word(width, height, left, right, top, bottom, ls, ws):
if left + width > maxwidth:
left = width + ws
top += ls
bottom = top+ls if top+ls > bottom else bottom
else:
left += (width + ws)
right = left if left > right else right
bottom = top+ls if top+ls > bottom else bottom
return left, right, top, bottom
for token, attrs in tokens(tb):
            if attrs is None:
attrs = {}
font = default_font
ls = self.pts_to_pixels(attrs.get('baselineskip', ts['baselineskip']))+\
self.pts_to_pixels(attrs.get('linespace', ts['linespace']))
ws = self.pts_to_pixels(attrs.get('wordspace', ts['wordspace']))
if isinstance(token, int): # Handle para and line breaks
if top != bottom: #Previous element not a line break
top = bottom
else:
top += ls
bottom += ls
                left = parindent if token == 1 else 0  # paragraph break restores the indent, line break does not
continue
if isinstance(token, Plot):
width, height = self.pts_to_pixels(token.xsize), self.pts_to_pixels(token.ysize)
left, right, top, bottom = add_word(width, height, left, right, top, bottom, height, ws)
continue
ff = attrs.get('fontfacename', ts['fontfacename'])
fs = attrs.get('fontsize', ts['fontsize'])
if (ff, fs) != (ts['fontfacename'], ts['fontsize']):
font = get_font(ff, self.pts_to_pixels(fs))
for word in token.split():
width, height = font.getsize(word)
left, right, top, bottom = add_word(width, height, left, right, top, bottom, ls, ws)
return right+3+max(parindent, 10), bottom
def text_block_preferred_width(self, tb, debug=False):
return self.text_block_size(tb, sys.maxint, debug=debug)[0]
def preferred_width(self, debug=False):
return ceil(max([self.text_block_preferred_width(i, debug=debug) for i in self.text_blocks]))
def height(self, width):
return sum([self.text_block_size(i, width)[1] for i in self.text_blocks])
class Row(object):
def __init__(self, conv, row, css, colpad):
self.cells = []
self.colpad = colpad
cells = row.findAll(re.compile('td|th', re.IGNORECASE))
self.targets = []
for cell in cells:
ccss = conv.tag_css(cell, css)[0]
self.cells.append(Cell(conv, cell, ccss))
for a in row.findAll(id=True) + row.findAll(name=True):
name = a['name'] if a.has_key('name') else a['id'] if a.has_key('id') else None
if name is not None:
self.targets.append(name.replace('#', ''))
def number_of_cells(self):
'''Number of cells in this row. Respects colspan'''
ans = 0
for cell in self.cells:
ans += cell.colspan
return ans
def height(self, widths):
i, heights = 0, []
for cell in self.cells:
width = sum(widths[i:i+cell.colspan])
heights.append(cell.height(width))
i += cell.colspan
if not heights:
return 0
return max(heights)
def cell_from_index(self, col):
i = -1
cell = None
for cell in self.cells:
for k in range(0, cell.colspan):
if i == col:
break
i += 1
if i == col:
break
return cell
def minimum_width(self, col):
cell = self.cell_from_index(col)
if not cell:
return 0
return cell.minimum_width()
def preferred_width(self, col):
cell = self.cell_from_index(col)
if not cell:
return 0
return 0 if cell.colspan > 1 else cell.preferred_width()
def width_percent(self, col):
cell = self.cell_from_index(col)
if not cell:
return -1
return -1 if cell.colspan > 1 else cell.pwidth
def cell_iterator(self):
for c in self.cells:
yield c
class Table(object):
def __init__(self, conv, table, css, rowpad=10, colpad=10):
self.rows = []
self.conv = conv
self.rowpad = rowpad
self.colpad = colpad
rows = table.findAll('tr')
conv.in_table = True
for row in rows:
rcss = conv.tag_css(row, css)[0]
self.rows.append(Row(conv, row, rcss, colpad))
conv.in_table = False
def number_of_columns(self):
max = 0
for row in self.rows:
max = row.number_of_cells() if row.number_of_cells() > max else max
return max
def number_or_rows(self):
return len(self.rows)
def height(self, maxwidth):
''' Return row heights + self.rowpad'''
widths = self.get_widths(maxwidth)
return sum([row.height(widths) + self.rowpad for row in self.rows]) - self.rowpad
def minimum_width(self, col):
return max([row.minimum_width(col) for row in self.rows])
def width_percent(self, col):
return max([row.width_percent(col) for row in self.rows])
def get_widths(self, maxwidth):
'''
Return widths of columns + self.colpad
'''
rows, cols = self.number_or_rows(), self.number_of_columns()
widths = range(cols)
for c in range(cols):
cellwidths = [ 0 for i in range(rows)]
for r in range(rows):
try:
cellwidths[r] = self.rows[r].preferred_width(c)
except IndexError:
continue
widths[c] = max(cellwidths)
min_widths = [self.minimum_width(i)+10 for i in xrange(cols)]
for i in xrange(len(widths)):
wp = self.width_percent(i)
if wp >= 0.:
widths[i] = max(min_widths[i], ceil((wp/100.) * (maxwidth - (cols-1)*self.colpad)))
itercount = 0
while sum(widths) > maxwidth-((len(widths)-1)*self.colpad) and itercount < 100:
for i in range(cols):
widths[i] = ceil((95./100.)*widths[i]) if \
ceil((95./100.)*widths[i]) >= min_widths[i] else widths[i]
itercount += 1
return [i+self.colpad for i in widths]
def blocks(self, maxwidth, maxheight):
rows, cols = self.number_or_rows(), self.number_of_columns()
cellmatrix = [[None for c in range(cols)] for r in range(rows)]
rowpos = [0 for i in range(rows)]
for r in range(rows):
nc = self.rows[r].cell_iterator()
try:
while True:
cell = nc.next()
cellmatrix[r][rowpos[r]] = cell
rowpos[r] += cell.colspan
for k in range(1, cell.rowspan):
try:
rowpos[r+k] += 1
except IndexError:
break
except StopIteration: # No more cells in this row
continue
widths = self.get_widths(maxwidth)
heights = [row.height(widths) for row in self.rows]
xpos = [sum(widths[:i]) for i in range(cols)]
delta = maxwidth - sum(widths)
if delta < 0:
delta = 0
for r in range(len(cellmatrix)):
yield None, 0, heights[r], 0, self.rows[r].targets
for c in range(len(cellmatrix[r])):
cell = cellmatrix[r][c]
if not cell:
continue
width = sum(widths[c:c+cell.colspan])-self.colpad*cell.colspan
sypos = 0
for tb in cell.text_blocks:
tb.blockStyle = self.conv.book.create_block_style(
blockwidth=width,
blockheight=cell.text_block_size(tb, width)[1],
blockrule='horz-fixed')
yield tb, xpos[c], sypos, delta, None
sypos += tb.blockStyle.attrs['blockheight']
| gpl-3.0 | -675,818,216,413,012,200 | 34.830334 | 104 | 0.520017 | false |
erwilan/ansible | lib/ansible/modules/network/cloudengine/ce_snmp_location.py | 7 | 6778 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.0'}
DOCUMENTATION = '''
---
module: ce_snmp_location
version_added: "2.4"
short_description: Manages SNMP location configuration on HUAWEI CloudEngine switches.
description:
- Manages SNMP location configurations on HUAWEI CloudEngine switches.
author:
- wangdezhuang (@CloudEngine-Ansible)
options:
location:
description:
- Location information.
required: true
default: null
state:
description:
- Manage the state of the resource.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- name: CloudEngine snmp location test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: "Config SNMP location"
ce_snmp_location:
state: present
location: nanjing China
provider: "{{ cli }}"
- name: "Remove SNMP location"
ce_snmp_location:
state: absent
location: nanjing China
provider: "{{ cli }}"
'''
RETURN = '''
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"location": "nanjing China",
"state": "present"}
existing:
description: k/v pairs of existing aaa server
returned: always
type: dict
sample: {}
end_state:
description: k/v pairs of aaa params after module execution
returned: always
type: dict
sample: {"location": "nanjing China"}
updates:
description: command sent to the device
returned: always
type: list
sample: ["snmp-agent sys-info location nanjing China"]
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ce import get_config, load_config, ce_argument_spec
class SnmpLocation(object):
""" Manages SNMP location configuration """
def __init__(self, **kwargs):
""" Class init """
# module
argument_spec = kwargs["argument_spec"]
self.spec = argument_spec
self.module = AnsibleModule(argument_spec=self.spec, supports_check_mode=True)
# config
self.cur_cfg = dict()
# module args
self.state = self.module.params['state']
self.location = self.module.params['location']
# state
self.changed = False
self.updates_cmd = list()
self.results = dict()
self.proposed = dict()
self.existing = dict()
self.end_state = dict()
def check_args(self):
""" Check invalid args """
if self.location:
if len(self.location) > 255 or len(self.location) < 1:
self.module.fail_json(
msg='Error: The len of location %s is out of [1 - 255].' % self.location)
else:
self.module.fail_json(
msg='Error: The len of location is 0.')
def get_proposed(self):
""" Get proposed state """
self.proposed["state"] = self.state
if self.location:
self.proposed["location"] = self.location
def get_existing(self):
""" Get existing state """
tmp_cfg = self.cli_get_config()
if tmp_cfg:
temp_data = tmp_cfg.split(r"location ")
self.cur_cfg["location"] = temp_data[1]
self.existing["location"] = temp_data[1]
def get_end_state(self):
""" Get end state """
tmp_cfg = self.cli_get_config()
if tmp_cfg:
temp_data = tmp_cfg.split(r"location ")
self.end_state["location"] = temp_data[1]
def cli_load_config(self, commands):
""" Load config by cli """
if not self.module.check_mode:
load_config(self.module, commands)
def cli_get_config(self):
""" Get config by cli """
regular = "| include snmp | include location"
flags = list()
flags.append(regular)
tmp_cfg = get_config(self.module, flags)
return tmp_cfg
def set_config(self):
""" Set configure by cli """
cmd = "snmp-agent sys-info location %s" % self.location
self.updates_cmd.append(cmd)
cmds = list()
cmds.append(cmd)
self.cli_load_config(cmds)
self.changed = True
def undo_config(self):
""" Undo configure by cli """
cmd = "undo snmp-agent sys-info location"
self.updates_cmd.append(cmd)
cmds = list()
cmds.append(cmd)
self.cli_load_config(cmds)
self.changed = True
def work(self):
""" Main work function """
self.check_args()
self.get_proposed()
self.get_existing()
if self.state == "present":
if "location" in self.cur_cfg.keys() and self.location == self.cur_cfg["location"]:
pass
else:
self.set_config()
else:
if "location" in self.cur_cfg.keys() and self.location == self.cur_cfg["location"]:
self.undo_config()
self.get_end_state()
self.results['changed'] = self.changed
self.results['proposed'] = self.proposed
self.results['existing'] = self.existing
self.results['end_state'] = self.end_state
self.results['updates'] = self.updates_cmd
self.module.exit_json(**self.results)
def main():
""" Module main """
argument_spec = dict(
state=dict(choices=['present', 'absent'], default='present'),
location=dict(type='str', required=True)
)
argument_spec.update(ce_argument_spec)
module = SnmpLocation(argument_spec=argument_spec)
module.work()
if __name__ == '__main__':
main()
| gpl-3.0 | 8,408,833,285,769,398,000 | 25.896825 | 95 | 0.596341 | false |
lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2016_09_01/models/verification_ip_flow_result.py | 1 | 1299 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class VerificationIPFlowResult(Model):
"""Results of IP flow verification on the target resource.
:param access: Indicates whether the traffic is allowed or denied.
Possible values include: 'Allow', 'Deny'
:type access: str or ~azure.mgmt.network.v2016_09_01.models.Access
:param rule_name: Name of the rule. If input is not matched against any
security rule, it is not displayed.
:type rule_name: str
"""
_attribute_map = {
'access': {'key': 'access', 'type': 'str'},
'rule_name': {'key': 'ruleName', 'type': 'str'},
}
def __init__(self, **kwargs):
super(VerificationIPFlowResult, self).__init__(**kwargs)
self.access = kwargs.get('access', None)
self.rule_name = kwargs.get('rule_name', None)
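# --- Illustrative sketch (not part of the generated SDK) --------------------
# Like other msrest models, instances are normally built from keyword
# arguments named in _attribute_map; the rule name used below is an assumption,
# not a value taken from the service.
def _example_verification_ip_flow_result():
    result = VerificationIPFlowResult(
        access='Allow', rule_name='defaultSecurityRules/AllowVnetInBound')
    return result.access, result.rule_name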
| mit | 1,227,397,315,372,065,800 | 37.205882 | 76 | 0.597383 | false |
brian-yang/mozillians | mozillians/announcements/tests/test_models.py | 9 | 1314 | from datetime import datetime
from django.utils.timezone import make_aware, utc
from jinja2 import Markup
from mock import patch
from nose.tools import ok_
from mozillians.announcements.tests import AnnouncementFactory, TestCase
class AnnouncementTests(TestCase):
@patch('mozillians.announcements.models.timezone')
def test_published(self, mock_obj):
"""Test published model property."""
first = AnnouncementFactory.create(
publish_from=make_aware(datetime(2013, 2, 12), utc),
publish_until=make_aware(datetime(2013, 2, 18), utc))
second = AnnouncementFactory.create(
publish_from=make_aware(datetime(2013, 2, 15), utc),
publish_until=make_aware(datetime(2013, 2, 17), utc))
third = AnnouncementFactory.create(
publish_from=make_aware(datetime(2013, 2, 21), utc),
publish_until=make_aware(datetime(2013, 2, 23), utc))
mock_obj.now.return_value = make_aware(datetime(2013, 2, 16), utc)
ok_(first.published)
ok_(second.published)
ok_(not third.published)
def test_get_template_text(self):
announcement = AnnouncementFactory.create(publish_from=datetime(2013, 2, 12))
text = announcement.get_template_text()
ok_(isinstance(text, Markup))
| bsd-3-clause | 4,015,194,616,235,547,600 | 37.647059 | 85 | 0.671994 | false |
davidhdz/crits | documentation/src/conf.py | 17 | 8686 | # -*- coding: utf-8 -*-
#
# CRITs documentation build configuration file, created by
# sphinx-quickstart on Fri Oct 25 15:33:49 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../../'))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "crits.settings")
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
'sphinx.ext.viewcode',
'sphinx.ext.intersphinx',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'CRITs'
copyright = u'2015, MITRE Corporation'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '3.0.0'
# The full version, including alpha/beta/rc tags.
release = '3.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'CRITsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'CRITs.tex', u'CRITs Documentation',
u'mgoffin', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'crits', u'CRITs Documentation',
[u'MITRE Corporation'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'CRITs', u'CRITs Documentation',
u'MITRE Corporation', 'CRITs', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
intersphinx_mapping = {
'python': ('http://docs.python.org/2.7', None),
'sphinx': ('http://sphinx.pocoo.org/', None),
'django': ('http://docs.djangoproject.com/en/dev/', 'http://docs.djangoproject.com/en/dev/_objects/'),
'mongoengine': ('http://docs.mongoengine.org/en/latest/', None),
}
| mit | -1,092,684,036,117,284,000 | 30.81685 | 106 | 0.70274 | false |
indirectlylit/kolibri | setup.py | 2 | 3969 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import os
from setuptools import setup
from setuptools.command.install_scripts import install_scripts
import kolibri
# Windows-specific .bat script template
WINDOWS_BATCH_TEMPLATE = r"""@echo off
set mypath=%~dp0
set pyscript="%mypath%{file_name}"
set /p line1=<%pyscript%
if "%line1:~0,2%" == "#!" (goto :goodstart)
echo First line of %pyscript% does not start with "#!"
exit /b 1
:goodstart
set py_exe=%line1:~2%
call %py_exe% %pyscript% %*
"""
# Generate Windows-specific .bat files
class gen_windows_batch_files(install_scripts):
def run(self):
# default behaviors
install_scripts.run(self)
# Nothing more to do if this is not Windows
if not os.name == "nt":
return
# For Windows, write batch scripts for all executable python files
for output_path in self.get_outputs():
# look for #! at the top
with open(output_path, "rt") as f:
first_line = f.readline()
            # skip non-executable python files
if not (first_line.startswith("#!") and "python" in first_line.lower()):
continue
path_name, file_name = os.path.split(output_path)
if self.dry_run:
continue
bat_file = os.path.join(path_name, os.path.splitext(file_name)[0] + ".bat")
with open(bat_file, "wt") as f:
f.write(WINDOWS_BATCH_TEMPLATE.format(file_name=file_name))
long_description = """
`Kolibri <https://learningequality.org/kolibri/>`_ is the offline learning platform
from `Learning Equality <https://learningequality.org/>`_.
This package can be installed by running ``pip install --user kolibri``. `See the download
page <https://learningequality.org/download/>`_ for other methods of installation.
- `View the documentation <https://kolibri.readthedocs.io/>`_ and the `community
forums <https://community.learningequality.org/>`_ for more guidance on setting up
and using Kolibri
- Visit the `Github project <https://github.com/learningequality/kolibri>`_ and the
`developer documentation <https://kolibri-dev.readthedocs.io/>`_ if you would like
to contribute to development
"""
setup(
name="kolibri",
version=kolibri.__version__,
description="Kolibri - the offline app for universal education",
long_description=long_description,
author="Learning Equality",
author_email="[email protected]",
url="https://github.com/learningequality/kolibri",
packages=[str("kolibri")], # https://github.com/pypa/setuptools/pull/597
entry_points={
"console_scripts": ["kolibri = kolibri.utils.cli:main"],
"kolibri.plugins": [
"{module_path} = {module_path}".format(module_path=module_path)
for module_path in kolibri.INTERNAL_PLUGINS
],
},
package_dir={"kolibri": "kolibri"},
include_package_data=True,
install_requires=[],
dependency_links=[],
tests_require=["pytest", "tox", "flake8"],
license="MIT",
zip_safe=False,
keywords=["education", "offline", "kolibri"],
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Development Status :: 4 - Beta",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: Implementation :: PyPy",
],
cmdclass={"install_scripts": gen_windows_batch_files},
)
| mit | 8,729,614,028,381,882,000 | 35.081818 | 90 | 0.639456 | false |
befelix/GPy | GPy/testing/serialization_tests.py | 1 | 12625 | '''
Created on 20 April 2017
@author: pgmoren
'''
import unittest, itertools
#import cPickle as pickle
import pickle
import numpy as np
import tempfile
import GPy
from nose import SkipTest
fixed_seed = 11
class Test(unittest.TestCase):
def test_serialize_deserialize_kernels(self):
k1 = GPy.kern.RBF(2, variance=1.0, lengthscale=[1.0,1.0], ARD=True)
k2 = GPy.kern.RatQuad(2, variance=2.0, lengthscale=1.0, power=2.0, active_dims = [0,1])
k3 = GPy.kern.Bias(2, variance=2.0, active_dims = [1,0])
k4 = GPy.kern.StdPeriodic(2, variance=2.0, lengthscale=1.0, period=1.0, active_dims = [1,1])
k5 = GPy.kern.Linear(2, variances=[2.0, 1.0], ARD=True, active_dims = [1,1])
k6 = GPy.kern.Exponential(2, variance=1., lengthscale=2)
k7 = GPy.kern.Matern32(2, variance=1.0, lengthscale=[1.0,3.0], ARD=True, active_dims = [1,1])
k8 = GPy.kern.Matern52(2, variance=2.0, lengthscale=[2.0,1.0], ARD=True, active_dims = [1,0])
k9 = GPy.kern.ExpQuad(2, variance=3.0, lengthscale=[1.0,2.0], ARD=True, active_dims = [0,1])
k10 = k1 + k1.copy() + k2 + k3 + k4 + k5 + k6
k11 = k1 * k2 * k2.copy() * k3 * k4 * k5
k12 = (k1 + k2) * (k3 + k4 + k5)
k13 = ((k1 + k2) * k3) + k4 + k5 * k7
k14 = ((k1 + k2) * k3) + k4 * k5 + k8
k15 = ((k1 * k2) * k3) + k4 * k5 + k8 + k9
k_list = [k1,k2,k3,k4,k5,k6,k7,k8,k9,k10,k11,k12,k13,k14,k15]
for kk in k_list:
kk_dict = kk.to_dict()
kk_r = GPy.kern.Kern.from_dict(kk_dict)
assert type(kk) == type(kk_r)
np.testing.assert_array_equal(kk[:], kk_r[:])
np.testing.assert_array_equal(np.array(kk.active_dims), np.array(kk_r.active_dims))
def test_serialize_deserialize_mappings(self):
m1 = GPy.mappings.Identity(3,2)
m2 = GPy.mappings.Constant(3,2,1)
m2_r = GPy.core.mapping.Mapping.from_dict(m2.to_dict())
np.testing.assert_array_equal(m2.C.values[:], m2_r.C.values[:])
m3 = GPy.mappings.Linear(3,2)
m3_r = GPy.core.mapping.Mapping.from_dict(m3.to_dict())
assert np.all(m3.A == m3_r.A)
m_list = [m1, m2, m3]
for mm in m_list:
mm_dict = mm.to_dict()
mm_r = GPy.core.mapping.Mapping.from_dict(mm_dict)
assert type(mm) == type(mm_r)
assert type(mm.input_dim) == type(mm_r.input_dim)
assert type(mm.output_dim) == type(mm_r.output_dim)
def test_serialize_deserialize_likelihoods(self):
l1 = GPy.likelihoods.Gaussian(GPy.likelihoods.link_functions.Identity(),variance=3.0)
l1_r = GPy.likelihoods.likelihood.Likelihood.from_dict(l1.to_dict())
l2 = GPy.likelihoods.Bernoulli(GPy.likelihoods.link_functions.Probit())
l2_r = GPy.likelihoods.likelihood.Likelihood.from_dict(l2.to_dict())
assert type(l1) == type(l1_r)
assert np.all(l1.variance == l1_r.variance)
assert type(l2) == type(l2_r)
def test_serialize_deserialize_normalizers(self):
n1 = GPy.util.normalizer.Standardize()
n1.scale_by(np.random.rand(10))
n1_r = GPy.util.normalizer._Norm.from_dict((n1.to_dict()))
assert type(n1) == type(n1_r)
assert np.all(n1.mean == n1_r.mean)
assert np.all(n1.std == n1_r.std)
def test_serialize_deserialize_link_functions(self):
l1 = GPy.likelihoods.link_functions.Identity()
l2 = GPy.likelihoods.link_functions.Probit()
l_list = [l1, l2]
for ll in l_list:
ll_dict = ll.to_dict()
ll_r = GPy.likelihoods.link_functions.GPTransformation.from_dict(ll_dict)
assert type(ll) == type(ll_r)
def test_serialize_deserialize_inference_methods(self):
e1 = GPy.inference.latent_function_inference.expectation_propagation.EP(ep_mode="nested")
e1.ga_approx_old = GPy.inference.latent_function_inference.expectation_propagation.gaussianApproximation(np.random.rand(10),np.random.rand(10))
e1._ep_approximation = []
e1._ep_approximation.append(GPy.inference.latent_function_inference.expectation_propagation.posteriorParams(np.random.rand(10),np.random.rand(100).reshape((10,10))))
e1._ep_approximation.append(GPy.inference.latent_function_inference.expectation_propagation.gaussianApproximation(np.random.rand(10),np.random.rand(10)))
e1._ep_approximation.append(GPy.inference.latent_function_inference.expectation_propagation.cavityParams(10))
e1._ep_approximation[-1].v = np.random.rand(10)
e1._ep_approximation[-1].tau = np.random.rand(10)
e1._ep_approximation.append(np.random.rand(10))
e1_r = GPy.inference.latent_function_inference.LatentFunctionInference.from_dict(e1.to_dict())
assert type(e1) == type(e1_r)
assert e1.epsilon==e1_r.epsilon
assert e1.eta==e1_r.eta
assert e1.delta==e1_r.delta
assert e1.always_reset==e1_r.always_reset
assert e1.max_iters==e1_r.max_iters
assert e1.ep_mode==e1_r.ep_mode
assert e1.parallel_updates==e1_r.parallel_updates
np.testing.assert_array_equal(e1.ga_approx_old.tau[:], e1_r.ga_approx_old.tau[:])
np.testing.assert_array_equal(e1.ga_approx_old.v[:], e1_r.ga_approx_old.v[:])
np.testing.assert_array_equal(e1._ep_approximation[0].mu[:], e1_r._ep_approximation[0].mu[:])
np.testing.assert_array_equal(e1._ep_approximation[0].Sigma[:], e1_r._ep_approximation[0].Sigma[:])
np.testing.assert_array_equal(e1._ep_approximation[1].tau[:], e1_r._ep_approximation[1].tau[:])
np.testing.assert_array_equal(e1._ep_approximation[1].v[:], e1_r._ep_approximation[1].v[:])
np.testing.assert_array_equal(e1._ep_approximation[2].tau[:], e1_r._ep_approximation[2].tau[:])
np.testing.assert_array_equal(e1._ep_approximation[2].v[:], e1_r._ep_approximation[2].v[:])
np.testing.assert_array_equal(e1._ep_approximation[3][:], e1_r._ep_approximation[3][:])
e1 = GPy.inference.latent_function_inference.expectation_propagation.EPDTC(ep_mode="nested")
e1.ga_approx_old = GPy.inference.latent_function_inference.expectation_propagation.gaussianApproximation(np.random.rand(10),np.random.rand(10))
e1._ep_approximation = []
e1._ep_approximation.append(GPy.inference.latent_function_inference.expectation_propagation.posteriorParamsDTC(np.random.rand(10),np.random.rand(10)))
e1._ep_approximation.append(GPy.inference.latent_function_inference.expectation_propagation.gaussianApproximation(np.random.rand(10),np.random.rand(10)))
e1._ep_approximation.append(GPy.inference.latent_function_inference.expectation_propagation.cavityParams(10))
e1._ep_approximation[-1].v = np.random.rand(10)
e1._ep_approximation[-1].tau = np.random.rand(10)
e1._ep_approximation.append(np.random.rand(10))
e1_r = GPy.inference.latent_function_inference.LatentFunctionInference.from_dict(e1.to_dict())
assert type(e1) == type(e1_r)
assert e1.epsilon==e1_r.epsilon
assert e1.eta==e1_r.eta
assert e1.delta==e1_r.delta
assert e1.always_reset==e1_r.always_reset
assert e1.max_iters==e1_r.max_iters
assert e1.ep_mode==e1_r.ep_mode
assert e1.parallel_updates==e1_r.parallel_updates
np.testing.assert_array_equal(e1.ga_approx_old.tau[:], e1_r.ga_approx_old.tau[:])
np.testing.assert_array_equal(e1.ga_approx_old.v[:], e1_r.ga_approx_old.v[:])
np.testing.assert_array_equal(e1._ep_approximation[0].mu[:], e1_r._ep_approximation[0].mu[:])
np.testing.assert_array_equal(e1._ep_approximation[0].Sigma_diag[:], e1_r._ep_approximation[0].Sigma_diag[:])
np.testing.assert_array_equal(e1._ep_approximation[1].tau[:], e1_r._ep_approximation[1].tau[:])
np.testing.assert_array_equal(e1._ep_approximation[1].v[:], e1_r._ep_approximation[1].v[:])
np.testing.assert_array_equal(e1._ep_approximation[2].tau[:], e1_r._ep_approximation[2].tau[:])
np.testing.assert_array_equal(e1._ep_approximation[2].v[:], e1_r._ep_approximation[2].v[:])
np.testing.assert_array_equal(e1._ep_approximation[3][:], e1_r._ep_approximation[3][:])
e2 = GPy.inference.latent_function_inference.exact_gaussian_inference.ExactGaussianInference()
e2_r = GPy.inference.latent_function_inference.LatentFunctionInference.from_dict(e2.to_dict())
assert type(e2) == type(e2_r)
def test_serialize_deserialize_model(self):
np.random.seed(fixed_seed)
N = 20
Nhalf = int(N/2)
X = np.hstack([np.random.normal(5, 2, Nhalf), np.random.normal(10, 2, Nhalf)])[:, None]
Y = np.hstack([np.ones(Nhalf), np.zeros(Nhalf)])[:, None]
kernel = GPy.kern.RBF(1)
likelihood = GPy.likelihoods.Bernoulli()
inference_method=GPy.inference.latent_function_inference.expectation_propagation.EP(ep_mode="nested")
mean_function=None
m = GPy.core.GP(X=X, Y=Y, kernel=kernel, likelihood=likelihood, inference_method=inference_method, mean_function=mean_function, normalizer=True, name='gp_classification')
m.optimize()
m.save_model("temp_test_gp_with_data.json", compress=True, save_data=True)
m.save_model("temp_test_gp_without_data.json", compress=True, save_data=False)
m1_r = GPy.core.GP.load_model("temp_test_gp_with_data.json.zip")
m2_r = GPy.core.GP.load_model("temp_test_gp_without_data.json.zip", (X,Y))
import os
os.remove("temp_test_gp_with_data.json.zip")
os.remove("temp_test_gp_without_data.json.zip")
var = m.predict(X)[0]
var1_r = m1_r.predict(X)[0]
var2_r = m2_r.predict(X)[0]
np.testing.assert_array_equal(np.array(var).flatten(), np.array(var1_r).flatten())
np.testing.assert_array_equal(np.array(var).flatten(), np.array(var2_r).flatten())
def test_serialize_deserialize_inference_GPRegressor(self):
np.random.seed(fixed_seed)
N = 50
N_new = 50
D = 1
X = np.random.uniform(-3., 3., (N, 1))
Y = np.sin(X) + np.random.randn(N, D) * 0.05
X_new = np.random.uniform(-3., 3., (N_new, 1))
k = GPy.kern.RBF(input_dim=1, lengthscale=10)
m = GPy.models.GPRegression(X,Y,k)
m.optimize()
m.save_model("temp_test_gp_regressor_with_data.json", compress=True, save_data=True)
m.save_model("temp_test_gp_regressor_without_data.json", compress=True, save_data=False)
m1_r = GPy.models.GPRegression.load_model("temp_test_gp_regressor_with_data.json.zip")
m2_r = GPy.models.GPRegression.load_model("temp_test_gp_regressor_without_data.json.zip", (X,Y))
import os
os.remove("temp_test_gp_regressor_with_data.json.zip")
os.remove("temp_test_gp_regressor_without_data.json.zip")
Xp = np.random.uniform(size=(int(1e5),1))
Xp[:,0] = Xp[:,0]*15-5
_, var = m.predict(Xp)
_, var1_r = m1_r.predict(Xp)
_, var2_r = m2_r.predict(Xp)
np.testing.assert_array_equal(var.flatten(), var1_r.flatten())
np.testing.assert_array_equal(var.flatten(), var2_r.flatten())
def test_serialize_deserialize_inference_GPClassifier(self):
np.random.seed(fixed_seed)
N = 50
Nhalf = int(N/2)
X = np.hstack([np.random.normal(5, 2, Nhalf), np.random.normal(10, 2, Nhalf)])[:, None]
Y = np.hstack([np.ones(Nhalf), np.zeros(Nhalf)])[:, None]
kernel = GPy.kern.RBF(1)
m = GPy.models.GPClassification(X, Y, kernel=kernel)
m.optimize()
m.save_model("temp_test_gp_classifier_with_data.json", compress=True, save_data=True)
m.save_model("temp_test_gp_classifier_without_data.json", compress=True, save_data=False)
m1_r = GPy.models.GPClassification.load_model("temp_test_gp_classifier_with_data.json.zip")
m2_r = GPy.models.GPClassification.load_model("temp_test_gp_classifier_without_data.json.zip", (X,Y))
import os
os.remove("temp_test_gp_classifier_with_data.json.zip")
os.remove("temp_test_gp_classifier_without_data.json.zip")
var = m.predict(X)[0]
var1_r = m1_r.predict(X)[0]
var2_r = m2_r.predict(X)[0]
np.testing.assert_array_equal(np.array(var).flatten(), np.array(var1_r).flatten())
np.testing.assert_array_equal(np.array(var).flatten(), np.array(var1_r).flatten())
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.test_parameter_index_operations']
unittest.main()
| bsd-3-clause | -4,306,246,230,863,588,400 | 52.495763 | 179 | 0.637703 | false |
vighneshbirodkar/scikit-image | skimage/future/graph/_ncut.py | 21 | 1885 | try:
import networkx as nx
except ImportError:
from ..._shared.utils import warn
warn('RAGs require networkx')
import numpy as np
from scipy import sparse
from . import _ncut_cy
def DW_matrices(graph):
"""Returns the diagonal and weight matrices of a graph.
Parameters
----------
graph : RAG
A Region Adjacency Graph.
Returns
-------
D : csc_matrix
The diagonal matrix of the graph. ``D[i, i]`` is the sum of weights of
all edges incident on `i`. All other entries are `0`.
W : csc_matrix
The weight matrix of the graph. ``W[i, j]`` is the weight of the edge
joining `i` to `j`.
"""
# sparse.eighsh is most efficient with CSC-formatted input
W = nx.to_scipy_sparse_matrix(graph, format='csc')
entries = W.sum(axis=0)
D = sparse.dia_matrix((entries, 0), shape=W.shape).tocsc()
return D, W
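# --- Illustrative sketch (not part of the scikit-image sources) -------------
# A minimal, hand-built weighted graph showing what DW_matrices returns; a
# real RAG (e.g. from skimage.future.graph.rag_mean_color) is used the same way.
def _dw_matrices_example():
    g = nx.Graph()
    g.add_edge(0, 1, weight=2.0)
    g.add_edge(1, 2, weight=3.0)
    D, W = DW_matrices(g)
    # W is the symmetric 3x3 weight matrix; D is diagonal with
    # D[1, 1] == 5.0, the total weight incident on node 1.
    return D, W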
def ncut_cost(cut, D, W):
"""Returns the N-cut cost of a bi-partition of a graph.
Parameters
----------
cut : ndarray
The mask for the nodes in the graph. Nodes corresponding to a `True`
value are in one set.
D : csc_matrix
The diagonal matrix of the graph.
W : csc_matrix
The weight matrix of the graph.
Returns
-------
cost : float
The cost of performing the N-cut.
References
----------
.. [1] Normalized Cuts and Image Segmentation, Jianbo Shi and
Jitendra Malik, IEEE Transactions on Pattern Analysis and Machine
Intelligence, Page 889, Equation 2.
"""
cut = np.array(cut)
cut_cost = _ncut_cy.cut_cost(cut, W)
# D has elements only along the diagonal, one per node, so we can directly
# index the data attribute with cut.
assoc_a = D.data[cut].sum()
assoc_b = D.data[~cut].sum()
return (cut_cost / assoc_a) + (cut_cost / assoc_b)
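# --- Illustrative sketch (not part of the scikit-image sources) -------------
# Worked example of the N-cut cost on a 3-node path graph, separating node 0
# from nodes {1, 2}; the numbers in the comment follow directly from the formula.
def _ncut_cost_example():
    g = nx.Graph()
    g.add_edge(0, 1, weight=1.0)
    g.add_edge(1, 2, weight=1.0)
    D, W = DW_matrices(g)
    cut = np.array([True, False, False])
    # cut_cost = 1.0 (only edge 0-1 is severed), assoc_a = 1.0, assoc_b = 3.0,
    # so ncut_cost returns 1.0/1.0 + 1.0/3.0 ~= 1.333.
    return ncut_cost(cut, D, W)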
| bsd-3-clause | -819,555,054,585,325,700 | 26.720588 | 78 | 0.608488 | false |
SigPloiter/SigPloit | gtp/attacks/dos/user_dos.py | 1 | 5623 | #!/usr/bin/env python
# encoding: utf-8
# user_dos.py
#
# Copyright 2018 Rosalia d'Alessandro
#
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import os
import sys
from optparse import OptionParser
from gtp_v2_core.utilities.configuration_parser import parseConfigs
from commons.message_handler import MessageHandler
from commons.globals import message_queue
__all__ = []
__version__ = 0.1
GTP_PORT = 2123
DEFAULT_MSG_FREQ = 20
DEFAULT_SLEEPTIME = 1
DEBUG = 0
##
## ATTACKING TOOL
##
## @brief Main file to execute the script.
##
## This file can test a DoS attack sending a delete session request (36) or
## delete bearer request (66) using one or more TEIDS.TEIDs are listed in the
## config file.
##
## Use the -h option to enter the help menu and determine what to do.
##
## Basic usage examples:
## * $ python user_dos.py -v -c conf_file.cnf [-c conf2.cnf ...] -r <remote ip>
# act as a client connecting to <remote-host-ip>
##
## * $ python user_dos.py -lv -c conf_file.cnf [-c conf2.cnf ...] -r <remote ip>
##
## act as a server listening on 0.0.0.0 and accepting replies from <remote-host-ip>
##
## Example configuration file: UserDos.cnf
def main(argv=None):
'''Command line options.'''
program_name = os.path.basename(sys.argv[0])
program_version = "v0.1"
program_version_string = '%%prog %s' % (program_version)
program_license = "Copyright 2017 Rosalia d'Alessandro\
Licensed under the Apache License 2.0\
nhttp://www.apache.org/licenses/LICENSE-2.0"
if argv is None:
argv = sys.argv[1:]
lstn = None
try:
# setup option parser
parser = OptionParser(version=program_version_string, description=program_license)
parser.add_option("-v", "--verbose", dest="verbose", action="count", help="set verbosity level [default: %default]")
parser.add_option("-c", "--config", dest="config_file", help="the configuration file")
parser.add_option("-r", "--remote_net", dest="remote_net",
help="remote network e.g. 10.0.0.0/24, 10.0.0.1/32")
parser.add_option("-l", "--listening", dest = "listening_mode",
action = "count", help = "start also a GTP_C listener")
# set defaults
parser.set_defaults(listening_mode=False,
config_file="../config/UserDoS.cnf",
verbose = False)
# process options
(opts, args) = parser.parse_args(argv)
        is_verbose = opts.verbose
listening_mode = opts.listening_mode
        msg_freq = DEFAULT_MSG_FREQ   # passed to MessageHandler as msgs_freq
remote_net = opts.remote_net
sleep_time = DEFAULT_SLEEPTIME
if listening_mode and remote_net == None:
print "remote network (e.g. 10.0.0.0/24, 10.0.0.1/32) is required"
return
# MAIN BODY #
if opts.config_file == "" :
print "Error: missed config file"
return
config = parseConfigs(opts.config_file)
msgs = config.get_unpacked_messages()
lstn = MessageHandler(messages = msgs, peer = remote_net,
isVerbose = is_verbose,
listening_mode = listening_mode,
msgs_freq = msg_freq, wait_time = sleep_time)
if lstn :
lstn.daemon = True
lstn.start()
lstn.join()
lstn.stop()
print "Sent %d GTPV2 messages"%len(message_queue)
except Exception, e:
indent = len(program_name) * " "
sys.stderr.write(program_name + ": " + repr(e) + "\n")
sys.stderr.write(indent + " for help use --help")
print "Exception %s"%str(e)
if lstn :
lstn.stop()
return 2
if __name__ == "__main__":
if DEBUG:
sys.argv.append("-v")
sys.exit(main())
| mit | -2,479,556,723,612,131,000 | 36.993243 | 124 | 0.604126 | false |
sanandrea/ofnic_R3.3 | src/nox/coreapps/db_manager/mysql_manager.py | 1 | 12379 | # Copyright 2013 (C) Universita' di Roma La Sapienza
#
# This file is part of OFNIC Uniroma GE.
#
# OFNIC is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OFNIC is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with OFNIC. If not, see <http://www.gnu.org/licenses/>.
# @Author Andi Palo
# @Date created 17/07/2013
# @Updated by Valerio Di Matteo, 09/10/2013
from nox.lib.core import *
import MySQLdb
MYSQL_HOST = "localhost"
MYSQL_PORT = 3306
MYSQL_USER = "ofnic"
MYSQL_PASSWD = "openflow"
MYSQL_DB = "openflow_users"
#Conn = MySQLdb.Connect(host = MYSQL_HOST, port = MYSQL_PORT, user = MYSQL_USER, passwd= MYSQL_PASSWD, db= MYSQL_DB )
#Cursor = Conn.cursor(MySQLdb.cursors.DictCursor) #Permette l'accesso attraverso il nome dei fields
def check_db_connection(fn):
    """Decorator that (re)opens the MySQL connection, attaches a fresh
    DictCursor to the component and retries the wrapped call up to 5 times
    on MySQLdb errors; returns None if every attempt fails."""
    def wrapped(*args):
        retries = 5
#Conn = None
while (retries > 0):
            retries -= 1
try:
Conn = MySQLdb.Connect(host = MYSQL_HOST, port = MYSQL_PORT, user = MYSQL_USER, passwd= MYSQL_PASSWD, db= MYSQL_DB )
args[0].cursor = Conn.cursor(MySQLdb.cursors.DictCursor)
return fn(*args)
except MySQLdb.Error, e:
pass
#if Conn:
#Conn.close
#Conn = MySQLdb.Connect(host = MYSQL_HOST, port = MYSQL_PORT,
#user = MYSQL_USER, passwd = MYSQL_PASSWD,
#db = MYSQL_DB)
#Conn.close()
return None
return wrapped
class MySQLManager(Component):
db = None
cursor = None
def __init__(self, ctxt):
Component.__init__(self, ctxt)
def install(self):
print "Installing module mysql_manager"
def echo(self, msg):
print msg
@check_db_connection
def call_editables_caps_db(self):
#query = "SELECT User,GROUP_CONCAT(Role SEPARATOR ',') as Roles FROM user_roles GROUP BY User;"
query = "SELECT Role,GROUP_CONCAT(Cap SEPARATOR ',') as Caps FROM editable_roles GROUP BY Role;"
self.cursor.execute(query)
data=self.cursor.fetchall()
return data
@check_db_connection
def check_role(self, role):
query = "SELECT Role FROM editable_roles where Role='"+role+"';"
self.cursor.execute(query)
if self.cursor.rowcount>0:
return True
else:
return False
@check_db_connection
def check_role_all(self, role):
query = "SELECT Name FROM roles where Name='"+role+"';"
self.cursor.execute(query)
if self.cursor.rowcount>0:
return True
else:
return False
@check_db_connection
def remove_role(self,role):
query = "delete FROM editable_roles where Role='"+role+"';"
self.cursor.execute(query)
query = "delete from user_roles where Role='"+role+"';"
self.cursor.execute(query)
query = "delete from roles where Name='"+role+"';"
self.cursor.execute(query)
@check_db_connection
def call_users_db(self):
query = "SELECT username FROM users;"
self.cursor.execute(query)
data=self.cursor.fetchall()
return data
    @check_db_connection
    def call_all_user_roles_db(self):
query = "SELECT User,GROUP_CONCAT(Role SEPARATOR ',') as Roles FROM user_roles GROUP BY User;"
self.cursor.execute(query)
data=self.cursor.fetchall()
return data
@check_db_connection
def get_all_roles_db(self, onlyEditables):
if (onlyEditables):
query = "SELECT Name FROM roles where Editable = 1;"
else:
query = "SELECT Name FROM roles;"
self.cursor.execute(query)
data=self.cursor.fetchall()
return data
@check_db_connection
def check_user(self, username):
query = "SELECT username FROM users where username='"+username+"';"
self.cursor.execute(query)
if self.cursor.rowcount>0:
return True
else:
return False
@check_db_connection
def reg_user(self,username,password):
query="insert into users (username,password,language) values ('"+username+"','"+password+"','en');"
self.cursor.execute(query)
self.add_role_to_user(username,"Readonly");
@check_db_connection
def remove_user(self,username):
query = "delete FROM users where username='"+username+"';"
self.cursor.execute(query)
query = "delete from user_roles where User='"+username+"';"
self.cursor.execute(query)
@check_db_connection
def call_cap_db(self):
query = "SELECT * FROM capabilities;"
self.cursor.execute(query)
data=self.cursor.fetchall()
return data
@check_db_connection
def check_cap(self, cap):
query = "SELECT Name FROM capabilities where Name='"+cap+"';"
self.cursor.execute(query)
if self.cursor.rowcount>0:
return True
else:
return False
@check_db_connection
def get_cap_by_role(self, name):
query = "SELECT Cap FROM editable_roles where Role='"+name+"';"
self.cursor.execute(query)
data=self.cursor.fetchall()
return data
@check_db_connection
def get_other_cap_by_role(self, name):
query = "SELECT Name FROM capabilities where Name not in (Select Cap FROM editable_roles where Role='"+name+"');"
self.cursor.execute(query)
data=self.cursor.fetchall()
return data
@check_db_connection
def authenticate(self, username, password):
query = "SELECT * FROM users where username='"+username+"' and password='"+password+"';"
self.cursor.execute(query)
if self.cursor.rowcount > 0:
query = "SELECT * FROM user_roles where User='"+username+"';"
self.cursor.execute(query)
data=self.cursor.fetchall()
return data
else:
return False
@check_db_connection
def add_new_role(self,role):
query = "select Name from roles where Name='"+role+"';"
self.cursor.execute(query)
if self.cursor.rowcount > 0:
return False
else:
query = "insert into editable_roles (Role,Cap) values('"+role+"','GET');"
self.cursor.execute(query)
query = "insert into roles (Name,Editable) values('"+role+"','1');"
self.cursor.execute(query)
return True
@check_db_connection
def add_cap_to_role(self,role,cap):
query = "select * from editable_roles where Role='"+role+"' and Cap='"+cap+"';"
self.cursor.execute(query)
if self.cursor.rowcount > 0:
return False
else:
query = "insert into editable_roles (Role,Cap) values('"+role+"','"+cap+"');"
self.cursor.execute(query)
return True
@check_db_connection
def del_cap_from_role(self,role,cap):
query = "delete from editable_roles where Role='"+role+"' and Cap='"+cap+"';"
self.cursor.execute(query)
if self.cursor.rowcount > 0:
return True
else:
return False
@check_db_connection
def db_path(self,path,method):
if (path.count("/statistics/task/")>0):
if(method=="DELETE"):
path="DELETE_STAT"
elif(method=="GET"):
path= "GET_STAT"
elif (path.count("/statistics/node/")>0):
path="GET_NODE"
elif (path.count("/virtualpath/")>0):
if(method=="GET"):
path="GET_PATH"
elif(method=="DELETE"):
path="DELETE_PATH"
elif (path.count("/synchronize/network/node/")>0):
path="SYNC_NODE"
elif (path.count("/extension/")>0):
path="EXTENSION"
elif (path.count("/controlpanel/")>0):
path="CONTROL_PANEL"
return path
@check_db_connection
def check_path(self,request):
path=self.db_path(request.uri,request.method)
query = "SELECT Cap FROM resources where Path='"+path+"';"
self.cursor.execute(query)
if self.cursor.rowcount>0:
data=self.cursor.fetchall()
return data
else:
return False
@check_db_connection
def get_res(self):
#query = "SELECT distinct Path FROM resources;"
query = "SELECT GROUP_CONCAT(id SEPARATOR ',') as IDs, Path,GROUP_CONCAT(Cap SEPARATOR ',') as Caps FROM resources GROUP BY Path;"
self.cursor.execute(query)
data=self.cursor.fetchall()
return data
@check_db_connection
def check_res(self, path):
query = "SELECT Path FROM resources where Path='"+path+"';"
self.cursor.execute(query)
if self.cursor.rowcount>0:
return True
else:
return False
@check_db_connection
def get_cap_by_res(self,res):
query = "SELECT Cap FROM resources where Path='"+res+"';"
self.cursor.execute(query)
data=self.cursor.fetchall()
return data
@check_db_connection
def res_has_caps(self,res):
query = "SELECT Cap FROM resources where Path='"+res+"';"
self.cursor.execute(query)
if self.cursor.rowcount>0:
return True
else:
return False
@check_db_connection
def get_other_cap_by_res(self,res):
query = "SELECT Name FROM capabilities where Name not in (Select Cap FROM resources where Path='"+res+"');"
self.cursor.execute(query)
data=self.cursor.fetchall()
return data
@check_db_connection
def add_cap_to_res(self,res,cap):
query = "select * from resources where Path='"+res+"' and Cap='"+cap+"';"
self.cursor.execute(query)
if self.cursor.rowcount > 0:
return False
else:
query = "insert into resources (Path,Cap) values('"+res+"','"+cap+"');"
self.cursor.execute(query)
return True
@check_db_connection
def del_cap_from_res(self,res,cap):
query = "delete from resources where Path='"+res+"' and Cap='"+cap+"';"
self.cursor.execute(query)
if self.cursor.rowcount > 0:
return True
else:
return False
@check_db_connection
def get_role_by_user(self,user):
query = "SELECT Role FROM user_roles where User='"+user+"';"
self.cursor.execute(query)
data=self.cursor.fetchall()
return data
@check_db_connection
def get_other_role_by_user(self,user):
query = "SELECT Name FROM roles where Name not in (SELECT Role FROM user_roles where User='"+user+"');"
self.cursor.execute(query)
data=self.cursor.fetchall()
return data
@check_db_connection
def add_role_to_user(self,user,role):
query = "select * from user_roles where User='"+user+"' and Role='"+role+"';"
self.cursor.execute(query)
if self.cursor.rowcount > 0:
return False
else:
query = "insert into user_roles (User,Role) values('"+user+"','"+role+"');"
self.cursor.execute(query)
return True
@check_db_connection
def del_role_from_user(self,user,role):
query = "delete from user_roles where User='"+user+"' and Role='"+role+"';"
self.cursor.execute(query)
if self.cursor.rowcount > 0:
return True
else:
return False
def getInterface(self):
return str(MySQLManager)
def getFactory():
class Factory:
def instance(self, ctxt):
return MySQLManager(ctxt)
return Factory()
| gpl-3.0 | -5,372,603,276,951,197,000 | 32.730245 | 139 | 0.581469 | false |
bgalehouse/grr | lib/distro_entry.py | 10 | 2520 | #!/usr/bin/env python
"""This file defines the entry points for typical installations."""
# Imports must be inline to stop argument pollution across the entry points.
# pylint: disable=g-import-not-at-top
import platform
from grr.lib import config_lib
from grr.lib import flags
# Set custom options for each distro here.
DISTRO_DEFAULTS = {
"debian": {"flag_defaults": {"config": "/etc/grr/grr-server.yaml"},
"config_opts": {"Config.writeback":
"/etc/grr/server.local.yaml"}},
"redhat": {"flag_defaults": {"config": "/etc/grr/grr-server.yaml"},
"config_opts": {"Config.writeback":
"/etc/grr/server.local.yaml"}},
}
def GetDistro():
"""Return the distro specific config to use."""
if hasattr(platform, "linux_distribution"):
distribution = platform.linux_distribution()[0].lower()
if distribution in ["ubuntu", "debian"]:
return "debian"
if distribution in ["red hat enterprise linux server"]:
return "redhat"
raise RuntimeError("Missing distro specific config. Please update "
"distro_entry.py.")
def SetConfigOptions():
"""Set distro specific options."""
distro = GetDistro()
for option, value in DISTRO_DEFAULTS[distro]["config_opts"].items():
config_lib.CONFIG.Set(option, value)
flags.PARSER.set_defaults(**DISTRO_DEFAULTS[distro]["flag_defaults"])
def Console():
from grr.tools import console
SetConfigOptions()
flags.StartMain(console.main)
def ConfigUpdater():
from grr.tools import config_updater
SetConfigOptions()
flags.StartMain(config_updater.main)
def GrrServer():
from grr.tools import grr_server
SetConfigOptions()
flags.StartMain(grr_server.main)
def EndToEndTests():
from grr.tools import end_to_end_tests
SetConfigOptions()
flags.StartMain(end_to_end_tests.main)
def Export():
from grr.tools import export
export.AddPluginsSubparsers()
SetConfigOptions()
flags.StartMain(export.main)
def Worker():
from grr.worker import worker
SetConfigOptions()
flags.StartMain(worker.main)
def GRRFuse():
from grr.tools import fuse_mount
SetConfigOptions()
flags.StartMain(fuse_mount.main)
def Client():
from grr.client import client
# Note client doesn't call SetConfigOptions as this entry point is primarily
# used for testing on the server.
flags.StartMain(client.main)
def AdminUI():
from grr.gui import admin_ui
SetConfigOptions()
flags.StartMain(admin_ui.main)
| apache-2.0 | -8,319,044,420,231,891,000 | 24.979381 | 78 | 0.690476 | false |
Edraak/circleci-edx-platform | openedx/core/lib/xblock_utils.py | 2 | 15714 | """
Functions that can are used to modify XBlock fragments for use in the LMS and Studio
"""
import datetime
import json
import logging
import static_replace
import uuid
import markupsafe
from lxml import html, etree
from contracts import contract
from django.conf import settings
from django.utils.timezone import UTC
from django.utils.html import escape
from django.contrib.auth.models import User
from edxmako.shortcuts import render_to_string
from xblock.core import XBlock
from xblock.exceptions import InvalidScopeError
from xblock.fragment import Fragment
from xmodule.seq_module import SequenceModule
from xmodule.vertical_block import VerticalBlock
from xmodule.x_module import shim_xmodule_js, XModuleDescriptor, XModule, PREVIEW_VIEWS, STUDIO_VIEW
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
log = logging.getLogger(__name__)
def wrap_fragment(fragment, new_content):
"""
Returns a new Fragment that has `new_content` and all
as its content, and all of the resources from fragment
"""
wrapper_frag = Fragment(content=new_content)
wrapper_frag.add_frag_resources(fragment)
return wrapper_frag
def request_token(request):
"""
Return a unique token for the supplied request.
This token will be the same for all calls to `request_token`
made on the same request object.
"""
# pylint: disable=protected-access
if not hasattr(request, '_xblock_token'):
request._xblock_token = uuid.uuid1().get_hex()
return request._xblock_token
def wrap_xblock(
runtime_class,
block,
view,
frag,
context, # pylint: disable=unused-argument
usage_id_serializer,
request_token, # pylint: disable=redefined-outer-name
display_name_only=False,
extra_data=None
):
"""
Wraps the results of rendering an XBlock view in a standard <section> with identifying
data so that the appropriate javascript module can be loaded onto it.
:param runtime_class: The name of the javascript runtime class to use to load this block
:param block: An XBlock (that may be an XModule or XModuleDescriptor)
:param view: The name of the view that rendered the fragment being wrapped
:param frag: The :class:`Fragment` to be wrapped
:param context: The context passed to the view being rendered
:param usage_id_serializer: A function to serialize the block's usage_id for use by the
front-end Javascript Runtime.
:param request_token: An identifier that is unique per-request, so that only xblocks
rendered as part of this request will have their javascript initialized.
:param display_name_only: If true, don't render the fragment content at all.
Instead, just render the `display_name` of `block`
:param extra_data: A dictionary with extra data values to be set on the wrapper
"""
if extra_data is None:
extra_data = {}
# If any mixins have been applied, then use the unmixed class
class_name = getattr(block, 'unmixed_class', block.__class__).__name__
data = {}
data.update(extra_data)
css_classes = [
'xblock',
'xblock-{}'.format(markupsafe.escape(view)),
'xblock-{}-{}'.format(
markupsafe.escape(view),
markupsafe.escape(block.scope_ids.block_type),
)
]
if isinstance(block, (XModule, XModuleDescriptor)):
if view in PREVIEW_VIEWS:
# The block is acting as an XModule
css_classes.append('xmodule_display')
elif view == STUDIO_VIEW:
# The block is acting as an XModuleDescriptor
css_classes.append('xmodule_edit')
if getattr(block, 'HIDDEN', False):
css_classes.append('is-hidden')
css_classes.append('xmodule_' + markupsafe.escape(class_name))
data['type'] = block.js_module_name
shim_xmodule_js(block, frag)
if frag.js_init_fn:
data['init'] = frag.js_init_fn
data['runtime-class'] = runtime_class
data['runtime-version'] = frag.js_init_version
data['block-type'] = block.scope_ids.block_type
data['usage-id'] = usage_id_serializer(block.scope_ids.usage_id)
data['request-token'] = request_token
if block.name:
data['name'] = block.name
template_context = {
'content': block.display_name if display_name_only else frag.content,
'classes': css_classes,
'display_name': block.display_name_with_default_escaped,
'data_attributes': u' '.join(u'data-{}="{}"'.format(markupsafe.escape(key), markupsafe.escape(value))
for key, value in data.iteritems()),
}
if hasattr(frag, 'json_init_args') and frag.json_init_args is not None:
# Replace / with \/ so that "</script>" in the data won't break things.
template_context['js_init_parameters'] = json.dumps(frag.json_init_args).replace("/", r"\/")
else:
template_context['js_init_parameters'] = ""
return wrap_fragment(frag, render_to_string('xblock_wrapper.html', template_context))
def replace_jump_to_id_urls(course_id, jump_to_id_base_url, block, view, frag, context): # pylint: disable=unused-argument
"""
This will replace a link between courseware in the format
/jump_to/<id> with a URL for a page that will correctly redirect
This is similar to replace_course_urls, but much more flexible and
durable for Studio authored courses. See more comments in static_replace.replace_jump_to_urls
course_id: The course_id in which this rewrite happens
jump_to_id_base_url:
A app-tier (e.g. LMS) absolute path to the base of the handler that will perform the
redirect. e.g. /courses/<org>/<course>/<run>/jump_to_id. NOTE the <id> will be appended to
the end of this URL at re-write time
output: a new :class:`~xblock.fragment.Fragment` that modifies `frag` with
content that has been update with /jump_to links replaced
"""
return wrap_fragment(frag, static_replace.replace_jump_to_id_urls(frag.content, course_id, jump_to_id_base_url))
def replace_course_urls(course_id, block, view, frag, context): # pylint: disable=unused-argument
"""
Updates the supplied module with a new get_html function that wraps
the old get_html function and substitutes urls of the form /course/...
with urls that are /courses/<course_id>/...
"""
return wrap_fragment(frag, static_replace.replace_course_urls(frag.content, course_id))
def replace_static_urls(data_dir, block, view, frag, context, course_id=None, static_asset_path=''): # pylint: disable=unused-argument
"""
Updates the supplied module with a new get_html function that wraps
the old get_html function and substitutes urls of the form /static/...
with urls that are /static/<prefix>/...
"""
return wrap_fragment(frag, static_replace.replace_static_urls(
frag.content,
data_dir,
course_id,
static_asset_path=static_asset_path
))
def grade_histogram(module_id):
'''
Print out a histogram of grades on a given problem in staff member debug info.
Warning: If a student has just looked at an xmodule and not attempted
it, their grade is None. Since there will always be at least one such student
this function almost always returns [].
'''
from django.db import connection
cursor = connection.cursor()
query = """\
SELECT courseware_studentmodule.grade,
COUNT(courseware_studentmodule.student_id)
FROM courseware_studentmodule
WHERE courseware_studentmodule.module_id=%s
GROUP BY courseware_studentmodule.grade"""
# Passing module_id this way prevents sql-injection.
cursor.execute(query, [module_id.to_deprecated_string()])
grades = list(cursor.fetchall())
grades.sort(key=lambda x: x[0]) # Add ORDER BY to sql query?
if len(grades) >= 1 and grades[0][0] is None:
return []
return grades
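# Illustrative note (assumption, not from the original source): the list returned
# above pairs each distinct grade with its count, sorted by grade, e.g.
# [(0.0, 12), (0.5, 3), (1.0, 7)]; whenever a None grade is present it sorts
# first under Python 2 and the function returns [], which the docstring notes is
# the common case.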
@contract(user=User, has_instructor_access=bool, block=XBlock, view=basestring, frag=Fragment, context="dict|None")
def add_staff_markup(user, has_instructor_access, disable_staff_debug_info, block, view, frag, context): # pylint: disable=unused-argument
"""
Updates the supplied module with a new get_html function that wraps
the output of the old get_html function with additional information
for admin users only, including a histogram of student answers, the
definition of the xmodule, and a link to view the module in Studio
if it is a Studio edited, mongo stored course.
Does nothing if module is a SequenceModule.
"""
# TODO: make this more general, eg use an XModule attribute instead
if isinstance(block, VerticalBlock) and (not context or not context.get('child_of_vertical', False)):
# check that the course is a mongo backed Studio course before doing work
is_mongo_course = modulestore().get_modulestore_type(block.location.course_key) != ModuleStoreEnum.Type.xml
is_studio_course = block.course_edit_method == "Studio"
if is_studio_course and is_mongo_course:
# build edit link to unit in CMS. Can't use reverse here as lms doesn't load cms's urls.py
edit_link = "//" + settings.CMS_BASE + '/container/' + unicode(block.location)
# return edit link in rendered HTML for display
return wrap_fragment(
frag,
render_to_string(
"edit_unit_link.html",
{'frag_content': frag.content, 'edit_link': edit_link}
)
)
else:
return frag
if isinstance(block, SequenceModule) or getattr(block, 'HIDDEN', False):
return frag
block_id = block.location
if block.has_score and settings.FEATURES.get('DISPLAY_HISTOGRAMS_TO_STAFF'):
histogram = grade_histogram(block_id)
render_histogram = len(histogram) > 0
else:
histogram = None
render_histogram = False
if settings.FEATURES.get('ENABLE_LMS_MIGRATION') and hasattr(block.runtime, 'filestore'):
[filepath, filename] = getattr(block, 'xml_attributes', {}).get('filename', ['', None])
osfs = block.runtime.filestore
if filename is not None and osfs.exists(filename):
# if original, unmangled filename exists then use it (github
# doesn't like symlinks)
filepath = filename
data_dir = block.static_asset_path or osfs.root_path.rsplit('/')[-1]
giturl = block.giturl or 'https://github.com/MITx'
edit_link = "%s/%s/tree/master/%s" % (giturl, data_dir, filepath)
else:
edit_link = False
# Need to define all the variables that are about to be used
giturl = ""
data_dir = ""
source_file = block.source_file # source used to generate the problem XML, eg latex or word
# Useful to indicate to staff if problem has been released or not.
# TODO (ichuang): use _has_access_descriptor.can_load in lms.courseware.access,
# instead of now>mstart comparison here.
now = datetime.datetime.now(UTC())
is_released = "unknown"
mstart = block.start
if mstart is not None:
is_released = "<font color='red'>Yes!</font>" if (now > mstart) else "<font color='green'>Not yet</font>"
field_contents = []
for name, field in block.fields.items():
try:
field_contents.append((name, field.read_from(block)))
except InvalidScopeError:
log.warning("Unable to read field in Staff Debug information", exc_info=True)
field_contents.append((name, "WARNING: Unable to read field"))
staff_context = {
'fields': field_contents,
'xml_attributes': getattr(block, 'xml_attributes', {}),
'location': block.location,
'xqa_key': block.xqa_key,
'source_file': source_file,
'source_url': '%s/%s/tree/master/%s' % (giturl, data_dir, source_file),
'category': str(block.__class__.__name__),
# Template uses element_id in js function names, so can't allow dashes
'element_id': block.location.html_id().replace('-', '_'),
'edit_link': edit_link,
'user': user,
'xqa_server': settings.FEATURES.get('XQA_SERVER', "http://your_xqa_server.com"),
'histogram': json.dumps(histogram),
'render_histogram': render_histogram,
'block_content': frag.content,
'is_released': is_released,
'has_instructor_access': has_instructor_access,
'disable_staff_debug_info': disable_staff_debug_info,
}
return wrap_fragment(frag, render_to_string("staff_problem_info.html", staff_context))
def get_course_update_items(course_updates, provided_index=0):
"""
Returns list of course_updates data dictionaries either from new format if available or
from old. This function don't modify old data to new data (in db), instead returns data
in common old dictionary format.
New Format: {"items" : [{"id": computed_id, "date": date, "content": html-string}],
"data": "<ol>[<li><h2>date</h2>content</li>]</ol>"}
Old Format: {"data": "<ol>[<li><h2>date</h2>content</li>]</ol>"}
"""
def _course_info_content(html_parsed):
"""
Constructs the HTML for the course info update, not including the header.
"""
if len(html_parsed) == 1:
# could enforce that update[0].tag == 'h2'
content = html_parsed[0].tail
else:
content = html_parsed[0].tail if html_parsed[0].tail is not None else ""
content += "\n".join([html.tostring(ele) for ele in html_parsed[1:]])
return content
if course_updates and getattr(course_updates, "items", None):
if provided_index and 0 < provided_index <= len(course_updates.items):
return course_updates.items[provided_index - 1]
else:
# return list in reversed order (old format: [4,3,2,1]) for compatibility
return list(reversed(course_updates.items))
course_update_items = []
if course_updates:
# old method to get course updates
# purely to handle free formed updates not done via editor. Actually kills them, but at least doesn't break.
try:
course_html_parsed = html.fromstring(course_updates.data)
except (etree.XMLSyntaxError, etree.ParserError):
log.error("Cannot parse: " + course_updates.data)
escaped = escape(course_updates.data)
course_html_parsed = html.fromstring("<ol><li>" + escaped + "</li></ol>")
# confirm that root is <ol>, iterate over <li>, pull out <h2> subs and then rest of val
if course_html_parsed.tag == 'ol':
# 0 is the newest
for index, update in enumerate(course_html_parsed):
if len(update) > 0:
content = _course_info_content(update)
# make the id on the client be 1..len w/ 1 being the oldest and len being the newest
computed_id = len(course_html_parsed) - index
payload = {
"id": computed_id,
"date": update.findtext("h2"),
"content": content
}
if provided_index == 0:
course_update_items.append(payload)
elif provided_index == computed_id:
return payload
return course_update_items
| agpl-3.0 | -7,385,829,800,606,447,000 | 41.128686 | 139 | 0.645157 | false |
carefree0910/MachineLearning | e_SVM/KP.py | 1 | 3674 | import os
import sys
root_path = os.path.abspath("../")
if root_path not in sys.path:
sys.path.append(root_path)
import numpy as np
import matplotlib.pyplot as plt
from Util.Util import DataUtil
from Util.Timing import Timing
from Util.Bases import KernelBase, GDKernelBase
class KP(KernelBase):
KernelPerceptronTiming = Timing()
def __init__(self, **kwargs):
super(KP, self).__init__(**kwargs)
self._fit_args, self._fit_args_names = [0.01], ["lr"]
@KernelPerceptronTiming.timeit(level=1, prefix="[Core] ")
def _update_dw_cache(self, idx, lr, sample_weight):
self._dw_cache = lr * self._y[idx] * sample_weight[idx]
self._w[idx] += self._dw_cache
@KernelPerceptronTiming.timeit(level=1, prefix="[Core] ")
def _update_db_cache(self, idx, lr, sample_weight):
self._db_cache = self._dw_cache
self._b += self._db_cache
@KernelPerceptronTiming.timeit(level=1, prefix="[Core] ")
def _fit(self, sample_weight, lr):
err = (np.sign(self._prediction_cache) != self._y) * sample_weight
indices = np.random.permutation(len(self._y))
idx = indices[np.argmax(err[indices])]
if self._prediction_cache[idx] == self._y[idx]:
return True
self._update_dw_cache(idx, lr, sample_weight)
self._update_db_cache(idx, lr, sample_weight)
self._update_pred_cache(idx)
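# Illustrative note (assumption, not from the original source): this is the
# dual-form perceptron update -- when sample idx is misclassified, its
# coefficient w[idx] in the kernel expansion sum_j w_j * K(x_j, x) + b is nudged
# by lr * y[idx] * sample_weight[idx], and the cached predictions are refreshed
# from the corresponding row of the kernel Gram matrix.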
class GDKP(GDKernelBase):
GDKPTiming = Timing()
@GDKPTiming.timeit(level=1, prefix="[Core] ")
def _get_grads(self, x_batch, y_batch, y_pred, sample_weight_batch, *args):
err = -y_batch * (x_batch.dot(self._alpha) + self._b) * sample_weight_batch
mask = err >= 0 # type: np.ndarray
if not np.any(mask):
self._model_grads = [None, None]
else:
delta = -y_batch[mask] * sample_weight_batch[mask]
self._model_grads = [
np.sum(delta[..., None] * x_batch[mask], axis=0),
np.sum(delta)
]
return np.sum(err[mask])
if __name__ == '__main__':
# xs, ys = DataUtil.gen_two_clusters(center=5, dis=1, scale=2, one_hot=False)
xs, ys = DataUtil.gen_spiral(20, 4, 2, 2, one_hot=False)
# xs, ys = DataUtil.gen_xor(one_hot=False)
ys[ys == 0] = -1
animation_params = {
"show": False, "mp4": False, "period": 50,
"dense": 400, "draw_background": True
}
kp = KP(animation_params=animation_params)
kp.fit(xs, ys, kernel="poly", p=12, epoch=200)
kp.evaluate(xs, ys)
kp.visualize2d(xs, ys, dense=400)
kp = GDKP(animation_params=animation_params)
kp.fit(xs, ys, kernel="poly", p=12, epoch=10000)
kp.evaluate(xs, ys)
kp.visualize2d(xs, ys, dense=400)
(x_train, y_train), (x_test, y_test), *_ = DataUtil.get_dataset(
"mushroom", "../_Data/mushroom.txt", n_train=100, quantize=True, tar_idx=0)
y_train[y_train == 0] = -1
y_test[y_test == 0] = -1
kp = KP()
logs = [log[0] for log in kp.fit(
x_train, y_train, metrics=["acc"], x_test=x_test, y_test=y_test
)]
kp.evaluate(x_train, y_train)
kp.evaluate(x_test, y_test)
plt.figure()
plt.title(kp.title)
plt.plot(range(len(logs)), logs)
plt.show()
kp = GDKP()
logs = [log[0] for log in kp.fit(
x_train, y_train, metrics=["acc"], x_test=x_test, y_test=y_test
)]
kp.evaluate(x_train, y_train)
kp.evaluate(x_test, y_test)
plt.figure()
plt.title(kp.title)
plt.plot(range(len(logs)), logs)
plt.show()
kp.show_timing_log()
| mit | 3,805,659,103,802,959,400 | 30.803571 | 83 | 0.572673 | false |
Haynie-Research-and-Development/jarvis | deps/lib/python3.4/site-packages/websockets/compatibility.py | 4 | 1695 | """
The :mod:`websockets.compatibility` module provides helpers for bridging
compatibility issues across Python versions.
"""
import asyncio
import http
# Replace with BaseEventLoop.create_task when dropping Python < 3.4.2.
try: # pragma: no cover
asyncio_ensure_future = asyncio.ensure_future # Python ≥ 3.5
except AttributeError: # pragma: no cover
asyncio_ensure_future = asyncio.async # Python < 3.5
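# Illustrative usage (assumption, not from the original source): either binding
# schedules a coroutine the same way on both interpreters, e.g.
#   task = asyncio_ensure_future(some_coroutine(), loop=loop)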
try: # pragma: no cover
# Python ≥ 3.5
SWITCHING_PROTOCOLS = http.HTTPStatus.SWITCHING_PROTOCOLS
OK = http.HTTPStatus.OK
BAD_REQUEST = http.HTTPStatus.BAD_REQUEST
UNAUTHORIZED = http.HTTPStatus.UNAUTHORIZED
FORBIDDEN = http.HTTPStatus.FORBIDDEN
INTERNAL_SERVER_ERROR = http.HTTPStatus.INTERNAL_SERVER_ERROR
SERVICE_UNAVAILABLE = http.HTTPStatus.SERVICE_UNAVAILABLE
except AttributeError: # pragma: no cover
# Python < 3.5
class SWITCHING_PROTOCOLS:
value = 101
phrase = "Switching Protocols"
class OK:
value = 200
phrase = "OK"
class BAD_REQUEST:
value = 400
phrase = "Bad Request"
class UNAUTHORIZED:
value = 401
phrase = "Unauthorized"
class FORBIDDEN:
value = 403
phrase = "Forbidden"
class INTERNAL_SERVER_ERROR:
value = 500
phrase = "Internal Server Error"
class SERVICE_UNAVAILABLE:
value = 503
phrase = "Service Unavailable"
| gpl-2.0 | -3,467,559,385,815,724,000 | 30.314815 | 72 | 0.567711 | false |
Withington/phenopolis | tests/test_gene.py | 3 | 1868 |
import unittest
import runserver
import sys
import load_data
import helper
class GenePageTestCase(unittest.TestCase):
def setUp(self):
load_data.load_data()
runserver.app.config['TESTING'] = True
runserver.app.config['DB_NAME'] = 'test_uclex'
runserver.app.config['DB_NAME_HPO'] = 'test_hpo'
runserver.app.config['DB_NAME_PATIENTS'] = 'test_patients'
runserver.app.config['DB_NAME_USERS'] = 'test_users'
self.app = runserver.app.test_client()
helper.create_neo4j_demo_user()
helper.login(self.app)
def tearDown(self):
self.app.get('/logout', follow_redirects=True)
def gene_page(self, geneName):
return self.app.get('/gene/'+geneName, follow_redirects=True)
def test_gene_page(self):
page = self.gene_page('TTLL5')
assert page.status_code == 200
assert 'TTLL5' in page.data
assert 'ENSG00000119685' in page.data
assert 'Macular dystrophy' in page.data
assert 'Abnormality of the macula' in page.data
assert 'Autosomal recessive inheritance' in page.data
assert 'Mode of inheritance' in page.data
assert 'Visual impairment' in page.data
assert 'Abnormality of vision' in page.data
assert 'Abnormal eye physiology' in page.data
assert 'Retinal dystrophy' in page.data
assert 'Abnormality of the retina' in page.data
assert 'Abnormality of the fundus' in page.data
assert 'Abnormality of the posterior segment of the globe' in page.data
assert 'Abnormality of the globe' in page.data
assert 'Abnormal eye morphology' in page.data
assert 'Abnormality of the eye' in page.data
assert 'Phenotypic abnormality' in page.data
assert 'All' in page.data
if __name__ == '__main__':
unittest.main()
| mit | 2,537,454,229,143,793,000 | 35.627451 | 79 | 0.65257 | false |
becomingGuru/hyde | hyde.py | 6 | 3962 | #!/usr/bin/env python
import os
import sys
import threading
from optparse import OptionParser
from hydeengine import Generator, Initializer, Server
#import cProfile
PROG_ROOT = os.path.dirname(os.path.realpath( __file__ ))
def main(argv):
parser = OptionParser(usage="%prog [-f] [-q]", version="%prog 0.4")
parser.add_option("-s", "--sitepath",
dest = "site_path",
help = "Change the path of the site folder.")
parser.add_option("-i", "--init", action = 'store_true',
dest = "init", default = False,
help = "Create a new hyde site.")
parser.add_option("-f", "--force", action = 'store_true',
dest = "force_init", default = False, help = "")
parser.add_option("-t", "--template",
dest = "template",
help = "Choose which template you want to use.")
parser.add_option("-g", "--generate", action = "store_true",
dest = "generate", default = False,
help = "Generate the source for your hyde site.")
parser.add_option("-k", "--keep_watching", action = "store_true",
dest = "keep_watching", default = False,
help = "Start monitoring the source folder for changes.")
parser.add_option("-d", "--deploy_to",
dest = "deploy_to",
help = "Change the path of the deploy folder.")
parser.add_option("-w", "--webserve", action = "store_true",
dest = "webserve", default = False,
help = "Start an instance of the CherryPy webserver.")
parser.add_option("-p", "--port",
dest = "port", default=8080,
type='int',
help = "Port webserver should listen on (8080).")
parser.add_option("-a", "--address",
dest = "address", default='localhost',
help = "Address webserver should listen on (localhost).")
(options, args) = parser.parse_args()
if len(args):
parser.error("Unexpected arguments encountered.")
if not options.site_path:
options.site_path = os.getcwdu()
if options.deploy_to:
options.deploy_to = os.path.abspath(options.deploy_to)
if options.init:
initializer = Initializer(options.site_path)
initializer.initialize(PROG_ROOT,
options.template, options.force_init)
generator = None
server = None
def quit(*args, **kwargs):
if server and server.alive:
server.quit()
if generator:
generator.quit()
if options.generate:
generator = Generator(options.site_path)
generator.generate(options.deploy_to, options.keep_watching, quit)
if options.webserve:
server = Server(options.site_path, address=options.address, port=options.port)
server.serve(options.deploy_to, quit)
if ((options.generate and options.keep_watching)
or
options.webserve):
try:
print "Letting the server and/or the generator do their thing..."
if server:
server.block()
if generator:
generator.quit()
elif generator:
generator.block()
except:
print sys.exc_info()
quit()
if argv == []:
print parser.format_option_help()
if __name__ == "__main__":
main(sys.argv[1:])
# cProfile.run('main(sys.argv[1:])', filename='hyde.cprof')
# import pstats
# stats = pstats.Stats('hyde.cprof')
# stats.strip_dirs().sort_stats('time').print_stats(20)
| mit | -5,815,827,522,196,870,000 | 36.733333 | 101 | 0.516911 | false |
18F/regulations-parser | regparser/federalregister.py | 3 | 1960 | """Fetch data from the Federal Register
See https://www.federalregister.gov/developers/api/v1 - GET "search" method
"""
import logging
from regparser.index.http_cache import http_client
FR_BASE = "https://www.federalregister.gov"
API_BASE = FR_BASE + "/api/v1/"
FULL_NOTICE_FIELDS = [
"cfr_references", "citation", "comments_close_on", "dates",
"document_number", "effective_on", "end_page", "full_text_xml_url",
"html_url", "publication_date", "regulation_id_numbers", "start_page",
"type", "volume"]
logger = logging.getLogger(__name__)
def fetch_notice_json(cfr_title, cfr_part, only_final=False,
max_effective_date=None):
"""Search through all articles associated with this part. Right now,
limited to 1000; could use paging to fix this in the future."""
params = {
"conditions[cfr][title]": cfr_title,
"conditions[cfr][part]": cfr_part,
"per_page": 1000,
"order": "oldest",
"fields[]": FULL_NOTICE_FIELDS}
if only_final:
params["conditions[type][]"] = 'RULE'
if max_effective_date:
params["conditions[effective_date][lte]"] = max_effective_date
url = API_BASE + "articles"
logger.info("Fetching notices - URL: %s Params: %r", url, params)
response = http_client().get(url, params=params).json()
logger.debug("Fetching notices response - %r", response)
if 'results' in response:
return response['results']
else:
return []
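# Illustrative sketch (not part of the original module): paging past the
# 1000-result cap mentioned above could follow the API's pagination metadata.
# Assuming the response exposes a "next_page_url" key (not verified here), a
# loop along these lines would collect every page:
#
#   results = []
#   response = http_client().get(url, params=params).json()
#   while True:
#       results.extend(response.get('results', []))
#       next_url = response.get('next_page_url')
#       if not next_url:
#           break
#       response = http_client().get(next_url).json()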
def meta_data(document_number, fields=None):
"""Return the requested meta data for a specific Federal Register
document. Accounts for a bad document number by throwing an exception"""
url = "{0}articles/{1}".format(API_BASE, document_number)
params = {} # default fields are generally good
if fields:
params["fields[]"] = fields
response = http_client().get(url, params=params)
response.raise_for_status()
return response.json()
| cc0-1.0 | -8,579,109,218,934,789,000 | 36.692308 | 76 | 0.64949 | false |
garyjyao1/ansible | lib/ansible/modules/core/cloud/amazon/ec2_group.py | 11 | 17998 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_group
author: "Andrew de Quincey (@adq)"
version_added: "1.3"
short_description: maintain an ec2 VPC security group.
description:
- maintains ec2 security groups. This module has a dependency on python-boto >= 2.5
options:
name:
description:
- Name of the security group.
required: true
description:
description:
- Description of the security group.
required: true
vpc_id:
description:
- ID of the VPC to create the group in.
required: false
rules:
description:
- List of firewall inbound rules to enforce in this group (see example). If none are supplied, a default all-out rule is assumed. If an empty list is supplied, no inbound rules will be enabled.
required: false
rules_egress:
description:
- List of firewall outbound rules to enforce in this group (see example). If none are supplied, a default all-out rule is assumed. If an empty list is supplied, no outbound rules will be enabled.
required: false
version_added: "1.6"
region:
description:
- the EC2 region to use
required: false
default: null
aliases: []
state:
version_added: "1.4"
description:
- Create or delete a security group
required: false
default: 'present'
choices: [ "present", "absent" ]
aliases: []
purge_rules:
version_added: "1.8"
description:
- Purge existing rules on security group that are not found in rules
required: false
default: 'true'
aliases: []
purge_rules_egress:
version_added: "1.8"
description:
- Purge existing rules_egress on security group that are not found in rules_egress
required: false
default: 'true'
aliases: []
extends_documentation_fragment: aws
notes:
- If a rule declares a group_name and that group doesn't exist, it will be
automatically created. In that case, group_desc should be provided as well.
The module will refuse to create a depended-on group without a description.
'''
EXAMPLES = '''
- name: example ec2 group
ec2_group:
name: example
description: an example EC2 group
vpc_id: 12345
region: eu-west-1a
aws_secret_key: SECRET
aws_access_key: ACCESS
rules:
- proto: tcp
from_port: 80
to_port: 80
cidr_ip: 0.0.0.0/0
- proto: tcp
from_port: 22
to_port: 22
cidr_ip: 10.0.0.0/8
- proto: tcp
from_port: 443
to_port: 443
group_id: amazon-elb/sg-87654321/amazon-elb-sg
- proto: tcp
from_port: 3306
to_port: 3306
group_id: 123412341234/sg-87654321/exact-name-of-sg
- proto: udp
from_port: 10050
to_port: 10050
cidr_ip: 10.0.0.0/8
- proto: udp
from_port: 10051
to_port: 10051
group_id: sg-12345678
- proto: icmp
from_port: 8 # icmp type, -1 = any type
to_port: -1 # icmp subtype, -1 = any subtype
cidr_ip: 10.0.0.0/8
- proto: all
# the containing group name may be specified here
group_name: example
rules_egress:
- proto: tcp
from_port: 80
to_port: 80
cidr_ip: 0.0.0.0/0
group_name: example-other
# description to use if example-other needs to be created
group_desc: other example EC2 group
'''
try:
import boto.ec2
from boto.ec2.securitygroup import SecurityGroup
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def make_rule_key(prefix, rule, group_id, cidr_ip):
"""Creates a unique key for an individual group rule"""
if isinstance(rule, dict):
proto, from_port, to_port = [rule.get(x, None) for x in ('proto', 'from_port', 'to_port')]
#fix for 11177
if proto not in ['icmp', 'tcp', 'udp'] and from_port == -1 and to_port == -1:
from_port = 'none'
to_port = 'none'
else: # isinstance boto.ec2.securitygroup.IPPermissions
proto, from_port, to_port = [getattr(rule, x, None) for x in ('ip_protocol', 'from_port', 'to_port')]
key = "%s-%s-%s-%s-%s-%s" % (prefix, proto, from_port, to_port, group_id, cidr_ip)
return key.lower().replace('-none', '-None')
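# Illustrative example (assumption, not from the original source): a tcp/80 rule
# granted to a security group produces a key such as
#   make_rule_key('in', {'proto': 'tcp', 'from_port': 80, 'to_port': 80},
#                 'sg-12345678', None)  ->  'in-tcp-80-80-sg-12345678-None'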
def addRulesToLookup(rules, prefix, rules_dict):
    for rule in rules:
        for grant in rule.grants:
            rules_dict[make_rule_key(prefix, rule, grant.group_id, grant.cidr_ip)] = (rule, grant)
def validate_rule(module, rule):
VALID_PARAMS = ('cidr_ip',
'group_id', 'group_name', 'group_desc',
'proto', 'from_port', 'to_port')
for k in rule:
if k not in VALID_PARAMS:
module.fail_json(msg='Invalid rule parameter \'{}\''.format(k))
if 'group_id' in rule and 'cidr_ip' in rule:
module.fail_json(msg='Specify group_id OR cidr_ip, not both')
elif 'group_name' in rule and 'cidr_ip' in rule:
module.fail_json(msg='Specify group_name OR cidr_ip, not both')
elif 'group_id' in rule and 'group_name' in rule:
module.fail_json(msg='Specify group_id OR group_name, not both')
def get_target_from_rule(module, ec2, rule, name, group, groups, vpc_id):
"""
Returns tuple of (group_id, ip) after validating rule params.
rule: Dict describing a rule.
name: Name of the security group being managed.
groups: Dict of all available security groups.
AWS accepts an ip range or a security group as target of a rule. This
function validate the rule specification and return either a non-None
group_id or a non-None ip range.
"""
FOREIGN_SECURITY_GROUP_REGEX = '^(\S+)/(sg-\S+)/(\S+)'
group_id = None
group_name = None
ip = None
target_group_created = False
if 'group_id' in rule and 'cidr_ip' in rule:
module.fail_json(msg="Specify group_id OR cidr_ip, not both")
elif 'group_name' in rule and 'cidr_ip' in rule:
module.fail_json(msg="Specify group_name OR cidr_ip, not both")
elif 'group_id' in rule and 'group_name' in rule:
module.fail_json(msg="Specify group_id OR group_name, not both")
elif 'group_id' in rule and re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']):
# this is a foreign Security Group. Since you can't fetch it you must create an instance of it
owner_id, group_id, group_name = re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']).groups()
group_instance = SecurityGroup(owner_id=owner_id, name=group_name, id=group_id)
groups[group_id] = group_instance
groups[group_name] = group_instance
elif 'group_id' in rule:
group_id = rule['group_id']
elif 'group_name' in rule:
group_name = rule['group_name']
if group_name == name:
group_id = group.id
groups[group_id] = group
groups[group_name] = group
elif group_name in groups:
group_id = groups[group_name].id
else:
if not rule.get('group_desc', '').strip():
module.fail_json(msg="group %s will be automatically created by rule %s and no description was provided" % (group_name, rule))
if not module.check_mode:
auto_group = ec2.create_security_group(group_name, rule['group_desc'], vpc_id=vpc_id)
group_id = auto_group.id
groups[group_id] = auto_group
groups[group_name] = auto_group
target_group_created = True
elif 'cidr_ip' in rule:
ip = rule['cidr_ip']
return group_id, ip, target_group_created
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
name=dict(required=True),
description=dict(required=True),
vpc_id=dict(),
rules=dict(),
rules_egress=dict(),
state = dict(default='present', choices=['present', 'absent']),
purge_rules=dict(default=True, required=False, type='bool'),
purge_rules_egress=dict(default=True, required=False, type='bool'),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
name = module.params['name']
description = module.params['description']
vpc_id = module.params['vpc_id']
rules = module.params['rules']
rules_egress = module.params['rules_egress']
state = module.params.get('state')
purge_rules = module.params['purge_rules']
purge_rules_egress = module.params['purge_rules_egress']
changed = False
ec2 = ec2_connect(module)
# find the group if present
group = None
groups = {}
for curGroup in ec2.get_all_security_groups():
groups[curGroup.id] = curGroup
if curGroup.name in groups:
# Prioritise groups from the current VPC
if vpc_id is None or curGroup.vpc_id == vpc_id:
groups[curGroup.name] = curGroup
else:
groups[curGroup.name] = curGroup
if curGroup.name == name and (vpc_id is None or curGroup.vpc_id == vpc_id):
group = curGroup
# Ensure requested group is absent
if state == 'absent':
if group:
'''found a match, delete it'''
try:
group.delete()
except Exception, e:
module.fail_json(msg="Unable to delete security group '%s' - %s" % (group, e))
else:
group = None
changed = True
else:
'''no match found, no changes required'''
# Ensure requested group is present
elif state == 'present':
if group:
'''existing group found'''
# check the group parameters are correct
group_in_use = False
rs = ec2.get_all_instances()
for r in rs:
for i in r.instances:
group_in_use |= reduce(lambda x, y: x | (y.name == 'public-ssh'), i.groups, False)
if group.description != description:
if group_in_use:
module.fail_json(msg="Group description does not match, but it is in use so cannot be changed.")
# if the group doesn't exist, create it now
else:
'''no match found, create it'''
if not module.check_mode:
group = ec2.create_security_group(name, description, vpc_id=vpc_id)
# When a group is created, an egress_rule ALLOW ALL
# to 0.0.0.0/0 is added automatically but it's not
# reflected in the object returned by the AWS API
# call. We re-read the group for getting an updated object
# amazon sometimes takes a couple seconds to update the security group so wait till it exists
while len(ec2.get_all_security_groups(filters={ 'group_id': group.id, })) == 0:
time.sleep(0.1)
group = ec2.get_all_security_groups(group_ids=(group.id,))[0]
changed = True
else:
module.fail_json(msg="Unsupported state requested: %s" % state)
# create a lookup for all existing rules on the group
if group:
# Manage ingress rules
groupRules = {}
addRulesToLookup(group.rules, 'in', groupRules)
# Now, go through all provided rules and ensure they are there.
if rules is not None:
for rule in rules:
validate_rule(module, rule)
group_id, ip, target_group_created = get_target_from_rule(module, ec2, rule, name, group, groups, vpc_id)
if target_group_created:
changed = True
if rule['proto'] in ('all', '-1', -1):
rule['proto'] = -1
rule['from_port'] = None
rule['to_port'] = None
# Convert ip to list we can iterate over
if not isinstance(ip, list):
ip = [ip]
# If rule already exists, don't later delete it
for thisip in ip:
ruleId = make_rule_key('in', rule, group_id, thisip)
if ruleId in groupRules:
del groupRules[ruleId]
# Otherwise, add new rule
else:
grantGroup = None
if group_id:
grantGroup = groups[group_id]
if not module.check_mode:
group.authorize(rule['proto'], rule['from_port'], rule['to_port'], thisip, grantGroup)
changed = True
# Finally, remove anything left in the groupRules -- these will be defunct rules
if purge_rules:
for (rule, grant) in groupRules.itervalues() :
grantGroup = None
if grant.group_id:
if grant.owner_id != group.owner_id:
# this is a foreign Security Group. Since you can't fetch it you must create an instance of it
group_instance = SecurityGroup(owner_id=grant.owner_id, name=grant.name, id=grant.group_id)
groups[grant.group_id] = group_instance
groups[grant.name] = group_instance
grantGroup = groups[grant.group_id]
if not module.check_mode:
group.revoke(rule.ip_protocol, rule.from_port, rule.to_port, grant.cidr_ip, grantGroup)
changed = True
# Manage egress rules
groupRules = {}
addRulesToLookup(group.rules_egress, 'out', groupRules)
# Now, go through all provided rules and ensure they are there.
if rules_egress is not None:
for rule in rules_egress:
validate_rule(module, rule)
group_id, ip, target_group_created = get_target_from_rule(module, ec2, rule, name, group, groups, vpc_id)
if target_group_created:
changed = True
if rule['proto'] in ('all', '-1', -1):
rule['proto'] = -1
rule['from_port'] = None
rule['to_port'] = None
# Convert ip to list we can iterate over
if not isinstance(ip, list):
ip = [ip]
# If rule already exists, don't later delete it
for thisip in ip:
ruleId = make_rule_key('out', rule, group_id, thisip)
if ruleId in groupRules:
del groupRules[ruleId]
# Otherwise, add new rule
else:
grantGroup = None
if group_id:
grantGroup = groups[group_id].id
if not module.check_mode:
ec2.authorize_security_group_egress(
group_id=group.id,
ip_protocol=rule['proto'],
from_port=rule['from_port'],
to_port=rule['to_port'],
src_group_id=grantGroup,
cidr_ip=thisip)
changed = True
elif vpc_id and not module.check_mode:
# when using a vpc, but no egress rules are specified,
# we add in a default allow all out rule, which was the
# default behavior before egress rules were added
default_egress_rule = 'out--1-None-None-None-0.0.0.0/0'
if default_egress_rule not in groupRules:
ec2.authorize_security_group_egress(
group_id=group.id,
ip_protocol=-1,
from_port=None,
to_port=None,
src_group_id=None,
cidr_ip='0.0.0.0/0'
)
changed = True
else:
# make sure the default egress rule is not removed
del groupRules[default_egress_rule]
# Finally, remove anything left in the groupRules -- these will be defunct rules
if purge_rules_egress:
for (rule, grant) in groupRules.itervalues():
grantGroup = None
if grant.group_id:
grantGroup = groups[grant.group_id].id
if not module.check_mode:
ec2.revoke_security_group_egress(
group_id=group.id,
ip_protocol=rule.ip_protocol,
from_port=rule.from_port,
to_port=rule.to_port,
src_group_id=grantGroup,
cidr_ip=grant.cidr_ip)
changed = True
if group:
module.exit_json(changed=changed, group_id=group.id)
else:
module.exit_json(changed=changed, group_id=None)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
| gpl-3.0 | -9,016,406,039,582,021,000 | 36.810924 | 201 | 0.564729 | false |
christiangroleau/maze-solver-python | solver.py | 1 | 1055 | from PIL import Image
from Queue import Queue
import sys
start = (18, 18)
end = (955, 479)
def is_approx_white(value):
r = value[0]
g = value[1]
b = value[2]
return r >= 240 and g >= 240 and b >= 240
def neighbours(n):
x, y = n
return [(x-1, y), (x, y+1), (x+1, y), (x, y-1)]
def BFS(pixels, start, end):
    queue = Queue()
    queue.put(start)
    parents = {start: None}  # remember how each pixel was reached
    while not queue.empty():
        current = queue.get()
        if current == end:
            # walk the parent links back to the start to rebuild the path
            path_taken = []
            while current is not None:
                path_taken.append(current)
                current = parents[current]
            path_taken.reverse()
            return path_taken
        for neighbour in neighbours(current):
            x, y = neighbour
            if neighbour not in parents and is_approx_white(pixels[x,y]):
                pixels[x,y] = (10, 10, 10) # mark as visited
                parents[neighbour] = current
                queue.put(neighbour)
    print "Unable to find exit!"
def main():
im = Image.open(sys.argv[1])
pixels = im.load()
print "Format: " + str(im.format)
print "Size: " + str(im.size)
print "Mode: " + str(im.mode)
path_taken = BFS(pixels, start, end)
if __name__ == "__main__":
main()
| gpl-2.0 | -9,081,671,666,137,794,000 | 19.288462 | 60 | 0.507109 | false |
jkreps/kafka | system_test/migration_tool_testsuite/migration_tool_test.py | 2 | 16689 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#!/usr/bin/env python
# ===================================
# migration_tool_test.py
# ===================================
import inspect
import logging
import os
import signal
import subprocess
import sys
import time
import traceback
from system_test_env import SystemTestEnv
sys.path.append(SystemTestEnv.SYSTEM_TEST_UTIL_DIR)
from setup_utils import SetupUtils
from replication_utils import ReplicationUtils
import system_test_utils
from testcase_env import TestcaseEnv
# product specific: Kafka
import kafka_system_test_utils
import metrics
class MigrationToolTest(ReplicationUtils, SetupUtils):
testModuleAbsPathName = os.path.realpath(__file__)
testSuiteAbsPathName = os.path.abspath(os.path.dirname(testModuleAbsPathName))
def __init__(self, systemTestEnv):
# SystemTestEnv - provides cluster level environment settings
# such as entity_id, hostname, kafka_home, java_home which
# are available in a list of dictionary named
# "clusterEntityConfigDictList"
self.systemTestEnv = systemTestEnv
super(MigrationToolTest, self).__init__(self)
# dict to pass user-defined attributes to logger argument: "extra"
        self.d = {'name_of_class': self.__class__.__name__}
def signal_handler(self, signal, frame):
self.log_message("Interrupt detected - User pressed Ctrl+c")
# perform the necessary cleanup here when user presses Ctrl+c and it may be product specific
self.log_message("stopping all entities - please wait ...")
kafka_system_test_utils.stop_all_remote_running_processes(self.systemTestEnv, self.testcaseEnv)
sys.exit(1)
def runTest(self):
# ======================================================================
# get all testcase directories under this testsuite
# ======================================================================
testCasePathNameList = system_test_utils.get_dir_paths_with_prefix(
self.testSuiteAbsPathName, SystemTestEnv.SYSTEM_TEST_CASE_PREFIX)
testCasePathNameList.sort()
replicationUtils = ReplicationUtils(self)
# =============================================================
# launch each testcase one by one: testcase_1, testcase_2, ...
# =============================================================
for testCasePathName in testCasePathNameList:
skipThisTestCase = False
try:
# ======================================================================
# A new instance of TestcaseEnv to keep track of this testcase's env vars
# and initialize some env vars as testCasePathName is available now
# ======================================================================
self.testcaseEnv = TestcaseEnv(self.systemTestEnv, self)
self.testcaseEnv.testSuiteBaseDir = self.testSuiteAbsPathName
self.testcaseEnv.initWithKnownTestCasePathName(testCasePathName)
self.testcaseEnv.testcaseArgumentsDict = self.testcaseEnv.testcaseNonEntityDataDict["testcase_args"]
# ======================================================================
# SKIP if this case is IN testcase_to_skip.json or NOT IN testcase_to_run.json
# ======================================================================
testcaseDirName = self.testcaseEnv.testcaseResultsDict["_test_case_name"]
if self.systemTestEnv.printTestDescriptionsOnly:
self.testcaseEnv.printTestCaseDescription(testcaseDirName)
continue
elif self.systemTestEnv.isTestCaseToSkip(self.__class__.__name__, testcaseDirName):
self.log_message("Skipping : " + testcaseDirName)
skipThisTestCase = True
continue
else:
self.testcaseEnv.printTestCaseDescription(testcaseDirName)
system_test_utils.setup_remote_hosts_with_testcase_level_cluster_config(self.systemTestEnv, testCasePathName)
# ============================================================================== #
# ============================================================================== #
# Product Specific Testing Code Starts Here: #
# ============================================================================== #
# ============================================================================== #
# initialize self.testcaseEnv with user-defined environment variables (product specific)
self.testcaseEnv.userDefinedEnvVarDict["zkConnectStr"] = ""
self.testcaseEnv.userDefinedEnvVarDict["stopBackgroundProducer"] = False
self.testcaseEnv.userDefinedEnvVarDict["backgroundProducerStopped"] = False
# initialize signal handler
signal.signal(signal.SIGINT, self.signal_handler)
# TestcaseEnv.testcaseConfigsList initialized by reading testcase properties file:
# system_test/<suite_name>_testsuite/testcase_<n>/testcase_<n>_properties.json
self.testcaseEnv.testcaseConfigsList = system_test_utils.get_json_list_data(
self.testcaseEnv.testcasePropJsonPathName)
# clean up data directories specified in zookeeper.properties and kafka_server_<n>.properties
kafka_system_test_utils.cleanup_data_at_remote_hosts(self.systemTestEnv, self.testcaseEnv)
# create "LOCAL" log directories for metrics, dashboards for each entity under this testcase
# for collecting logs from remote machines
kafka_system_test_utils.generate_testcase_log_dirs(self.systemTestEnv, self.testcaseEnv)
# TestcaseEnv - initialize producer & consumer config / log file pathnames
kafka_system_test_utils.init_entity_props(self.systemTestEnv, self.testcaseEnv)
# generate remote hosts log/config dirs if not exist
kafka_system_test_utils.generate_testcase_log_dirs_in_remote_hosts(self.systemTestEnv, self.testcaseEnv)
# generate properties files for zookeeper, kafka, producer, consumer and mirror-maker:
# 1. copy system_test/<suite_name>_testsuite/config/*.properties to
# system_test/<suite_name>_testsuite/testcase_<n>/config/
# 2. update all properties files in system_test/<suite_name>_testsuite/testcase_<n>/config
# by overriding the settings specified in:
# system_test/<suite_name>_testsuite/testcase_<n>/testcase_<n>_properties.json
kafka_system_test_utils.generate_overriden_props_files(self.testSuiteAbsPathName,
self.testcaseEnv, self.systemTestEnv)
# =============================================
# preparing all entities to start the test
# =============================================
self.log_message("starting zookeepers")
kafka_system_test_utils.start_zookeepers(self.systemTestEnv, self.testcaseEnv)
self.anonLogger.info("sleeping for 2s")
time.sleep(2)
self.log_message("starting brokers")
kafka_system_test_utils.start_brokers(self.systemTestEnv, self.testcaseEnv)
self.anonLogger.info("sleeping for 5s")
time.sleep(5)
self.log_message("starting migration tool")
kafka_system_test_utils.start_migration_tool(self.systemTestEnv, self.testcaseEnv)
self.anonLogger.info("sleeping for 5s")
time.sleep(5)
# =============================================
# starting producer
# =============================================
self.log_message("starting producer in the background")
kafka_system_test_utils.start_producer_performance(self.systemTestEnv, self.testcaseEnv, True)
msgProducingFreeTimeSec = self.testcaseEnv.testcaseArgumentsDict["message_producing_free_time_sec"]
self.anonLogger.info("sleeping for " + msgProducingFreeTimeSec + " sec to produce some messages")
time.sleep(int(msgProducingFreeTimeSec))
# =============================================
# A while-loop to bounce leader as specified
# by "num_iterations" in testcase_n_properties.json
# =============================================
i = 1
numIterations = int(self.testcaseEnv.testcaseArgumentsDict["num_iteration"])
bouncedEntityDownTimeSec = 1
try:
bouncedEntityDownTimeSec = int(self.testcaseEnv.testcaseArgumentsDict["bounced_entity_downtime_sec"])
except:
pass
while i <= numIterations:
self.log_message("Iteration " + str(i) + " of " + str(numIterations))
# =============================================
# Bounce Migration Tool
# =============================================
bounceMigrationTool = self.testcaseEnv.testcaseArgumentsDict["bounce_migration_tool"]
self.log_message("bounce_migration_tool flag : " + bounceMigrationTool)
if (bounceMigrationTool.lower() == "true"):
clusterConfigList = self.systemTestEnv.clusterEntityConfigDictList
migrationToolEntityIdList = system_test_utils.get_data_from_list_of_dicts(
clusterConfigList, "role", "migration_tool", "entity_id")
stoppedMigrationToolEntityId = migrationToolEntityIdList[0]
migrationToolPPid = self.testcaseEnv.entityMigrationToolParentPidDict[stoppedMigrationToolEntityId]
self.log_message("stopping migration tool : " + migrationToolPPid)
kafka_system_test_utils.stop_remote_entity(self.systemTestEnv, stoppedMigrationToolEntityId, migrationToolPPid)
self.anonLogger.info("sleeping for " + str(bouncedEntityDownTimeSec) + " sec")
time.sleep(bouncedEntityDownTimeSec)
                    # starting the previously terminated migration tool
self.log_message("starting the previously terminated migration tool")
kafka_system_test_utils.start_migration_tool(self.systemTestEnv, self.testcaseEnv, stoppedMigrationToolEntityId)
self.anonLogger.info("sleeping for 15s")
time.sleep(15)
i += 1
# while loop
# =============================================
# tell producer to stop
# =============================================
self.testcaseEnv.lock.acquire()
self.testcaseEnv.userDefinedEnvVarDict["stopBackgroundProducer"] = True
time.sleep(1)
self.testcaseEnv.lock.release()
time.sleep(1)
# =============================================
# wait for producer thread's update of
# "backgroundProducerStopped" to be "True"
# =============================================
while 1:
self.testcaseEnv.lock.acquire()
self.logger.info("status of backgroundProducerStopped : [" + \
str(self.testcaseEnv.userDefinedEnvVarDict["backgroundProducerStopped"]) + "]", extra=self.d)
if self.testcaseEnv.userDefinedEnvVarDict["backgroundProducerStopped"]:
time.sleep(1)
self.testcaseEnv.lock.release()
self.logger.info("all producer threads completed", extra=self.d)
break
time.sleep(1)
self.testcaseEnv.lock.release()
time.sleep(2)
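            # Illustrative sketch (assumption, not part of the original test): the
            # background producer thread is expected to poll these flags under the
            # same lock, roughly like:
            #
            #   while True:
            #       testcaseEnv.lock.acquire()
            #       if testcaseEnv.userDefinedEnvVarDict["stopBackgroundProducer"]:
            #           testcaseEnv.userDefinedEnvVarDict["backgroundProducerStopped"] = True
            #           testcaseEnv.lock.release()
            #           break
            #       testcaseEnv.lock.release()
            #       # ... produce another batch of messages ...
            #
            # which is why the loop above waits for "backgroundProducerStopped"
            # before moving on to the consumer.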
#print "\n\n#### sleeping for 30 min ...\n\n"
#time.sleep(1800)
# =============================================
# starting consumer
# =============================================
self.log_message("starting consumer in the background")
kafka_system_test_utils.start_console_consumer(self.systemTestEnv, self.testcaseEnv)
self.anonLogger.info("sleeping for 20s")
time.sleep(20)
# =============================================
# this testcase is completed - stop all entities
# =============================================
self.log_message("stopping all entities")
for entityId, parentPid in self.testcaseEnv.entityBrokerParentPidDict.items():
kafka_system_test_utils.stop_remote_entity(self.systemTestEnv, entityId, parentPid)
for entityId, parentPid in self.testcaseEnv.entityZkParentPidDict.items():
kafka_system_test_utils.stop_remote_entity(self.systemTestEnv, entityId, parentPid)
# make sure all entities are stopped
kafka_system_test_utils.ps_grep_terminate_running_entity(self.systemTestEnv)
# =============================================
# collect logs from remote hosts
# =============================================
kafka_system_test_utils.collect_logs_from_remote_hosts(self.systemTestEnv, self.testcaseEnv)
# =============================================
# validate the data matched and checksum
# =============================================
self.log_message("validating data matched")
kafka_system_test_utils.validate_data_matched(self.systemTestEnv, self.testcaseEnv, replicationUtils)
kafka_system_test_utils.validate_broker_log_segment_checksum(self.systemTestEnv, self.testcaseEnv)
# =============================================
# draw graphs
# =============================================
metrics.draw_all_graphs(self.systemTestEnv.METRICS_PATHNAME,
self.testcaseEnv,
self.systemTestEnv.clusterEntityConfigDictList)
# build dashboard, one for each role
metrics.build_all_dashboards(self.systemTestEnv.METRICS_PATHNAME,
self.testcaseEnv.testCaseDashboardsDir,
self.systemTestEnv.clusterEntityConfigDictList)
except Exception as e:
self.log_message("Exception while running test {0}".format(e))
traceback.print_exc()
finally:
if not skipThisTestCase and not self.systemTestEnv.printTestDescriptionsOnly:
self.log_message("stopping all entities - please wait ...")
kafka_system_test_utils.stop_all_remote_running_processes(self.systemTestEnv, self.testcaseEnv)
| apache-2.0 | -4,194,514,154,318,170,600 | 53.185065 | 136 | 0.528132 | false |
christianurich/VIBe2UrbanSim | 3rdparty/opus/src/synthesizer/gui/results_menu/view_thmap.py | 2 | 18974 | # PopGen 1.1 is A Synthetic Population Generator for Advanced
# Microsimulation Models of Travel Demand
# Copyright (C) 2009, Arizona State University
# See PopGen/License
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import *
from qgis.gui import *
from coreplot import *
from file_menu.newproject import Geography
from misc.map_toolbar import *
from results_preprocessor import *
from gui.misc.dbf import *
from numpy.random import randint
# Inputs for this module
resultsloc = "C:/populationsynthesis/gui/results"
resultmap = "bg04_selected.shp"
class Thmap(Matplot):
def __init__(self, project, parent=None):
Matplot.__init__(self)
self.setWindowTitle("Thematic Maps of Synthetic Population")
self.setWindowIcon(QIcon("./images/individualgeo.png"))
self.project = project
self.valid = False
check = self.isValid()
if check < 0:
self.valid = True
if self.project.resolution == "County":
self.res_prefix = "co"
if self.project.resolution == "Tract":
self.res_prefix = "tr"
if self.project.resolution == "Blockgroup":
self.res_prefix = "bg"
self.stateCode = self.project.stateCode[self.project.state]
resultfilename = self.res_prefix+self.stateCode+"_selected"
self.resultsloc = self.project.location + os.path.sep + self.project.name + os.path.sep + "results"
self.resultfileloc = os.path.realpath(self.resultsloc+os.path.sep+resultfilename+".shp")
self.dbffileloc = os.path.realpath(self.resultsloc+os.path.sep+resultfilename+".dbf")
scenarioDatabase = '%s%s%s' %(self.project.name, 'scenario', self.project.scenario)
self.projectDBC = createDBC(self.project.db, scenarioDatabase)
self.projectDBC.dbc.open()
self.makeComboBox()
self.makeMapWidget()
#self.vbox.addWidget(self.geocombobox)
self.vbox.addWidget(self.mapwidget)
self.vboxwidget = QWidget()
self.vboxwidget.setLayout(self.vbox)
vbox2 = QVBoxLayout()
self.vboxwidget2 = QWidget()
self.vboxwidget2.setLayout(vbox2)
self.labelwidget = QWidget()
labellayout = QGridLayout(None)
self.labelwidget.setLayout(labellayout)
labellayout.addWidget(QLabel("Selected Geography: " ),1,1)
labellayout.addWidget(QLabel("Average Absolute Relative Error(AARD): " ),2,1)
labellayout.addWidget(QLabel("p-value: "),3,1)
self.aardval = QLabel("")
self.pval = QLabel("")
self.selgeog = QLabel("")
self.aardval.setAlignment(Qt.AlignLeft)
self.pval.setAlignment(Qt.AlignLeft)
self.selgeog.setAlignment(Qt.AlignLeft)
labellayout.addWidget(self.selgeog ,1,2)
labellayout.addWidget(self.aardval,2,2)
labellayout.addWidget(self.pval,3,2)
vbox2.addWidget(self.labelwidget)
vbox2.addWidget(self.mapwidget)
self.hbox = QHBoxLayout()
#self.hbox.addWidget(self.vboxwidget)
self.hbox.addWidget(self.vboxwidget2)
indGeoWarning = QLabel("""<font color = blue>Note: Select a geography to show the performance statistics and display a"""
""" scatter plot showing the comparison between the person weighted sum and the """
"""composite person type constraints. </font>""")
indGeoWarning.setWordWrap(True)
self.vbox1 = QVBoxLayout()
self.vbox1.addLayout(self.hbox)
self.vbox1.addWidget(indGeoWarning)
self.vbox1.addWidget(self.dialogButtonBox)
self.setLayout(self.vbox1)
self.draw_boxselect()
#self.connect(self.geocombobox, SIGNAL("currSelChanged"), self.draw_boxselect)
self.connect(self.toolbar, SIGNAL("currentGeoChanged"), self.draw_mapselect)
self.selcounty = "0"
self.seltract = "0"
self.selblkgroup = "0"
self.pumano = -1
else:
if check == 1:
QMessageBox.warning(self, "Results", "Thematic Maps not available for TAZ resolution.", QMessageBox.Ok)
elif check == 2:
QMessageBox.warning(self, "Results", "Valid Shape File for geography not found.", QMessageBox.Ok)
elif check == 3:
QMessageBox.warning(self, "Results", "Please run synthesizer before viewing results.", QMessageBox.Ok)
def isValid(self):
retval = -1
if not self.isResolutionValid():
retval = 1
return retval
elif not self.isLayerValid():
retval = 2
return retval
elif not self.isPopSyn():
retval = 3
return retval
else:
return retval
def isResolutionValid(self):
return self.project.resolution != "TAZ"
def isLayerValid(self):
res = ResultsGen(self.project)
return res.create_hhmap()
def isPopSyn(self):
self.getGeographies()
return len(self.geolist)>0
def accept(self):
self.projectDBC.dbc.close()
self.mapcanvas.clear()
QDialog.accept(self)
def reject(self):
self.projectDBC.dbc.close()
self.mapcanvas.clear()
QDialog.reject(self)
def draw_boxselect(self):
currgeo = (self.geocombobox.getCurrentText()).split(',')
provider = self.layer.getDataProvider()
allAttrs = provider.allAttributesList()
#self.layer.select(QgsRect(), True)
provider.select(allAttrs,QgsRect())
blkgroupidx = provider.indexFromFieldName("BLKGROUP")
tractidx = provider.indexFromFieldName("TRACT")
countyidx = provider.indexFromFieldName("COUNTY")
selfeatid = 0
feat = QgsFeature()
while provider.getNextFeature(feat):
attrMap = feat.attributeMap()
featcounty = attrMap[countyidx].toString().trimmed()
if self.res_prefix == "co":
compid = '%s' %int(featcounty)
baseid = currgeo[1]
self.selgeog.setText("County - " + currgeo[1])
elif self.res_prefix == "tr":
feattract = attrMap[tractidx].toString().trimmed()
compid = '%s' %int(featcounty) + ',' + '%s' %int(feattract)
baseid = currgeo[1] + ',' + currgeo[2]
self.selgeog.setText("County - " + currgeo[1] + "; Tract - " + currgeo[2])
elif self.res_prefix == "bg":
feattract = ('%s'%(attrMap[tractidx].toString().trimmed())).ljust(6,'0')
featbg = attrMap[blkgroupidx].toString().trimmed()
compid = '%s' %int(featcounty) + ',' + '%s' %int(feattract) + ',' + '%s' %int(featbg)
baseid = currgeo[1] + ',' + currgeo[2] + ',' + currgeo[3]
self.selgeog.setText("County - " + currgeo[1] + "; Tract - " + currgeo[2] + "; BlockGroup - " + currgeo[3])
if (compid == baseid):
selfeatid = feat.featureId()
self.layer.setSelectedFeatures([selfeatid])
boundingBox = self.layer.boundingBoxOfSelected()
boundingBox.scale(4)
self.mapcanvas.setExtent(boundingBox)
self.mapcanvas.refresh()
break
self.selcounty = currgeo[1]
self.seltract = currgeo[2]
self.selblkgroup =currgeo[3]
self.draw_stat()
def draw_mapselect(self, provider=None, selfeat=None ):
if provider != None:
blkgroupidx = provider.indexFromFieldName("BLKGROUP")
tractidx = provider.indexFromFieldName("TRACT")
countyidx = provider.indexFromFieldName("COUNTY")
attrMap = selfeat.attributeMap()
try:
self.selcounty = attrMap[countyidx].toString().trimmed()
                if blkgroupidx == -1 and tractidx == -1:
self.selgeog.setText("County - " + self.selcounty)
if tractidx != -1:
self.seltract = ('%s'%(attrMap[tractidx].toString().trimmed())).ljust(6,'0')
if blkgroupidx == -1:
self.selgeog.setText("County - " + self.selcounty + "; Tract - " + self.seltract)
else:
self.selblkgroup = attrMap[blkgroupidx].toString().trimmed()
self.selgeog.setText("County - " + self.selcounty + "; Tract - " + self.seltract + "; BlockGroup - " + self.selblkgroup)
geog = '%s' %int(self.stateCode) + "," + '%s' %int(self.selcounty) + "," + '%s' %int(self.seltract) + "," + '%s' %int(self.selblkgroup)
if geog in self.geolist:
self.geocombobox.setCurrentText(geog)
#self.draw_boxselect()
else:
self.draw_stat()
except Exception, e:
print "Exception: %s; Invalid Selection." %e
def draw_stat(self):
self.ids = []
self.act = []
self.syn = []
# clear the axes
self.axes.clear()
self.axes.grid(True)
self.axes.set_xlabel("Joint Frequency Distribution from IPF")
self.axes.set_ylabel("Synthetic Joint Frequency Distribution")
self.axes.set_xbound(0)
self.axes.set_ybound(0)
self.retrieveResults()
if len(self.ids) > 0:
scat_plot = self.axes.scatter(self.act, self.syn)
scat_plot.axes.set_xbound(0)
scat_plot.axes.set_ybound(0)
else:
pass
self.canvas.draw()
def on_draw(self, provider=None, selfeat=None ):
if provider != None:
blkgroupidx = provider.indexFromFieldName("BLKGROUP")
tractidx = provider.indexFromFieldName("TRACT")
countyidx = provider.indexFromFieldName("COUNTY")
attrMap = selfeat.attributeMap()
try:
self.selcounty = attrMap[countyidx].toString().trimmed()
                if blkgroupidx == -1 and tractidx == -1:
self.selgeog.setText("County - " + self.selcounty)
if tractidx != -1:
self.seltract = ('%s'%(attrMap[tractidx].toString().trimmed())).ljust(6,'0')
if blkgroupidx == -1:
self.selgeog.setText("County - " + self.selcounty + "; Tract - " + self.seltract)
else:
self.selblkgroup = attrMap[blkgroupidx].toString().trimmed()
self.selgeog.setText("County - " + self.selcounty + "; Tract - " + self.seltract + "; BlockGroup - " + self.selblkgroup)
geog = '%s' %int(self.stateCode) + "," + '%s' %int(self.selcounty) + "," + '%s' %int(self.seltract) + "," + '%s' %int(self.selblkgroup)
self.ids = []
self.act = []
self.syn = []
# clear the axes
self.axes.clear()
self.axes.grid(True)
self.axes.set_xlabel("Joint Frequency Distribution from IPF")
self.axes.set_ylabel("Synthetic Joint Frequency Distribution")
self.axes.set_xbound(0)
self.axes.set_ybound(0)
self.retrieveResults()
if len(self.ids) > 0:
scat_plot = self.axes.scatter(self.act, self.syn)
scat_plot.axes.set_xbound(0)
scat_plot.axes.set_ybound(0)
else:
pass
self.canvas.draw()
except Exception, e:
print "Exception: %s; Invalid Selection." %e
def makeComboBox(self):
self.geolist.sort()
self.geocombobox = LabComboBox("Geography:",self.geolist)
self.current = self.geocombobox.getCurrentText()
def makeMapWidget(self):
self.mapcanvas = QgsMapCanvas()
self.mapcanvas.setCanvasColor(QColor(255,255,255))
self.mapcanvas.enableAntiAliasing(True)
self.mapcanvas.useQImageToRender(False)
var = 'random1'
f = open(self.dbffileloc, 'rb')
db = list(dbfreader(f))
f.close()
fieldnames, fieldspecs, records = db[0], db[1], db[2:]
if var not in fieldnames:
fieldnames.append(var)
fieldspecs.append(('N',11,0))
for rec in records:
rec.append(randint(0,100))
f = open(self.dbffileloc, 'wb')
dbfwriter(f, fieldnames, fieldspecs, records)
f.close()
else:
var = 'random2'
print 'ok'
fieldnames.append(var)
fieldspecs.append(('N',11,0))
for rec in records:
rec.append(randint(0,100))
f = open(self.dbffileloc, 'wb')
dbfwriter(f, fieldnames, fieldspecs, records)
f.close()
self.layer = QgsVectorLayer(self.resultfileloc, "Selgeogs", "ogr")
self.layer.setRenderer(QgsContinuousColorRenderer(self.layer.vectorType()))
r = self.layer.renderer()
provider = self.layer.getDataProvider()
idx = provider.indexFromFieldName(var)
r.setClassificationField(idx)
min = provider.minValue(idx).toString()
max = provider.maxValue(idx).toString()
minsymbol = QgsSymbol(self.layer.vectorType(), min, "","")
minsymbol.setBrush(QBrush(QColor(255,255,255)))
maxsymbol = QgsSymbol(self.layer.vectorType(), max, "","")
maxsymbol.setBrush(QBrush(QColor(0,0,0)))
r.setMinimumSymbol(minsymbol)
r.setMaximumSymbol(maxsymbol)
r.setSelectionColor(QColor(255,255,0))
if not self.layer.isValid():
return
QgsMapLayerRegistry.instance().addMapLayer(self.layer)
self.mapcanvas.setExtent(self.layer.extent())
cl = QgsMapCanvasLayer(self.layer)
layers = [cl]
self.mapcanvas.setLayerSet(layers)
self.toolbar = Toolbar(self.mapcanvas, self.layer)
self.toolbar.hideDragTool()
maplayout = QVBoxLayout()
maplayout.addWidget(self.toolbar)
maplayout.addWidget(self.mapcanvas)
self.mapwidget = QWidget()
self.mapwidget.setLayout(maplayout)
def getPUMA5(self, geo):
query = QSqlQuery(self.projectDBC.dbc)
if not geo.puma5:
if self.project.resolution == 'County':
geo.puma5 = 0
elif self.project.resolution == 'Tract':
if not query.exec_("""select pumano from geocorr where state = %s and county = %s and tract = %s and bg = 1"""
%(geo.state, geo.county, geo.tract)):
raise FileError, query.lastError().text()
while query.next():
geo.puma5 = query.value(0).toInt()[0]
else:
if not query.exec_("""select pumano from geocorr where state = %s and county = %s and tract = %s and bg = %s"""
%(geo.state, geo.county, geo.tract, geo.bg)):
raise FileError, query.lastError().text()
while query.next():
geo.puma5 = query.value(0).toInt()[0]
return geo
def retrieveResults(self):
# Get p-values and aard-values from performance statistics
performancetable = "performance_statistics"
aardvalvar = "aardvalue"
pvaluevar = "pvalue"
vars = aardvalvar + "," + pvaluevar
filter = ""
group = ""
if self.selblkgroup <> "0":
filter_act = "tract=" + str(self.seltract) + " and " + "bg=" + str(self.selblkgroup)
filter_syn = "county=" + str(self.selcounty) + " and " +"tract=" + str(self.seltract) + " and " + "bg=" + str(self.selblkgroup)
elif self.seltract <> "0":
filter_act = "tract=" + str(self.seltract) + " and " + "bg=0"
filter_syn = "county=" + str(self.selcounty) + " and " +"tract=" + str(self.seltract) + " and " + "bg=0"
else:
filter_act = "tract=0 and bg=0"
filter_syn = "county=" + str(self.selcounty) + " and tract=0 and bg=0"
query = self.executeSelectQuery(self.projectDBC.dbc,vars, performancetable, filter_syn, group)
aardval = 0.0
pval = 0.0
if query:
while query.next():
aardval = query.value(0).toDouble()[0]
pval = query.value(1).toDouble()[0]
self.aardval.setText("%.4f" %aardval)
self.pval.setText("%.4f" %pval)
geo = Geography(self.stateCode, int(self.selcounty), int(self.seltract), int(self.selblkgroup))
geo = self.getPUMA5(geo)
self.pumano = geo.puma5
        # Get and populate the actual and synthetic unique person type frequencies for the scatter plot
if int(self.pumano) > 0:
actualtable = "person_" + str(self.pumano) + "_joint_dist"
vars = "personuniqueid" + "," + "frequency"
group = "personuniqueid"
query = self.executeSelectQuery(self.projectDBC.dbc,vars, actualtable, filter_act, group)
if query:
while query.next():
id= query.value(0).toInt()[0]
freq = query.value(1).toDouble()[0]
self.ids.append(id)
self.act.append(freq)
syntable = "person_synthetic_data"
vars = "personuniqueid" + "," + "sum(frequency)"
group = "personuniqueid"
query = self.executeSelectQuery(self.projectDBC.dbc,vars, syntable, filter_syn, group)
self.syn = [0.0] * len(self.act)
if query:
while query.next():
id= query.value(0).toInt()[0]
freq = query.value(1).toDouble()[0]
if id in self.ids:
idx = self.ids.index(id)
self.syn[idx] = freq
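# Illustrative sketch (not part of the original module): the DBF round trip used in
# Thmap.makeMapWidget above, shown in isolation. The file path and column name here
# are placeholders.
def _example_add_random_column(dbf_path="example.dbf", column="random1"):
    """Append a numeric column filled with random values to a DBF file."""
    f = open(dbf_path, 'rb')
    db = list(dbfreader(f))
    f.close()
    fieldnames, fieldspecs, records = db[0], db[1], db[2:]
    if column not in fieldnames:
        fieldnames.append(column)
        fieldspecs.append(('N', 11, 0))  # numeric field, width 11, 0 decimal places
        for rec in records:
            rec.append(randint(0, 100))
        f = open(dbf_path, 'wb')
        dbfwriter(f, fieldnames, fieldspecs, records)
        f.close()
    return fieldnames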
def main():
app = QApplication(sys.argv)
QgsApplication.setPrefixPath(qgis_prefix, True)
QgsApplication.initQgis()
# res.show()
# app.exec_()
QgsApplication.exitQgis()
if __name__ == "__main__":
main()
| gpl-2.0 | 6,166,169,101,634,186,000 | 41.122727 | 151 | 0.545167 | false |
openstack/oslo.db | oslo_db/tests/sqlalchemy/test_update_match.py | 1 | 13842 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslotest import base as oslo_test_base
from sqlalchemy.ext import declarative
from sqlalchemy import schema
from sqlalchemy import sql
from sqlalchemy import types as sqltypes
from oslo_db.sqlalchemy import update_match
from oslo_db.tests.sqlalchemy import base as test_base
Base = declarative.declarative_base()
class MyModel(Base):
__tablename__ = 'my_table'
id = schema.Column(sqltypes.Integer, primary_key=True)
uuid = schema.Column(sqltypes.String(36), nullable=False, unique=True)
x = schema.Column(sqltypes.Integer)
y = schema.Column(sqltypes.String(40))
z = schema.Column(sqltypes.String(40))
class ManufactureCriteriaTest(oslo_test_base.BaseTestCase):
def test_instance_criteria_basic(self):
specimen = MyModel(
y='y1', z='z3',
uuid='136254d5-3869-408f-9da7-190e0072641a'
)
self.assertEqual(
"my_table.uuid = :uuid_1 AND my_table.y = :y_1 "
"AND my_table.z = :z_1",
str(update_match.manufacture_entity_criteria(specimen).compile())
)
def test_instance_criteria_basic_wnone(self):
specimen = MyModel(
y='y1', z=None,
uuid='136254d5-3869-408f-9da7-190e0072641a'
)
self.assertEqual(
"my_table.uuid = :uuid_1 AND my_table.y = :y_1 "
"AND my_table.z IS NULL",
str(update_match.manufacture_entity_criteria(specimen).compile())
)
def test_instance_criteria_tuples(self):
specimen = MyModel(
y='y1', z=('z1', 'z2'),
)
self.assertRegex(
str(update_match.manufacture_entity_criteria(specimen).compile()),
r"my_table.y = :y_1 AND my_table.z IN \(.+?\)",
)
def test_instance_criteria_tuples_wnone(self):
specimen = MyModel(
y='y1', z=('z1', 'z2', None),
)
self.assertRegex(
str(update_match.manufacture_entity_criteria(specimen).compile()),
r"my_table.y = :y_1 AND \(my_table.z IS NULL OR "
r"my_table.z IN \(.+?\)\)",
)
def test_instance_criteria_none_list(self):
specimen = MyModel(
y='y1', z=[None],
)
self.assertEqual(
"my_table.y = :y_1 AND my_table.z IS NULL",
str(update_match.manufacture_entity_criteria(specimen).compile())
)
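# Illustrative sketch (not part of the original tests): the compare-and-swap pattern
# that update_on_match() is exercising, written as application code might use it.
# The session and uuid arguments are placeholders supplied by the caller.
def _example_compare_and_swap(session, uuid):
    # The specimen describes the state we believe the row is in; the UPDATE only
    # succeeds if exactly one row still matches that state.
    specimen = MyModel(y='y1', z='z2', uuid=uuid)
    try:
        return session.query(MyModel).update_on_match(
            specimen, 'uuid', values={'z': 'z3'})
    except update_match.NoRowsMatched:
        # Someone else changed the row (or it never existed); the caller decides
        # whether to retry or give up.
        return None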
class UpdateMatchTest(test_base._DbTestCase):
def setUp(self):
super(UpdateMatchTest, self).setUp()
Base.metadata.create_all(self.engine)
self.addCleanup(Base.metadata.drop_all, self.engine)
# self.engine.echo = 'debug'
self.session = self.sessionmaker(autocommit=False)
self.addCleanup(self.session.close)
self.session.add_all([
MyModel(
id=1,
uuid='23cb9224-9f8e-40fe-bd3c-e7577b7af37d',
x=5, y='y1', z='z1'),
MyModel(
id=2,
uuid='136254d5-3869-408f-9da7-190e0072641a',
x=6, y='y1', z='z2'),
MyModel(
id=3,
uuid='094eb162-d5df-494b-a458-a91a1b2d2c65',
x=7, y='y1', z='z1'),
MyModel(
id=4,
uuid='94659b3f-ea1f-4ffd-998d-93b28f7f5b70',
x=8, y='y2', z='z2'),
MyModel(
id=5,
uuid='bdf3893c-ee3c-40a0-bc79-960adb6cd1d4',
x=8, y='y2', z=None),
])
self.session.commit()
def _assert_row(self, pk, values):
row = self.session.execute(
sql.select([MyModel.__table__]).where(MyModel.__table__.c.id == pk)
).first()
values['id'] = pk
self.assertEqual(values, dict(row))
def test_update_specimen_successful(self):
uuid = '136254d5-3869-408f-9da7-190e0072641a'
specimen = MyModel(
y='y1', z='z2', uuid=uuid
)
result = self.session.query(MyModel).update_on_match(
specimen,
'uuid',
values={'x': 9, 'z': 'z3'}
)
self.assertEqual(uuid, result.uuid)
self.assertEqual(2, result.id)
self.assertEqual('z3', result.z)
self.assertIn(result, self.session)
self._assert_row(
2,
{
'uuid': '136254d5-3869-408f-9da7-190e0072641a',
'x': 9, 'y': 'y1', 'z': 'z3'
}
)
def test_update_specimen_include_only(self):
uuid = '136254d5-3869-408f-9da7-190e0072641a'
specimen = MyModel(
y='y9', z='z5', x=6, uuid=uuid
)
# Query the object first to test that we merge when the object is
# already cached in the session.
self.session.query(MyModel).filter(MyModel.uuid == uuid).one()
result = self.session.query(MyModel).update_on_match(
specimen,
'uuid',
values={'x': 9, 'z': 'z3'},
include_only=('x', )
)
self.assertEqual(uuid, result.uuid)
self.assertEqual(2, result.id)
self.assertEqual('z3', result.z)
self.assertIn(result, self.session)
self.assertNotIn(result, self.session.dirty)
self._assert_row(
2,
{
'uuid': '136254d5-3869-408f-9da7-190e0072641a',
'x': 9, 'y': 'y1', 'z': 'z3'
}
)
def test_update_specimen_no_rows(self):
specimen = MyModel(
y='y1', z='z3',
uuid='136254d5-3869-408f-9da7-190e0072641a'
)
exc = self.assertRaises(
update_match.NoRowsMatched,
self.session.query(MyModel).update_on_match,
specimen, 'uuid', values={'x': 9, 'z': 'z3'}
)
self.assertEqual("Zero rows matched for 3 attempts", exc.args[0])
def test_update_specimen_process_query_no_rows(self):
specimen = MyModel(
y='y1', z='z2',
uuid='136254d5-3869-408f-9da7-190e0072641a'
)
def process_query(query):
return query.filter_by(x=10)
exc = self.assertRaises(
update_match.NoRowsMatched,
self.session.query(MyModel).update_on_match,
specimen, 'uuid', values={'x': 9, 'z': 'z3'},
process_query=process_query
)
self.assertEqual("Zero rows matched for 3 attempts", exc.args[0])
def test_update_specimen_given_query_no_rows(self):
specimen = MyModel(
y='y1', z='z2',
uuid='136254d5-3869-408f-9da7-190e0072641a'
)
query = self.session.query(MyModel).filter_by(x=10)
exc = self.assertRaises(
update_match.NoRowsMatched,
query.update_on_match,
specimen, 'uuid', values={'x': 9, 'z': 'z3'},
)
self.assertEqual("Zero rows matched for 3 attempts", exc.args[0])
def test_update_specimen_multi_rows(self):
specimen = MyModel(
y='y1', z='z1',
)
exc = self.assertRaises(
update_match.MultiRowsMatched,
self.session.query(MyModel).update_on_match,
specimen, 'y', values={'x': 9, 'z': 'z3'}
)
self.assertEqual("2 rows matched; expected one", exc.args[0])
def test_update_specimen_query_mismatch_error(self):
specimen = MyModel(
y='y1'
)
q = self.session.query(MyModel.x, MyModel.y)
exc = self.assertRaises(
AssertionError,
q.update_on_match,
specimen, 'y', values={'x': 9, 'z': 'z3'},
)
self.assertEqual("Query does not match given specimen", exc.args[0])
def test_custom_handle_failure_raise_new(self):
class MyException(Exception):
pass
def handle_failure(query):
# ensure the query is usable
result = query.count()
self.assertEqual(0, result)
raise MyException("test: %d" % result)
specimen = MyModel(
y='y1', z='z3',
uuid='136254d5-3869-408f-9da7-190e0072641a'
)
exc = self.assertRaises(
MyException,
self.session.query(MyModel).update_on_match,
specimen, 'uuid', values={'x': 9, 'z': 'z3'},
handle_failure=handle_failure
)
self.assertEqual("test: 0", exc.args[0])
def test_custom_handle_failure_cancel_raise(self):
uuid = '136254d5-3869-408f-9da7-190e0072641a'
class MyException(Exception):
pass
def handle_failure(query):
# ensure the query is usable
result = query.count()
self.assertEqual(0, result)
return True
specimen = MyModel(
id=2, y='y1', z='z3', uuid=uuid
)
result = self.session.query(MyModel).update_on_match(
specimen, 'uuid', values={'x': 9, 'z': 'z3'},
handle_failure=handle_failure
)
self.assertEqual(uuid, result.uuid)
self.assertEqual(2, result.id)
self.assertEqual('z3', result.z)
self.assertEqual(9, result.x)
self.assertIn(result, self.session)
def test_update_specimen_on_none_successful(self):
uuid = 'bdf3893c-ee3c-40a0-bc79-960adb6cd1d4'
specimen = MyModel(
y='y2', z=None, uuid=uuid
)
result = self.session.query(MyModel).update_on_match(
specimen,
'uuid',
values={'x': 9, 'z': 'z3'},
)
self.assertIn(result, self.session)
self.assertEqual(uuid, result.uuid)
self.assertEqual(5, result.id)
self.assertEqual('z3', result.z)
self._assert_row(
5,
{
'uuid': 'bdf3893c-ee3c-40a0-bc79-960adb6cd1d4',
'x': 9, 'y': 'y2', 'z': 'z3'
}
)
def test_update_specimen_on_multiple_nonnone_successful(self):
uuid = '094eb162-d5df-494b-a458-a91a1b2d2c65'
specimen = MyModel(
y=('y1', 'y2'), x=(5, 7), uuid=uuid
)
result = self.session.query(MyModel).update_on_match(
specimen,
'uuid',
values={'x': 9, 'z': 'z3'},
)
self.assertIn(result, self.session)
self.assertEqual(uuid, result.uuid)
self.assertEqual(3, result.id)
self.assertEqual('z3', result.z)
self._assert_row(
3,
{
'uuid': '094eb162-d5df-494b-a458-a91a1b2d2c65',
'x': 9, 'y': 'y1', 'z': 'z3'
}
)
def test_update_specimen_on_multiple_wnone_successful(self):
uuid = 'bdf3893c-ee3c-40a0-bc79-960adb6cd1d4'
specimen = MyModel(
y=('y1', 'y2'), x=(8, 7), z=('z1', 'z2', None), uuid=uuid
)
result = self.session.query(MyModel).update_on_match(
specimen,
'uuid',
values={'x': 9, 'z': 'z3'},
)
self.assertIn(result, self.session)
self.assertEqual(uuid, result.uuid)
self.assertEqual(5, result.id)
self.assertEqual('z3', result.z)
self._assert_row(
5,
{
'uuid': 'bdf3893c-ee3c-40a0-bc79-960adb6cd1d4',
'x': 9, 'y': 'y2', 'z': 'z3'
}
)
def test_update_returning_pk_matched(self):
pk = self.session.query(MyModel).\
filter_by(y='y1', z='z2').update_returning_pk(
{'x': 9, 'z': 'z3'},
('uuid', '136254d5-3869-408f-9da7-190e0072641a')
)
self.assertEqual((2,), pk)
self._assert_row(
2,
{
'uuid': '136254d5-3869-408f-9da7-190e0072641a',
'x': 9, 'y': 'y1', 'z': 'z3'
}
)
def test_update_returning_wrong_uuid(self):
exc = self.assertRaises(
update_match.NoRowsMatched,
self.session.query(MyModel).
filter_by(y='y1', z='z2').update_returning_pk,
{'x': 9, 'z': 'z3'},
('uuid', '23cb9224-9f8e-40fe-bd3c-e7577b7af37d')
)
self.assertEqual("No rows matched the UPDATE", exc.args[0])
def test_update_returning_no_rows(self):
exc = self.assertRaises(
update_match.NoRowsMatched,
self.session.query(MyModel).
filter_by(y='y1', z='z3').update_returning_pk,
{'x': 9, 'z': 'z3'},
('uuid', '136254d5-3869-408f-9da7-190e0072641a')
)
self.assertEqual("No rows matched the UPDATE", exc.args[0])
def test_update_multiple_rows(self):
exc = self.assertRaises(
update_match.MultiRowsMatched,
self.session.query(MyModel).
filter_by(y='y1', z='z1').update_returning_pk,
{'x': 9, 'z': 'z3'},
('y', 'y1')
)
self.assertEqual("2 rows matched; expected one", exc.args[0])
class PGUpdateMatchTest(
UpdateMatchTest,
test_base._PostgreSQLOpportunisticTestCase):
pass
class MySQLUpdateMatchTest(
UpdateMatchTest,
test_base._MySQLOpportunisticTestCase):
pass
| apache-2.0 | -8,412,389,441,677,218,000 | 30.105618 | 79 | 0.537856 | false |
bundgus/python-playground | numba-playground/numba-test2.py | 1 | 2784 | from __future__ import print_function, division
import numpy as np
from time import time
from numba import autojit, jit
def nufftfreqs(M, df=1):
"""Compute the frequency range used in nufft for M frequency bins"""
return df * np.arange(-(M // 2), M - (M // 2))
def nudft(x, y, M, df=1.0, iflag=1):
"""Non-Uniform Direct Fourier Transform"""
sign = -1 if iflag < 0 else 1
return (1 / len(x)) * np.dot(y, np.exp(sign * 1j * nufftfreqs(M, df) * x[:, np.newaxis]))
def _compute_grid_params(M, eps):
# Choose Msp & tau from eps following Dutt & Rokhlin (1993)
if eps <= 1E-33 or eps >= 1E-1:
raise ValueError("eps = {0:.0e}; must satisfy "
"1e-33 < eps < 1e-1.".format(eps))
ratio = 2 if eps > 1E-11 else 3
Msp = int(-np.log(eps) / (np.pi * (ratio - 1) / (ratio - 0.5)) + 0.5)
Mr = max(ratio * M, 2 * Msp)
lambda_ = Msp / (ratio * (ratio - 0.5))
tau = np.pi * lambda_ / M ** 2
return Msp, Mr, tau
@jit
def nufft_python(x, c, M, df=1.0, eps=1E-15, iflag=1):
"""Fast Non-Uniform Fourier Transform with Python"""
Msp, Mr, tau = _compute_grid_params(M, eps)
N = len(x)
# Construct the convolved grid
ftau = np.zeros(Mr, dtype=c.dtype)
Mr = ftau.shape[0]
hx = 2 * np.pi / Mr
mm = np.arange(-Msp, Msp)
for i in range(N):
xi = (x[i] * df) % (2 * np.pi)
m = 1 + int(xi // hx)
spread = np.exp(-0.25 * (xi - hx * (m + mm)) ** 2 / tau)
ftau[(m + mm) % Mr] += c[i] * spread
# Compute the FFT on the convolved grid
if iflag < 0:
Ftau = (1 / Mr) * np.fft.fft(ftau)
else:
Ftau = np.fft.ifft(ftau)
Ftau = np.concatenate([Ftau[-(M//2):], Ftau[:M//2 + M % 2]])
# Deconvolve the grid using convolution theorem
k = nufftfreqs(M)
return (1 / N) * np.sqrt(np.pi / tau) * np.exp(tau * k ** 2) * Ftau
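# Illustrative sketch (not part of the original script): a minimal correctness check
# on a tiny input, mirroring what test_nufft() below does on a larger one.
def _example_small_check(M=16):
    rng = np.random.RandomState(42)
    x = 100 * rng.rand(M + 1)
    y = np.sin(x)
    F_direct = nudft(x, y, M)       # O(N*M) direct evaluation
    F_fast = nufft_python(x, y, M)  # gridding + FFT approximation
    return np.allclose(F_direct, F_fast)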
def test_nufft(M=1000, Mtime=100000):
# Test vs the direct method
print(30 * '-')
name = {'nufft1':'nufft_fortran'}.get(nufft_python.__name__,
nufft_python.__name__)
print("testing {0}".format(name))
rng = np.random.RandomState(0)
x = 100 * rng.rand(M + 1)
y = np.sin(x)
for df in [1, 2.0]:
for iflag in [1, -1]:
F1 = nudft(x, y, M, df=df, iflag=iflag)
F2 = nufft_python(x, y, M, df=df, iflag=iflag)
assert np.allclose(F1, F2)
print("- Results match the DFT")
# Time the nufft function
x = 100 * rng.rand(Mtime)
y = np.sin(x)
times = []
for i in range(5):
t0 = time()
F = nufft_python(x, y, Mtime)
t1 = time()
times.append(t1 - t0)
print("- Execution time (M={0}): {1:.2g} sec".format(Mtime, np.median(times)))
test_nufft()
| mit | -3,675,053,529,809,261,000 | 31.752941 | 93 | 0.534842 | false |
shinglyu/servo | tests/wpt/web-platform-tests/webdriver/support/fixtures.py | 23 | 5150 | import json
import os
import urlparse
import webdriver
from support.http_request import HTTPRequest
from support import merge_dictionaries
default_host = "http://127.0.0.1"
default_port = "4444"
def _ensure_valid_window(session):
"""If current window is not open anymore, ensure to have a valid one selected."""
try:
session.window_handle
except webdriver.NoSuchWindowException:
session.window_handle = session.handles[0]
def _dismiss_user_prompts(session):
"""Dismisses any open user prompts in windows."""
current_window = session.window_handle
for window in _windows(session):
session.window_handle = window
try:
session.alert.dismiss()
except webdriver.NoSuchAlertException:
pass
session.window_handle = current_window
def _restore_windows(session):
"""Closes superfluous windows opened by the test without ending
the session implicitly by closing the last window.
"""
current_window = session.window_handle
for window in _windows(session, exclude=[current_window]):
session.window_handle = window
if len(session.window_handles) > 1:
session.close()
session.window_handle = current_window
def _switch_to_top_level_browsing_context(session):
"""If the current browsing context selected by WebDriver is a
`<frame>` or an `<iframe>`, switch it back to the top-level
browsing context.
"""
session.switch_frame(None)
def _windows(session, exclude=None):
"""Set of window handles, filtered by an `exclude` list if
provided.
"""
if exclude is None:
exclude = []
wins = [w for w in session.handles if w not in exclude]
return set(wins)
def create_frame(session):
"""Create an `iframe` element in the current browsing context and insert it
into the document. Return an element reference."""
def create_frame():
append = """
var frame = document.createElement('iframe');
document.body.appendChild(frame);
return frame;
"""
        response = session.execute_script(append)
        return response
return create_frame
def create_window(session):
"""Open new window and return the window handle."""
def create_window():
windows_before = session.handles
name = session.execute_script("window.open()")
assert len(session.handles) == len(windows_before) + 1
new_windows = list(set(session.handles) - set(windows_before))
return new_windows.pop()
return create_window
def http(session):
return HTTPRequest(session.transport.host, session.transport.port)
def server_config():
return json.loads(os.environ.get("WD_SERVER_CONFIG"))
def create_session(request):
"""Provide a factory function that produces wdclient `Session` instances.
If the `WD_CAPABILITIES` environment variable is set, it will be parsed as
JSON and the resulting object will be included in the WebDriver "Create
Session" command. Additional capabilities may be specified as an optional
argument to this function, but the operation will fail if any values
conflict with those specified via the environment. If the session is still
active at the completion of the test, it will be destroyed
automatically."""
def create_session(test_capabilities=None):
host = os.environ.get("WD_HOST", default_host)
port = int(os.environ.get("WD_PORT", default_port))
if test_capabilities is None:
test_capabilities = {}
env_capabilities = json.loads(os.environ.get("WD_CAPABILITIES", "{}"))
capabilities = merge_dictionaries(env_capabilities, test_capabilities)
session = webdriver.Session(host, port, capabilities=capabilities)
def destroy():
if session.session_id is not None:
session.end()
# finalisers are popped off a stack, making their ordering reverse
request.addfinalizer(destroy)
request.addfinalizer(lambda: _switch_to_top_level_browsing_context(session))
request.addfinalizer(lambda: _restore_windows(session))
request.addfinalizer(lambda: _dismiss_user_prompts(session))
request.addfinalizer(lambda: _ensure_valid_window(session))
return session
return create_session
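# Illustrative sketch (not part of the original module): how a test might combine the
# fixtures above. The capability dict and path are placeholders.
def _example_test(create_session, url):
    # create_session() builds a wdclient Session, merges any extra capabilities with
    # those from WD_CAPABILITIES, and registers cleanup finalizers.
    session = create_session({"browserName": "firefox"})
    return session, url("/common/blank.html")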
# Create a wdclient `Session` object for each Pytest "session". If the
# `WD_CAPABILITIES` environment variable is set, it will be parsed as JSON and
# the resulting object will be included in the WebDriver "Create Session"
# command. If the session is still active at the completion of the test, it
# will be destroyed automatically.
def session(create_session):
return create_session()
def url(server_config):
def inner(path, query="", fragment=""):
rv = urlparse.urlunsplit(("http",
"%s:%s" % (server_config["host"],
server_config["ports"]["http"][0]),
path,
query,
fragment))
return rv
return inner
| mpl-2.0 | 5,264,432,975,172,907,000 | 34.763889 | 85 | 0.659417 | false |
stack-of-tasks/rbdlpy | tutorial/lib/python2.7/site-packages/OpenGLContext/scenegraph/text/_toolsfont.py | 2 | 7692 | """fonttools-based direct extraction of TTF outlines
Most of the complexity of this module has been refactored
into the ttfquery package, which provides for finding
system fonts, generating registries of available fonts,
querying metadata regarding a particular font/glyph etc.
"""
from fontTools import ttLib
from OpenGLContext.arrays import *
import weakref, sys
from ttfquery import describe, glyphquery
import logging
log = logging.getLogger( __name__ )
# don't have any specialisations as of yet, so just include it
from ttfquery.glyph import Glyph
class Font(object):
"""Holder for metadata regarding a particular font
XXX Note: currently there is no distinction between the
Context-specific metadata and the generic descriptions
of the fonts. For instance, there is no reason that
the underlying glyph metadata needs to be queried for
each quality level of a given font (only the outlines
need to be regenerated), and the outlines themselves
are not actually dependent on the context, so they can
be shared across contexts, with only the display lists
not being shareable.
"""
defaultGlyphClass = Glyph
encoding = None
def __init__(self, filename, encoding = None, glyphClass = None, quality=3 ):
"""Initialize the font
filename -- a file source from which to load
the .ttf file, must be a simple local filename,
not a URL or font-name.
encoding -- the TrueType encoding specifier, the
specifier is two elements, a PlatformID and
a platform-specific ID code (sub encoding).
http://developer.apple.com/fonts/TTRefMan/RM06/Chap6name.html#ID
the default should be the Unicode Roman if
I've done my homework correctly.
(0,0) or (0,3) -- Unicode (default or Unicode 2.0
semantics respectively), I have no fonts with which
to test this encoding.
(3,1) -- Latin-1 Microsoft encoding, while
(1,0) should be the Mac-Roman encoding. You will
almost certainly want (3,1) on windows, and I'm
guessing (1,0) on Mac.
glyphClass -- the class used for creating new glyphs,
if not provided, self.defaultGlyphClass is used.
quality -- rendering quality for the font, the number
of integration steps for each quadratic curve in
the font definition.
"""
# character: Glyph instance
self.glyphs = {}
# glyphName: Glyph instance (short-circuits creation where == glyphs)
self.glyphNames = {}
self.filename = filename
self.encoding = encoding
self.quality = quality
self.glyphClass = glyphClass or self.defaultGlyphClass
self.withFont( self._generalMetadata )
def withFont( self, callable, *arguments, **named ):
"""Call a callable while we have a .font attribute
This method opens the font file and then calls the given
callable object. On exit, it eliminates the font.
XXX Currently this is not reentrant :(
"""
if __debug__:
log.info( """Opening TrueType font %r with fonttools""", self.filename)
self.font = describe.openFont( self.filename )
try:
return callable( *arguments, **named )
finally:
try:
del self.font
except AttributeError:
pass
def _generalMetadata( self ):
"""Load general meta-data for this font (called via withFont)
Guess the appropriate encoding, query line height,
and character height.
"""
try:
self.encoding = describe.guessEncoding( self.font, self.encoding )
self.lineHeight = glyphquery.lineHeight( self.font )
self.charHeight = glyphquery.charHeight( self.font )
except Exception:
log.error( """Unable to load TrueType font from %r""", self.filename)
raise
def countGlyphs( self, string ):
"""Count the number of glyphs from string present in file"""
return self.withFont( self._countGlyphs, string )
def _countGlyphs( self, string ):
count = 0
set = {}
for character in string:
set[ glyphquery.explicitGlyph( self.font, character )] = 1
return len(set)
def ensureGlyphs( self, string ):
"""Retrieve set of glyphs for the string from file into local cache
(Optimization), take all glyphs represented by the string
and compile each glyph not currently available with a
single opening of the font file.
"""
needed = []
for character in string:
if not character in self.glyphs:
needed.append( character )
if needed:
self.withFont( self._createGlyphs, needed )
return len(needed)
def _createGlyphs( self, set ):
"""Create glyphs for the sequence of passed characters (called via withFont)"""
for character in set:
self._createGlyph( character, self.quality )
def getGlyph( self, character ):
"""Retrieve the appropriate glyph for this character
Returns a compiled glyph for the given character in
this font.
"""
if not character in self.glyphs:
self.withFont( self._createGlyph, character, self.quality )
        return self.glyphs.get(character)
def _createGlyph( self, character, quality ):
"""Load glyph outlines from font-file (called via withFont)"""
if __debug__:
log.info( """Retrieving glyph for character %r""", character)
glyphName = glyphquery.glyphName( self.font, character, self.encoding )
if glyphName in self.glyphNames:
# whadda-ya-know, it's the same glyph as another character
glyph = self.glyphNames[ glyphName ]
self.glyphs[character] = glyph
return glyph
glyph = self.glyphClass(
glyphName
)
glyph.compile( self.font, steps = quality )
self.glyphs[character] = glyph
self.glyphNames[ glyphName ] = glyph
return glyph
def __repr__( self ):
"""Provide a representation of the Font"""
return """%s( %r, %r )"""% (
self.__class__.__name__,
self.filename,
self.encoding,
)
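# Illustrative sketch (not part of the original module): typical use of the Font
# class. The .ttf path is a placeholder; encoding (3,1) is the Microsoft Latin-1
# table described in the constructor docstring.
def _example_usage(path="C:/Windows/Fonts/arial.ttf"):
    font = Font(path, encoding=(3, 1), quality=2)
    font.ensureGlyphs(u"Hello")  # compile all required outlines with one file open
    glyph = font.getGlyph(u"H")  # cached Glyph instance with compiled outlines
    return font.lineHeight, font.charHeight, glyph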
if __name__ == "__main__":
import os, glob, traceback
testText = [ unicode(chr(x),'latin-1') for x in range(32,256)]
def scan( directory = os.path.join( os.environ['windir'], 'fonts')):
files = glob.glob( os.path.join(directory, "*.ttf"))
errors = []
for file in files:
error = (file, [])
print '\nFile', file
try:
font = Font(
file,
)
except Exception, err:
traceback.print_exc()
error[1].append( (file, "Couldn't load"))
else:
for character in testText:
try:
font.getGlyph(character)
except Exception, err:
traceback.print_exc()
error[1].append( (file, "Character %r failed, aborting font %r"%(character,file)))
break
if error[1]:
errors.append( error )
return errors
errors = scan()
print '__________________________'
for file,msgs in errors:
print 'File', file
print "\n".join(msgs)
| lgpl-3.0 | -6,025,054,764,739,935,000 | 37.653266 | 106 | 0.593214 | false |
jejung/django-bootstrap-components | bootstrap_components/templatetags/bootstrap_components.py | 1 | 1826 | from django import template
register = template.Library()
@register.inclusion_tag('bootstrap/bootstrap_page.html', takes_context=True)
def bootstrap_page(context):
""" A useful tag to transform your page on an extension of bootstrap basic
template. Ceckout bootstrap site for more details:
http://getbootstrap.com/getting-started/#template.
There are some sections on the page created over django templates block
system, so you can override the content. They are:
- meta: To describe your own <meta> declarations.
- title: To define the title of the page.
- resources: In header declarations of css, javascript or any other resource
you need.
- body: The content of the page itself.
    - scripts: The scripts that should be added to the end of the page; it is a
      common practice to place JavaScript there because it lets the browser
      download and render the page in parallel. Check:
https://developer.yahoo.com/performance/rules.html for a set of good reasons
to do that.
"""
return context
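# Illustrative sketch (not part of the original module, usage inferred from the
# docstring above): a template that loads this library, renders the Bootstrap
# scaffold with {% bootstrap_page %}, and overrides the blocks it exposes, e.g.
#
#   {% load bootstrap_components %}
#   {% block title %}My page{% endblock %}
#   {% block body %}<p>Hello, Bootstrap.</p>{% endblock %}
#   {% bootstrap_page %}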
@register.inclusion_tag('bootstrap/icon.html', takes_context=False)
def bootstrap_icon(icon):
""" Render a simple icon on the page like <span class="glyphicon *icon" />.
Please consider reading the usage notes as described on the Bootstrap
documentation: http://getbootstrap.com/components/#glyphicons-how-to-use
    An additional note is important for Glyphicons, as the Bootstrap
documentation says:
"Glyphicons Halflings are normally not available for free, but their creator
has made them available for Bootstrap free of cost. As a thank you, we only
ask that you include a link back to Glyphicons whenever possible."
Link here: http://glyphicons.com/
"""
return {
'icon': icon,
} | gpl-3.0 | -4,578,210,798,109,589,000 | 44.675 | 81 | 0.722344 | false |
notbalanced/jrnl | features/steps/core.py | 1 | 8465 | from __future__ import unicode_literals
from __future__ import absolute_import
from behave import given, when, then
from jrnl import cli, install, Journal, util, plugins
from jrnl import __version__
from dateutil import parser as date_parser
from collections import defaultdict
try: import parsedatetime.parsedatetime_consts as pdt
except ImportError: import parsedatetime as pdt
import time
import os
import json
import yaml
import keyring
consts = pdt.Constants(usePyICU=False)
consts.DOWParseStyle = -1 # Prefers past weekdays
CALENDAR = pdt.Calendar(consts)
class TestKeyring(keyring.backend.KeyringBackend):
"""A test keyring that just stores its valies in a hash"""
priority = 1
keys = defaultdict(dict)
def set_password(self, servicename, username, password):
self.keys[servicename][username] = password
def get_password(self, servicename, username):
return self.keys[servicename].get(username)
def delete_password(self, servicename, username, password):
self.keys[servicename][username] = None
# set the keyring for keyring lib
keyring.set_keyring(TestKeyring())
try:
from io import StringIO
except ImportError:
from cStringIO import StringIO
import tzlocal
import shlex
import sys
def ushlex(command):
if sys.version_info[0] == 3:
return shlex.split(command)
return map(lambda s: s.decode('UTF8'), shlex.split(command.encode('utf8')))
def read_journal(journal_name="default"):
config = util.load_config(install.CONFIG_FILE_PATH)
with open(config['journals'][journal_name]) as journal_file:
journal = journal_file.read()
return journal
def open_journal(journal_name="default"):
config = util.load_config(install.CONFIG_FILE_PATH)
journal_conf = config['journals'][journal_name]
if type(journal_conf) is dict: # We can override the default config on a by-journal basis
config.update(journal_conf)
else: # But also just give them a string to point to the journal file
config['journal'] = journal_conf
return Journal.open_journal(journal_name, config)
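# Illustrative sketch (not part of the original module): a feature-file scenario that
# the step implementations below would match, e.g.
#
#   Scenario: Adding an entry
#       Given we use the config "basic.yaml"
#       When we run "jrnl hello world"
#       Then we should get no error
#       And the journal should have 1 entry
#
# Each step string is matched against the @given/@when/@then decorators in this file;
# the config filename is a placeholder.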
@given('we use the config "{config_file}"')
def set_config(context, config_file):
full_path = os.path.join("features/configs", config_file)
install.CONFIG_FILE_PATH = os.path.abspath(full_path)
if config_file.endswith("yaml"):
# Add jrnl version to file for 2.x journals
with open(install.CONFIG_FILE_PATH, 'a') as cf:
cf.write("version: {}".format(__version__))
@when('we run "{command}" and enter')
@when('we run "{command}" and enter "{inputs}"')
def run_with_input(context, command, inputs=None):
text = inputs or context.text
args = ushlex(command)[1:]
buffer = StringIO(text.strip())
util.STDIN = buffer
try:
cli.run(args or [])
context.exit_status = 0
except SystemExit as e:
context.exit_status = e.code
@when('we run "{command}"')
def run(context, command):
args = ushlex(command)[1:]
try:
cli.run(args or None)
context.exit_status = 0
except SystemExit as e:
context.exit_status = e.code
@given('we load template "{filename}"')
def load_template(context, filename):
full_path = os.path.join("features/data/templates", filename)
exporter = plugins.template_exporter.__exporter_from_file(full_path)
plugins.__exporter_types[exporter.names[0]] = exporter
@when('we set the keychain password of "{journal}" to "{password}"')
def set_keychain(context, journal, password):
keyring.set_password('jrnl', journal, password)
@then('we should get an error')
def has_error(context):
assert context.exit_status != 0, context.exit_status
@then('we should get no error')
def no_error(context):
    assert context.exit_status == 0, context.exit_status
@then('the output should be parsable as json')
def check_output_json(context):
out = context.stdout_capture.getvalue()
assert json.loads(out), out
@then('"{field}" in the json output should have {number:d} elements')
@then('"{field}" in the json output should have 1 element')
def check_output_field(context, field, number=1):
out = context.stdout_capture.getvalue()
out_json = json.loads(out)
assert field in out_json, [field, out_json]
assert len(out_json[field]) == number, len(out_json[field])
@then('"{field}" in the json output should not contain "{key}"')
def check_output_field_not_key(context, field, key):
out = context.stdout_capture.getvalue()
out_json = json.loads(out)
assert field in out_json
assert key not in out_json[field]
@then('"{field}" in the json output should contain "{key}"')
def check_output_field_key(context, field, key):
out = context.stdout_capture.getvalue()
out_json = json.loads(out)
assert field in out_json
assert key in out_json[field]
@then('the json output should contain {path} = "{value}"')
def check_json_output_path(context, path, value):
""" E.g.
the json output should contain entries.0.title = "hello"
"""
out = context.stdout_capture.getvalue()
struct = json.loads(out)
for node in path.split('.'):
try:
struct = struct[int(node)]
except ValueError:
struct = struct[node]
assert struct == value, struct
@then('the output should be')
@then('the output should be "{text}"')
def check_output(context, text=None):
text = (text or context.text).strip().splitlines()
out = context.stdout_capture.getvalue().strip().splitlines()
assert len(text) == len(out), "Output has {} lines (expected: {})".format(len(out), len(text))
for line_text, line_out in zip(text, out):
assert line_text.strip() == line_out.strip(), [line_text.strip(), line_out.strip()]
@then('the output should contain "{text}" in the local time')
def check_output_time_inline(context, text):
out = context.stdout_capture.getvalue()
local_tz = tzlocal.get_localzone()
date, flag = CALENDAR.parse(text)
output_date = time.strftime("%Y-%m-%d %H:%M",date)
assert output_date in out, output_date
@then('the output should contain')
@then('the output should contain "{text}"')
def check_output_inline(context, text=None):
text = text or context.text
out = context.stdout_capture.getvalue()
if isinstance(out, bytes):
out = out.decode('utf-8')
assert text in out, text
@then('the output should not contain "{text}"')
def check_output_not_inline(context, text):
out = context.stdout_capture.getvalue()
if isinstance(out, bytes):
out = out.decode('utf-8')
assert text not in out
@then('we should see the message "{text}"')
def check_message(context, text):
out = context.messages.getvalue()
assert text in out, [text, out]
@then('we should not see the message "{text}"')
def check_not_message(context, text):
out = context.messages.getvalue()
assert text not in out, [text, out]
@then('the journal should contain "{text}"')
@then('journal "{journal_name}" should contain "{text}"')
def check_journal_content(context, text, journal_name="default"):
journal = read_journal(journal_name)
assert text in journal, journal
@then('journal "{journal_name}" should not exist')
def journal_doesnt_exist(context, journal_name="default"):
with open(install.CONFIG_FILE_PATH) as config_file:
config = yaml.load(config_file, Loader=yaml.FullLoader)
journal_path = config['journals'][journal_name]
assert not os.path.exists(journal_path)
@then('the config should have "{key}" set to "{value}"')
@then('the config for journal "{journal}" should have "{key}" set to "{value}"')
def config_var(context, key, value, journal=None):
t, value = value.split(":")
value = {
"bool": lambda v: v.lower() == "true",
"int": int,
"str": str
}[t](value)
config = util.load_config(install.CONFIG_FILE_PATH)
if journal:
config = config["journals"][journal]
assert key in config
assert config[key] == value
@then('the journal should have {number:d} entries')
@then('the journal should have {number:d} entry')
@then('journal "{journal_name}" should have {number:d} entries')
@then('journal "{journal_name}" should have {number:d} entry')
def check_journal_entries(context, number, journal_name="default"):
journal = open_journal(journal_name)
assert len(journal.entries) == number
@then('fail')
def debug_fail(context):
assert False
| mit | 7,907,794,253,715,395,000 | 30.70412 | 98 | 0.680213 | false |
yamahata/neutron | neutron/tests/unit/mlnx/test_mlnx_plugin_config.py | 8 | 3923 | # Copyright (c) 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslo.config import cfg
#NOTE this import loads tests required options
from neutron.plugins.mlnx.common import config # noqa
from neutron.plugins.mlnx.common import constants
from neutron.plugins.mlnx.mlnx_plugin import MellanoxEswitchPlugin
from neutron.tests import base
class TestMlnxPluginConfig(base.BaseTestCase):
expected_vlan_mappings = {'physnet1': [(1, 1000)],
'physnet2': [(1, 1000)]}
expected_network_types = {'physnet1': constants.TYPE_ETH,
'physnet2': constants.TYPE_IB}
config_vlan_ranges = ['physnet1:1:1000', 'physnet2:1:1000']
config_network_types = ['physnet1:eth', 'physnet2:ib']
def setUp(self):
super(TestMlnxPluginConfig, self).setUp()
cfg.CONF.set_override('rpc_backend',
'neutron.openstack.common.rpc.impl_fake')
cfg.CONF.set_override(group='MLNX',
name='network_vlan_ranges',
override=self.config_vlan_ranges)
def _create_mlnx_plugin(self):
with mock.patch('neutron.plugins.mlnx.db.mlnx_db_v2'):
return MellanoxEswitchPlugin()
def _assert_expected_config(self):
plugin = self._create_mlnx_plugin()
self.assertEqual(plugin.network_vlan_ranges,
self.expected_vlan_mappings)
self.assertEqual(plugin.phys_network_type_maps,
self.expected_network_types)
def test_vlan_ranges_with_network_type(self):
cfg.CONF.set_override(group='MLNX',
name='physical_network_type_mappings',
override=self.config_network_types)
self._assert_expected_config()
def test_vlan_ranges_partial_network_type(self):
cfg.CONF.set_override(group='MLNX',
name='physical_network_type_mappings',
override=self.config_network_types[:1])
cfg.CONF.set_override(group='MLNX',
name='physical_network_type',
override=constants.TYPE_IB)
self._assert_expected_config()
def test_vlan_ranges_no_network_type(self):
cfg.CONF.set_override(group='MLNX',
name='physical_network_type',
override=constants.TYPE_IB)
cfg.CONF.set_override(group='MLNX',
name='physical_network_type_mappings',
override=[])
self.expected_network_types.update({'physnet1': constants.TYPE_IB})
self._assert_expected_config()
self.expected_network_types.update({'physnet1': constants.TYPE_ETH})
def test_parse_physical_network_mappings_invalid_type(self):
cfg.CONF.set_override(group='MLNX',
name='physical_network_type_mappings',
override=['physnet:invalid-type'])
self.assertRaises(SystemExit, self._create_mlnx_plugin)
def test_invalid_network_type(self):
cfg.CONF.set_override(group='MLNX',
name='physical_network_type',
override='invalid-type')
self.assertRaises(SystemExit, self._create_mlnx_plugin)
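    # Illustrative sketch (not part of the original tests): the oslo.config layout that
    # these set_override() calls emulate; the values mirror the class attributes above.
    #
    #   [MLNX]
    #   network_vlan_ranges = physnet1:1:1000,physnet2:1:1000
    #   physical_network_type_mappings = physnet1:eth,physnet2:ib
    #   physical_network_type = eth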
| apache-2.0 | 8,450,385,356,192,866,000 | 43.078652 | 76 | 0.607188 | false |
DLR-SC/DataFinder | src/datafinder/persistence/adapters/archive/metadata/adapter.py | 1 | 6501 | # $Filename$
# $Authors$
# Last Changed: $Date$ $Committer$ $Revision-Id$
#
# Copyright (c) 2003-2011, German Aerospace Center (DLR)
# All rights reserved.
#
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are
#met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the German Aerospace Center nor the names of
# its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
#OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
#SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
#LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
#THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
This module implements a MetadataStorer that is able to write the
meta data of an item into separate property files.
"""
__version__ = "$Revision-Id:$"
import codecs
from xml.etree import ElementTree as etree
from zipfile import ZipInfo
from datafinder.persistence.metadata.metadatastorer import NullMetadataStorer
from datafinder.persistence.metadata.value_mapping import MetadataValue, \
getPersistenceRepresentation
_ZIP_FILENAME_CODEC = codecs.lookup("CP437")
class MetadataArchiveAdapter(NullMetadataStorer, object):
"""
    Implementation of the L{NullMetadataStorer} scheme for ZIP archives. This
    implementation keeps the meta data of an item in an XML file, named after
    the item, which is stored alongside it in the archive.
"""
def __init__(self, identifier, archive, password=None):
""" Constructor.
@param identifier: The identifier of the associated item.
@type identifier: C{unicode}
        @param archive: The zip archive that should be used for storage.
@type archive: C{zipfile.ZipFile}
@param password: If the archive is encrypted, the password should be given here.
@type password: C{string}
"""
super(MetadataArchiveAdapter, self).__init__(identifier)
self._archive = archive
self._password = password
self._persistenceId = _ZIP_FILENAME_CODEC.encode(self.identifier, errors="ignore")[0] + ".xml"
@staticmethod
def _decodeMetadata(text):
""" Decode meta data from a XML string.
@param text: A string containing valid XML.
@type text: C{string}
@return: Deserialized meta data.
@rtype: C{dict} mapping C{string} to
L{MetadataValue<datafinder.persistence.metadata.value_mapping.MetadataValue>}
"""
tree = etree.XML(text)
result = dict()
for propertyNode in tree.findall("property"):
propertyName = propertyNode.attrib["name"]
propertyValue = MetadataValue(propertyNode.text)
result[propertyName] = propertyValue
return result
@staticmethod
def _encodeMetadata(metadata):
""" Encode meta data as XML document.
@param metadata: The meta data
@type metadata: C{dict}, mapping string to object
@return: A serialized XML document representing the meta data.
@rtype: C{string}
"""
tree = etree.Element("properties")
for key in metadata:
propertyNode = etree.Element("property", name=key)
propertyNode.text = getPersistenceRepresentation(metadata[key])
tree.append(propertyNode)
return etree.tostring(tree)
def retrieve(self, _=None):
""" @see: L{NullMetadataStorer<datafinder.persistence.metadata.metadatastorer.NullMetadataStorer>} """
# fixme meinel: add propertyIds to filter retrieved properties
try:
return self._decodeMetadata(self._archive.open(self._persistenceId, "r", self._password).read())
except KeyError:
return dict()
def _storeMetadata(self, encodedMetadata):
""" This method stores back the given meta data.
        @param encodedMetadata: The serialized metadata document that should be stored.
        @type encodedMetadata: C{string}
"""
try:
info = self._archive.getinfo(self._persistenceId)
except KeyError:
info = ZipInfo(self._persistenceId)
self._archive.writestr(info, encodedMetadata)
def update(self, properties):
""" @see: L{NullMetadataStorer<datafinder.persistence.metadata.metadatastorer.NullMetadataStorer>} """
metadata = self.retrieve()
for key in metadata:
if not key in properties:
properties[key] = metadata[key].guessRepresentation()
encodedMetadata = self._encodeMetadata(properties)
self._storeMetadata(encodedMetadata)
def delete(self, propertyIds):
""" @see: L{NullMetadataStorer<datafinder.persistence.metadata.metadatastorer.NullMetadataStorer>} """
metadata = self.retrieve()
properties = dict()
for key in metadata:
if not key in propertyIds:
properties[key] = metadata[key].guessRepresentation()
properties = self._encodeMetadata(properties)
self._storeMetadata(properties)
| bsd-3-clause | -9,051,041,311,212,983,000 | 37.640244 | 110 | 0.652976 | false |
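# Illustrative, self-contained sketch (not from the repository above) of the
# XML round-trip that MetadataArchiveAdapter._encodeMetadata/_decodeMetadata
# implement.  It uses only the standard library; the MetadataValue wrapper and
# getPersistenceRepresentation are replaced by plain strings here, which is a
# simplification made for the example.
from xml.etree import ElementTree as etree
def encode_metadata_sketch(metadata):
    tree = etree.Element("properties")
    for key, value in metadata.items():
        node = etree.Element("property", name=key)
        node.text = value
        tree.append(node)
    return etree.tostring(tree)
def decode_metadata_sketch(text):
    tree = etree.XML(text)
    return dict((node.attrib["name"], node.text) for node in tree.findall("property"))
original = {"author": "jdoe", "revision": "42"}
assert decode_metadata_sketch(encode_metadata_sketch(original)) == original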
WendellDuncan/or-tools | examples/tests/issue128.py | 7 | 6477 | from ortools.constraint_solver import pywrapcp
def test_v0():
print 'test_v0'
solver = pywrapcp.Solver('')
# we have two tasks of durations 4 and 7
task1 = solver.FixedDurationIntervalVar(0, 5, 4, False, "task1")
task2 = solver.FixedDurationIntervalVar(0, 5, 7, False, "task2")
tasks = [task1, task2]
# to each task, a post task of duration 64 is attached
postTask1 = solver.FixedDurationIntervalVar(4, 74 + 64, 64, False, "postTask1")
postTask2 = solver.FixedDurationIntervalVar(4, 77 + 64, 64, False, "postTask2")
postTasks = [postTask1, postTask2]
solver.Add(postTask1.StartsAtEnd(task1))
solver.Add(postTask2.StartsAtEnd(task2))
# two resources are available for the post tasks. There are binary indicator
# variables to determine which task uses which resource
postTask1UsesRes1 = solver.IntVar(0, 1, "post task 1 using resource 1")
postTask1UsesRes2 = solver.IntVar(0, 1, "post task 1 using resource 2")
postTask2UsesRes1 = solver.IntVar(0, 1, "post task 2 using resource 1")
postTask2UsesRes2 = solver.IntVar(0, 1, "post task 2 using resource 2")
indicators = [postTask1UsesRes1, postTask1UsesRes2, postTask2UsesRes1, postTask2UsesRes2]
# each post task needs exactly one resource
solver.Add(postTask1UsesRes1 + postTask1UsesRes2 == 1)
solver.Add(postTask2UsesRes1 + postTask2UsesRes2 == 1)
# each resource cannot be used simultaneously by more than one post task
solver.Add(solver.Cumulative(postTasks, [postTask1UsesRes1, postTask2UsesRes1], 1, "cumul1"))
solver.Add(solver.Cumulative(postTasks, [postTask1UsesRes2, postTask2UsesRes2], 1, "cumul2"))
# using constant demands instead, the correct solution is found
# solver.Add(solver.Cumulative(postTasks, [0, 1], 1, ""))
# solver.Add(solver.Cumulative(postTasks, [1, 0], 1, ""))
# search setup and solving
dbInterval = solver.Phase(tasks + postTasks, solver.INTERVAL_DEFAULT)
dbInt = solver.Phase(indicators, solver.INT_VAR_DEFAULT, solver.INT_VALUE_DEFAULT)
makespan = solver.Max([task1.EndExpr().Var(), task2.EndExpr().Var()])
optimize = solver.Optimize(False, makespan, 1)
solution = solver.Assignment()
solution.Add([t for t in (tasks + postTasks)])
solution.Add(indicators)
collector = solver.LastSolutionCollector(solution)
phase = solver.Compose([dbInt, dbInterval])
solver.Solve(phase, [collector, optimize])
if collector.SolutionCount() > 0:
for i, task in enumerate(tasks):
print("task {} runs from {} to {}".format(
i,
collector.StartValue(0, task),
collector.EndValue(0, task)))
for i, task in enumerate(postTasks):
print("postTask {} starts at {}".format(i, collector.StartValue(0, task)))
for indicator in indicators:
print('{} -> {}'.format(indicator.Name(), collector.Value(0, indicator)))
else:
print 'No solution'
def test_v1():
print 'test_v1'
solver = pywrapcp.Solver('')
# we have two tasks of durations 4 and 7
task1 = solver.FixedDurationIntervalVar(0, 5, 4, False, "task1")
task2 = solver.FixedDurationIntervalVar(0, 5, 7, False, "task2")
tasks = [task1, task2]
# Create copies for each resource
task1_r1 = solver.FixedDurationIntervalVar(0, 5, 4, True, "task1_1")
task2_r1 = solver.FixedDurationIntervalVar(0, 5, 7, True, "task2_1")
tasks_r1 = [task1_r1, task2_r1]
task1_r2 = solver.FixedDurationIntervalVar(0, 5, 4, True, "task1_2")
task2_r2 = solver.FixedDurationIntervalVar(0, 5, 7, True, "task2_2")
tasks_r2 = [task1_r2, task2_r2]
# to each task, a post task of duration 64 is attached
postTask1 = solver.FixedDurationStartSyncedOnEndIntervalVar(task1, 64, 0)
postTask2 = solver.FixedDurationStartSyncedOnEndIntervalVar(task2, 64, 0)
postTasks = [postTask1, postTask2]
# Create copies for each resource
postTask1_r1 = solver.FixedDurationIntervalVar(4, 9, 64, True, "pTask1_1")
postTask2_r1 = solver.FixedDurationIntervalVar(4, 11, 64, True, "pTask2_1")
postTask1_r2 = solver.FixedDurationIntervalVar(4, 9, 64, True, "pTask1_2")
postTask2_r2 = solver.FixedDurationIntervalVar(4, 11, 64, True, "pTask2_2")
copies = [ task1_r1, task2_r1, task1_r2, task2_r2,
postTask1_r1, postTask1_r2, postTask2_r1, postTask2_r2 ]
# each resource cannot be used simultaneously by more than one post task
solver.Add(solver.DisjunctiveConstraint(
[task1_r1, task2_r1, postTask1_r1, postTask2_r1], "disj1"))
solver.Add(solver.DisjunctiveConstraint(
      [task1_r2, task2_r2, postTask1_r2, postTask2_r2], "disj2"))
# Only one resource available
solver.Add(task1_r1.PerformedExpr() + task1_r2.PerformedExpr() == 1)
solver.Add(task2_r1.PerformedExpr() + task2_r2.PerformedExpr() == 1)
solver.Add(postTask1_r1.PerformedExpr() + postTask1_r2.PerformedExpr() == 1)
solver.Add(postTask2_r1.PerformedExpr() + postTask2_r2.PerformedExpr() == 1)
# Sync master task with copies
solver.Add(solver.Cover([task1_r1, task1_r2], task1))
solver.Add(solver.Cover([task2_r1, task2_r2], task2))
solver.Add(solver.Cover([postTask1_r1, postTask1_r2], postTask1))
solver.Add(solver.Cover([postTask2_r1, postTask2_r2], postTask2))
# Indicators (no need to add both as they are constrained together)
indicators = [
task1_r1.PerformedExpr(), task2_r1.PerformedExpr(),
postTask1_r1.PerformedExpr(), postTask2_r1.PerformedExpr()]
# search setup and solving
dbInterval = solver.Phase(tasks + postTasks, solver.INTERVAL_DEFAULT)
dbInt = solver.Phase(
indicators, solver.INT_VAR_DEFAULT, solver.INT_VALUE_DEFAULT)
makespan = solver.Max([task1.EndExpr(), task2.EndExpr()])
optimize = solver.Minimize(makespan, 1)
solution = solver.Assignment()
solution.Add(tasks)
solution.Add(postTasks)
solution.Add(copies)
solution.AddObjective(makespan)
collector = solver.LastSolutionCollector(solution)
phase = solver.Compose([dbInt, dbInterval])
solver.Solve(phase, [collector, optimize])
if collector.SolutionCount() > 0:
print 'solution with makespan', collector.ObjectiveValue(0)
for task in tasks:
print("task {} runs from {} to {}".format(
task.Name(),
collector.StartValue(0, task),
collector.EndValue(0, task)))
for task in postTasks:
print("postTask {} starts at {}".format(
task.Name(), collector.StartValue(0, task)))
for task in copies:
print task.Name(), collector.PerformedValue(0, task)
else:
print 'No solution'
test_v0()
test_v1()
| apache-2.0 | -5,610,711,700,263,913,000 | 40.519231 | 95 | 0.708198 | false |
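# Hedged mini-example (not part of issue128.py above) of the indicator-variable
# pattern used in test_v0: a binary IntVar per resource plus a sum-to-one
# constraint expresses "each post task uses exactly one resource".  It assumes
# or-tools' original constraint solver API (pywrapcp) is installed; the model is
# deliberately stripped down and is not a reproduction of the full schedule.
from ortools.constraint_solver import pywrapcp
sketch_solver = pywrapcp.Solver('indicator_sketch')
uses_res1 = sketch_solver.IntVar(0, 1, 'postTaskUsesRes1')
uses_res2 = sketch_solver.IntVar(0, 1, 'postTaskUsesRes2')
sketch_solver.Add(uses_res1 + uses_res2 == 1)  # exactly one resource is chosen
db = sketch_solver.Phase([uses_res1, uses_res2],
                         sketch_solver.CHOOSE_FIRST_UNBOUND,
                         sketch_solver.ASSIGN_MIN_VALUE)
sketch_solver.NewSearch(db)
while sketch_solver.NextSolution():
    print(uses_res1.Value(), uses_res2.Value())  # prints 0 1 and then 1 0
sketch_solver.EndSearch()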
ComeBertrand/metabench | metabench/display/draw_utils.py | 1 | 4568 | """
File: draw_utils.py
Author: Come Bertrand
Email: [email protected]
Github: https://github.com/ComeBertrand
Description: bokeh drawings utilities.
"""
from itertools import chain
import numpy as np
from bokeh import palettes
from bokeh.plotting import figure, show, ColumnDataSource
from bokeh.layouts import gridplot
from bokeh.models import HoverTool
def create_box_plot(title, y_axis_label, categories, categories_values, value_formatter=None):
if value_formatter is None:
value_formatter = lambda x: x
raw_data = {
'min': [],
'max': [],
'q1': [],
'q2': [],
'q3': [],
'avg': [],
'std': []
}
for category_values in categories_values:
raw_data['min'].append(np.amin(category_values))
raw_data['max'].append(np.amax(category_values))
raw_data['q1'].append(np.percentile(category_values, 25))
raw_data['q2'].append(np.percentile(category_values, 50))
raw_data['q3'].append(np.percentile(category_values, 75))
raw_data['avg'].append(np.mean(category_values))
raw_data['std'].append(np.std(category_values))
format_data = {}
for key, value in raw_data.items():
new_key = '{}_fmt'.format(key)
format_data[new_key] = [value_formatter(item) for item in value]
raw_data.update(format_data)
raw_data['categories'] = categories
data_source = ColumnDataSource(data=raw_data)
f = figure(title=title,
y_axis_label=y_axis_label,
background_fill_color="#EFE8E2",
x_range=categories)
f.segment(categories, raw_data['max'], categories, raw_data['q3'], line_color='black')
f.segment(categories, raw_data['min'], categories, raw_data['q1'], line_color='black')
# boxes
bar_high = f.vbar(x='categories', width=0.7, bottom='q2', top='q3', source=data_source, fill_color="#E08E79",
line_color="black")
bar_low = f.vbar(x='categories', width=0.7, bottom='q1', top='q2', source=data_source, fill_color="#3B8686",
line_color="black")
# whiskers (almost-0 height rects simpler than segments)
whiskers_height = min([raw_data['max'][i] - raw_data['min'][i] for i in range(len(raw_data['max']))]) / 1000
f.rect(categories, raw_data['min'], 0.2, whiskers_height, line_color="black")
f.rect(categories, raw_data['max'], 0.2, whiskers_height, line_color="black")
hover = HoverTool(tooltips=[('Max', '@max_fmt'), ('3td Quartile', '@q3_fmt'), ('Median', '@q2_fmt'),
('1st Quartile', '@q1_fmt'), ('Min', '@min_fmt'), ('Avg', '@avg_fmt'), ('Std', '@std_fmt')],
renderers=[bar_high, bar_low])
f.add_tools(hover)
f.xgrid.grid_line_color = None
f.ygrid.grid_line_color = "white"
f.grid.grid_line_width = 2
f.xaxis.major_label_text_font_size="12pt"
return f
def create_hovered_multiline_graph(title, x_axis_label, y_axis_label, data_sources, hover_data, legend):
f = figure(title=title, x_axis_label=x_axis_label, y_axis_label=y_axis_label)
nb_lines = len(data_sources)
colors = get_colors(nb_lines)
circle_glyphs = []
for i, data_source in enumerate(data_sources):
item_kwargs = {'line_color': colors[i]}
if legend:
item_kwargs['legend'] = legend[i]
f.line('x', 'y', source=data_source, **item_kwargs)
circle_glyphs.append(f.circle('x', 'y', source=data_source, fill_color='white', size=8, **item_kwargs))
# Hover only on the circles
hover = HoverTool(renderers=circle_glyphs, tooltips=hover_data, mode='vline')
f.add_tools(hover)
f.legend.click_policy = 'hide'
return f
def create_hovered_single_line_graph(title, x_axis_label, y_axis_label, data_source, hover_data):
hover = HoverTool(tooltips=hover_data, mode='vline')
f = figure(title=title, x_axis_label=x_axis_label, y_axis_label=y_axis_label)
f.add_tools(hover)
color = get_colors(1)[0]
f.line('x', 'y', source=data_source, line_color=color)
return f
def get_colors(nb_lines):
colors = []
nb_palette = nb_lines // 20
colors = [color for color in chain(nb_palette * palettes.d3['Category20'][20])]
remaining = nb_lines % 20
if remaining <= 2:
colors += palettes.d3['Category10'][3]
elif remaining <= 10:
        colors += palettes.d3['Category10'][remaining]
elif remaining <= 20:
        colors += palettes.d3['Category20'][remaining]
return colors
| mit | -4,514,050,999,230,606,000 | 33.345865 | 124 | 0.618433 | false |
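# Hedged usage sketch for create_box_plot() defined above.  It assumes the
# metabench package is importable under this path and that bokeh/numpy are
# installed; the category names and sample values are invented for the example.
import numpy as np
from bokeh.plotting import show
from metabench.display.draw_utils import create_box_plot
rng = np.random.RandomState(0)
categories = ['greedy', 'tabu_search', 'simulated_annealing']
categories_values = [rng.normal(loc=mu, scale=5.0, size=100) for mu in (40, 35, 30)]
fig = create_box_plot(title='Fitness distribution per metaheuristic',
                      y_axis_label='fitness',
                      categories=categories,
                      categories_values=categories_values,
                      value_formatter=lambda v: '{:.2f}'.format(v))
show(fig)  # opens the rendered box plot in a browser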
Fat-Zer/FreeCAD_sf_master | src/Mod/PartDesign/PartDesignTests/TestLinearPattern.py | 27 | 7170 | # (c) Juergen Riegel ([email protected]) 2011 LGPL *
# *
# This file is part of the FreeCAD CAx development system. *
# *
# This program is free software; you can redistribute it and/or modify *
# it under the terms of the GNU Lesser General Public License (LGPL) *
# as published by the Free Software Foundation; either version 2 of *
# the License, or (at your option) any later version. *
# for detail see the LICENCE text file. *
# *
# FreeCAD is distributed in the hope that it will be useful, *
# but WITHOUT ANY WARRANTY; without even the implied warranty of *
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# GNU Library General Public License for more details. *
# *
# You should have received a copy of the GNU Library General Public *
# License along with FreeCAD; if not, write to the Free Software *
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# USA *
#**************************************************************************
import unittest
import FreeCAD
import TestSketcherApp
class TestLinearPattern(unittest.TestCase):
def setUp(self):
self.Doc = FreeCAD.newDocument("PartDesignTestLinearPattern")
def testXAxisLinearPattern(self):
self.Body = self.Doc.addObject('PartDesign::Body','Body')
self.Box = self.Doc.addObject('PartDesign::AdditiveBox','Box')
self.Body.addObject(self.Box)
self.Box.Length=10.00
self.Box.Width=10.00
self.Box.Height=10.00
self.Doc.recompute()
self.LinearPattern = self.Doc.addObject("PartDesign::LinearPattern","LinearPattern")
self.LinearPattern.Originals = [self.Box]
self.LinearPattern.Direction = (self.Doc.X_Axis,[""])
self.LinearPattern.Length = 90.0
self.LinearPattern.Occurrences = 10
self.Body.addObject(self.LinearPattern)
self.Doc.recompute()
self.assertAlmostEqual(self.LinearPattern.Shape.Volume, 1e4)
def testYAxisLinearPattern(self):
self.Body = self.Doc.addObject('PartDesign::Body','Body')
self.Box = self.Doc.addObject('PartDesign::AdditiveBox','Box')
self.Body.addObject(self.Box)
self.Box.Length=10.00
self.Box.Width=10.00
self.Box.Height=10.00
self.Doc.recompute()
self.LinearPattern = self.Doc.addObject("PartDesign::LinearPattern","LinearPattern")
self.LinearPattern.Originals = [self.Box]
self.LinearPattern.Direction = (self.Doc.Y_Axis,[""])
self.LinearPattern.Length = 90.0
self.LinearPattern.Occurrences = 10
self.Body.addObject(self.LinearPattern)
self.Doc.recompute()
self.assertAlmostEqual(self.LinearPattern.Shape.Volume, 1e4)
def testZAxisLinearPattern(self):
self.Body = self.Doc.addObject('PartDesign::Body','Body')
self.Box = self.Doc.addObject('PartDesign::AdditiveBox','Box')
self.Body.addObject(self.Box)
self.Box.Length=10.00
self.Box.Width=10.00
self.Box.Height=10.00
self.Doc.recompute()
self.LinearPattern = self.Doc.addObject("PartDesign::LinearPattern","LinearPattern")
self.LinearPattern.Originals = [self.Box]
self.LinearPattern.Direction = (self.Doc.Z_Axis,[""])
self.LinearPattern.Length = 90.0
self.LinearPattern.Occurrences = 10
self.Body.addObject(self.LinearPattern)
self.Doc.recompute()
self.assertAlmostEqual(self.LinearPattern.Shape.Volume, 1e4)
def testNormalSketchAxisLinearPattern(self):
self.Body = self.Doc.addObject('PartDesign::Body','Body')
self.PadSketch = self.Doc.addObject('Sketcher::SketchObject', 'SketchPad')
self.Body.addObject(self.PadSketch)
TestSketcherApp.CreateRectangleSketch(self.PadSketch, (0, 0), (10, 10))
self.Doc.recompute()
self.Pad = self.Doc.addObject("PartDesign::Pad", "Pad")
self.Body.addObject(self.Pad)
self.Pad.Profile = self.PadSketch
self.Pad.Length = 10
self.Doc.recompute()
self.LinearPattern = self.Doc.addObject("PartDesign::LinearPattern","LinearPattern")
self.LinearPattern.Originals = [self.Pad]
self.LinearPattern.Direction = (self.PadSketch,["N_Axis"])
self.LinearPattern.Length = 90.0
self.LinearPattern.Occurrences = 10
self.Body.addObject(self.LinearPattern)
self.Doc.recompute()
self.assertAlmostEqual(self.LinearPattern.Shape.Volume, 1e4)
def testVerticalSketchAxisLinearPattern(self):
self.Body = self.Doc.addObject('PartDesign::Body','Body')
self.PadSketch = self.Doc.addObject('Sketcher::SketchObject', 'SketchPad')
self.Body.addObject(self.PadSketch)
TestSketcherApp.CreateRectangleSketch(self.PadSketch, (0, 0), (10, 10))
self.Doc.recompute()
self.Pad = self.Doc.addObject("PartDesign::Pad", "Pad")
self.Body.addObject(self.Pad)
self.Pad.Profile = self.PadSketch
self.Pad.Length = 10
self.Doc.recompute()
self.LinearPattern = self.Doc.addObject("PartDesign::LinearPattern","LinearPattern")
self.LinearPattern.Originals = [self.Pad]
self.LinearPattern.Direction = (self.PadSketch,["V_Axis"])
self.LinearPattern.Length = 90.0
self.LinearPattern.Occurrences = 10
self.Body.addObject(self.LinearPattern)
self.Doc.recompute()
self.assertAlmostEqual(self.LinearPattern.Shape.Volume, 1e4)
def testHorizontalSketchAxisLinearPattern(self):
self.Body = self.Doc.addObject('PartDesign::Body','Body')
self.PadSketch = self.Doc.addObject('Sketcher::SketchObject', 'SketchPad')
self.Body.addObject(self.PadSketch)
TestSketcherApp.CreateRectangleSketch(self.PadSketch, (0, 0), (10, 10))
self.Doc.recompute()
self.Pad = self.Doc.addObject("PartDesign::Pad", "Pad")
self.Body.addObject(self.Pad)
self.Pad.Profile = self.PadSketch
self.Pad.Length = 10
self.Doc.recompute()
self.LinearPattern = self.Doc.addObject("PartDesign::LinearPattern","LinearPattern")
self.LinearPattern.Originals = [self.Pad]
self.LinearPattern.Direction = (self.PadSketch,["H_Axis"])
self.LinearPattern.Length = 90.0
self.LinearPattern.Occurrences = 10
self.Body.addObject(self.LinearPattern)
self.Doc.recompute()
self.assertAlmostEqual(self.LinearPattern.Shape.Volume, 1e4)
def tearDown(self):
#closing doc
FreeCAD.closeDocument("PartDesignTestLinearPattern")
# print ("omit closing document for debugging")
| lgpl-2.1 | -5,595,781,411,822,274,000 | 48.448276 | 92 | 0.621339 | false |
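# Hedged note on the 1e4 figure asserted throughout TestLinearPattern above:
# each 10 x 10 x 10 box contributes 1000 mm^3, and a pattern of 10 occurrences
# spread over 90 mm places the copies exactly side by side, so the fused solid
# is expected to have 10 * 1000 = 1e4 mm^3.  The check below only restates that
# arithmetic; it does not touch the FreeCAD API.
box_volume = 10 * 10 * 10
occurrences = 10
assert occurrences * box_volume == 1e4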
dwang159/oncall | setup.py | 1 | 1557 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import setuptools
import re
with open('src/oncall/__init__.py', 'r') as fd:
version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', fd.read(), re.MULTILINE).group(1)
setuptools.setup(
name='oncall',
version=version,
package_dir={'': 'src'},
packages=setuptools.find_packages('src'),
include_package_data=True,
install_requires=[
'falcon==1.1.0',
'falcon-cors',
'gevent',
'ujson',
'sqlalchemy',
'PyYAML',
'PyMYSQL',
'phonenumbers',
'jinja2',
'streql',
'webassets',
'beaker',
'cryptography==2.3',
'python-ldap',
'pytz',
'irisclient',
'slackclient',
'icalendar',
'pymsteams'
],
extras_require={
'ldap': ['python-ldap'],
'prometheus': ['prometheus_client'],
'dev': [
'pytest',
'pytest-mock',
'requests',
'gunicorn',
'flake8',
'Sphinx==1.5.6',
'sphinxcontrib-httpdomain',
'sphinx_rtd_theme',
'sphinx-autobuild',
],
},
entry_points={
'console_scripts': [
'oncall-dev = oncall.bin.run_server:main',
'oncall-user-sync = oncall.bin.user_sync:main',
'build_assets = oncall.bin.build_assets:main',
'oncall-scheduler = oncall.bin.scheduler:main',
'oncall-notifier = oncall.bin.notifier:main'
]
}
)
| bsd-2-clause | -5,598,139,929,934,084,000 | 24.52459 | 100 | 0.487476 | false |
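# Self-contained illustration of the version-extraction regex used at the top
# of the setup.py above.  The sample __init__.py content is invented for the
# example; the pattern itself is the one from the file.
import re
sample_init = '__version__ = "1.2.3"\n'
match = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', sample_init, re.MULTILINE)
assert match is not None
print(match.group(1))  # -> 1.2.3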
veger/ansible | lib/ansible/module_utils/network/edgeswitch/edgeswitch.py | 11 | 5758 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2018 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import json
import re
from copy import deepcopy
from ansible.module_utils._text import to_text
from ansible.module_utils.network.common.utils import to_list, ComplexList
from ansible.module_utils.connection import Connection, ConnectionError
from ansible.module_utils.network.common.utils import remove_default_spec
_DEVICE_CONFIGS = {}
def build_aggregate_spec(element_spec, required, *extra_spec):
aggregate_spec = deepcopy(element_spec)
for elt in required:
aggregate_spec[elt] = dict(required=True)
remove_default_spec(aggregate_spec)
argument_spec = dict(
aggregate=dict(type='list', elements='dict', options=aggregate_spec)
)
argument_spec.update(element_spec)
argument_spec.update(*extra_spec)
return argument_spec
def map_params_to_obj(module):
obj = []
aggregate = module.params.get('aggregate')
if aggregate:
for item in aggregate:
for key in item:
if item.get(key) is None:
item[key] = module.params[key]
d = item.copy()
obj.append(d)
else:
obj.append(module.params)
return obj
def get_connection(module):
if hasattr(module, '_edgeswitch_connection'):
return module._edgeswitch_connection
capabilities = get_capabilities(module)
network_api = capabilities.get('network_api')
if network_api == 'cliconf':
module._edgeswitch_connection = Connection(module._socket_path)
else:
module.fail_json(msg='Invalid connection type %s' % network_api)
return module._edgeswitch_connection
def get_capabilities(module):
if hasattr(module, '_edgeswitch_capabilities'):
return module._edgeswitch_capabilities
try:
capabilities = Connection(module._socket_path).get_capabilities()
except ConnectionError as exc:
module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
module._edgeswitch_capabilities = json.loads(capabilities)
return module._edgeswitch_capabilities
def get_defaults_flag(module):
connection = get_connection(module)
try:
out = connection.get_defaults_flag()
except ConnectionError as exc:
module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
return to_text(out, errors='surrogate_then_replace').strip()
def get_config(module, flags=None):
flag_str = ' '.join(to_list(flags))
try:
return _DEVICE_CONFIGS[flag_str]
except KeyError:
connection = get_connection(module)
try:
out = connection.get_config(flags=flags)
except ConnectionError as exc:
module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
cfg = to_text(out, errors='surrogate_then_replace').strip()
_DEVICE_CONFIGS[flag_str] = cfg
return cfg
def get_interfaces_config(module):
config = get_config(module)
lines = config.split('\n')
interfaces = {}
interface = None
for line in lines:
if line == 'exit':
if interface:
interfaces[interface[0]] = interface
interface = None
elif interface:
interface.append(line)
else:
match = re.match(r'^interface (.*)$', line)
if match:
interface = list()
interface.append(line)
return interfaces
def to_commands(module, commands):
spec = {
'command': dict(key=True),
'prompt': dict(),
'answer': dict()
}
transform = ComplexList(spec, module)
return transform(commands)
def run_commands(module, commands, check_rc=True):
connection = get_connection(module)
try:
return connection.run_commands(commands=commands, check_rc=check_rc)
except ConnectionError as exc:
module.fail_json(msg=to_text(exc))
def load_config(module, commands):
connection = get_connection(module)
try:
resp = connection.edit_config(commands)
return resp.get('response')
except ConnectionError as exc:
module.fail_json(msg=to_text(exc))
| gpl-3.0 | 1,736,475,068,465,557,000 | 33.479042 | 92 | 0.684786 | false |
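# Standalone sketch (not part of the module above) of the block parsing done by
# get_interfaces_config(): "interface ..." stanzas terminated by "exit" are
# grouped into a dict keyed by their opening line.  The sample running-config
# text is invented for illustration and the Ansible connection plumbing is
# deliberately left out.
import re
SAMPLE_CONFIG = "interface 0/1\ndescription uplink\nexit\ninterface 0/2\nshutdown\nexit\n"
def parse_interfaces_sketch(config_text):
    interfaces = {}
    interface = None
    for line in config_text.split('\n'):
        if line == 'exit':
            if interface:
                interfaces[interface[0]] = interface
            interface = None
        elif interface is not None:
            interface.append(line)
        elif re.match(r'^interface (.*)$', line):
            interface = [line]
    return interfaces
print(parse_interfaces_sketch(SAMPLE_CONFIG))
# -> {'interface 0/1': ['interface 0/1', 'description uplink'],
#     'interface 0/2': ['interface 0/2', 'shutdown']}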
zstackio/zstack-woodpecker | integrationtest/vm/multihosts/vm_snapshots/paths/path34.py | 1 | 2440 | import zstackwoodpecker.test_state as ts_header
import os
TestAction = ts_header.TestAction
def path():
return dict(initial_formation="template5", checking_point=8, path_list=[
[TestAction.create_vm, 'vm1', ],
[TestAction.create_volume, 'volume1', 'flag=scsi'],
[TestAction.attach_volume, 'vm1', 'volume1'],
[TestAction.create_volume, 'volume2', 'flag=scsi'],
[TestAction.attach_volume, 'vm1', 'volume2'],
[TestAction.create_volume, 'volume3', 'flag=scsi'],
[TestAction.attach_volume, 'vm1', 'volume3'],
[TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot1'],
[TestAction.create_volume_snapshot, 'volume1', 'volume1-snapshot5'],
[TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot6'],
[TestAction.create_volume_snapshot, 'volume2', 'volume2-snapshot10'],
[TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot11'],
[TestAction.create_volume_snapshot, 'vm1-root', 'vm1-root-snapshot15'],
[TestAction.create_volume_snapshot, 'vm1-root', 'vm1-root-snapshot16'],
[TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot17'],
[TestAction.create_volume_snapshot, 'volume3', 'volume3-snapshot21'],
[TestAction.stop_vm, 'vm1'],
[TestAction.use_volume_snapshot, 'volume1-snapshot1'],
[TestAction.start_vm, 'vm1'],
[TestAction.resize_data_volume, 'volume2', 5*1024*1024],
[TestAction.delete_volume_snapshot, 'volume3-snapshot1'],
[TestAction.delete_volume_snapshot, 'volume3-snapshot6'],
[TestAction.delete_vm_snapshot, 'vm1-snapshot1'],
])
'''
The final status:
Running:['vm1']
Stopped:[]
Enabled:['volume1-snapshot5', 'vm1-snapshot6', 'volume1-snapshot6', 'volume2-snapshot6', 'volume2-snapshot10', 'vm1-snapshot11', 'volume1-snapshot11', 'volume2-snapshot11', 'volume3-snapshot11', 'vm1-root-snapshot15', 'vm1-root-snapshot16', 'vm1-snapshot17', 'volume1-snapshot17', 'volume2-snapshot17', 'volume3-snapshot17', 'volume3-snapshot21']
attached:['volume1', 'volume2', 'volume3']
Detached:[]
Deleted:['volume3-snapshot6', 'vm1-snapshot1', 'volume1-snapshot1', 'volume2-snapshot1', 'volume3-snapshot1']
Expunged:[]
Ha:[]
Group:
vm_snap2:['vm1-snapshot6', 'volume1-snapshot6', 'volume2-snapshot6', 'volume3-snapshot6']---vm1volume1_volume2_volume3
vm_snap3:['vm1-snapshot11', 'volume1-snapshot11', 'volume2-snapshot11', 'volume3-snapshot11']---vm1volume1_volume2_volume3
vm_snap4:['vm1-snapshot17', 'volume1-snapshot17', 'volume2-snapshot17', 'volume3-snapshot17']---vm1volume1_volume2_volume3
'''
| apache-2.0 | -8,096,399,857,198,965,000 | 49.833333 | 347 | 0.725 | false |
tanmaykm/edx-platform | openedx/core/djangoapps/external_auth/login_and_register.py | 11 | 3316 | """Intercept login and registration requests.
This module contains legacy code originally from `student.views`.
"""
import re
from django.conf import settings
from django.shortcuts import redirect
from django.core.urlresolvers import reverse
import openedx.core.djangoapps.external_auth.views
from xmodule.modulestore.django import modulestore
from opaque_keys.edx.keys import CourseKey
# pylint: disable=fixme
# TODO: This function is kind of gnarly/hackish/etc and is only used in one location.
# It'd be awesome if we could get rid of it; manually parsing course_id strings from larger strings
# seems Probably Incorrect
def _parse_course_id_from_string(input_str):
"""
Helper function to determine if input_str (typically the queryparam 'next') contains a course_id.
    @param input_str: the string to examine (typically the 'next' query parameter)
@return: the course_id if found, None if not
"""
m_obj = re.match(r'^/courses/{}'.format(settings.COURSE_ID_PATTERN), input_str)
if m_obj:
return CourseKey.from_string(m_obj.group('course_id'))
return None
def _get_course_enrollment_domain(course_id):
"""
Helper function to get the enrollment domain set for a course with id course_id
    @param course_id: the course key of the course whose enrollment domain is wanted
    @return: the course's enrollment domain, or None if the course does not exist
"""
course = modulestore().get_course(course_id)
if course is None:
return None
return course.enrollment_domain
def login(request):
"""Allow external auth to intercept and handle a login request.
Arguments:
request (Request): A request for the login page.
Returns:
Response or None
"""
# Default to a `None` response, indicating that external auth
# is not handling the request.
response = None
if (
settings.FEATURES['AUTH_USE_CERTIFICATES'] and
openedx.core.djangoapps.external_auth.views.ssl_get_cert_from_request(request)
):
# SSL login doesn't require a view, so redirect
# branding and allow that to process the login if it
# is enabled and the header is in the request.
response = openedx.core.djangoapps.external_auth.views.redirect_with_get('root', request.GET)
elif settings.FEATURES.get('AUTH_USE_CAS'):
# If CAS is enabled, redirect auth handling to there
response = redirect(reverse('cas-login'))
elif settings.FEATURES.get('AUTH_USE_SHIB'):
redirect_to = request.GET.get('next')
if redirect_to:
course_id = _parse_course_id_from_string(redirect_to)
if course_id and _get_course_enrollment_domain(course_id):
response = openedx.core.djangoapps.external_auth.views.course_specific_login(
request,
course_id.to_deprecated_string(),
)
return response
def register(request):
"""Allow external auth to intercept and handle a registration request.
Arguments:
request (Request): A request for the registration page.
Returns:
Response or None
"""
response = None
if settings.FEATURES.get('AUTH_USE_CERTIFICATES_IMMEDIATE_SIGNUP'):
# Redirect to branding to process their certificate if SSL is enabled
# and registration is disabled.
response = openedx.core.djangoapps.external_auth.views.redirect_with_get('root', request.GET)
return response
| agpl-3.0 | 3,176,330,818,127,330,300 | 32.836735 | 101 | 0.680941 | false |
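# Hedged illustration of the "next" URL parsing performed by
# _parse_course_id_from_string() above.  SIMPLIFIED_COURSE_ID_PATTERN below is
# a stand-in for the real Django COURSE_ID_PATTERN setting, and the CourseKey
# step is skipped, so this only demonstrates the regex matching.
import re
SIMPLIFIED_COURSE_ID_PATTERN = r'(?P<course_id>[^/+]+(/|\+)[^/+]+(/|\+)[^/?]+)'
def parse_course_id_sketch(next_url):
    m_obj = re.match(r'^/courses/{}'.format(SIMPLIFIED_COURSE_ID_PATTERN), next_url)
    return m_obj.group('course_id') if m_obj else None
print(parse_course_id_sketch('/courses/course-v1:edX+DemoX+Demo_2014/courseware'))  # course-v1:edX+DemoX+Demo_2014
print(parse_course_id_sketch('/dashboard'))  # None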
qedsoftware/commcare-hq | corehq/apps/reports/standard/inspect.py | 1 | 7137 | import functools
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_noop, get_language
from corehq.apps.es import forms as form_es, filters as es_filters
from corehq.apps.hqcase.utils import SYSTEM_FORM_XMLNS
from corehq.apps.reports import util
from corehq.apps.reports.filters.users import ExpandedMobileWorkerFilter as EMWF
from corehq.apps.reports.models import HQUserType
from corehq.apps.reports.standard import ProjectReport, ProjectReportParametersMixin, DatespanMixin
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn
from corehq.apps.reports.display import FormDisplay
from corehq.apps.reports.filters.forms import FormsByApplicationFilter
from corehq.apps.reports.generic import (GenericTabularReport,
ProjectInspectionReportParamsMixin,
ElasticProjectInspectionReport)
from corehq.apps.reports.standard.monitoring import MultiFormDrilldownMixin, CompletionOrSubmissionTimeMixin
from corehq.apps.reports.util import datespan_from_beginning
from corehq.const import MISSING_APP_ID
from corehq.toggles import SUPPORT
from dimagi.utils.decorators.memoized import memoized
class ProjectInspectionReport(ProjectInspectionReportParamsMixin, GenericTabularReport, ProjectReport, ProjectReportParametersMixin):
"""
Base class for this reporting section
"""
exportable = False
asynchronous = False
ajax_pagination = True
fields = ['corehq.apps.reports.filters.users.UserTypeFilter',
'corehq.apps.reports.filters.users.SelectMobileWorkerFilter']
def get_user_link(self, user):
user_link = self.get_raw_user_link(user)
return self.table_cell(user.raw_username, user_link)
def get_raw_user_link(self, user):
raise NotImplementedError
class SubmitHistoryMixin(ElasticProjectInspectionReport,
ProjectReportParametersMixin,
CompletionOrSubmissionTimeMixin, MultiFormDrilldownMixin,
DatespanMixin):
name = ugettext_noop('Submit History')
slug = 'submit_history'
fields = [
'corehq.apps.reports.filters.users.ExpandedMobileWorkerFilter',
'corehq.apps.reports.filters.forms.FormsByApplicationFilter',
'corehq.apps.reports.filters.forms.CompletionOrSubmissionTimeFilter',
'corehq.apps.reports.filters.dates.DatespanFilter',
]
ajax_pagination = True
include_inactive = True
@property
def default_datespan(self):
return datespan_from_beginning(self.domain_object, self.timezone)
def _get_users_filter(self, mobile_user_and_group_slugs):
truthy_only = functools.partial(filter, None)
users_data = EMWF.pull_users_and_groups(
self.domain,
mobile_user_and_group_slugs,
include_inactive=True
)
selected_user_types = EMWF.selected_user_types(mobile_user_and_group_slugs)
all_mobile_workers_selected = HQUserType.REGISTERED in selected_user_types
if not all_mobile_workers_selected or users_data.admin_and_demo_users:
return form_es.user_id(truthy_only(
u.user_id for u in users_data.combined_users
))
else:
negated_ids = util.get_all_users_by_domain(
self.domain,
user_filter=HQUserType.all_but_users(),
simplified=True,
)
return es_filters.NOT(form_es.user_id(truthy_only(
user.user_id for user in negated_ids
)))
@staticmethod
def _form_filter(form):
app_id = form.get('app_id', None)
if app_id and app_id != MISSING_APP_ID:
return es_filters.AND(
form_es.app(app_id),
form_es.xmlns(form['xmlns'])
)
return form_es.xmlns(form['xmlns'])
@property
def es_query(self):
time_filter = form_es.submitted if self.by_submission_time else form_es.completed
mobile_user_and_group_slugs = self.request.GET.getlist(EMWF.slug)
query = (form_es.FormES()
.domain(self.domain)
.filter(time_filter(gte=self.datespan.startdate,
lt=self.datespan.enddate_adjusted))
.filter(self._get_users_filter(mobile_user_and_group_slugs)))
# filter results by app and xmlns if applicable
if FormsByApplicationFilter.has_selections(self.request):
form_values = self.all_relevant_forms.values()
if form_values:
query = query.OR(*[self._form_filter(f) for f in form_values])
# Exclude system forms unless they selected "Unknown User"
if HQUserType.UNKNOWN not in EMWF.selected_user_types(mobile_user_and_group_slugs):
query = query.NOT(form_es.xmlns(SYSTEM_FORM_XMLNS))
return query
@property
@memoized
def es_query_result(self):
return (self.es_query
.set_sorting_block(self.get_sorting_block())
.start(self.pagination.start)
.size(self.pagination.count)
.run())
def get_sorting_block(self):
sorting_block = super(SubmitHistoryMixin, self).get_sorting_block()
if sorting_block:
return sorting_block
else:
return [{self.time_field: {'order': 'desc'}}]
@property
def time_field(self):
return 'received_on' if self.by_submission_time else 'form.meta.timeEnd'
@property
def total_records(self):
return int(self.es_query_result.total)
class SubmitHistory(SubmitHistoryMixin, ProjectReport):
@property
def show_extra_columns(self):
return self.request.user and SUPPORT.enabled(self.request.user.username)
@classmethod
def display_in_dropdown(cls, domain=None, project=None, user=None):
if project and project.commtrack_enabled:
return False
else:
return True
@property
def headers(self):
h = [
DataTablesColumn(_("View Form")),
DataTablesColumn(_("Username"), prop_name='form.meta.username'),
DataTablesColumn(
_("Submission Time") if self.by_submission_time
else _("Completion Time"),
prop_name=self.time_field
),
DataTablesColumn(_("Form"), prop_name='form.@name'),
]
if self.show_extra_columns:
h.append(DataTablesColumn(_("Sync Log")))
return DataTablesHeader(*h)
@property
def rows(self):
for form in self.es_query_result.hits:
display = FormDisplay(form, self, lang=get_language())
row = [
display.form_data_link,
display.username,
display.submission_or_completion_time,
display.readable_form_name,
]
if self.show_extra_columns:
row.append(form.get('last_sync_token', ''))
yield row
| bsd-3-clause | -4,730,431,644,042,899,000 | 37.370968 | 133 | 0.638924 | false |
venicegeo/eventkit-cloud | eventkit_cloud/core/helpers.py | 1 | 4393 | # -*- coding: utf-8 -*-
import logging
import os
import shutil
import subprocess
import zipfile
from enum import Enum
from django.db import models
import dj_database_url
import requests
from django.conf import settings
from django.core.cache import cache
from notifications.signals import notify
from django.contrib.auth.models import User
logger = logging.getLogger(__name__)
def get_id(user: User):
if hasattr(user, "oauth"):
return user.oauth.identification
else:
return user.username
def get_model_by_params(model_class: models.Model, **kwargs):
return model_class.objects.get(**kwargs)
def get_cached_model(model: models.Model, prop: str, value: str) -> models.Model:
return cache.get_or_set(f"{model.__name__}-{prop}-{value}", get_model_by_params(model, **{prop: value}), 360)
def download_file(url, download_dir=None):
download_dir = download_dir or settings.EXPORT_STAGING_ROOT
file_location = os.path.join(download_dir, os.path.basename(url))
r = requests.get(url, stream=True)
if r.status_code == 200:
with open(file_location, "wb") as f:
for chunk in r:
f.write(chunk)
return file_location
else:
logger.error("Failed to download file, STATUS_CODE: {0}".format(r.status_code))
return None
def extract_zip(zipfile_path, extract_dir=None):
extract_dir = extract_dir or settings.EXPORT_STAGING_ROOT
logger.info("Extracting {0} to {1}...".format(zipfile_path, extract_dir))
zip_ref = zipfile.ZipFile(zipfile_path, "r")
zip_ref.extractall(extract_dir)
logger.info("Finished Extracting.")
zip_ref.close()
return extract_dir
def get_vector_file(directory):
for file in os.listdir(directory):
if file.endswith((".shp", ".geojson", ".gpkg")):
logger.info("Found: {0}".format(file))
return os.path.join(directory, file)
def load_land_vectors(db_conn=None, url=None):
if not url:
url = settings.LAND_DATA_URL
if db_conn:
database = dj_database_url.config(default=db_conn)
else:
database = settings.DATABASES["feature_data"]
logger.info("Downloading land data: {0}".format(url))
download_filename = download_file(url)
logger.info("Finished downloading land data: {0}".format(url))
file_dir = None
if os.path.splitext(download_filename)[1] == ".zip":
extract_zip(download_filename)
file_dir = os.path.splitext(download_filename)[0]
file_name = get_vector_file(file_dir)
else:
file_name = download_filename
cmd = (
'ogr2ogr -s_srs EPSG:3857 -t_srs EPSG:4326 -f "PostgreSQL" '
'PG:"host={host} user={user} password={password} dbname={name} port={port}" '
"{file} land_polygons".format(
host=database["HOST"],
user=database["USER"],
password=database["PASSWORD"].replace("$", "\$"),
name=database["NAME"],
port=database["PORT"],
file=file_name,
)
)
logger.info("Loading land data...")
exit_code = subprocess.call(cmd, shell=True)
if exit_code:
logger.error("There was an error importing the land data.")
if file_dir:
shutil.rmtree(file_dir)
os.remove(download_filename)
try:
os.remove(file_name)
except OSError:
pass
finally:
logger.info("Finished loading land data.")
class NotificationLevel(Enum):
SUCCESS = "success"
INFO = "info"
WARNING = "warning"
ERROR = "ERROR"
class NotificationVerb(Enum):
RUN_STARTED = "run_started"
RUN_COMPLETED = "run_completed"
RUN_FAILED = "run_failed"
RUN_DELETED = "run_deleted"
RUN_CANCELED = "run_canceled"
REMOVED_FROM_GROUP = "removed_from_group"
ADDED_TO_GROUP = "added_to_group"
SET_AS_GROUP_ADMIN = "set_as_group_admin"
REMOVED_AS_GROUP_ADMIN = "removed_as_group_admin"
RUN_EXPIRING = "run_expiring"
def sendnotification(actor, recipient, verb, action_object, target, level, description):
try:
notify.send(
actor,
recipient=recipient,
verb=verb,
action_object=action_object,
target=target,
level=level,
description=description,
)
except Exception as err:
logger.debug("notify send error ignored: %s" % err)
| bsd-3-clause | 8,222,111,555,290,391,000 | 28.286667 | 113 | 0.633963 | false |
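# Hedged usage sketch for download_file()/extract_zip()/get_vector_file() above.
# It assumes it runs inside a configured eventkit-cloud Django environment (the
# module imports Django settings and models at import time) and that the import
# path matches the repository layout; the URL and directories are invented.
import os
import tempfile
from eventkit_cloud.core.helpers import download_file, extract_zip, get_vector_file
staging_dir = tempfile.mkdtemp()
archive_path = download_file("https://example.com/data/land_polygons.zip", download_dir=staging_dir)
if archive_path:
    extract_dir = extract_zip(archive_path, extract_dir=os.path.join(staging_dir, "unzipped"))
    print(get_vector_file(extract_dir))  # e.g. .../land_polygons.shp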
SuperDARNCanada/placeholderOS | scheduler/local_scd_server.py | 2 | 12474 | #!/usr/bin/python3
# Copyright 2019 SuperDARN Canada
#
# local_scd_server.py
# 2019-04-18
# Monitors for new SWG files and adds the SWG info to the scd if there is an update.
#
import subprocess as sp
import scd_utils
import email_utils
import os
import datetime
import time
import argparse
SWG_GIT_REPO_DIR = 'schedules'
SWG_GIT_REPO = "https://github.com/SuperDARN/schedules.git"
EXPERIMENTS = {
"sas" : {
"common_time" : "twofsound",
"discretionary_time" : "twofsound",
"htr_common_time" : "twofsound",
"themis_time" : "themisscan",
"special_time_normal" : "twofsound",
"rbsp_time" : "rbspscan",
"no_switching_time" : "normalscan",
"interleaved_time" : "interleavedscan"
},
"pgr" : {
"common_time" : "normalscan",
"discretionary_time" : "normalscan",
"htr_common_time" : "normalscan",
"themis_time" : "themisscan",
"special_time_normal" : "normalscan",
"rbsp_time" : "rbspscan",
"no_switching_time" : "normalscan",
"interleaved_time" : "interleavedscan"
},
"rkn" : {
"common_time" : "twofsound",
"discretionary_time" : "twofsound",
"htr_common_time" : "twofsound",
"themis_time" : "themisscan",
"special_time_normal" : "twofsound",
"rbsp_time" : "rbspscan",
"no_switching_time" : "normalscan",
"interleaved_time" : "interleavedscan"
},
"inv" : {
"common_time" : "normalscan",
"discretionary_time" : "normalscan",
"htr_common_time" : "normalscan",
"themis_time" : "themisscan",
"special_time_normal" : "normalscan",
"rbsp_time" : "rbspscan",
"no_switching_time" : "normalscan",
"interleaved_time" : "interleavedscan"
},
"cly" : {
"common_time" : "normalscan",
"discretionary_time" : "normalscan",
"htr_common_time" : "normalscan",
"themis_time" : "themisscan",
"special_time_normal" : "normalscan",
"rbsp_time" : "rbspscan",
"no_switching_time" : "normalscan",
"interleaved_time" : "interleavedscan"
}
}
class SWG(object):
"""Holds the data needed for processing a SWG file.
Attributes:
scd_dir (str): Path to the SCD files dir.
"""
def __init__(self, scd_dir):
super(SWG, self).__init__()
self.scd_dir = scd_dir
try:
cmd = "git -C {}/{} rev-parse".format(self.scd_dir, SWG_GIT_REPO_DIR)
sp.check_output(cmd, shell=True)
except sp.CalledProcessError as e:
cmd = 'cd {}; git clone {}'.format(self.scd_dir, SWG_GIT_REPO)
sp.call(cmd, shell=True)
def new_swg_file_available(self):
"""Checks if a new swg file is uploaded via git.
Returns:
TYPE: True, if new git update is available.
"""
# This command will return the number of new commits available in master. This signals that
# there are new SWG files available.
cmd = "cd {}/{}; git fetch; git log ..origin/master --oneline | wc -l".format(self.scd_dir,
SWG_GIT_REPO_DIR)
shell_output = sp.check_output(cmd, shell=True)
return bool(int(shell_output))
def pull_new_swg_file(self):
"""Uses git to grab the new scd updates.
"""
cmd = "cd {}/{}; git pull origin master".format(self.scd_dir, SWG_GIT_REPO_DIR)
sp.call(cmd, shell=True)
def get_next_month(self):
"""Finds the datetime of the next month.
Returns:
TYPE: datetime object.
"""
today = datetime.datetime.utcnow()
counter = 1
new_date = today + datetime.timedelta(days=counter)
while new_date.month == today.month:
counter += 1
new_date = today + datetime.timedelta(days=counter)
return new_date
def parse_swg_to_scd(self, modes, radar, first_run):
        Reads the new SWG file and parses it into a set of parameters that can be used for borealis
scheduling.
Args:
modes (Dict): Holds the modes that correspond to the SWG requests.
radar (String): Radar acronym.
Returns:
TYPE: List of all the parsed parameters.
"""
if first_run:
month_to_use = datetime.datetime.utcnow()
else:
month_to_use = next_month = self.get_next_month()
year = month_to_use.strftime("%Y")
month = month_to_use.strftime("%m")
swg_file = "{scd_dir}/{swg_dir}/{yyyy}/{yyyymm}.swg".format(scd_dir=self.scd_dir,
swg_dir=SWG_GIT_REPO_DIR,
yyyy=year,
yyyymm=year+month)
with open(swg_file, 'r') as f:
swg_lines = f.readlines()
skip_line = False
parsed_params = []
for idx,line in enumerate(swg_lines):
# Skip line is used for special time radar lines
if skip_line:
skip_line = False
continue
# We've hit the SCD notes and no longer need to parse
if "# Notes:" in line:
break
# Skip only white space lines
if not line.strip():
continue
#Lines starting with '#' are comments
if line[0] == "#":
continue
items = line.split()
#First line is month and year
if idx == 0:
continue
start_day = items[0][0:2]
start_hr = items[0][3:]
end_day = items[1][0:2]
end_hr = items[1][3:]
if "Common Time" in line:
mode_type = "common"
# 2018 11 23 no longer scheduling twofsound as common time.
if "no switching" in line:
mode_to_use = modes["no_switching_time"]
else:
mode_to_use = modes["htr_common_time"]
if "Special Time" in line:
mode_type = "special"
if "ALL" in line or radar.upper() in line:
if "THEMIS" in line:
mode_to_use = modes["themis_time"]
elif "ST-APOG" in line or "RBSP" in line:
mode_to_use = modes["rbsp_time"]
elif "ARASE" in line:
if "themis" in swg_lines[idx+1]:
mode_to_use = modes["themis_time"]
if "interleaved" in swg_lines[idx+1]:
mode_to_use = modes["interleaved_time"]
else:
print("Unknown Special Time: using default common time")
mode_to_use = modes["htr_common_time"]
else:
mode_to_use = modes["special_time_normal"]
# Skip next line
#skip_line = True
if "Discretionary Time" in line:
mode_type = "discretionary"
mode_to_use = modes["discretionary_time"]
param = {"yyyymmdd": "{}{}{}".format(year, month, start_day),
"hhmm" : "{}:00".format(start_hr),
"experiment" : mode_to_use,
"scheduling_mode" : mode_type}
parsed_params.append(param)
return parsed_params
def main():
parser = argparse.ArgumentParser(description="Automatically schedules new events from the SWG")
parser.add_argument('--emails-filepath',required=True, help='A list of emails to send logs to')
parser.add_argument('--scd-dir', required=True, help='The scd working directory')
parser.add_argument('--first-run', action="store_true", help='This will generate the first set'
' of schedule files if running on'
' a fresh directory. If the next'
' month schedule is available,'
' you will need to roll back the'
' SWG schedule folder back to the'
' last commit before running in'
' continuous operation.')
args = parser.parse_args()
scd_dir = args.scd_dir
scd_logs = scd_dir + "/logs"
emailer = email_utils.Emailer(args.emails_filepath)
if not os.path.exists(scd_dir):
os.makedirs(scd_dir)
if not os.path.exists(scd_logs):
os.makedirs(scd_logs)
sites = list(EXPERIMENTS.keys())
site_scds = [scd_utils.SCDUtils("{}/{}.scd".format(scd_dir, s)) for s in sites]
swg = SWG(scd_dir)
while True:
if swg.new_swg_file_available() or args.first_run:
swg.pull_new_swg_file()
site_experiments = [swg.parse_swg_to_scd(EXPERIMENTS[s], s, args.first_run)
for s in sites]
errors = False
today = datetime.datetime.utcnow()
scd_error_log = today.strftime("/scd_errors.%Y%m%d")
for se, site_scd in zip(site_experiments, site_scds):
for ex in se:
try:
site_scd.add_line(ex['yyyymmdd'], ex['hhmm'], ex['experiment'], ex["scheduling_mode"])
except ValueError as e:
error_msg = ("{logtime} {sitescd}: Unable to add line with parameters:\n"
"\t {date} {time} {experiment} {mode}\n"
"\t Exception thrown:\n"
"\t\t {exception}\n")
error_msg = error_msg.format(logtime=today.strftime("%c"),
sitescd=site_scd.scd_filename,
date=ex['yyyymmdd'],
time=ex['hhmm'],
experiment=ex['experiment'],
mode=ex['scheduling_mode'],
exception=str(e))
with open(scd_logs + scd_error_log, 'a') as f:
f.write(error_msg)
errors = True
break
except FileNotFoundError as e:
error_msg = "SCD filename: {} is missing!!!\n".format(site_scd.scd_filename)
with open(scd_logs + scd_error_log, 'a') as f:
f.write(error_msg)
errors = True
break
subject = "Scheduling report for swg lines"
if not errors:
success_msg = "All swg lines successfully scheduled.\n"
for site, site_scd in zip(sites, site_scds):
yyyymmdd = today.strftime("%Y%m%d")
hhmm = today.strftime("%H:%M")
new_lines = site_scd.get_relevant_lines(yyyymmdd, hhmm)
text_lines = [site_scd.fmt_line(x) for x in new_lines]
success_msg += "\t{}\n".format(site)
for line in text_lines:
success_msg += "\t\t{}\n".format(line)
with open(scd_logs + scd_error_log, 'a') as f:
f.write(success_msg)
else:
errors = False
emailer.email_log(subject, scd_logs + scd_error_log)
if args.first_run:
            break
else:
time.sleep(300)
if __name__ == '__main__':
main()
| gpl-3.0 | -1,541,475,855,291,476,500 | 35.79646 | 110 | 0.468895 | false |
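# Standalone sketch (not part of the scheduler above) of SWG.get_next_month():
# it walks forward one day at a time until the month changes, which sidesteps
# 28/30/31-day edge cases.  datetime.utcnow() is replaced by a fixed date so the
# example is deterministic.
import datetime
def next_month_sketch(today):
    counter = 1
    new_date = today + datetime.timedelta(days=counter)
    while new_date.month == today.month:
        counter += 1
        new_date = today + datetime.timedelta(days=counter)
    return new_date
print(next_month_sketch(datetime.datetime(2019, 1, 31)))  # 2019-02-01 00:00:00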
csdl/makahiki | makahiki/apps/managers/player_mgr/migrations/0001_initial.py | 2 | 8253 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Profile'
db.create_table(u'player_mgr_profile', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='profile', unique=True, to=orm['auth.User'])),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=50)),
('is_ra', self.gf('django.db.models.fields.BooleanField')(default=False)),
('team', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['team_mgr.Team'], null=True, blank=True)),
('theme', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True)),
('contact_text', self.gf('localflavor.us.models.PhoneNumberField')(max_length=20, null=True, blank=True)),
('contact_carrier', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True)),
('setup_profile', self.gf('django.db.models.fields.BooleanField')(default=False)),
('setup_complete', self.gf('django.db.models.fields.BooleanField')(default=False)),
('completion_date', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('daily_visit_count', self.gf('django.db.models.fields.IntegerField')(default=0)),
('last_visit_date', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),
('referring_user', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='referred_profiles', null=True, to=orm['auth.User'])),
('referrer_awarded', self.gf('django.db.models.fields.BooleanField')(default=False)),
('properties', self.gf('django.db.models.fields.TextField')(blank=True)),
))
db.send_create_signal(u'player_mgr', ['Profile'])
def backwards(self, orm):
# Deleting model 'Profile'
db.delete_table(u'player_mgr_profile')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'player_mgr.profile': {
'Meta': {'object_name': 'Profile'},
'completion_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'contact_carrier': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'contact_text': ('localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'daily_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_ra': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_visit_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'properties': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'referrer_awarded': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'referring_user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'referred_profiles'", 'null': 'True', 'to': u"orm['auth.User']"}),
'setup_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'setup_profile': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['team_mgr.Team']", 'null': 'True', 'blank': 'True'}),
'theme': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'profile'", 'unique': 'True', 'to': u"orm['auth.User']"})
},
u'team_mgr.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'team_mgr.team': {
'Meta': {'ordering': "('group', 'name')", 'object_name': 'Team'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['team_mgr.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'size': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['player_mgr'] | mit | 1,394,231,194,268,028,200 | 74.724771 | 195 | 0.571186 | false |
reingart/gestionlibre | languages/sk-sk.py | 2 | 16860 | # coding: utf8
{
' Quotas: %(quotas)s x%(quota_amount).2f': ' Quotas: %(quotas)s x%(quota_amount).2f',
' Transaction number: %s': ' Transaction number: %s',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" je voliteľný výraz ako "field1=\'newvalue\'". Nemôžete upravovať alebo zmazať výsledky JOINu',
'%Y-%m-%d': '%d.%m.%Y',
'%Y-%m-%d %H:%M:%S': '%d.%m.%Y %H:%M:%S',
'%s rows deleted': '%s zmazaných záznamov',
'%s rows updated': '%s upravených záznamov',
'/absolute/folder/path': '/absolute/folder/path',
'Account': 'Account',
'Add article': 'Add article',
'Add check': 'Add check',
'Add item': 'Add item',
'Add payment method': 'Add payment method',
'Add tax': 'Add tax',
'Administrative interface': 'pre administrátorské rozhranie kliknite sem',
'Administrative panel': 'Administrative panel',
'All tables modified': 'All tables modified',
'Allocate': 'Allocate',
'Allocate orders': 'Allocate orders',
'Allocated': 'Allocated',
'Amount': 'Amount',
'Are you sure you want to delete this object?': 'Are you sure you want to delete this object?',
'Available databases and tables': 'Dostupné databázy a tabuľky',
'Bank': 'Bank',
'Bill': 'Bill',
'Bill checked': 'Bill checked',
'Blank for price list values': 'Blank for price list values',
'CSV parameters file: /absolute/path/file_name.csv': 'CSV parameters file: /absolute/path/file_name.csv',
'CSV table files path: /absolute/path/tables_folder': 'CSV table files path: /absolute/path/tables_folder',
'Calculate movements difference....': 'Calculate movements difference....',
'Calculated difference: %s': 'Calculated difference: %s',
'Cannot be empty': 'Nemôže byť prázdne',
'Cash/transfer': 'Cash/transfer',
'Change': 'Change',
'Change update taxes value to %s': 'Change update taxes value to %s',
'Check to delete': 'Označiť na zmazanie',
'Checks': 'Checks',
'Choose a concept': 'Choose a concept',
'Choose a document type': 'Choose a document type',
'Choose a price list': 'Choose a price list',
'Code': 'Code',
'Collect': 'Collect',
'Color': 'Color',
'Concept': 'Concept',
'Controller': 'Controller',
'Copyright': 'Copyright',
'Could not change': 'Could not change',
'Could not process the operation': 'Could not process the operation',
'Could not process the operation: it is not editable': 'Could not process the operation: it is not editable',
'Could not process the receipt': 'Could not process the receipt',
'Create fee': 'Create fee',
'Create/Edit orders': 'Create/Edit orders',
'Credit': 'Credit',
'Current account': 'Current account',
'Current account calculated amount': 'Current account calculated amount',
'Current account list/payments': 'Current account list/payments',
'Current account payment data': 'Current account payment data',
'Current account payment options': 'Current account payment options',
'Current account quotas': 'Current account quotas',
'Current account report': 'Current account report',
'Current account value: %s': 'Current account value: %s',
'Current accounts payments': 'Current accounts payments',
'Current accounts type: %(at)s': 'Current accounts type: %(at)s',
'Current request': 'Aktuálna požiadavka',
'Current response': 'Aktuálna odpoveď',
'Current session': 'Aktuálne sedenie',
'Customer': 'Customer',
'Customer Panel': 'Customer Panel',
'Customer control panel': 'Customer control panel',
'Customer control panel (requires registration and login)': 'Customer control panel (requires registration and login)',
'Customer current account': 'Customer current account',
'Customer panel': 'Customer panel',
'Customer/Supplier data': 'Customer/Supplier data',
'DB Model': 'DB Model',
'Database': 'Databáza',
'Date': 'Date',
'Dates: ': 'Dates: ',
'Debit': 'Debit',
'Debt limit: %s': 'Debt limit: %s',
'Delete value is %s': 'Delete value is %s',
'Delete:': 'Zmazať:',
'Description': 'Popis',
'Difference': 'Difference',
'Difference: %s': 'Difference: %s',
'Discounts/Surcharges': 'Discounts/Surcharges',
'Document': 'Document',
'Documentation': 'Dokumentácia',
'Done': 'Done',
'Due date': 'Due date',
'Edit': 'Upraviť',
'Edit Profile': 'Upraviť profil',
'Edit current record': 'Upraviť aktuálny záznam',
'Edit in movements': 'Edit in movements',
'Edit order number': 'Edit order number',
'Ending': 'Ending',
'Entries': 'Entries',
'Entries: %s': 'Entries: %s',
'Entry': 'Entry',
'Erasing record %s': 'Erasing record %s',
'Error: could not calculate the total debt.': 'Error: could not calculate the total debt.',
'Errors': 'Errors',
'Esta es la plantilla accounting/offset_account.html': 'Esta es la plantilla accounting/offset_account.html',
'Exits: %s': 'Exits: %s',
'Family': 'Family',
'Fee': 'Fee',
'Fees': 'Fees',
'Fees list': 'Fees list',
'Firm': 'Firm',
'First name': 'Krstné meno',
'For purchases: %(pt)s payment is recorded as concept id %s(c)': 'For purchases: %(pt)s payment is recorded as concept id %s(c)',
'Form accepted': 'Form accepted',
'Form data: %(fd)s': 'Form data: %(fd)s',
'Form data: %s': 'Form data: %s',
'GestionLibre': 'GestionLibre',
'GestionLibre %(version)s': 'GestionLibre %(version)s',
'GestionLibre %s': 'GestionLibre %s',
'Group ID': 'ID skupiny',
'Hello World': 'Ahoj svet',
'ID': 'ID',
'Import legacy tables': 'Import legacy tables',
'Import/Export': 'Import/Export',
'Increase/Decrease stock values': 'Increase/Decrease stock values',
'Increase/decrease stock values': 'Increase/decrease stock values',
'Index': 'Index',
'Initialize': 'Initialize',
'Insert movements element': 'Insert movements element',
'Insert order element': 'Insert order element',
'Installment': 'Installment',
'Installment created': 'Installment created',
'Installments': 'Installments',
'Insufficient source stock quantity': 'Insufficient source stock quantity',
'Insufficient stock value.': 'Insufficient stock value.',
'Internal State': 'Vnútorný stav',
'Invalid Query': 'Neplatná otázka',
'Invalid email': 'Neplatný email',
'Invalid password': 'Nesprávne heslo',
'Item added': 'Item added',
'Item value input: %s': 'Item value input: %s',
'Journal Entries': 'Journal Entries',
'Journal Entry': 'Journal Entry',
'Journal entries': 'Journal entries',
'Journal entry total amount': 'Journal entry total amount',
'Last name': 'Priezvisko',
'Layout': 'Layout',
'List of operations': 'List of operations',
'List order allocation operations': 'List order allocation operations',
'List order allocations': 'List order allocations',
'Logged in': 'Prihlásený',
'Logged out': 'Odhlásený',
'Lost Password': 'Stratené heslo?',
'Menu Model': 'Menu Model',
'Modify movements element': 'Modify movements element',
'Modify operation item': 'Modify operation item',
'Modify sales order element': 'Modify sales order element',
'Move stock items': 'Move stock items',
'Movement (offset): %(mo)s: %(a)s': 'Movement (offset): %(mo)s: %(a)s',
'Movements (Operations)': 'Movements (Operations)',
'Movements detail': 'Movements detail',
'Movements list': 'Movements list',
'Movements panel': 'Movements panel',
'Movements process. Operation: %s': 'Movements process. Operation: %s',
'Moving to new record': 'Moving to new record',
'Name': 'Meno',
'New Record': 'Nový záznam',
'New customer': 'New customer',
'New fee': 'New fee',
'New installment': 'New installment',
'New operation': 'New operation',
'New operation (movements form)': 'New operation (movements form)',
'New operation check': 'New operation check',
'New operation item': 'New operation item',
'New operation tax': 'New operation tax',
'New option': 'New option',
'New option created.': 'New option created.',
'New order allocation': 'New order allocation',
'New packing slip from this allocation': 'New packing slip from this allocation',
'New password': 'Nové heslo',
'New subcustomer': 'New subcustomer',
'No databases in this application': 'V tejto aplikácii nie sú databázy',
'No tax id selected': 'No tax id selected',
'None selected': 'None selected',
'Number': 'Number',
'Old password': 'Staré heslo',
'Online examples': 'pre online príklady kliknite sem',
'Operation': 'Operation',
'Operation %s is not editable': 'Operation %s is not editable',
'Operation details: %s': 'Operation details: %s',
'Operation discounts and surcharges': 'Operation discounts and surcharges',
'Operation header': 'Operation header',
'Operation id(s): %s': 'Operation id(s): %s',
'Operation number %s': 'Operation number %s',
'Operation processed': 'Operation processed',
'Operation processing failed: debt limit reached': 'Operation processing failed: debt limit reached',
'Operation processing result': 'Operation processing result',
'Operation successfully processed': 'Operation successfully processed',
'Operation: %(o)s. Amount: %(a)s. Value: %(v)s. Concept: %(c)s, Quantity: %(q)s': 'Operation: %(o)s. Amount: %(a)s. Value: %(v)s. Concept: %(c)s, Quantity: %(q)s',
'Operation: %(o)s. Amount: %(a)s. Value: %(v)s. Concept: %(c)s, Quantity: %(q)s, Movement: %(m)s': 'Operation: %(o)s. Amount: %(a)s. Value: %(v)s. Concept: %(c)s, Quantity: %(q)s, Movement: %(m)s',
'Operations list': 'Operations list',
'Option modified.': 'Option modified.',
'Options': 'Options',
'Order allocation': 'Order allocation',
'Order allocation %s': 'Order allocation %s',
'Order allocation list': 'Order allocation list',
'Order list': 'Order list',
'Order number': 'Order number',
'Ordered': 'Ordered',
'Origin': 'Pôvod',
'Packing slip': 'Packing slip',
'Password': 'Heslo',
'Pay': 'Pay',
'Period': 'Period',
'Please choose different warehouses': 'Please choose different warehouses',
"Please insert your firm's tax id": "Please insert your firm's tax id",
'Populate tables': 'Populate tables',
'Populate_with_legacy_db Insert Error: Table %(table)s, row %(n)s: %(e)s': 'Populate_with_legacy_db Insert Error: Table %(table)s, row %(n)s: %(e)s',
'Post registration form': 'Post registration form',
'Post-registration form': 'Post-registration form',
'Posted': 'Posted',
'Powered by': 'Powered by',
'Process operation': 'Process operation',
'Product': 'Product',
'Product billing': 'Product billing',
'Product code': 'Product code',
'Purchases': 'Purchases',
'Quantity': 'Quantity',
'Query:': 'Otázka:',
'Quota': 'Quota',
'Quotas': 'Quotas',
'RIA Create/Edit operations': 'RIA Create/Edit operations',
'RIA Product billing': 'RIA Product billing',
'RIA Receipt': 'RIA Receipt',
'RIA Stock': 'RIA Stock',
'RIA Stock main menu': 'RIA Stock main menu',
'Receipt number': 'Receipt number',
'Receipt processed': 'Receipt processed',
'Receipts list': 'Receipts list',
'Record ID': 'ID záznamu',
'Record updated': 'Record updated',
'Register': 'Zaregistrovať sa',
'Registration key': 'Registračný kľúč',
'Registration successful': 'Registration successful',
'Remember me (for 30 days)': 'Zapamätaj si ma (na 30 dní)',
'Reset Password key': 'Nastaviť registračný kľúč',
'Reset operation': 'Reset operation',
'Reset order': 'Reset order',
'Reset receipt': 'Reset receipt',
'Role': 'Rola',
'Rows in table': 'riadkov v tabuľke',
'Rows selected': 'označených riadkov',
'SCM': 'SCM',
'Sales': 'Sales',
'Sales contact': 'Sales contact',
'Select': 'Select',
'Select an operation type': 'Select an operation type',
'Select price list': 'Select price list',
'Select warehouse': 'Select warehouse',
'Selection action: %s': 'Selection action: %s',
'Session data: %s': 'Session data: %s',
'Set options': 'Set options',
'Setting offset concept to %s': 'Setting offset concept to %s',
'Setup': 'Setup',
'Starting': 'Starting',
'Stock': 'Stock',
'Stock item update': 'Stock item update',
'Stock list': 'Stock list',
'Stock query': 'Stock query',
'Stock updated': 'Stock updated',
'Stock value changed': 'Stock value changed',
'Stylesheet': 'Stylesheet',
'Subcustomer': 'Subcustomer',
'Subcustomer current account': 'Subcustomer current account',
'Submit': 'Odoslať',
'Supplier': 'Supplier',
'Sure you want to delete this object?': 'Ste si istí, že chcete zmazať tento objekt?',
'TAX ID': 'TAX ID',
'Table name': 'Názov tabuľky',
'Tables': 'Tables',
'Tax id': 'Tax id',
'Taxes are': 'Taxes are',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': '"query" je podmienka ako "db.table1.field1==\'value\'". Niečo ako "db.table1.field1==db.table2.field2" má za výsledok SQL JOIN.',
'The CSV data was stored at your web2py root folder': 'The CSV data was stored at your web2py root folder',
'The db load failed with these errors: ': 'The db load failed with these errors: ',
'The db records were uploaded correctly': 'The db records were uploaded correctly',
'The following operations were created': 'The following operations were created',
'The form has errors': 'The form has errors',
'The item specified was not found in the warehouse': 'The item specified was not found in the warehouse',
'The item will be removed without confirmation': 'The item will be removed without confirmation',
'The operation has current account movements: %s': 'The operation has current account movements: %s',
'The operation processing failed. Booking ok: %(rs)s. Stock ok: %(st)s': 'The operation processing failed. Booking ok: %(rs)s. Stock ok: %(st)s',
'The output of the file is a dictionary that was rendered by the view': 'Výstup zo súboru je slovník, ktorý bol zobrazený vo view',
'This is a copy of the scaffolding application': 'Toto je kópia skeletu aplikácie',
'This is the webapp index view of': 'This is the webapp index view of',
'Timestamp': 'Časová pečiatka',
'Total': 'Total',
'Total amount': 'Total amount',
'Total debt': 'Total debt',
'Trying with': 'Trying with',
'Update fee': 'Update fee',
'Update installment': 'Update installment',
'Update order allocation': 'Update order allocation',
'Update quota': 'Update quota',
'Update:': 'Upraviť:',
'Updating stock id: %(st)s as %(vl)s': 'Updating stock id: %(st)s as %(vl)s',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Použite (...)&(...) pre AND, (...)|(...) pre OR a ~(...) pre NOT na poskladanie komplexnejších otázok.',
'User %(id)s Logged-in': 'Používateľ %(id)s prihlásený',
'User %(id)s Logged-out': 'Používateľ %(id)s odhlásený',
'User %(id)s Password changed': 'Používateľ %(id)s zmenil heslo',
'User %(id)s Profile updated': 'Používateľ %(id)s upravil profil',
'User %(id)s Registered': 'Používateľ %(id)s sa zaregistroval',
'User ID': 'ID používateľa',
"Valid firm tax id's": "Valid firm tax id's",
'Value': 'Value',
'Values: %s': 'Values: %s',
'Verify Password': 'Zopakujte heslo',
'View': 'Zobraziť',
'WARNING: JOURNAL ENTRY IS UNBALANCED': 'WARNING: JOURNAL ENTRY IS UNBALANCED',
'Warehouse': 'Warehouse',
'Warning! Wrong document type.': 'Warning! Wrong document type.',
'Web interface': 'Web interface',
'Welcome to web2py': 'Vitajte vo web2py',
'Which called the function': 'Ktorý zavolal funkciu',
'You are successfully running web2py': 'Úspešne ste spustili web2py',
'You can modify this application and adapt it to your needs': 'Môžete upraviť túto aplikáciu a prispôsobiť ju svojim potrebám',
"You have not specified you firm's TAX ID. Please visit the": "You have not specified you firm's TAX ID. Please visit the",
'You visited the url': 'Navštívili ste URL',
'and try again': 'and try again',
'appadmin is disabled because insecure channel': 'appadmin je zakázaný bez zabezpečeného spojenia',
'cache': 'cache',
'customize me!': 'prispôsob ma!',
'data uploaded': 'údaje naplnené',
'database': 'databáza',
'database %s select': 'databáza %s výber',
'db': 'db',
'design': 'návrh',
'does not update stock': 'does not update stock',
'done!': 'hotovo!',
'export as csv file': 'exportovať do csv súboru',
'filename.ext': 'filename.ext',
'from table': 'from table',
'i.e. third party payment transaction number': 'i.e. third party payment transaction number',
'insert new': 'vložiť nový záznam ',
'insert new %s': 'vložiť nový záznam %s',
'invalid request': 'Neplatná požiadavka',
'located in the file': 'nachádzajúci sa v súbore ',
'login': 'prihlásiť',
'logout': 'odhlásiť',
'lost password?': 'stratené heslo?',
'new record inserted': 'nový záznam bol vložený',
'next 100 rows': 'ďalších 100 riadkov',
'not updated': 'not updated',
'or import from csv file': 'alebo naimportovať z csv súboru',
'password': 'heslo',
'previous 100 rows': 'predchádzajúcich 100 riadkov',
'record': 'záznam',
'record does not exist': 'záznam neexistuje',
'record id': 'id záznamu',
'register': 'registrovať',
'selected': 'označených',
'session.difference :%s': 'session.difference :%s',
'state': 'stav',
'table': 'tabuľka',
'unable to parse csv file': 'nedá sa načítať csv súbor',
'updated': 'updated',
'updates stock': 'updates stock',
'with old record': 'with old record',
}
| agpl-3.0 | -593,140,976,289,536,400 | 44.298913 | 271 | 0.702519 | false |
fxia22/ASM_xf | PythonD/site_python/mx/BeeBase/Cache.py | 4 | 6738 | """ Cache - Generic cache implementation
Copyright (c) 1998-2000, Marc-Andre Lemburg; mailto:[email protected]
See the documentation for further information on copyrights,
or contact the author. All Rights Reserved.
"""
import sys
import mx.Tools.NewBuiltins
# Weight table
MAXLOCALITY = 256
_weights = [1] * MAXLOCALITY
# maximal history size (must be somewhat larger than MAXLOCALITY)
HISTORYLIMIT = 2 * MAXLOCALITY
# Cut goal that has to be reached (self.max_cachesize * DENOM / NOM)
DENOM = 3
NOM = 4
# Print debugging info ?
_debug = 0
# Init globals
class _modinit:
import math
l = frange(0,1,MAXLOCALITY)
for i,factor in irange(l):
_weights[i] = int((math.exp(factor) - 1.0) * 8192)
if _debug:
print i,'. weight =',_weights[i]
del _modinit
### Classes
class NotCached:
""" A singleton that can be used in conjunction with the .get()
method of Cache. It behaves like None.
"""
def __nonzero__(self):
return 0
__len__ = __nonzero__
NotCached = NotCached()
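# Illustrative use of the NotCached singleton together with Cache.get();
# "cache", "key" and "load_value" are placeholder names, not part of this
# module:
#
#   value = cache.get(key)
#   if value is NotCached:
#       value = load_value(key)
#       cache.put(key, value)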
###
class Cache:
""" Tunable cache implementation
The following parameters influence the cache behaviour:
- max_cachesize: the cache will be cut smaller when this limit
is reached
        - max_elementsize: elements with size greater than this limit
will not be cached
        - locality: this many elements will be looked at in
the hit statistics to determine how important
a specific entry is
"""
data = None # Dict. of id:value cache entries
put_history = None # Reverse list of id puts; last is most
# recent access; contains id for each access
get_history = None # Reverse list of id gets; last is most
# recent access; contains id for each access
def __init__(self,max_cachesize=200,
max_elementsize=4096,
locality=50):
self.max_cachesize = max_cachesize
self.max_elementsize = max_elementsize
if locality > MAXLOCALITY:
raise ValueError,'locality must be <= %i' % MAXLOCALITY
self.locality = locality
self.cachesize = 0
self.cuts = 0
self.misses = 0
self.hits = 0
self.data = {}
self.put_history = []
self.get_history = []
def cut(self,
NOM=NOM,DENOM=DENOM):
""" Force a cut of the cache's contents.
This will make room for at least one new entry.
"""
if _debug:
print ' Cutting down cache size...'
cachesize = self.cachesize
# Cut the cache down to the entries in recent get history
newdata = {}
known_key = newdata.has_key
data = self.data
for id in self.get_history[-self.locality:]:
if known_key(id):
continue
try:
newdata[id] = data[id]
except KeyError:
pass
cachesize = len(newdata)
if _debug:
print ' Size after cut to recent history:',cachesize
# Check
if cachesize * NOM >= self.max_cachesize * DENOM:
# Calculate weights
d = {}
weights = _weights
d_get = d.get
for i,id in irange(self.get_history[-self.locality:]):
if not known_key(id):
continue
d[id] = d_get(id,0) + weights[i]
# Delete all entries left from median
ranking = sortedby(d.items(),1)
if _debug:
print ' Ranking:',ranking
for id,weight in ranking[:len(d)/2]:
if _debug:
print ' Deleting',id,'with weight =',weight
del newdata[id]
# Check
cachesize = len(newdata)
if cachesize * NOM >= self.max_cachesize * DENOM:
# Ok, so the smart way didn't work...
if _debug:
print ' Did not work, going the hard way...'
newdata.clear()
cachesize = 0
self.data = newdata
self.cachesize = cachesize
self.cuts = self.cuts + 1
def clear(self):
""" Clear the cache.
"""
self.cachesize = 0
self.data = {}
        self.put_history = []
        self.get_history = []
def get(self,id,default=NotCached,
HISTORYLIMIT=HISTORYLIMIT):
""" Get a value from the cache or return default if it is
not cached.
"""
item = self.data.get(id,None)
if item is None:
self.misses = self.misses + 1
return default
# Add "hit"
self.get_history.append(id)
if len(self.get_history) > HISTORYLIMIT:
del self.get_history[-self.locality:]
self.hits = self.hits + 1
return item
def put(self,id,value,sizeof=sizeof,
HISTORYLIMIT=HISTORYLIMIT):
""" Add a value to the cache or update an existing one.
"""
size = sizeof(value)
if size > self.max_elementsize:
return
# Adding a new entry: make sure there is room
if not self.data.has_key(id):
if _debug:
print ' Adding',id
self.cachesize = cachesize = self.cachesize + 1
if cachesize > self.max_cachesize:
self.cut()
self.data[id] = value
self.put_history.append(id)
if len(self.put_history) > HISTORYLIMIT:
del self.put_history[-self.locality:]
# Aliases
add = put
update = put
def delete(self,id):
""" Delete an entry from the cache.
It is not an error, if the entry is not currently in the cache.
"""
try:
del self.data[id]
except KeyError:
pass
# Aliases
remove = delete
###
if __name__ == '__main__':
c = Cache(10,100,locality=17)
i = 1
while 1:
print 'Round',i,c.cachesize,c.cuts,c.misses,c.data.keys(); i = i + 1
c.put(1,2)
c.get(1)
c.get(1)
c.put(5,2)
c.get(5)
c.put(2,2)
c.put(3,2)
c.get(2)
c.get(3)
c.put(2,3)
c.put(3,'x'*200)
c.get(2)
c.get(3)
c.get(2)
c.put(4,2)
c.get(4,2)
c.get(4,2)
c.get(4,2)
# Add noise
c.put(i,'x')
c.put(i * 2,'x')
c.put(i * 3,'x')
c.get(i)
| gpl-2.0 | 2,945,018,716,508,595,700 | 25.952 | 77 | 0.513951 | false |
jaimeMF/youtube-dl | youtube_dl/extractor/xhamster.py | 7 | 6404 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
float_or_none,
int_or_none,
unified_strdate,
)
class XHamsterIE(InfoExtractor):
_VALID_URL = r'(?P<proto>https?)://(?:.+?\.)?xhamster\.com/movies/(?P<id>[0-9]+)/(?P<seo>.+?)\.html(?:\?.*)?'
_TESTS = [
{
'url': 'http://xhamster.com/movies/1509445/femaleagent_shy_beauty_takes_the_bait.html',
'info_dict': {
'id': '1509445',
'ext': 'mp4',
'title': 'FemaleAgent Shy beauty takes the bait',
'upload_date': '20121014',
'uploader': 'Ruseful2011',
'duration': 893.52,
'age_limit': 18,
}
},
{
'url': 'http://xhamster.com/movies/2221348/britney_spears_sexy_booty.html?hd',
'info_dict': {
'id': '2221348',
'ext': 'mp4',
'title': 'Britney Spears Sexy Booty',
'upload_date': '20130914',
'uploader': 'jojo747400',
'duration': 200.48,
'age_limit': 18,
}
},
{
'url': 'https://xhamster.com/movies/2272726/amber_slayed_by_the_knight.html',
'only_matching': True,
},
]
def _real_extract(self, url):
def extract_video_url(webpage, name):
return self._search_regex(
[r'''file\s*:\s*(?P<q>["'])(?P<mp4>.+?)(?P=q)''',
r'''<a\s+href=(?P<q>["'])(?P<mp4>.+?)(?P=q)\s+class=["']mp4Thumb''',
r'''<video[^>]+file=(?P<q>["'])(?P<mp4>.+?)(?P=q)[^>]*>'''],
webpage, name, group='mp4')
def is_hd(webpage):
return '<div class=\'icon iconHD\'' in webpage
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
seo = mobj.group('seo')
proto = mobj.group('proto')
mrss_url = '%s://xhamster.com/movies/%s/%s.html' % (proto, video_id, seo)
webpage = self._download_webpage(mrss_url, video_id)
title = self._html_search_regex(
[r'<h1[^>]*>([^<]+)</h1>',
r'<meta[^>]+itemprop=".*?caption.*?"[^>]+content="(.+?)"',
r'<title[^>]*>(.+?)(?:,\s*[^,]*?\s*Porn\s*[^,]*?:\s*xHamster[^<]*| - xHamster\.com)</title>'],
webpage, 'title')
        # Only a few videos have a description
mobj = re.search(r'<span>Description: </span>([^<]+)', webpage)
description = mobj.group(1) if mobj else None
upload_date = unified_strdate(self._search_regex(
r'hint=["\'](\d{4}-\d{2}-\d{2}) \d{2}:\d{2}:\d{2} [A-Z]{3,4}',
webpage, 'upload date', fatal=False))
uploader = self._html_search_regex(
r'<span[^>]+itemprop=["\']author[^>]+><a[^>]+href=["\'].+?xhamster\.com/user/[^>]+>(?P<uploader>.+?)</a>',
webpage, 'uploader', default='anonymous')
thumbnail = self._search_regex(
[r'''thumb\s*:\s*(?P<q>["'])(?P<thumbnail>.+?)(?P=q)''',
r'''<video[^>]+poster=(?P<q>["'])(?P<thumbnail>.+?)(?P=q)[^>]*>'''],
webpage, 'thumbnail', fatal=False, group='thumbnail')
duration = float_or_none(self._search_regex(
r'(["\'])duration\1\s*:\s*(["\'])(?P<duration>.+?)\2',
webpage, 'duration', fatal=False, group='duration'))
view_count = int_or_none(self._search_regex(
r'content=["\']User(?:View|Play)s:(\d+)',
webpage, 'view count', fatal=False))
mobj = re.search(r"hint='(?P<likecount>\d+) Likes / (?P<dislikecount>\d+) Dislikes'", webpage)
(like_count, dislike_count) = (mobj.group('likecount'), mobj.group('dislikecount')) if mobj else (None, None)
mobj = re.search(r'</label>Comments \((?P<commentcount>\d+)\)</div>', webpage)
comment_count = mobj.group('commentcount') if mobj else 0
age_limit = self._rta_search(webpage)
hd = is_hd(webpage)
format_id = 'hd' if hd else 'sd'
video_url = extract_video_url(webpage, format_id)
formats = [{
'url': video_url,
'format_id': 'hd' if hd else 'sd',
'preference': 1,
}]
if not hd:
mrss_url = self._search_regex(r'<link rel="canonical" href="([^"]+)', webpage, 'mrss_url')
webpage = self._download_webpage(mrss_url + '?hd', video_id, note='Downloading HD webpage')
if is_hd(webpage):
video_url = extract_video_url(webpage, 'hd')
formats.append({
'url': video_url,
'format_id': 'hd',
'preference': 2,
})
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'description': description,
'upload_date': upload_date,
'uploader': uploader,
'thumbnail': thumbnail,
'duration': duration,
'view_count': view_count,
'like_count': int_or_none(like_count),
'dislike_count': int_or_none(dislike_count),
'comment_count': int_or_none(comment_count),
'age_limit': age_limit,
'formats': formats,
}
class XHamsterEmbedIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?xhamster\.com/xembed\.php\?video=(?P<id>\d+)'
_TEST = {
'url': 'http://xhamster.com/xembed.php?video=3328539',
'info_dict': {
'id': '3328539',
'ext': 'mp4',
'title': 'Pen Masturbation',
'upload_date': '20140728',
'uploader_id': 'anonymous',
'duration': 5,
'age_limit': 18,
}
}
@staticmethod
def _extract_urls(webpage):
return [url for _, url in re.findall(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?xhamster\.com/xembed\.php\?video=\d+)\1',
webpage)]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
video_url = self._search_regex(
r'href="(https?://xhamster\.com/movies/%s/[^"]+\.html[^"]*)"' % video_id,
webpage, 'xhamster url')
return self.url_result(video_url, 'XHamster')
| unlicense | -5,756,276,094,176,948,000 | 35.594286 | 118 | 0.483292 | false |
auduny/home-assistant | homeassistant/components/garadget/cover.py | 7 | 8788 | """Platform for the Garadget cover component."""
import logging
import requests
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.cover import CoverDevice, PLATFORM_SCHEMA
from homeassistant.helpers.event import track_utc_time_change
from homeassistant.const import (
CONF_DEVICE, CONF_USERNAME, CONF_PASSWORD, CONF_ACCESS_TOKEN, CONF_NAME,
STATE_CLOSED, STATE_OPEN, CONF_COVERS)
_LOGGER = logging.getLogger(__name__)
ATTR_AVAILABLE = 'available'
ATTR_SENSOR_STRENGTH = 'sensor_reflection_rate'
ATTR_SIGNAL_STRENGTH = 'wifi_signal_strength'
ATTR_TIME_IN_STATE = 'time_in_state'
DEFAULT_NAME = 'Garadget'
STATE_CLOSING = 'closing'
STATE_OFFLINE = 'offline'
STATE_OPENING = 'opening'
STATE_STOPPED = 'stopped'
STATES_MAP = {
'open': STATE_OPEN,
'opening': STATE_OPENING,
'closed': STATE_CLOSED,
'closing': STATE_CLOSING,
'stopped': STATE_STOPPED
}
COVER_SCHEMA = vol.Schema({
vol.Optional(CONF_ACCESS_TOKEN): cv.string,
vol.Optional(CONF_DEVICE): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_USERNAME): cv.string,
})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_COVERS): cv.schema_with_slug_keys(COVER_SCHEMA),
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Garadget covers."""
covers = []
devices = config.get(CONF_COVERS)
for device_id, device_config in devices.items():
args = {
'name': device_config.get(CONF_NAME),
'device_id': device_config.get(CONF_DEVICE, device_id),
'username': device_config.get(CONF_USERNAME),
'password': device_config.get(CONF_PASSWORD),
'access_token': device_config.get(CONF_ACCESS_TOKEN)
}
covers.append(GaradgetCover(hass, args))
add_entities(covers)
class GaradgetCover(CoverDevice):
"""Representation of a Garadget cover."""
def __init__(self, hass, args):
"""Initialize the cover."""
self.particle_url = 'https://api.particle.io'
self.hass = hass
self._name = args['name']
self.device_id = args['device_id']
self.access_token = args['access_token']
self.obtained_token = False
self._username = args['username']
self._password = args['password']
self._state = None
self.time_in_state = None
self.signal = None
self.sensor = None
self._unsub_listener_cover = None
self._available = True
if self.access_token is None:
self.access_token = self.get_token()
self._obtained_token = True
try:
if self._name is None:
doorconfig = self._get_variable('doorConfig')
if doorconfig['nme'] is not None:
self._name = doorconfig['nme']
self.update()
except requests.exceptions.ConnectionError as ex:
_LOGGER.error(
"Unable to connect to server: %(reason)s", dict(reason=ex))
self._state = STATE_OFFLINE
self._available = False
self._name = DEFAULT_NAME
except KeyError:
_LOGGER.warning("Garadget device %(device)s seems to be offline",
dict(device=self.device_id))
self._name = DEFAULT_NAME
self._state = STATE_OFFLINE
self._available = False
def __del__(self):
"""Try to remove token."""
if self._obtained_token is True:
if self.access_token is not None:
self.remove_token()
@property
def name(self):
"""Return the name of the cover."""
return self._name
@property
def should_poll(self):
"""No polling needed for a demo cover."""
return True
@property
def available(self):
"""Return True if entity is available."""
return self._available
@property
def device_state_attributes(self):
"""Return the device state attributes."""
data = {}
if self.signal is not None:
data[ATTR_SIGNAL_STRENGTH] = self.signal
if self.time_in_state is not None:
data[ATTR_TIME_IN_STATE] = self.time_in_state
if self.sensor is not None:
data[ATTR_SENSOR_STRENGTH] = self.sensor
if self.access_token is not None:
data[CONF_ACCESS_TOKEN] = self.access_token
return data
@property
def is_closed(self):
"""Return if the cover is closed."""
if self._state is None:
return None
return self._state == STATE_CLOSED
@property
def device_class(self):
"""Return the class of this device, from component DEVICE_CLASSES."""
return 'garage'
def get_token(self):
"""Get new token for usage during this session."""
args = {
'grant_type': 'password',
'username': self._username,
'password': self._password
}
url = '{}/oauth/token'.format(self.particle_url)
ret = requests.post(
url, auth=('particle', 'particle'), data=args, timeout=10)
try:
return ret.json()['access_token']
except KeyError:
_LOGGER.error("Unable to retrieve access token")
def remove_token(self):
"""Remove authorization token from API."""
url = '{}/v1/access_tokens/{}'.format(
self.particle_url, self.access_token)
ret = requests.delete(
url, auth=(self._username, self._password), timeout=10)
return ret.text
def _start_watcher(self, command):
"""Start watcher."""
_LOGGER.debug("Starting Watcher for command: %s ", command)
if self._unsub_listener_cover is None:
self._unsub_listener_cover = track_utc_time_change(
self.hass, self._check_state)
def _check_state(self, now):
"""Check the state of the service during an operation."""
self.schedule_update_ha_state(True)
def close_cover(self, **kwargs):
"""Close the cover."""
if self._state not in ['close', 'closing']:
ret = self._put_command('setState', 'close')
self._start_watcher('close')
return ret.get('return_value') == 1
def open_cover(self, **kwargs):
"""Open the cover."""
if self._state not in ['open', 'opening']:
ret = self._put_command('setState', 'open')
self._start_watcher('open')
return ret.get('return_value') == 1
def stop_cover(self, **kwargs):
"""Stop the door where it is."""
if self._state not in ['stopped']:
ret = self._put_command('setState', 'stop')
self._start_watcher('stop')
return ret['return_value'] == 1
def update(self):
"""Get updated status from API."""
try:
status = self._get_variable('doorStatus')
_LOGGER.debug("Current Status: %s", status['status'])
self._state = STATES_MAP.get(status['status'], None)
self.time_in_state = status['time']
self.signal = status['signal']
self.sensor = status['sensor']
self._available = True
except requests.exceptions.ConnectionError as ex:
_LOGGER.error(
"Unable to connect to server: %(reason)s", dict(reason=ex))
self._state = STATE_OFFLINE
except KeyError:
_LOGGER.warning("Garadget device %(device)s seems to be offline",
dict(device=self.device_id))
self._state = STATE_OFFLINE
if self._state not in [STATE_CLOSING, STATE_OPENING]:
if self._unsub_listener_cover is not None:
self._unsub_listener_cover()
self._unsub_listener_cover = None
def _get_variable(self, var):
"""Get latest status."""
url = '{}/v1/devices/{}/{}?access_token={}'.format(
self.particle_url, self.device_id, var, self.access_token)
ret = requests.get(url, timeout=10)
result = {}
for pairs in ret.json()['result'].split('|'):
key = pairs.split('=')
result[key[0]] = key[1]
return result
def _put_command(self, func, arg=None):
"""Send commands to API."""
params = {'access_token': self.access_token}
if arg:
params['command'] = arg
url = '{}/v1/devices/{}/{}'.format(
self.particle_url, self.device_id, func)
ret = requests.post(url, data=params, timeout=10)
return ret.json()
| apache-2.0 | 2,099,804,994,104,809,200 | 32.541985 | 77 | 0.580451 | false |
tctara/calamares | src/modules/testmodule.py | 6 | 3130 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# === This file is part of Calamares - <http://github.com/calamares> ===
#
# Copyright 2014, Teo Mrnjavac <[email protected]>
#
# Calamares is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Calamares is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Calamares. If not, see <http://www.gnu.org/licenses/>.
import argparse
import os
import sys
import yaml
try:
import libcalamares
except ImportError:
print("Failed to import libcalamares. Make sure then PYTHONPATH "
"environment variable includes the dir where libcalamares.so is "
"installed.")
print()
raise
class Job:
"""
:param working_path:
:param doc:
:param cfg_doc:
"""
def __init__(self, working_path, doc, cfg_doc):
self.module_name = doc["name"]
self.pretty_name = "Testing job " + doc["name"]
self.working_path = working_path
self.configuration = cfg_doc
def setprogress(self, progress):
"""
:param progress:
"""
print("Job set progress to {}%.".format(progress * 100))
def main():
"""
:return:
"""
parser = argparse.ArgumentParser()
parser.add_argument("moduledir",
help="Dir containing the Python module.")
parser.add_argument("globalstorage_yaml", nargs="?",
help="A yaml file to initialize GlobalStorage.")
parser.add_argument("configuration_yaml", nargs="?",
help="A yaml file to initialize the configuration dict.")
args = parser.parse_args()
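    # Example invocation (module directory and yaml file names are only
    # placeholders):
    #   python3 testmodule.py /path/to/module globalstorage.yaml module.conf.yaml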
print("Testing module in: " + args.moduledir)
confpath = os.path.join(args.moduledir, "module.desc")
with open(confpath) as f:
doc = yaml.load(f)
if doc["type"] != "job" or doc["interface"] != "python":
print("Only Python jobs can be tested.")
return 1
libcalamares.globalstorage = libcalamares.GlobalStorage()
# if a file for simulating globalStorage contents is provided, load it
if args.globalstorage_yaml:
with open(args.globalstorage_yaml) as f:
gs_doc = yaml.load(f)
for key, value in gs_doc.items():
libcalamares.globalstorage.insert(key, value)
cfg_doc = dict()
if args.configuration_yaml:
with open(args.configuration_yaml) as f:
cfg_doc = yaml.load(f)
libcalamares.job = Job(args.moduledir, doc, cfg_doc)
scriptpath = os.path.abspath(args.moduledir)
sys.path.append(scriptpath)
import main
print("Output from module:")
print(main.run())
return 0
if __name__ == "__main__":
sys.exit(main())
| gpl-3.0 | 2,136,100,140,854,637,600 | 27.198198 | 81 | 0.633866 | false |
tryfer/tryfer | examples/tracing-client-to-restkin.py | 1 | 2108 | # Copyright 2012 Rackspace Hosting, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import sys
from twisted.internet import reactor
from twisted.web.client import Agent
from twisted.python import log
from tryfer.tracers import push_tracer, EndAnnotationTracer, RESTkinHTTPTracer
from tryfer.http import TracingAgent
if __name__ == '__main__':
# Set up twisted's logging.
log.startLogging(sys.stdout)
# Set up our RESTkinHTTPTracer to send JSON to a RESTkin instance
# If you're not running RESTkin locally (defaults to 6956), change
# the URL to https://trace.k1k.me/v1.0/22/trace .... and add authentication
# with the python twisted keystone agent
# https://github.com/racker/python-twisted-keystone-agent
push_tracer(EndAnnotationTracer(
RESTkinHTTPTracer(Agent(reactor),
'http://localhost:6956/v1.0/22/trace',
max_idle_time=0)))
def _do():
# The Agent API is composable so we wrap an Agent in a TracingAgent
# and every call to TracingAgent.request will result in a client_send,
# client_receive, and http.uri annotations.
a = TracingAgent(Agent(reactor))
d = a.request('GET', 'http://google.com')
# Print the response code when receive the response.
d.addCallback(lambda r: print("Received {0} response.".format(r.code)))
# stop the reactor.
d.addBoth(lambda _: reactor.callLater(1, reactor.stop))
reactor.callLater(1, _do)
reactor.run()
| apache-2.0 | 4,985,480,875,968,085,000 | 36.642857 | 79 | 0.683112 | false |
jorsea/odoomrp-wip | mrp_project_link/models/mrp_production.py | 12 | 6131 | # -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from openerp import models, fields, api, _
class MrpProduction(models.Model):
_inherit = 'mrp.production'
project_id = fields.Many2one("project.project", string="Project")
analytic_account_id = fields.Many2one(
"account.analytic.account", string="Analytic Account")
@api.one
@api.onchange('project_id')
def onchange_project_id(self):
self.analytic_account_id = self.project_id.analytic_account_id
@api.multi
def action_in_production(self):
task_obj = self.env['project.task']
for record in self:
task_domain = [('mrp_production_id', '=', record.id),
('wk_order', '=', False)]
tasks = task_obj.search(task_domain)
if not tasks:
if record.product_id.default_code:
task_name = ("%s::[%s] %s") % (
record.name,
record.product_id.default_code,
record.product_id.name)
else:
task_name = ("%s::%s") % (
record.name,
record.product_id.name)
task_descr = _("""
Manufacturing Order: %s
Product to Produce: [%s]%s
Quantity to Produce: %s
Bill of Material: %s
Planned Date: %s
""") % (record.name, record.product_id.default_code,
record.product_id.name, record.product_qty,
record.bom_id.name, record.date_planned)
task_values = {
'mrp_production_id': record.id,
'user_id': record.user_id.id,
'reviewer_id': record.user_id.id,
'name': task_name,
'project_id': record.project_id.id,
'description': task_descr
}
if 'code' in task_values.keys():
task_values.pop('code')
task_obj.create(task_values)
return super(MrpProduction, self).action_in_production()
@api.multi
def action_confirm(self):
procurement_obj = self.env['procurement.order']
mto_record = self.env.ref('stock.route_warehouse0_mto')
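        # The loop below tags every move of an MTO-routed product with the
        # production's project and pushes that project onto the procurements
        # generated for those moves.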
result = super(MrpProduction, self).action_confirm()
for record in self:
if record.project_id:
main_project = record.project_id.id
for move in record.move_lines:
if mto_record in move.product_id.route_ids:
move.main_project_id = main_project
procurements = procurement_obj.search(
[('move_dest_id', '=', move.id)])
procurements.write({'main_project_id': main_project})
procurements.refresh()
procurements.set_main_project()
return result
class MrpProductionWorkcenterLine(models.Model):
_inherit = 'mrp.production.workcenter.line'
@api.multi
def action_start_working(self):
task_obj = self.env['project.task']
res = super(MrpProductionWorkcenterLine, self).action_start_working()
for record in self:
task_domain = [('mrp_production_id', '=', record.production_id.id),
('wk_order', '=', False)]
production_tasks = task_obj.search(task_domain)
task_descr = _("""
Manufacturing Order: %s
Work Order: %s
Workcenter: %s
Cycle: %s
Hour: %s
""") % (record.production_id.name, record.name,
record.workcenter_id.name, record.cycle, record.hour)
task_values = {
'mrp_production_id': record.production_id.id,
'wk_order': record.id,
'user_id': False,
'reviewer_id': record.production_id.user_id.id,
'description': task_descr,
'project_id': record.production_id.project_id.id,
'parent_ids': [(6, 0, production_tasks.ids)]
}
if record.routing_wc_line:
count = (
record.routing_wc_line.op_wc_lines.filtered(
lambda r: r.workcenter == record.workcenter_id
).op_number or record.workcenter_id.op_number)
op_list = record.workcenter_id.operators
for i in range(count):
if len(op_list) > i:
task_values['user_id'] = op_list[i].id
task_name = (_("%s:: WO%s-%s:: %s") %
(record.production_id.name,
str(record.sequence).zfill(3),
str(i).zfill(3), record.name))
task_values['name'] = task_name
if 'code' in task_values.keys():
task_values.pop('code')
task_obj.create(task_values)
return res
class MrpProductionProductLine(models.Model):
_inherit = 'mrp.production.product.line'
task_id = fields.Many2one('project.task', string="Task")
| agpl-3.0 | 2,138,653,206,343,383,600 | 41.874126 | 79 | 0.508074 | false |
bjarniegill/Cordova-Survey | csv_parser/.env/lib/python2.7/site-packages/setuptools/package_index.py | 54 | 39971 | """PyPI and direct package downloading"""
import sys
import os
import re
import shutil
import socket
import base64
import hashlib
import itertools
from functools import wraps
try:
from urllib.parse import splituser
except ImportError:
from urllib2 import splituser
from setuptools.extern import six
from setuptools.extern.six.moves import urllib, http_client, configparser, map
import setuptools
from pkg_resources import (
CHECKOUT_DIST, Distribution, BINARY_DIST, normalize_path, SOURCE_DIST,
Environment, find_distributions, safe_name, safe_version,
to_filename, Requirement, DEVELOP_DIST,
)
from setuptools import ssl_support
from distutils import log
from distutils.errors import DistutilsError
from fnmatch import translate
from setuptools.py26compat import strip_fragment
from setuptools.py27compat import get_all_headers
EGG_FRAGMENT = re.compile(r'^egg=([-A-Za-z0-9_.+!]+)$')
HREF = re.compile("""href\\s*=\\s*['"]?([^'"> ]+)""", re.I)
# this is here to fix emacs' cruddy broken syntax highlighting
PYPI_MD5 = re.compile(
'<a href="([^"#]+)">([^<]+)</a>\n\\s+\\(<a (?:title="MD5 hash"\n\\s+)'
'href="[^?]+\\?:action=show_md5&digest=([0-9a-f]{32})">md5</a>\\)'
)
URL_SCHEME = re.compile('([-+.a-z0-9]{2,}):', re.I).match
EXTENSIONS = ".tar.gz .tar.bz2 .tar .zip .tgz".split()
__all__ = [
'PackageIndex', 'distros_for_url', 'parse_bdist_wininst',
'interpret_distro_name',
]
_SOCKET_TIMEOUT = 15
_tmpl = "setuptools/{setuptools.__version__} Python-urllib/{py_major}"
user_agent = _tmpl.format(py_major=sys.version[:3], setuptools=setuptools)
def parse_requirement_arg(spec):
try:
return Requirement.parse(spec)
except ValueError:
raise DistutilsError(
"Not a URL, existing file, or requirement spec: %r" % (spec,)
)
def parse_bdist_wininst(name):
"""Return (base,pyversion) or (None,None) for possible .exe name"""
lower = name.lower()
base, py_ver, plat = None, None, None
if lower.endswith('.exe'):
if lower.endswith('.win32.exe'):
base = name[:-10]
plat = 'win32'
elif lower.startswith('.win32-py', -16):
py_ver = name[-7:-4]
base = name[:-16]
plat = 'win32'
elif lower.endswith('.win-amd64.exe'):
base = name[:-14]
plat = 'win-amd64'
elif lower.startswith('.win-amd64-py', -20):
py_ver = name[-7:-4]
base = name[:-20]
plat = 'win-amd64'
return base, py_ver, plat
def egg_info_for_url(url):
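    # For example (illustrative URL), 'https://host/dl/foo-1.0.tar.gz#md5=abc123'
    # yields ('foo-1.0.tar.gz', 'md5=abc123').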
parts = urllib.parse.urlparse(url)
scheme, server, path, parameters, query, fragment = parts
base = urllib.parse.unquote(path.split('/')[-1])
if server == 'sourceforge.net' and base == 'download': # XXX Yuck
base = urllib.parse.unquote(path.split('/')[-2])
if '#' in base:
base, fragment = base.split('#', 1)
return base, fragment
def distros_for_url(url, metadata=None):
"""Yield egg or source distribution objects that might be found at a URL"""
base, fragment = egg_info_for_url(url)
for dist in distros_for_location(url, base, metadata):
yield dist
if fragment:
match = EGG_FRAGMENT.match(fragment)
if match:
for dist in interpret_distro_name(
url, match.group(1), metadata, precedence=CHECKOUT_DIST
):
yield dist
def distros_for_location(location, basename, metadata=None):
"""Yield egg or source distribution objects based on basename"""
if basename.endswith('.egg.zip'):
basename = basename[:-4] # strip the .zip
if basename.endswith('.egg') and '-' in basename:
# only one, unambiguous interpretation
return [Distribution.from_location(location, basename, metadata)]
if basename.endswith('.exe'):
win_base, py_ver, platform = parse_bdist_wininst(basename)
if win_base is not None:
return interpret_distro_name(
location, win_base, metadata, py_ver, BINARY_DIST, platform
)
# Try source distro extensions (.zip, .tgz, etc.)
#
for ext in EXTENSIONS:
if basename.endswith(ext):
basename = basename[:-len(ext)]
return interpret_distro_name(location, basename, metadata)
return [] # no extension matched
def distros_for_filename(filename, metadata=None):
"""Yield possible egg or source distribution objects based on a filename"""
return distros_for_location(
normalize_path(filename), os.path.basename(filename), metadata
)
def interpret_distro_name(
location, basename, metadata, py_version=None, precedence=SOURCE_DIST,
platform=None
):
"""Generate alternative interpretations of a source distro name
Note: if `location` is a filesystem filename, you should call
``pkg_resources.normalize_path()`` on it before passing it to this
routine!
"""
# Generate alternative interpretations of a source distro name
# Because some packages are ambiguous as to name/versions split
# e.g. "adns-python-1.1.0", "egenix-mx-commercial", etc.
# So, we generate each possible interepretation (e.g. "adns, python-1.1.0"
# "adns-python, 1.1.0", and "adns-python-1.1.0, no version"). In practice,
# the spurious interpretations should be ignored, because in the event
# there's also an "adns" package, the spurious "python-1.1.0" version will
# compare lower than any numeric version number, and is therefore unlikely
# to match a request for it. It's still a potential problem, though, and
# in the long run PyPI and the distutils should go for "safe" names and
# versions in distribution archive names (sdist and bdist).
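    # For instance, 'adns-python-1.1.0' is offered as ('adns', 'python-1.1.0'),
    # ('adns-python', '1.1.0') and ('adns-python-1.1.0', '') so that the
    # correct split is always among the candidates.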
parts = basename.split('-')
if not py_version and any(re.match(r'py\d\.\d$', p) for p in parts[2:]):
# it is a bdist_dumb, not an sdist -- bail out
return
for p in range(1, len(parts) + 1):
yield Distribution(
location, metadata, '-'.join(parts[:p]), '-'.join(parts[p:]),
py_version=py_version, precedence=precedence,
platform=platform
)
# From Python 2.7 docs
def unique_everseen(iterable, key=None):
"List unique elements, preserving order. Remember all elements ever seen."
# unique_everseen('AAAABBBCCDAABBB') --> A B C D
# unique_everseen('ABBCcAD', str.lower) --> A B C D
seen = set()
seen_add = seen.add
if key is None:
for element in six.moves.filterfalse(seen.__contains__, iterable):
seen_add(element)
yield element
else:
for element in iterable:
k = key(element)
if k not in seen:
seen_add(k)
yield element
def unique_values(func):
"""
Wrap a function returning an iterable such that the resulting iterable
only ever yields unique items.
"""
@wraps(func)
def wrapper(*args, **kwargs):
return unique_everseen(func(*args, **kwargs))
return wrapper
REL = re.compile(r"""<([^>]*\srel\s*=\s*['"]?([^'">]+)[^>]*)>""", re.I)
# this line is here to fix emacs' cruddy broken syntax highlighting
@unique_values
def find_external_links(url, page):
"""Find rel="homepage" and rel="download" links in `page`, yielding URLs"""
for match in REL.finditer(page):
tag, rel = match.groups()
rels = set(map(str.strip, rel.lower().split(',')))
if 'homepage' in rels or 'download' in rels:
for match in HREF.finditer(tag):
yield urllib.parse.urljoin(url, htmldecode(match.group(1)))
for tag in ("<th>Home Page", "<th>Download URL"):
pos = page.find(tag)
if pos != -1:
match = HREF.search(page, pos)
if match:
yield urllib.parse.urljoin(url, htmldecode(match.group(1)))
class ContentChecker(object):
"""
A null content checker that defines the interface for checking content
"""
def feed(self, block):
"""
Feed a block of data to the hash.
"""
return
def is_valid(self):
"""
Check the hash. Return False if validation fails.
"""
return True
def report(self, reporter, template):
"""
Call reporter with information about the checker (hash name)
substituted into the template.
"""
return
class HashChecker(ContentChecker):
pattern = re.compile(
r'(?P<hash_name>sha1|sha224|sha384|sha256|sha512|md5)='
r'(?P<expected>[a-f0-9]+)'
)
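    # A download URL whose fragment looks like
    # 'md5=0123456789abcdef0123456789abcdef' (digest made up) matches this
    # pattern; from_url() below turns such a fragment into a checker for
    # that digest.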
def __init__(self, hash_name, expected):
self.hash_name = hash_name
self.hash = hashlib.new(hash_name)
self.expected = expected
@classmethod
def from_url(cls, url):
"Construct a (possibly null) ContentChecker from a URL"
fragment = urllib.parse.urlparse(url)[-1]
if not fragment:
return ContentChecker()
match = cls.pattern.search(fragment)
if not match:
return ContentChecker()
return cls(**match.groupdict())
def feed(self, block):
self.hash.update(block)
def is_valid(self):
return self.hash.hexdigest() == self.expected
def report(self, reporter, template):
msg = template % self.hash_name
return reporter(msg)
class PackageIndex(Environment):
"""A distribution index that scans web pages for download URLs"""
def __init__(
self, index_url="https://pypi.python.org/simple", hosts=('*',),
ca_bundle=None, verify_ssl=True, *args, **kw
):
Environment.__init__(self, *args, **kw)
self.index_url = index_url + "/" [:not index_url.endswith('/')]
self.scanned_urls = {}
self.fetched_urls = {}
self.package_pages = {}
self.allows = re.compile('|'.join(map(translate, hosts))).match
self.to_scan = []
use_ssl = (
verify_ssl
and ssl_support.is_available
and (ca_bundle or ssl_support.find_ca_bundle())
)
if use_ssl:
self.opener = ssl_support.opener_for(ca_bundle)
else:
self.opener = urllib.request.urlopen
def process_url(self, url, retrieve=False):
"""Evaluate a URL as a possible download, and maybe retrieve it"""
if url in self.scanned_urls and not retrieve:
return
self.scanned_urls[url] = True
if not URL_SCHEME(url):
self.process_filename(url)
return
else:
dists = list(distros_for_url(url))
if dists:
if not self.url_ok(url):
return
self.debug("Found link: %s", url)
if dists or not retrieve or url in self.fetched_urls:
list(map(self.add, dists))
return # don't need the actual page
if not self.url_ok(url):
self.fetched_urls[url] = True
return
self.info("Reading %s", url)
self.fetched_urls[url] = True # prevent multiple fetch attempts
tmpl = "Download error on %s: %%s -- Some packages may not be found!"
f = self.open_url(url, tmpl % url)
if f is None:
return
self.fetched_urls[f.url] = True
if 'html' not in f.headers.get('content-type', '').lower():
f.close() # not html, we can't process it
return
base = f.url # handle redirects
page = f.read()
if not isinstance(page, str): # We are in Python 3 and got bytes. We want str.
if isinstance(f, urllib.error.HTTPError):
# Errors have no charset, assume latin1:
charset = 'latin-1'
else:
charset = f.headers.get_param('charset') or 'latin-1'
page = page.decode(charset, "ignore")
f.close()
for match in HREF.finditer(page):
link = urllib.parse.urljoin(base, htmldecode(match.group(1)))
self.process_url(link)
if url.startswith(self.index_url) and getattr(f, 'code', None) != 404:
page = self.process_index(url, page)
def process_filename(self, fn, nested=False):
# process filenames or directories
if not os.path.exists(fn):
self.warn("Not found: %s", fn)
return
if os.path.isdir(fn) and not nested:
path = os.path.realpath(fn)
for item in os.listdir(path):
self.process_filename(os.path.join(path, item), True)
dists = distros_for_filename(fn)
if dists:
self.debug("Found: %s", fn)
list(map(self.add, dists))
def url_ok(self, url, fatal=False):
s = URL_SCHEME(url)
is_file = s and s.group(1).lower() == 'file'
if is_file or self.allows(urllib.parse.urlparse(url)[1]):
return True
msg = ("\nNote: Bypassing %s (disallowed host; see "
"http://bit.ly/1dg9ijs for details).\n")
if fatal:
raise DistutilsError(msg % url)
else:
self.warn(msg, url)
def scan_egg_links(self, search_path):
dirs = filter(os.path.isdir, search_path)
egg_links = (
(path, entry)
for path in dirs
for entry in os.listdir(path)
if entry.endswith('.egg-link')
)
list(itertools.starmap(self.scan_egg_link, egg_links))
def scan_egg_link(self, path, entry):
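        # An .egg-link file is expected to contain two non-empty lines: the
        # location of the development egg and the setup directory relative
        # to it, commonly something like (illustrative):
        #   /home/user/src/SomeProject
        #   .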
with open(os.path.join(path, entry)) as raw_lines:
# filter non-empty lines
lines = list(filter(None, map(str.strip, raw_lines)))
if len(lines) != 2:
# format is not recognized; punt
return
egg_path, setup_path = lines
for dist in find_distributions(os.path.join(path, egg_path)):
dist.location = os.path.join(path, *lines)
dist.precedence = SOURCE_DIST
self.add(dist)
def process_index(self, url, page):
"""Process the contents of a PyPI page"""
def scan(link):
# Process a URL to see if it's for a package page
if link.startswith(self.index_url):
parts = list(map(
urllib.parse.unquote, link[len(self.index_url):].split('/')
))
if len(parts) == 2 and '#' not in parts[1]:
# it's a package page, sanitize and index it
pkg = safe_name(parts[0])
ver = safe_version(parts[1])
self.package_pages.setdefault(pkg.lower(), {})[link] = True
return to_filename(pkg), to_filename(ver)
return None, None
# process an index page into the package-page index
for match in HREF.finditer(page):
try:
scan(urllib.parse.urljoin(url, htmldecode(match.group(1))))
except ValueError:
pass
pkg, ver = scan(url) # ensure this page is in the page index
if pkg:
# process individual package page
for new_url in find_external_links(url, page):
# Process the found URL
base, frag = egg_info_for_url(new_url)
if base.endswith('.py') and not frag:
if ver:
new_url += '#egg=%s-%s' % (pkg, ver)
else:
self.need_version_info(url)
self.scan_url(new_url)
return PYPI_MD5.sub(
lambda m: '<a href="%s#md5=%s">%s</a>' % m.group(1, 3, 2), page
)
else:
return "" # no sense double-scanning non-package pages
def need_version_info(self, url):
self.scan_all(
"Page at %s links to .py file(s) without version info; an index "
"scan is required.", url
)
def scan_all(self, msg=None, *args):
if self.index_url not in self.fetched_urls:
if msg:
self.warn(msg, *args)
self.info(
"Scanning index of all packages (this may take a while)"
)
self.scan_url(self.index_url)
def find_packages(self, requirement):
self.scan_url(self.index_url + requirement.unsafe_name + '/')
if not self.package_pages.get(requirement.key):
# Fall back to safe version of the name
self.scan_url(self.index_url + requirement.project_name + '/')
if not self.package_pages.get(requirement.key):
# We couldn't find the target package, so search the index page too
self.not_found_in_index(requirement)
for url in list(self.package_pages.get(requirement.key, ())):
# scan each page that might be related to the desired package
self.scan_url(url)
def obtain(self, requirement, installer=None):
self.prescan()
self.find_packages(requirement)
for dist in self[requirement.key]:
if dist in requirement:
return dist
self.debug("%s does not match %s", requirement, dist)
return super(PackageIndex, self).obtain(requirement, installer)
def check_hash(self, checker, filename, tfp):
"""
checker is a ContentChecker
"""
checker.report(self.debug,
"Validating %%s checksum for %s" % filename)
if not checker.is_valid():
tfp.close()
os.unlink(filename)
raise DistutilsError(
"%s validation failed for %s; "
"possible download problem?" % (
checker.hash.name, os.path.basename(filename))
)
def add_find_links(self, urls):
"""Add `urls` to the list that will be prescanned for searches"""
for url in urls:
if (
self.to_scan is None # if we have already "gone online"
or not URL_SCHEME(url) # or it's a local file/directory
or url.startswith('file:')
or list(distros_for_url(url)) # or a direct package link
):
# then go ahead and process it now
self.scan_url(url)
else:
# otherwise, defer retrieval till later
self.to_scan.append(url)
def prescan(self):
"""Scan urls scheduled for prescanning (e.g. --find-links)"""
if self.to_scan:
list(map(self.scan_url, self.to_scan))
self.to_scan = None # from now on, go ahead and process immediately
def not_found_in_index(self, requirement):
if self[requirement.key]: # we've seen at least one distro
meth, msg = self.info, "Couldn't retrieve index page for %r"
else: # no distros seen for this name, might be misspelled
meth, msg = (self.warn,
"Couldn't find index page for %r (maybe misspelled?)")
meth(msg, requirement.unsafe_name)
self.scan_all()
def download(self, spec, tmpdir):
"""Locate and/or download `spec` to `tmpdir`, returning a local path
`spec` may be a ``Requirement`` object, or a string containing a URL,
an existing local filename, or a project/version requirement spec
(i.e. the string form of a ``Requirement`` object). If it is the URL
of a .py file with an unambiguous ``#egg=name-version`` tag (i.e., one
that escapes ``-`` as ``_`` throughout), a trivial ``setup.py`` is
automatically created alongside the downloaded file.
If `spec` is a ``Requirement`` object or a string containing a
project/version requirement spec, this method returns the location of
a matching distribution (possibly after downloading it to `tmpdir`).
If `spec` is a locally existing file or directory name, it is simply
returned unchanged. If `spec` is a URL, it is downloaded to a subpath
of `tmpdir`, and the local filename is returned. Various errors may be
raised if a problem occurs during downloading.
"""
if not isinstance(spec, Requirement):
scheme = URL_SCHEME(spec)
if scheme:
# It's a url, download it to tmpdir
found = self._download_url(scheme.group(1), spec, tmpdir)
base, fragment = egg_info_for_url(spec)
if base.endswith('.py'):
found = self.gen_setup(found, fragment, tmpdir)
return found
elif os.path.exists(spec):
# Existing file or directory, just return it
return spec
else:
spec = parse_requirement_arg(spec)
return getattr(self.fetch_distribution(spec, tmpdir), 'location', None)
def fetch_distribution(
self, requirement, tmpdir, force_scan=False, source=False,
develop_ok=False, local_index=None
):
"""Obtain a distribution suitable for fulfilling `requirement`
`requirement` must be a ``pkg_resources.Requirement`` instance.
If necessary, or if the `force_scan` flag is set, the requirement is
searched for in the (online) package index as well as the locally
installed packages. If a distribution matching `requirement` is found,
the returned distribution's ``location`` is the value you would have
gotten from calling the ``download()`` method with the matching
distribution's URL or filename. If no matching distribution is found,
``None`` is returned.
If the `source` flag is set, only source distributions and source
checkout links will be considered. Unless the `develop_ok` flag is
set, development and system eggs (i.e., those using the ``.egg-info``
format) will be ignored.
"""
# process a Requirement
self.info("Searching for %s", requirement)
skipped = {}
dist = None
def find(req, env=None):
if env is None:
env = self
# Find a matching distribution; may be called more than once
for dist in env[req.key]:
if dist.precedence == DEVELOP_DIST and not develop_ok:
if dist not in skipped:
self.warn("Skipping development or system egg: %s", dist)
skipped[dist] = 1
continue
if dist in req and (dist.precedence <= SOURCE_DIST or not source):
dist.download_location = self.download(dist.location, tmpdir)
if os.path.exists(dist.download_location):
return dist
if force_scan:
self.prescan()
self.find_packages(requirement)
dist = find(requirement)
if not dist and local_index is not None:
dist = find(requirement, local_index)
if dist is None:
if self.to_scan is not None:
self.prescan()
dist = find(requirement)
if dist is None and not force_scan:
self.find_packages(requirement)
dist = find(requirement)
if dist is None:
self.warn(
"No local packages or working download links found for %s%s",
(source and "a source distribution of " or ""),
requirement,
)
else:
self.info("Best match: %s", dist)
return dist.clone(location=dist.download_location)
def fetch(self, requirement, tmpdir, force_scan=False, source=False):
"""Obtain a file suitable for fulfilling `requirement`
DEPRECATED; use the ``fetch_distribution()`` method now instead. For
backward compatibility, this routine is identical but returns the
``location`` of the downloaded distribution instead of a distribution
object.
"""
dist = self.fetch_distribution(requirement, tmpdir, force_scan, source)
if dist is not None:
return dist.location
return None
    def gen_setup(self, filename, fragment, tmpdir):
        """Write a trivial setup.py alongside a downloaded plain .py file.
        `fragment` must carry an unambiguous ``#egg=name-version`` tag;
        otherwise a ``DistutilsError`` is raised.  Returns the (possibly
        relocated) filename.
        """
match = EGG_FRAGMENT.match(fragment)
dists = match and [
d for d in
interpret_distro_name(filename, match.group(1), None) if d.version
] or []
if len(dists) == 1: # unambiguous ``#egg`` fragment
basename = os.path.basename(filename)
# Make sure the file has been downloaded to the temp dir.
if os.path.dirname(filename) != tmpdir:
dst = os.path.join(tmpdir, basename)
from setuptools.command.easy_install import samefile
if not samefile(filename, dst):
shutil.copy2(filename, dst)
filename = dst
with open(os.path.join(tmpdir, 'setup.py'), 'w') as file:
file.write(
"from setuptools import setup\n"
"setup(name=%r, version=%r, py_modules=[%r])\n"
% (
dists[0].project_name, dists[0].version,
os.path.splitext(basename)[0]
)
)
return filename
elif match:
raise DistutilsError(
"Can't unambiguously interpret project/version identifier %r; "
"any dashes in the name or version should be escaped using "
"underscores. %r" % (fragment, dists)
)
else:
raise DistutilsError(
"Can't process plain .py files without an '#egg=name-version'"
" suffix to enable automatic setup script generation."
)
dl_blocksize = 8192
def _download_to(self, url, filename):
self.info("Downloading %s", url)
# Download the file
fp, info = None, None
try:
checker = HashChecker.from_url(url)
fp = self.open_url(strip_fragment(url))
if isinstance(fp, urllib.error.HTTPError):
raise DistutilsError(
"Can't download %s: %s %s" % (url, fp.code, fp.msg)
)
headers = fp.info()
blocknum = 0
bs = self.dl_blocksize
size = -1
if "content-length" in headers:
# Some servers return multiple Content-Length headers :(
sizes = get_all_headers(headers, 'Content-Length')
size = max(map(int, sizes))
self.reporthook(url, filename, blocknum, bs, size)
with open(filename, 'wb') as tfp:
while True:
block = fp.read(bs)
if block:
checker.feed(block)
tfp.write(block)
blocknum += 1
self.reporthook(url, filename, blocknum, bs, size)
else:
break
self.check_hash(checker, filename, tfp)
return headers
finally:
if fp:
fp.close()
def reporthook(self, url, filename, blocknum, blksize, size):
pass # no-op
def open_url(self, url, warning=None):
if url.startswith('file:'):
return local_open(url)
try:
return open_with_auth(url, self.opener)
except (ValueError, http_client.InvalidURL) as v:
msg = ' '.join([str(arg) for arg in v.args])
if warning:
self.warn(warning, msg)
else:
raise DistutilsError('%s %s' % (url, msg))
except urllib.error.HTTPError as v:
return v
except urllib.error.URLError as v:
if warning:
self.warn(warning, v.reason)
else:
raise DistutilsError("Download error for %s: %s"
% (url, v.reason))
except http_client.BadStatusLine as v:
if warning:
self.warn(warning, v.line)
else:
raise DistutilsError(
'%s returned a bad status line. The server might be '
'down, %s' %
(url, v.line)
)
except (http_client.HTTPException, socket.error) as v:
if warning:
self.warn(warning, v)
else:
raise DistutilsError("Download error for %s: %s"
% (url, v))
def _download_url(self, scheme, url, tmpdir):
# Determine download filename
#
name, fragment = egg_info_for_url(url)
if name:
while '..' in name:
name = name.replace('..', '.').replace('\\', '_')
else:
name = "__downloaded__" # default if URL has no path contents
if name.endswith('.egg.zip'):
name = name[:-4] # strip the extra .zip before download
filename = os.path.join(tmpdir, name)
# Download the file
#
if scheme == 'svn' or scheme.startswith('svn+'):
return self._download_svn(url, filename)
elif scheme == 'git' or scheme.startswith('git+'):
return self._download_git(url, filename)
elif scheme.startswith('hg+'):
return self._download_hg(url, filename)
elif scheme == 'file':
return urllib.request.url2pathname(urllib.parse.urlparse(url)[2])
else:
self.url_ok(url, True) # raises error if not allowed
return self._attempt_download(url, filename)
def scan_url(self, url):
self.process_url(url, True)
def _attempt_download(self, url, filename):
headers = self._download_to(url, filename)
if 'html' in headers.get('content-type', '').lower():
return self._download_html(url, headers, filename)
else:
return filename
def _download_html(self, url, headers, filename):
file = open(filename)
for line in file:
if line.strip():
# Check for a subversion index page
if re.search(r'<title>([^- ]+ - )?Revision \d+:', line):
# it's a subversion index page:
file.close()
os.unlink(filename)
return self._download_svn(url, filename)
break # not an index page
file.close()
os.unlink(filename)
raise DistutilsError("Unexpected HTML page found at " + url)
def _download_svn(self, url, filename):
url = url.split('#', 1)[0] # remove any fragment for svn's sake
creds = ''
if url.lower().startswith('svn:') and '@' in url:
scheme, netloc, path, p, q, f = urllib.parse.urlparse(url)
if not netloc and path.startswith('//') and '/' in path[2:]:
netloc, path = path[2:].split('/', 1)
auth, host = splituser(netloc)
if auth:
if ':' in auth:
user, pw = auth.split(':', 1)
creds = " --username=%s --password=%s" % (user, pw)
else:
creds = " --username=" + auth
netloc = host
parts = scheme, netloc, url, p, q, f
url = urllib.parse.urlunparse(parts)
self.info("Doing subversion checkout from %s to %s", url, filename)
os.system("svn checkout%s -q %s %s" % (creds, url, filename))
return filename
@staticmethod
def _vcs_split_rev_from_url(url, pop_prefix=False):
scheme, netloc, path, query, frag = urllib.parse.urlsplit(url)
scheme = scheme.split('+', 1)[-1]
# Some fragment identification fails
path = path.split('#', 1)[0]
rev = None
if '@' in path:
path, rev = path.rsplit('@', 1)
# Also, discard fragment
url = urllib.parse.urlunsplit((scheme, netloc, path, query, ''))
return url, rev
def _download_git(self, url, filename):
filename = filename.split('#', 1)[0]
url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True)
self.info("Doing git clone from %s to %s", url, filename)
os.system("git clone --quiet %s %s" % (url, filename))
if rev is not None:
self.info("Checking out %s", rev)
os.system("(cd %s && git checkout --quiet %s)" % (
filename,
rev,
))
return filename
def _download_hg(self, url, filename):
filename = filename.split('#', 1)[0]
url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True)
self.info("Doing hg clone from %s to %s", url, filename)
os.system("hg clone --quiet %s %s" % (url, filename))
if rev is not None:
self.info("Updating to %s", rev)
os.system("(cd %s && hg up -C -r %s >&-)" % (
filename,
rev,
))
return filename
def debug(self, msg, *args):
log.debug(msg, *args)
def info(self, msg, *args):
log.info(msg, *args)
def warn(self, msg, *args):
log.warn(msg, *args)
# This pattern matches a character entity reference (a decimal numeric
# references, a hexadecimal numeric reference, or a named reference).
entity_sub = re.compile(r'&(#(\d+|x[\da-fA-F]+)|[\w.:-]+);?').sub
def uchr(c):
if not isinstance(c, int):
return c
if c > 255:
return six.unichr(c)
return chr(c)
def decode_entity(match):
what = match.group(1)
if what.startswith('#x'):
what = int(what[2:], 16)
elif what.startswith('#'):
what = int(what[1:])
else:
what = six.moves.html_entities.name2codepoint.get(what, match.group(0))
return uchr(what)
def htmldecode(text):
"""Decode HTML entities in the given text."""
return entity_sub(decode_entity, text)
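# Illustrative sketch (not part of the upstream module): a quick check of
# htmldecode() on named, decimal and hexadecimal entity references.  The
# expected value in the comment is what the entity_sub/decode_entity pair
# above should produce for this input.
def _example_htmldecode():
    sample = 'a &amp; b &#60; c &#x3e; d'
    return htmldecode(sample)  # expected: 'a & b < c > d'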
def socket_timeout(timeout=15):
def _socket_timeout(func):
def _socket_timeout(*args, **kwargs):
old_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(timeout)
try:
return func(*args, **kwargs)
finally:
socket.setdefaulttimeout(old_timeout)
return _socket_timeout
return _socket_timeout
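# Illustrative sketch (not part of the upstream module): socket_timeout(t)
# builds a decorator that installs a default socket timeout of t seconds for
# the duration of the wrapped call, the same way open_with_auth is wrapped at
# the bottom of this module.  The URL argument here is a placeholder.
@socket_timeout(5)
def _example_fetch(url):
    return urllib.request.urlopen(url).read()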
def _encode_auth(auth):
"""
A function compatible with Python 2.3-3.3 that will encode
auth from a URL suitable for an HTTP header.
>>> str(_encode_auth('username%3Apassword'))
'dXNlcm5hbWU6cGFzc3dvcmQ='
Long auth strings should not cause a newline to be inserted.
>>> long_auth = 'username:' + 'password'*10
>>> chr(10) in str(_encode_auth(long_auth))
False
"""
auth_s = urllib.parse.unquote(auth)
# convert to bytes
auth_bytes = auth_s.encode()
# use the legacy interface for Python 2.3 support
encoded_bytes = base64.encodestring(auth_bytes)
# convert back to a string
encoded = encoded_bytes.decode()
# strip the trailing carriage return
return encoded.replace('\n', '')
class Credential(object):
"""
A username/password pair. Use like a namedtuple.
"""
def __init__(self, username, password):
self.username = username
self.password = password
def __iter__(self):
yield self.username
yield self.password
def __str__(self):
return '%(username)s:%(password)s' % vars(self)
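# Illustrative sketch (not part of the upstream module): Credential unpacks
# like a 2-tuple and renders as "user:password", which is the form fed to
# _encode_auth() by open_with_auth().  The values are placeholders.
def _example_credential():
    cred = Credential('alice', 's3cret')
    username, password = cred  # iterable, namedtuple-style
    return str(cred)           # -> 'alice:s3cret'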
class PyPIConfig(configparser.RawConfigParser):
def __init__(self):
"""
Load from ~/.pypirc
"""
defaults = dict.fromkeys(['username', 'password', 'repository'], '')
configparser.RawConfigParser.__init__(self, defaults)
rc = os.path.join(os.path.expanduser('~'), '.pypirc')
if os.path.exists(rc):
self.read(rc)
@property
def creds_by_repository(self):
sections_with_repositories = [
section for section in self.sections()
if self.get(section, 'repository').strip()
]
return dict(map(self._get_repo_cred, sections_with_repositories))
def _get_repo_cred(self, section):
repo = self.get(section, 'repository').strip()
return repo, Credential(
self.get(section, 'username').strip(),
self.get(section, 'password').strip(),
)
def find_credential(self, url):
"""
If the URL indicated appears to be a repository defined in this
config, return the credential for that repository.
"""
for repository, cred in self.creds_by_repository.items():
if url.startswith(repository):
return cred
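# Illustrative sketch (not part of the upstream module): look up the
# ~/.pypirc credential whose configured "repository" URL is a prefix of the
# given URL; None is returned when no section matches.  The URL is a
# placeholder.
def _example_find_credential(url='https://pypi.example.org/simple/'):
    cred = PyPIConfig().find_credential(url)
    return str(cred) if cred is not None else None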
def open_with_auth(url, opener=urllib.request.urlopen):
"""Open a urllib2 request, handling HTTP authentication"""
scheme, netloc, path, params, query, frag = urllib.parse.urlparse(url)
# Double scheme does not raise on Mac OS X as revealed by a
# failing test. We would expect "nonnumeric port". Refs #20.
if netloc.endswith(':'):
raise http_client.InvalidURL("nonnumeric port: ''")
if scheme in ('http', 'https'):
auth, host = splituser(netloc)
else:
auth = None
if not auth:
cred = PyPIConfig().find_credential(url)
if cred:
auth = str(cred)
info = cred.username, url
log.info('Authenticating as %s for %s (from .pypirc)', *info)
if auth:
auth = "Basic " + _encode_auth(auth)
parts = scheme, host, path, params, query, frag
new_url = urllib.parse.urlunparse(parts)
request = urllib.request.Request(new_url)
request.add_header("Authorization", auth)
else:
request = urllib.request.Request(url)
request.add_header('User-Agent', user_agent)
fp = opener(request)
if auth:
# Put authentication info back into request URL if same host,
# so that links found on the page will work
s2, h2, path2, param2, query2, frag2 = urllib.parse.urlparse(fp.url)
if s2 == scheme and h2 == host:
parts = s2, netloc, path2, param2, query2, frag2
fp.url = urllib.parse.urlunparse(parts)
return fp
# adding a timeout to avoid freezing package_index
open_with_auth = socket_timeout(_SOCKET_TIMEOUT)(open_with_auth)
def fix_sf_url(url):
return url # backward compatibility
def local_open(url):
"""Read a local path, with special support for directories"""
scheme, server, path, param, query, frag = urllib.parse.urlparse(url)
filename = urllib.request.url2pathname(path)
if os.path.isfile(filename):
return urllib.request.urlopen(url)
elif path.endswith('/') and os.path.isdir(filename):
files = []
for f in os.listdir(filename):
filepath = os.path.join(filename, f)
if f == 'index.html':
with open(filepath, 'r') as fp:
body = fp.read()
break
elif os.path.isdir(filepath):
f += '/'
files.append('<a href="{name}">{name}</a>'.format(name=f))
else:
tmpl = ("<html><head><title>{url}</title>"
"</head><body>{files}</body></html>")
body = tmpl.format(url=url, files='\n'.join(files))
status, message = 200, "OK"
else:
status, message, body = 404, "Path not found", "Not found"
headers = {'content-type': 'text/html'}
body_stream = six.StringIO(body)
return urllib.error.HTTPError(url, status, message, headers, body_stream)
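# Illustrative sketch (not part of the upstream module): minimal use of
# PackageIndex.fetch_distribution()/download() as described in their
# docstrings above.  The project name and tmpdir are placeholders, and a
# real call performs network access against the configured index.
def _example_fetch_requirement(tmpdir='/tmp'):
    index = PackageIndex()                    # defaults to the PyPI simple index
    req = Requirement.parse('example-pkg')    # hypothetical project name
    dist = index.fetch_distribution(req, tmpdir, force_scan=True)
    return dist.location if dist is not None else None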
| mit | 7,454,705,661,213,929,000 | 34.84843 | 87 | 0.564684 | false |
devananda/ironic | ironic/tests/unit/common/test_rpc.py | 2 | 4079 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
import oslo_messaging as messaging
from ironic.common import context as ironic_context
from ironic.common import rpc
from ironic.tests import base
CONF = cfg.CONF
class TestUtils(base.TestCase):
@mock.patch.object(messaging, 'Notifier', autospec=True)
@mock.patch.object(messaging, 'JsonPayloadSerializer', autospec=True)
@mock.patch.object(messaging, 'get_notification_transport', autospec=True)
@mock.patch.object(messaging, 'get_transport', autospec=True)
def test_init_globals(self, mock_get_transport, mock_get_notification,
mock_serializer, mock_notifier):
rpc.TRANSPORT = None
rpc.NOTIFICATION_TRANSPORT = None
rpc.NOTIFIER = None
rpc.init(CONF)
self.assertEqual(mock_get_transport.return_value, rpc.TRANSPORT)
self.assertEqual(mock_get_notification.return_value,
rpc.NOTIFICATION_TRANSPORT)
self.assertTrue(mock_serializer.called)
self.assertEqual(mock_notifier.return_value, rpc.NOTIFIER)
class TestRequestContextSerializer(base.TestCase):
def setUp(self):
super(TestRequestContextSerializer, self).setUp()
self.mock_serializer = mock.MagicMock()
self.serializer = rpc.RequestContextSerializer(self.mock_serializer)
self.context = ironic_context.RequestContext()
self.entity = {'foo': 'bar'}
def test_serialize_entity(self):
self.serializer.serialize_entity(self.context, self.entity)
self.mock_serializer.serialize_entity.assert_called_with(
self.context, self.entity)
def test_serialize_entity_empty_base(self):
# NOTE(viktors): Return False for check `if self.serializer._base:`
bool_args = {'__bool__': lambda *args: False,
'__nonzero__': lambda *args: False}
self.mock_serializer.configure_mock(**bool_args)
entity = self.serializer.serialize_entity(self.context, self.entity)
self.assertFalse(self.mock_serializer.serialize_entity.called)
# If self.serializer._base is empty, return entity directly
self.assertEqual(self.entity, entity)
def test_deserialize_entity(self):
self.serializer.deserialize_entity(self.context, self.entity)
self.mock_serializer.deserialize_entity.assert_called_with(
self.context, self.entity)
def test_deserialize_entity_empty_base(self):
# NOTE(viktors): Return False for check `if self.serializer._base:`
bool_args = {'__bool__': lambda *args: False,
'__nonzero__': lambda *args: False}
self.mock_serializer.configure_mock(**bool_args)
entity = self.serializer.deserialize_entity(self.context, self.entity)
self.assertFalse(self.mock_serializer.serialize_entity.called)
self.assertEqual(self.entity, entity)
def test_serialize_context(self):
serialize_values = self.serializer.serialize_context(self.context)
self.assertEqual(self.context.to_dict(), serialize_values)
def test_deserialize_context(self):
self.context.user = 'fake-user'
self.context.tenant = 'fake-tenant'
serialize_values = self.context.to_dict()
new_context = self.serializer.deserialize_context(serialize_values)
# Ironic RequestContext from_dict will pop 'user' and 'tenant' and
# initialize to None.
self.assertIsNone(new_context.user)
self.assertIsNone(new_context.tenant)
| apache-2.0 | 7,079,833,234,765,980,000 | 41.051546 | 78 | 0.692572 | false |
Alshak/clowdflows | workflows/ilp/treeliker/treeliker.py | 1 | 3010 | import shutil
import tempfile
import os.path
from subprocess import Popen, PIPE
import logging
logger = logging.getLogger(__name__)
class TreeLiker:
def __init__(self, dataset, template, test_dataset=None, settings={}):
self.basename = 'default'
self.dataset = dataset
self.test_dataset = test_dataset
self.template = template
self.settings = settings
def _copy_data(self):
self.tmpdir = tempfile.mkdtemp()
with open('%s/%s.txt' % (self.tmpdir, self.basename), 'w') as f:
f.write(self.dataset)
if self.test_dataset:
with open('%s/%s_test.txt' % (self.tmpdir, self.basename), 'w') as f:
f.write(self.test_dataset)
# Copy binaries to tmp folder
cdir = os.path.dirname(os.path.abspath(__file__))
shutil.copytree('%s/bin/' % cdir, '%s/bin/' % self.tmpdir)
def run(self, cleanup=True):
'''
Runs TreeLiker with the given settings.
'''
self._copy_data()
self._batch()
p = Popen(['java', '-Xmx3G', '-cp', 'bin/TreeLiker.jar',
'ida.ilp.treeLiker.TreeLikerMain', '-batch', self.batch],
cwd=self.tmpdir)
stdout_str, stderr_str = p.communicate()
if not self.test_dataset:
arff = open('%s/%s.arff' % (self.tmpdir, self.basename)).read()
arff_test = None
else:
arff = open('%s/conversion/train.arff' % self.tmpdir).read()
arff_test = open('%s/conversion/test.arff' % self.tmpdir).read()
if cleanup:
self._cleanup()
return (arff, arff_test)
def _batch(self):
'''
Creates the batch file to run the experiment.
'''
self.batch = '%s/%s.treeliker' % (self.tmpdir, self.basename)
commands = []
if not self.test_dataset:
commands.append('set(output_type, single)')
commands.append("set(examples, '%s.txt')" % self.basename)
else:
commands.append('set(output_type, train_test)')
commands.append("set(train_set, '%s.txt')" % self.basename)
commands.append("set(test_set, '%s_test.txt')" % self.basename)
commands.append('set(template, %s)' % self.template)
if not self.test_dataset:
commands.append('set(output, %s.arff)' % self.basename)
else:
commands.append('set(output, conversion)')
# Optional settings
for key, val in self.settings.items():
if val not in [None, '']:
commands.append('set(%s, %s)' % (key, str(val)))
commands.append('work(yes)')
with open(self.batch, 'w') as f:
f.write('\n'.join(commands))
def _cleanup(self):
"""
Cleans up all the temporary files.
"""
try:
shutil.rmtree(self.tmpdir)
except:
            logger.info('Problem removing temporary files. '
                        'The files are probably in use.')
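# Illustrative sketch (not part of the upstream module): typical use of the
# TreeLiker wrapper above -- construct it with an in-memory dataset and a
# feature template, run it, and collect the generated ARFF text.  The
# dataset and template strings are placeholders, not valid TreeLiker input.
def _example_treeliker_run():
    dataset = '...'    # relational examples in TreeLiker's text format
    template = '...'   # TreeLiker feature template
    tl = TreeLiker(dataset, template, settings={})
    arff_train, arff_test = tl.run(cleanup=True)  # arff_test is None without a test set
    return arff_train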
| mit | -6,274,616,354,173,264,000 | 31.717391 | 81 | 0.542193 | false |
toobaz/pandas | pandas/tests/util/test_assert_categorical_equal.py | 2 | 2785 | import pytest
from pandas import Categorical
from pandas.util.testing import assert_categorical_equal
@pytest.mark.parametrize(
"c",
[Categorical([1, 2, 3, 4]), Categorical([1, 2, 3, 4], categories=[1, 2, 3, 4, 5])],
)
def test_categorical_equal(c):
assert_categorical_equal(c, c)
@pytest.mark.parametrize("check_category_order", [True, False])
def test_categorical_equal_order_mismatch(check_category_order):
c1 = Categorical([1, 2, 3, 4], categories=[1, 2, 3, 4])
c2 = Categorical([1, 2, 3, 4], categories=[4, 3, 2, 1])
kwargs = dict(check_category_order=check_category_order)
if check_category_order:
msg = """Categorical\\.categories are different
Categorical\\.categories values are different \\(100\\.0 %\\)
\\[left\\]: Int64Index\\(\\[1, 2, 3, 4\\], dtype='int64'\\)
\\[right\\]: Int64Index\\(\\[4, 3, 2, 1\\], dtype='int64'\\)"""
with pytest.raises(AssertionError, match=msg):
assert_categorical_equal(c1, c2, **kwargs)
else:
assert_categorical_equal(c1, c2, **kwargs)
def test_categorical_equal_categories_mismatch():
msg = """Categorical\\.categories are different
Categorical\\.categories values are different \\(25\\.0 %\\)
\\[left\\]: Int64Index\\(\\[1, 2, 3, 4\\], dtype='int64'\\)
\\[right\\]: Int64Index\\(\\[1, 2, 3, 5\\], dtype='int64'\\)"""
c1 = Categorical([1, 2, 3, 4])
c2 = Categorical([1, 2, 3, 5])
with pytest.raises(AssertionError, match=msg):
assert_categorical_equal(c1, c2)
def test_categorical_equal_codes_mismatch():
categories = [1, 2, 3, 4]
msg = """Categorical\\.codes are different
Categorical\\.codes values are different \\(50\\.0 %\\)
\\[left\\]: \\[0, 1, 3, 2\\]
\\[right\\]: \\[0, 1, 2, 3\\]"""
c1 = Categorical([1, 2, 4, 3], categories=categories)
c2 = Categorical([1, 2, 3, 4], categories=categories)
with pytest.raises(AssertionError, match=msg):
assert_categorical_equal(c1, c2)
def test_categorical_equal_ordered_mismatch():
data = [1, 2, 3, 4]
msg = """Categorical are different
Attribute "ordered" are different
\\[left\\]: False
\\[right\\]: True"""
c1 = Categorical(data, ordered=False)
c2 = Categorical(data, ordered=True)
with pytest.raises(AssertionError, match=msg):
assert_categorical_equal(c1, c2)
@pytest.mark.parametrize("obj", ["index", "foo", "pandas"])
def test_categorical_equal_object_override(obj):
data = [1, 2, 3, 4]
msg = """{obj} are different
Attribute "ordered" are different
\\[left\\]: False
\\[right\\]: True""".format(
obj=obj
)
c1 = Categorical(data, ordered=False)
c2 = Categorical(data, ordered=True)
with pytest.raises(AssertionError, match=msg):
assert_categorical_equal(c1, c2, obj=obj)
| bsd-3-clause | 994,439,187,746,322,300 | 29.271739 | 87 | 0.628366 | false |
jodvova/stocks | main.py | 1 | 3100 | #!/bin/env python
import sys
import optparse
from sql import SQL, sql_sink
from common import *
class driver:
def __init__(self, symbols, options):
self.symbols_ = symbols
self.options_ = options
self.request_ = request(diag=False)
def fetch_live(self):
assert len(self.symbols_) > 0
self.data_ = []
if self.request_.prepare_live(self.symbols_):
r = self.request_.send()
if r is not None:
if isinstance(r, list):
for e in r: self.data_.append(live_data(e))
else:
self.data_.append(live_data(r))
else:
print("No live prices received for: %s" % str(self.symbols_))
def fetch_hist(self):
assert len(self.symbols_) > 0
self.data_ = {}
if self.request_.prepare_hist(self.symbols_, self.options_.start, self.options_.end):
r = self.request_.send()
if r is not None:
for e in r:
k = e["Symbol"]
if k not in self.data_:
self.data_[k] = hist_price_list(k)
self.data_[e["Symbol"]].add(hist_price_data(e))
else:
print("No historical prices received for: %s\n" % str(self.symbols_))
def data(self):
return self.data_
if __name__ == "__main__":
usage = "usage: %prog [options] <symbol1> [<symbol2>..<symbolN>]"
opt = optparse.OptionParser(usage=usage, version="%prog 0.1")
opt.add_option("-s", dest="start", default=None, help="start date")
opt.add_option("-e", dest="end", default=None, help="end date")
opt.add_option("-i", dest="ignore_zero_vol", action="store_true", default=False, help="ignore 0 total volume")
opt.add_option("--sql", dest="sql", action="store_true", default=False, help="output to sql")
opt.add_option("-u", dest="user", default=SQL.user, help="sql username")
opt.add_option("-p", dest="pwd", default=SQL.pwd, help="sql password")
opt.add_option("--host", dest="host", default=SQL.host, help="sql host")
opt.add_option("-d", dest="db", default=SQL.db, help="sql database")
try:
(options, args) = opt.parse_args()
if len(args) == 0:
opt.error("incorrect number of stocks")
s = driver(args, options)
if options.start is None or options.end is None:
s.fetch_live()
out = console_sink()
out.log_live(s.data())
else:
s.fetch_hist()
if options.sql:
out = sql_sink(options.db, options.host, options.user, options.pwd)
else:
out = console_sink()
out.log_hist(s.data(), lambda d: True if not options.ignore_zero_vol or d.volume() > 0 else False)
except Exception as e:
exc_type, exc_value, exc_traceback = sys.exc_info()
import traceback
traceback.print_exception(exc_type, exc_value, exc_traceback)
sys.exit(1)
sys.exit(0)
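# Illustrative sketch (not part of the original script): programmatic use of
# the driver class with a minimal stand-in for the optparse options object.
# The symbol and date range are placeholders.
def _example_fetch_history():
    class _Opts(object):
        start, end = '2015-01-01', '2015-02-01'
        ignore_zero_vol, sql = False, False
    d = driver(['AAPL'], _Opts())
    d.fetch_hist()
    return d.data()  # dict of symbol -> hist_price_list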
| apache-2.0 | 5,832,388,723,488,165,000 | 36.804878 | 114 | 0.542258 | false |
toshywoshy/ansible | test/units/modules/network/f5/test_bigip_monitor_ldap.py | 22 | 2959 | # -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_monitor_ldap import ApiParameters
from library.modules.bigip_monitor_ldap import ModuleManager
from library.modules.bigip_monitor_ldap import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_monitor_ldap import ApiParameters
from ansible.modules.network.f5.bigip_monitor_ldap import ModuleManager
from ansible.modules.network.f5.bigip_monitor_ldap import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
parent='/Common/ldap',
interval=10,
time_until_up=0,
timeout=30,
)
p = ApiParameters(params=args)
assert p.parent == '/Common/ldap'
assert p.interval == 10
assert p.time_until_up == 0
assert p.timeout == 30
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_create(self, *args):
set_module_args(dict(
name='foo',
parent='/Common/ldap',
interval=20,
timeout=30,
time_until_up=60,
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.exists = Mock(side_effect=[False, True])
mm.create_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
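# Note (not part of the original test module): the manager's API calls are
# mocked above, so this module should be runnable on its own with pytest,
# e.g. `pytest test_bigip_monitor_ldap.py -k test_create` (Python >= 2.7,
# with the F5 module utils importable).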
| gpl-3.0 | 531,774,032,876,011,500 | 25.9 | 91 | 0.636364 | false |
jonvestal/open-kilda | src-python/lab-service/lab/service/test/test_topology.py | 2 | 1061 | # Copyright 2018 Telstra Open Source
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
from service.topology import Topology
def test_smoke(mocker):
mocker.patch('service.topology.daemon_start')
mocker.patch('service.topology.run_cmd')
mocker.patch('service.topology.vsctl')
mocker.patch('service.topology.ofctl')
mocker.patch('service.service.setup_app_home')
with open("./service/test/res/topology.json", "r") as f:
topo = Topology.create(json.loads(f.read()))
topo.run()
topo.destroy()
| apache-2.0 | -3,259,586,292,970,030,000 | 34.366667 | 76 | 0.719133 | false |
srajag/contrail-controller | src/opserver/test/analytics_systest.py | 1 | 19604 | #!/usr/bin/env python
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
#
# analytics_systest.py
#
# System tests for analytics
#
import sys
builddir = sys.path[0] + '/../..'
import threading
threading._DummyThread._Thread__stop = lambda x: 42
import signal
import gevent
from gevent import monkey
monkey.patch_all()
import os
import unittest
import testtools
import fixtures
import socket
from utils.analytics_fixture import AnalyticsFixture
from utils.generator_fixture import GeneratorFixture
from mockcassandra import mockcassandra
from mockredis import mockredis
import logging
import time
import pycassa
from pycassa.pool import ConnectionPool
from pycassa.columnfamily import ColumnFamily
from opserver.sandesh.viz.constants import *
from sandesh_common.vns.ttypes import Module
from sandesh_common.vns.constants import ModuleNames
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
class AnalyticsTest(testtools.TestCase, fixtures.TestWithFixtures):
@classmethod
def setUpClass(cls):
if AnalyticsTest._check_skip_test() is True:
return
if (os.getenv('LD_LIBRARY_PATH', '').find('build/lib') < 0):
if (os.getenv('DYLD_LIBRARY_PATH', '').find('build/lib') < 0):
assert(False)
cls.cassandra_port = AnalyticsTest.get_free_port()
mockcassandra.start_cassandra(cls.cassandra_port)
cls.redis_port = AnalyticsTest.get_free_port()
mockredis.start_redis(
cls.redis_port, builddir+'/testroot/bin/redis-server')
@classmethod
def tearDownClass(cls):
if AnalyticsTest._check_skip_test() is True:
return
mockcassandra.stop_cassandra(cls.cassandra_port)
mockredis.stop_redis(cls.redis_port)
pass
def _update_analytics_start_time(self, start_time):
pool = ConnectionPool(COLLECTOR_KEYSPACE, ['127.0.0.1:%s'
% (self.__class__.cassandra_port)])
col_family = ColumnFamily(pool, SYSTEM_OBJECT_TABLE)
col_family.insert(SYSTEM_OBJECT_ANALYTICS,
{SYSTEM_OBJECT_START_TIME: start_time})
# end _update_analytics_start_time
#@unittest.skip('Skipping cassandra test with vizd')
def test_01_startup(self):
'''
This test starts redis,vizd,opserver and qed
It uses the test class' cassandra instance
Then it checks that the collector UVE (via redis)
and syslog (via cassandra) can be accessed from
opserver.
'''
logging.info("*** test_01_startup ***")
if AnalyticsTest._check_skip_test() is True:
return True
vizd_obj = self.useFixture(
AnalyticsFixture(logging, builddir,
self.__class__.redis_port,
self.__class__.cassandra_port))
assert vizd_obj.verify_on_setup()
assert vizd_obj.verify_collector_obj_count()
return True
#@unittest.skip('Query query engine logs to test QE')
def test_02_message_table_query(self):
        '''
        This test starts redis, vizd, opserver and qed.
        It uses the test class' cassandra instance.
        It then verifies that the message table can be queried via
        opserver with moduleid, messagetype, where (and/or), filter,
        sort and limit clauses.
        '''
logging.info("*** test_02_message_table_query ***")
if AnalyticsTest._check_skip_test() is True:
return True
vizd_obj = self.useFixture(
AnalyticsFixture(logging, builddir,
self.__class__.redis_port,
self.__class__.cassandra_port))
assert vizd_obj.verify_on_setup()
assert vizd_obj.verify_collector_obj_count()
assert vizd_obj.verify_message_table_moduleid()
assert vizd_obj.verify_message_table_select_uint_type()
assert vizd_obj.verify_message_table_messagetype()
assert vizd_obj.verify_message_table_where_or()
assert vizd_obj.verify_message_table_where_and()
assert vizd_obj.verify_message_table_filter()
assert vizd_obj.verify_message_table_filter2()
assert vizd_obj.verify_message_table_sort()
assert vizd_obj.verify_message_table_limit()
return True
# end test_02_message_table_query
#@unittest.skip('Send/query flow stats to test QE')
def test_04_flow_query(self):
'''
This test starts redis,vizd,opserver and qed
It uses the test class' cassandra instance
Then it sends flow stats to the collector
and checks if flow stats can be accessed from
QE.
'''
logging.info("*** test_04_flow_query ***")
if AnalyticsTest._check_skip_test() is True:
return True
# set the start time in analytics db 1 hour earlier than
# the current time. For flow series test, we need to create
# flow samples older than the current time.
start_time = UTCTimestampUsec() - 3600 * 1000 * 1000
self._update_analytics_start_time(start_time)
vizd_obj = self.useFixture(
AnalyticsFixture(logging, builddir,
self.__class__.redis_port,
self.__class__.cassandra_port))
assert vizd_obj.verify_on_setup()
assert vizd_obj.verify_collector_obj_count()
collectors = [vizd_obj.get_collector()]
generator_obj = self.useFixture(
GeneratorFixture("contrail-vrouter-agent", collectors,
logging, vizd_obj.get_opserver_port(),
start_time))
assert generator_obj.verify_on_setup()
generator_obj.generate_flow_samples()
generator_obj1 = self.useFixture(
GeneratorFixture("contrail-vrouter-agent", collectors,
logging, vizd_obj.get_opserver_port(),
start_time, hostname=socket.gethostname() + "dup"))
assert generator_obj1.verify_on_setup()
generator_obj1.generate_flow_samples()
generator_object = [generator_obj, generator_obj1]
for obj in generator_object:
assert vizd_obj.verify_flow_samples(obj)
assert vizd_obj.verify_flow_table(generator_obj)
assert vizd_obj.verify_flow_series_aggregation_binning(generator_object)
return True
# end test_04_flow_query
#@unittest.skip('InterVN stats using StatsOracle')
def test_06_intervn_query(self):
'''
This test starts redis,vizd,opserver and qed
It uses the test class' cassandra instance
Then it sends intervn stats to the collector
and checks if intervn stats can be accessed from
QE.
'''
logging.info("*** test_06_intervn_query ***")
if AnalyticsTest._check_skip_test() is True:
return True
# set the start time in analytics db 1 hour earlier than
# the current time. For flow series test, we need to create
# flow samples older than the current time.
start_time = UTCTimestampUsec() - 3600 * 1000 * 1000
self._update_analytics_start_time(start_time)
vizd_obj = self.useFixture(
AnalyticsFixture(logging, builddir,
self.__class__.redis_port,
self.__class__.cassandra_port))
assert vizd_obj.verify_on_setup()
assert vizd_obj.verify_collector_obj_count()
collectors = [vizd_obj.get_collector()]
generator_obj = self.useFixture(
GeneratorFixture("contrail-vrouter-agent", collectors,
logging, vizd_obj.get_opserver_port(),
start_time))
assert generator_obj.verify_on_setup()
logging.info("Starting intervn gen " + str(UTCTimestampUsec()))
generator_obj.generate_intervn()
logging.info("Ending intervn gen " + str(UTCTimestampUsec()))
assert vizd_obj.verify_intervn_all(generator_obj)
assert vizd_obj.verify_intervn_sum(generator_obj)
return True
# end test_06_intervn_query
#@unittest.skip(' Messagetype and Objecttype queries')
def test_07_fieldname_query(self):
'''
This test starts redis,vizd,opserver and qed
It uses the test class' cassandra instance
It then queries the stats table for messagetypes
and objecttypes
'''
logging.info("*** test_07_fieldname_query ***")
start_time = UTCTimestampUsec() - 3600 * 1000 * 1000
self._update_analytics_start_time(start_time)
vizd_obj = self.useFixture(
AnalyticsFixture(logging, builddir,
self.__class__.redis_port,
self.__class__.cassandra_port))
assert vizd_obj.verify_on_setup()
assert vizd_obj.verify_collector_obj_count()
assert vizd_obj.verify_fieldname_messagetype();
assert vizd_obj.verify_fieldname_objecttype();
return True;
#end test_07_fieldname_query
#@unittest.skip('verify send-tracebuffer')
def test_08_send_tracebuffer(self):
'''
This test verifies /analytics/send-tracebuffer/ REST API.
Opserver publishes the request to send trace buffer to all
the redis-uve instances. Collector forwards the request to
the appropriate generator(s). Generator sends the tracebuffer
to the Collector which then dumps the trace messages in the
analytics db. Verify that the trace messages are written in
the analytics db.
'''
logging.info('*** test_08_send_tracebuffer ***')
if AnalyticsTest._check_skip_test() is True:
return True
vizd_obj = self.useFixture(
AnalyticsFixture(logging, builddir, -1,
self.__class__.cassandra_port,
collector_ha_test=True))
assert vizd_obj.verify_on_setup()
assert vizd_obj.verify_collector_obj_count()
# Make sure the contrail-collector is connected to the redis-uve before
# sending the trace buffer request
assert vizd_obj.verify_collector_redis_uve_connection(
vizd_obj.collectors[0])
# Send trace buffer request for only the first collector
vizd_obj.opserver.send_tracebuffer_request(
vizd_obj.collectors[0].hostname,
ModuleNames[Module.COLLECTOR], '0',
'UveTrace')
assert vizd_obj.verify_tracebuffer_in_analytics_db(
vizd_obj.collectors[0].hostname,
ModuleNames[Module.COLLECTOR], 'UveTrace')
# There should be no trace buffer from the second collector
assert not vizd_obj.verify_tracebuffer_in_analytics_db(
vizd_obj.collectors[1].hostname,
ModuleNames[Module.COLLECTOR], 'UveTrace')
# Make sure the contrail-collector is connected to the redis-uve before
# sending the trace buffer request
assert vizd_obj.verify_collector_redis_uve_connection(
vizd_obj.collectors[1])
# Send trace buffer request for all collectors
vizd_obj.opserver.send_tracebuffer_request(
'*', ModuleNames[Module.COLLECTOR], '0',
'UveTrace')
assert vizd_obj.verify_tracebuffer_in_analytics_db(
vizd_obj.collectors[1].hostname,
ModuleNames[Module.COLLECTOR], 'UveTrace')
#end test_08_send_tracebuffer
#@unittest.skip('verify source/module list')
def test_09_table_source_module_list(self):
'''
This test verifies /analytics/table/<table>/column-values/Source
and /analytics/table/<table>/column-values/ModuleId
'''
logging.info('*** test_09_source_module_list ***')
if AnalyticsTest._check_skip_test() is True:
return True
vizd_obj = self.useFixture(
AnalyticsFixture(logging, builddir, -1,
self.__class__.cassandra_port,
collector_ha_test=True))
assert vizd_obj.verify_on_setup()
assert vizd_obj.verify_collector_obj_count()
exp_genlist1 = ['contrail-collector', 'contrail-analytics-api',
'contrail-query-engine']
assert vizd_obj.verify_generator_list(vizd_obj.collectors[0],
exp_genlist1)
exp_genlist2 = ['contrail-collector']
assert vizd_obj.verify_generator_list(vizd_obj.collectors[1],
exp_genlist2)
exp_src_list = [col.hostname for col in vizd_obj.collectors]
exp_mod_list = exp_genlist1
assert vizd_obj.verify_table_source_module_list(exp_src_list,
exp_mod_list)
# stop the second redis_uve instance and verify the src/module list
vizd_obj.redis_uves[1].stop()
exp_src_list = [vizd_obj.collectors[0].hostname]
exp_mod_list = exp_genlist1
assert vizd_obj.verify_table_source_module_list(exp_src_list,
exp_mod_list)
#end test_09_table_source_module_list
#@unittest.skip(' where queries with different conditions')
def test_10_where_clause_query(self):
'''
This test is used to check the working of integer
fields in the where query
'''
logging.info("*** test_09_where_clause_query ***")
if AnalyticsTest._check_skip_test() is True:
return True
start_time = UTCTimestampUsec() - 3600 * 1000 * 1000
self._update_analytics_start_time(start_time)
vizd_obj = self.useFixture(
AnalyticsFixture(logging, builddir,
self.__class__.redis_port,
self.__class__.cassandra_port))
assert vizd_obj.verify_on_setup()
assert vizd_obj.verify_where_query()
#Query the flowseriestable with different where options
assert vizd_obj.verify_collector_obj_count()
collectors = [vizd_obj.get_collector()]
generator_obj = self.useFixture(
GeneratorFixture("contrail-vrouter-agent", collectors,
logging, vizd_obj.get_opserver_port(),
start_time))
assert generator_obj.verify_on_setup()
generator_obj.generate_flow_samples()
assert vizd_obj.verify_where_query_prefix(generator_obj)
return True;
#end test_10_where_clause_query
#@unittest.skip('verify ObjectValueTable query')
def test_11_verify_object_value_table_query(self):
'''
This test verifies the ObjectValueTable query.
'''
logging.info('*** test_10_verify_object_value_table_query ***')
if AnalyticsTest._check_skip_test() is True:
return True
vizd_obj = self.useFixture(
AnalyticsFixture(logging, builddir,
self.__class__.redis_port,
self.__class__.cassandra_port))
assert vizd_obj.verify_on_setup()
assert vizd_obj.verify_collector_obj_count()
assert vizd_obj.verify_object_value_table_query(
table='ObjectCollectorInfo',
exp_object_values=[vizd_obj.collectors[0].hostname])
# verify that the object table query works for object id containing
# XML control characters.
collectors = [vizd_obj.get_collector()]
generator_obj = self.useFixture(
GeneratorFixture('contrail-vrouter-agent', collectors,
logging, vizd_obj.get_opserver_port()))
assert generator_obj.verify_on_setup()
generator_obj.send_vm_uve(vm_id='vm11&>', num_vm_ifs=2,
msg_count=1)
assert vizd_obj.verify_object_value_table_query(table='ObjectVMTable',
exp_object_values=['vm11&>'])
# end test_11_verify_object_table_query
#@unittest.skip('verify ObjectTable query')
def test_12_verify_syslog_table_query(self):
'''
This test verifies the Syslog query.
'''
import logging.handlers
logging.info('*** test_11_verify_syslog_table_query ***')
if AnalyticsTest._check_skip_test() is True:
return True
vizd_obj = self.useFixture(
AnalyticsFixture(logging, builddir,
self.__class__.redis_port,
self.__class__.cassandra_port,
syslog_port = True))
assert vizd_obj.verify_on_setup()
syslogger = logging.getLogger("SYSLOGER")
lh = logging.handlers.SysLogHandler(address=('127.0.0.1',
vizd_obj.collectors[0].get_syslog_port()))
lh.setFormatter(logging.Formatter('%(asctime)s %(name)s:%(message)s',
datefmt='%b %d %H:%M:%S'))
lh.setLevel(logging.INFO)
syslogger.addHandler(lh)
line = 'pizza pasta babaghanoush'
syslogger.critical(line)
assert vizd_obj.verify_keyword_query(line, ['pasta', 'pizza'])
assert vizd_obj.verify_keyword_query(line, ['babaghanoush'])
# SYSTEMLOG
assert vizd_obj.verify_keyword_query(line, ['PROGRESS', 'QueryExec'])
# bad charecter (loose?)
line = 'football ' + chr(201) + chr(203) + chr(70) + ' and baseball'
syslogger.critical(line)
assert vizd_obj.verify_keyword_query(line, ['football', 'baseball'])
# end test_12_verify_syslog_table_query
#@unittest.skip('Skipping Fieldnames table test')
def test_13_fieldname_table(self):
'''
This test starts vizd and a python generators that simulates
vrouter and sends messages. It uses the test class' cassandra
instance.Then it checks that the Field names table got the
values.
'''
logging.info("*** test_12_fieldname_table ***")
if AnalyticsTest._check_skip_test() is True:
return True
vizd_obj = self.useFixture(
AnalyticsFixture(logging, builddir,
self.__class__.redis_port,
self.__class__.cassandra_port))
assert vizd_obj.verify_on_setup()
collectors = [vizd_obj.get_collector()]
generator_obj = self.useFixture(
GeneratorFixture("VRouterAgent", collectors,
logging, vizd_obj.get_opserver_port()))
assert generator_obj.verify_on_setup()
# Sends 2 different vn uves in 1 sec spacing
generator_obj.generate_intervn()
assert vizd_obj.verify_fieldname_table()
return True
# end test_13_fieldname_table
@staticmethod
def get_free_port():
cs = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
cs.bind(("", 0))
cport = cs.getsockname()[1]
cs.close()
return cport
@staticmethod
def _check_skip_test():
if (socket.gethostname() == 'build01'):
logging.info("Skipping test")
return True
return False
def _term_handler(*_):
raise IntSignal()
if __name__ == '__main__':
gevent.signal(signal.SIGINT,_term_handler)
unittest.main(catchbreak=True)
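# Note (not part of the original test module): the suite is driven by
# unittest above and expects the mock cassandra/redis helpers from the build
# tree, so it is normally invoked from the build environment, e.g.
# `python analytics_systest.py AnalyticsTest.test_01_startup` to run a
# single case.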
| apache-2.0 | 4,258,992,166,110,186,500 | 40.710638 | 80 | 0.600439 | false |
wvdschel/snippets | gdb-scripts/tracefunctionsi.py | 1 | 10498 | import gdb
from traceback import print_exc
from datetime import datetime
class TraceFunctionsI(gdb.Command):
def __init__(self):
self.threads = {}
self.context_switch_interval = 0
self.step_size = 1
self.output_filename = None
self.reset()
if hasattr(gdb.events, "new_thread"):
gdb.events.new_thread.connect(self.new_thread)
else:
TraceFunctionsI.printf("This GDB does not know the new_thread event, multi-threading will not work.")
super(TraceFunctionsI, self).__init__("tracefunctionsi", gdb.COMMAND_OBSCURE)
@staticmethod
def printf(msg):
print("tracefunctionsi: %s" % (msg,))
@staticmethod
def describe_current_position():
frame = gdb.newest_frame()
func = frame.function()
sal = frame.find_sal()
line = "%s:%s" % (sal.filename, sal.line)
return "%s %s" % (func, line)
@staticmethod
def callstack_depth():
depth = 1
frame = gdb.newest_frame()
while frame is not None:
frame = frame.older()
depth += 1
return depth
def new_thread(self, event):
thread = event.inferior_thread
self.threads[thread.global_num] = thread
TraceFunctionsI.printf("aware of new thread %d: %s" % (thread.global_num, thread.ptid,))
def reset(self):
if hasattr(self, "output"):
self.output.flush()
if self.output != gdb:
self.output.close()
if self.output_filename:
self.output = open(self.output_filename, mode='a')
else:
self.output = gdb
def invoke(self, arg, from_tty):
argv = gdb.string_to_argv(arg)
command = None
instruction_count = None
if len(argv) > 0:
command = argv[0]
try:
instruction_count = int(argv[0])
except ValueError:
pass
if instruction_count:
if len(argv) > 1:
self.tracefunctionsi(instruction_count, argv[1])
else:
self.tracefunctionsi(instruction_count)
elif command == "step-size":
try:
self.step_size = int(argv[1])
except:
print("usage: tracefunctionsi step-size <instruction count>")
elif command == "context-switch-interval":
try:
self.context_switch_interval = int(argv[1])
except:
                print("usage: tracefunctionsi context-switch-interval <step count>")
elif command == "output":
if len(argv) > 1:
self.output_filename = argv[1]
else:
self.output_filename = None
self.reset()
elif command == "info" or command is None:
print("tracefunctionsi configuration:")
print(" Output: %s" % (self.output_filename if self.output_filename else "gdb console"))
print(" Step size: %d instructions" % self.step_size)
print(" Context switch interval: %d steps" % self.context_switch_interval)
else:
print("Usage: tracefunctionsi (<instruction count> [stop_function] | output [filename] | step-size <instruction count> | context-switch-interval <step count> | info)")
def log(self, prefix, pc, function, filename, linenumber):
timestamp = (datetime.now() - self.start_time).total_seconds()
thread = gdb.selected_thread().ptid
if function:
function_str = function.print_name
else:
function_str = "unknown function around %x" % pc
if filename:
line = "%s:%d" % (filename, linenumber)
else:
line = "unknown source"
self.output.write("[%04.8f] %s %s %s %s\n" % (timestamp, thread, prefix, function_str, line))
#TraceFunctionsI.printf("[%s] %s %s %s %s" % (timestamp, thread, prefix, function_str, line))
def switch_thread(self, current_thread_id=None, first_thread_id=None):
if len(self.threads) == 0:
TraceFunctionsI.printf("warning: no known threads, no thread switch performed.")
return
all_threads = sorted(self.threads.keys())
if current_thread_id is None:
current_thread_id = gdb.selected_thread().global_num
if first_thread_id is None:
first_thread_id = current_thread_id
try:
next_thread_idx = all_threads.index(current_thread_id) + 1
if next_thread_idx >= len(all_threads):
next_thread_idx = 0
except ValueError:
next_thread_idx = 0
#TraceFunctionsI.printf("Attempting to switch to thread at idx %d of %s" % (next_thread_idx, all_threads))
next_thread_id = all_threads[next_thread_idx]
if next_thread_id == first_thread_id:
if len(all_threads) > 0:
TraceFunctionsI.printf("error: failed to find any next thread to execute. Not performing context switch.")
next_thread = self.threads[next_thread_id]
if next_thread.is_exited() or not next_thread.is_valid():
TraceFunctionsI.printf("thread %s has become invalid or exited, removing from watch list." % (next_thread.ptid,))
del self.threads[next_thread_id]
self.switch_thread(current_thread_id, first_thread_id)
else:
# TODO this doesn't work at all
gdb.execute("set scheduler-locking off", to_string=True)
TraceFunctionsI.printf("switching to thread %s" % (next_thread.ptid,))
next_thread.switch()
#brk = gdb.FinishBreakpoint(gdb.newest_frame(), internal=True)
#brk.thread = int(next_thread.global_num)
gtid = gdb.selected_thread().global_num
frame = gdb.newest_frame()
sal = frame.find_sal()
if sal.symtab:
filename = sal.symtab.filename
else:
filename = None
linenumber = sal.line
function = frame.function()
gdb.execute("finish", to_string=True)
self.log("returned from", frame.pc(), function, filename, linenumber)
gdb.execute("set scheduler-locking on", to_string=True)
TraceFunctionsI.printf("switch to thread %s completed" % (next_thread.ptid,))
def tracefunctionsi(self, instruction_count, until_string=None):
try:
gdb.execute("set pagination off", to_string=True)
#gdb.execute("set scheduler-locking on", to_string=True)
self.start_time = datetime.now()
TraceFunctionsI.printf("Starting trace at %s" % self.start_time.isoformat())
self.output.write("Starting trace at %s\n" % self.start_time.isoformat())
stack_depths = {}
frame = gdb.newest_frame()
active_functions = {}
active_source_files = {}
active_source_lines = {}
first_addrs = {}
step_in_thread = 0
TraceFunctionsI.printf("tracing function calls for %d instructions" % instruction_count)
while instruction_count > 0:
if step_in_thread == self.context_switch_interval and self.context_switch_interval != 0:
step_in_thread = 0
TraceFunctionsI.printf("Initiating thread switch")
self.switch_thread()
try:
gtid = gdb.selected_thread().global_num
except AttributeError:
TraceFunctionsI.printf("Cannot determine thread id.")
gtid = 0
frame = gdb.newest_frame()
sal = frame.find_sal()
if sal.symtab:
filename = sal.symtab.filename
else:
filename = None
linenumber = sal.line
function = frame.function()
stack_depth = TraceFunctionsI.callstack_depth()
if gtid not in stack_depths:
TraceFunctionsI.printf("initializing thread %s" % gtid)
stack_depths[gtid] = stack_depth
active_functions[gtid] = function
active_source_files[gtid] = filename
active_source_lines[gtid] = linenumber
first_addrs[gtid] = frame.pc()
else:
if filename != active_source_files[gtid] or \
function != active_functions[gtid] or \
stack_depth != stack_depths[gtid]:
if stack_depth <= stack_depths[gtid]:
self.log("returned from", first_addrs[gtid], active_functions[gtid], active_source_files[gtid], active_source_lines[gtid])
if stack_depth >= stack_depths[gtid]:
self.log("entering", frame.pc(), function, filename, linenumber)
stack_depths[gtid] = stack_depth
active_functions[gtid] = function
active_source_files[gtid] = filename
active_source_lines[gtid] = linenumber
first_addrs[gtid] = frame.pc()
if until_string:
if str(function).endswith(until_string) \
or (until_string + "(") in str(function) \
or ("%s:%d" % (filename, linenumber)).endswith(until_string) \
or "0x%x" % frame.pc() == until_string.lower():
TraceFunctionsI.printf("Reached %s, stopping trace." % until_string)
instruction_count = 0
curr_step_size = min(self.step_size, instruction_count)
gdb.execute("step %d" % curr_step_size, to_string=True)
step_in_thread += 1
instruction_count -= curr_step_size
#TraceFunctionsI.printf("Stepped %d instructions, step %d/%d in thread %d, %d instructions left" % (
# curr_step_size, step_in_thread, self.context_switch_interval, gtid, instruction_count))
except Exception as e:
print_exc(e)
raise e
TraceFunctionsI.printf("done.")
self.output.flush()
#print(dir(gdb))
TraceFunctionsI()
| bsd-2-clause | 8,476,986,076,674,132,000 | 41.84898 | 179 | 0.54839 | false |
IDragonfire/modular-client | src/stats/__init__.py | 1 | 1176 | #-------------------------------------------------------------------------------
# Copyright (c) 2012 Gael Honorez.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the GNU Public License v3.0
# which accompanies this distribution, and is available at
# http://www.gnu.org/licenses/gpl.html
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#-------------------------------------------------------------------------------
from PyQt4 import QtCore
from PyQt4 import QtWebKit
import logging
import urllib
import util
logger = logging.getLogger("faf.ladder")
logger.setLevel(logging.DEBUG)
from _statswidget import StatsWidget as Stats
| gpl-3.0 | 6,342,525,656,017,083,000 | 34.75 | 80 | 0.652211 | false |
xujun10110/pth-toolkit | lib/python2.7/site-packages/openchange/tests/test_mailbox.py | 9 | 2661 | #!/usr/bin/python
# OpenChange provisioning
# Copyright (C) Jelmer Vernooij <[email protected]> 2009
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from openchange.mailbox import NoSuchServer, OpenChangeDB, gen_mailbox_folder_fid
import os
import unittest
class OpenChangeDBTests(unittest.TestCase):
"""Tests for OpenChangeDB."""
def setUp(self):
if os.path.exists("openchange.ldb"):
os.unlink("openchange.ldb")
self.db = OpenChangeDB("openchange.ldb")
self.db.setup()
def test_user_exists_no_server(self):
self.assertRaises(NoSuchServer, self.db.user_exists, "someserver", "foo")
def test_server_lookup_doesnt_exist(self):
self.assertRaises(NoSuchServer, self.db.lookup_server,
"nonexistantserver")
def test_server_lookup(self):
self.db.add_server("dc=blaserver", "blaserver", "firstorg", "firstou")
self.assertEquals("dc=blaserver", str(self.db.lookup_server("blaserver")['dn']))
def test_add_mailbox_user(self):
self.db.add_server("cn=myserver", "myserver", "firstorg", "firstou")
self.db.add_mailbox_user("cn=firstorg,cn=firstou,cn=myserver", "someuser")
self.assertTrue(self.db.user_exists("myserver", "someuser"))
def test_msg_globalcount_initial(self):
self.db.add_server("dc=myserver", "myserver", "firstorg", "firstou")
self.assertEquals(1, self.db.get_message_GlobalCount("myserver"))
def test_set_msg_globalcount(self):
self.db.add_server("dc=myserver", "myserver", "firstorg", "firstou")
self.db.set_message_GlobalCount("myserver", 42)
self.assertEquals(42, self.db.get_message_GlobalCount("myserver"))
def test_msg_replicaid_initial(self):
self.db.add_server("dc=myserver", "myserver", "firstorg", "firstou")
self.assertEquals(1, self.db.get_message_ReplicaID("myserver"))
class MailboxFIDTests(unittest.TestCase):
def test_simple(self):
self.assertEquals("0x00000000109282806", gen_mailbox_folder_fid(4242, 534534))
| bsd-2-clause | -1,817,714,485,684,167,200 | 38.132353 | 88 | 0.696355 | false |
wrwrwr/mezzanine | mezzanine/project_template/urls.py | 13 | 3856 |
from django.conf.urls import patterns, include, url
from django.contrib import admin
from mezzanine.core.views import direct_to_template
admin.autodiscover()
# Add the urlpatterns for any custom Django applications here.
# You can also change the ``home`` view to add your own functionality
# to the project's homepage.
urlpatterns = patterns("",
# Change the admin prefix here to use an alternate URL for the
# admin interface, which would be marginally more secure.
("^admin/", include(admin.site.urls)),
# We don't want to presume how your homepage works, so here are a
# few patterns you can use to set it up.
# HOMEPAGE AS STATIC TEMPLATE
# ---------------------------
# This pattern simply loads the index.html template. It isn't
# commented out like the others, so it's the default. You only need
# one homepage pattern, so if you use a different one, comment this
# one out.
url("^$", direct_to_template, {"template": "index.html"}, name="home"),
# HOMEPAGE AS AN EDITABLE PAGE IN THE PAGE TREE
# ---------------------------------------------
# This pattern gives us a normal ``Page`` object, so that your
# homepage can be managed via the page tree in the admin. If you
# use this pattern, you'll need to create a page in the page tree,
# and specify its URL (in the Meta Data section) as "/", which
# is the value used below in the ``{"slug": "/"}`` part.
# Also note that the normal rule of adding a custom
# template per page with the template name using the page's slug
# doesn't apply here, since we can't have a template called
# "/.html" - so for this case, the template "pages/index.html"
# should be used if you want to customize the homepage's template.
# url("^$", "mezzanine.pages.views.page", {"slug": "/"}, name="home"),
# HOMEPAGE FOR A BLOG-ONLY SITE
# -----------------------------
# This pattern points the homepage to the blog post listing page,
# and is useful for sites that are primarily blogs. If you use this
# pattern, you'll also need to set BLOG_SLUG = "" in your
# ``settings.py`` module, and delete the blog page object from the
# page tree in the admin if it was installed.
# url("^$", "mezzanine.blog.views.blog_post_list", name="home"),
# MEZZANINE'S URLS
# ----------------
# ADD YOUR OWN URLPATTERNS *ABOVE* THE LINE BELOW.
# ``mezzanine.urls`` INCLUDES A *CATCH ALL* PATTERN
# FOR PAGES, SO URLPATTERNS ADDED BELOW ``mezzanine.urls``
# WILL NEVER BE MATCHED!
# If you'd like more granular control over the patterns in
# ``mezzanine.urls``, go right ahead and take the parts you want
# from it, and use them directly below instead of using
# ``mezzanine.urls``.
("^", include("mezzanine.urls")),
# MOUNTING MEZZANINE UNDER A PREFIX
# ---------------------------------
# You can also mount all of Mezzanine's urlpatterns under a
# URL prefix if desired. When doing this, you need to define the
# ``SITE_PREFIX`` setting, which will contain the prefix. Eg:
# SITE_PREFIX = "my/site/prefix"
# For convenience, and to avoid repeating the prefix, use the
# commented out pattern below (commenting out the one above of course)
# which will make use of the ``SITE_PREFIX`` setting. Make sure to
# add the import ``from django.conf import settings`` to the top
# of this file as well.
# Note that for any of the various homepage patterns above, you'll
# need to use the ``SITE_PREFIX`` setting as well.
# ("^%s/" % settings.SITE_PREFIX, include("mezzanine.urls"))
)
# Adds ``STATIC_URL`` to the context of error pages, so that error
# pages can use JS, CSS and images.
handler404 = "mezzanine.core.views.page_not_found"
handler500 = "mezzanine.core.views.server_error"
| bsd-2-clause | 6,464,408,715,774,169,000 | 41.373626 | 75 | 0.65249 | false |
Freeseer/freeseer | src/freeseer/frontend/qtcommon/AboutDialog.py | 1 | 2699 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# freeseer - vga/presentation capture software
#
# Copyright (C) 2011, 2013 Free and Open Source Software Learning Centre
# http://fosslc.org
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# For support, questions, suggestions or any other inquiries, visit:
# http://wiki.github.com/Freeseer/freeseer/
from PyQt4.QtCore import QString
from PyQt4.QtCore import SIGNAL
from PyQt4.QtGui import QDialog
from PyQt4.QtGui import QDialogButtonBox
from PyQt4.QtGui import QIcon
from PyQt4.QtGui import QPixmap
from PyQt4.QtGui import QWidget
from PyQt4.QtGui import QGridLayout
try:
_fromUtf8 = QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
from freeseer.frontend.qtcommon import resource # noqa
from freeseer.frontend.qtcommon.AboutWidget import AboutWidget
RECORD_BUTTON_ARTIST = u'Sekkyumu'
RECORD_BUTTON_LINK = u'http://sekkyumu.deviantart.com/'
HEADPHONES_ARTIST = u'Ben Fleming'
HEADPHONES_LINK = u'http://mediadesign.deviantart.com/'
class AboutDialog(QDialog):
"""
Common About Dialog for the Freeseer Project. This should be used for the
about dialog when including one in GUIs.
Grid Layout:
Logo | About Infos
------|-------------
| Close Button
"""
def __init__(self, parent=None):
QWidget.__init__(self, parent)
self.aboutWidget = AboutWidget()
icon = QIcon()
icon.addPixmap(QPixmap(_fromUtf8(":/freeseer/logo.png")), QIcon.Normal, QIcon.Off)
self.setWindowIcon(icon)
self.layout = QGridLayout()
self.setLayout(self.layout)
self.layout.addWidget(self.aboutWidget)
# Right Bottom corner of grid, Close Button
self.buttonBox = QDialogButtonBox()
self.closeButton = self.buttonBox.addButton("Close", QDialogButtonBox.AcceptRole)
self.layout.addWidget(self.buttonBox, 1, 1)
self.connect(self.closeButton, SIGNAL("clicked()"), self.close)
self.setWindowTitle("About Freeseer")
| gpl-3.0 | -1,502,230,007,751,245,600 | 32.164557 | 90 | 0.691738 | false |
WeblateOrg/weblate | weblate/vcs/tests/test_gpg.py | 2 | 3516 | #
# Copyright © 2012 - 2021 Michal Čihař <[email protected]>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
import subprocess
from distutils.version import LooseVersion
from unittest import SkipTest
from django.core.cache import cache
from django.test import TestCase
from django.test.utils import override_settings
import weblate.vcs.gpg
from weblate.utils.checks import check_data_writable
from weblate.utils.unittest import tempdir_setting
from weblate.vcs.gpg import (
generate_gpg_key,
get_gpg_key,
get_gpg_public_key,
get_gpg_sign_key,
)
class GPGTest(TestCase):
gpg_error = None
@classmethod
def setUpClass(cls):
"""Check whether we can use gpg."""
super().setUpClass()
try:
result = subprocess.run(
["gpg", "--version"],
check=True,
universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
version = result.stdout.splitlines()[0].strip().rsplit(None, 1)[-1]
if LooseVersion(version) < LooseVersion("2.1"):
cls.gpg_error = "gpg too old"
except (subprocess.CalledProcessError, OSError):
cls.gpg_error = "gpg not found"
def setUp(self):
if self.gpg_error:
raise SkipTest(self.gpg_error)
def check_errors(self):
self.assertEqual(weblate.vcs.gpg.GPG_ERRORS, {})
@tempdir_setting("DATA_DIR")
@override_settings(
WEBLATE_GPG_IDENTITY="Weblate <[email protected]>", WEBLATE_GPG_ALGO="rsa512"
)
def test_generate(self):
self.assertEqual(check_data_writable(), [])
self.assertIsNone(get_gpg_key(silent=True))
key = generate_gpg_key()
self.check_errors()
self.assertIsNotNone(key)
self.assertEqual(key, get_gpg_key())
@tempdir_setting("DATA_DIR")
@override_settings(
WEBLATE_GPG_IDENTITY="Weblate <[email protected]>", WEBLATE_GPG_ALGO="rsa512"
)
def test_get(self):
self.assertEqual(check_data_writable(), [])
# This will generate new key
key = get_gpg_sign_key()
self.check_errors()
self.assertIsNotNone(key)
# Check cache access
self.assertEqual(key, get_gpg_sign_key())
# Check empty cache
cache.delete("gpg-key-id")
self.assertEqual(key, get_gpg_sign_key())
@tempdir_setting("DATA_DIR")
@override_settings(
WEBLATE_GPG_IDENTITY="Weblate <[email protected]>", WEBLATE_GPG_ALGO="rsa512"
)
def test_public(self):
self.assertEqual(check_data_writable(), [])
# This will generate new key
key = get_gpg_public_key()
self.check_errors()
self.assertIsNotNone(key)
# Check cache access
self.assertEqual(key, get_gpg_public_key())
| gpl-3.0 | 2,746,651,159,329,330,000 | 32.141509 | 87 | 0.646171 | false |
malderete/ninja-ide | ninja_ide/core/plugin_manager.py | 9 | 19407 | # -*- coding: utf-8 -*-
#
# This file is part of NINJA-IDE (http://ninja-ide.org).
#
# NINJA-IDE is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# any later version.
#
# NINJA-IDE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NINJA-IDE; If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import sys
import shutil
import copy
import zipfile
import traceback
#lint:disable
try:
from urllib.request import urlopen
from urllib.error import URLError
except ImportError:
from urllib2 import urlopen
from urllib2 import URLError
#lint:enable
from ninja_ide import resources
from ninja_ide.tools.logger import NinjaLogger
from ninja_ide.tools import json_manager
logger = NinjaLogger('ninja_ide.core.plugin_manager')
REQUIREMENTS = 'requirements.txt'
COMMAND_FOR_PIP_INSTALL = 'pip install -r %s'
try:
# For Python2
str = unicode # lint:ok
except NameError:
# We are in Python3
pass
class ServiceLocator(object):
'''
Hold the services and allows the interaction between NINJA-IDE and plugins
'''
def __init__(self, services=None):
self.__services = services if services else {}
def get_service(self, name):
return self.__services.get(name)
def get_availables_services(self):
return list(self.__services.keys())
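# Illustrative use of the locator (the 'editor' name below is only an
# example; real service names depend on what NINJA-IDE registers):
#
# locator = ServiceLocator({'editor': editor_service})
# editor = locator.get_service('editor')
# locator.get_availables_services() # -> ['editor']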
'''
NINJA-IDE Plugin
my_plugin.plugin
{
"module": "my_plugin",
"class": "MyPluginExample",
"authors": "Martin Alderete <[email protected]>",
"version": "0.1",
"description": "Este plugin es de prueba"
}
class MyPluginExample(Plugin):
def initialize(self):
#Configure the plugin using the NINJA-IDE API!!!
self.editor_s = self.service_locator.get_service('editor')
self.toolbar_s = self.service_locator.get_service('toolbar')
self.toolbar_s.add_action(QAction(...........))
self.appmenu_s = self.service_locator.get_service('appmenu')
self.appmenu_s.add_menu(QMenu(......))
#connect events!
self.editor_s.editorKeyPressEvent.connect(self.my_plugin_key_pressed)
def my_plugin_key_pressed(self, ...):
print 'some key was pressed in the IDE...'
'''
###############################################################################
# NINJA-IDE Plugin Manager
###############################################################################
class PluginManagerException(Exception):
pass
#Singleton
__pluginManagerInstance = None
def PluginManager(*args, **kw):
global __pluginManagerInstance
if __pluginManagerInstance is None:
__pluginManagerInstance = __PluginManager(*args, **kw)
return __pluginManagerInstance
#Extension of the NINJA-IDE plugin
PLUGIN_EXTENSION = '.plugin'
class __PluginManager(object):
'''
Plugin manager that allows loading, unloading and initializing plugins.
'''
def __init__(self, plugins_dir, service_locator):
'''
@param plugins_dir: Path to search plugins.
@param service_loctor: ServiceLocator object.
'''
self._service_locator = service_locator
#new!
self._plugins_by_dir = {}
#add all the plugins paths
for path in self.__create_list(plugins_dir):
self.add_plugin_dir(path)
#end new!
#self._plugins_dir = plugins_dir
self._errors = []
#found plugins
#example: ["logger", "my_plugin"]
self._found_plugins = []
#active plugins
#example: {"logger": (LoggerIntance, metadata),
# "my_plugin": (MyPluginInstance, metadata)}
self._active_plugins = {}
def __create_list(self, obj):
if isinstance(obj, (list, tuple)):
return obj
#string then returns a list of one item!
return [obj]
def add_plugin_dir(self, plugin_dir):
'''
Add a new directory in which to search for plugins.
@param plugin_dir: absolute path.
'''
if not plugin_dir in self._plugins_by_dir:
self._plugins_by_dir[plugin_dir] = []
def get_actives_plugins(self):
import warnings
warnings.warn("Deprecated in behalf of a TYPO free method name")
return self.get_active_plugins()
def get_active_plugins(self):
'''
Returns a list the instances
'''
return [plugin[0] for plugin in list(self._active_plugins.values())]
def _get_dir_from_plugin_name(self, plugin_name):
'''
Returns the dir of the plugin_name
'''
for dir_, plug_names in list(self._plugins_by_dir.items()):
if plugin_name in plug_names:
return dir_
def __getitem__(self, plugin_name):
'''
Magic method to get a plugin instance
from a given name.
@Note: This method has the logic below.
Check if the plugin is known,
if it is active return it,
otherwise, activate it and return it.
If the plugin name does not exist
raise KeyError exception.
@param plugin_name: plugin name.
@return: Plugin instance or None
'''
global PLUGIN_EXTENSION
ext = PLUGIN_EXTENSION
if not plugin_name.endswith(ext):
plugin_name += ext
if plugin_name in self._found_plugins:
if not plugin_name in self._active_plugins:
dir_ = self._get_dir_from_plugin_name(plugin_name)
self.load(plugin_name, dir_)
return self._active_plugins[plugin_name][0]
raise KeyError(plugin_name)
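# Sketch of the dict-style access (assumes discover() found a descriptor
# file named 'my_plugin.plugin' in one of the registered directories):
#
# manager = PluginManager(plugin_dirs, service_locator)
# manager.discover()
# instance = manager['my_plugin'] # '.plugin' is appended automatically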
def __contains__(self, plugin_name):
'''
Magic method to know whether the
PluginManager contains
a plugin with a given name.
@param plugin_name: plugin name.
@return: True or False.
'''
return plugin_name in self._found_plugins
def __iter__(self):
'''
Magic method to iterate over all
the plugin's names.
@return: iterator.
'''
return iter(self._found_plugins)
def __len__(self):
'''
Magic method to know the plugins
quantity.
@return: length.
'''
return len(self._found_plugins)
def __bool__(self):
'''
Magic method to ensure that any
instance evaluates as True in a conditional:
if x:
'''
return True
def get_plugin_name(self, file_name):
'''
Get the plugin's name from a file name.
@param file_name: A file object name.
@return: A plugin name from a file.
'''
plugin_file_name, file_ext = os.path.splitext(file_name)
return plugin_file_name
def list_plugins(self, dir_name):
'''
Crawl a directory and collect plugins.
@return: List with plugin names.
'''
global PLUGIN_EXTENSION
ext = PLUGIN_EXTENSION
try:
listdir = os.listdir(dir_name)
return [plug for plug in listdir if plug.endswith(ext)]
except OSError:
return ()
def is_plugin_active(self, plugin_name):
'''
Check whether a plugin is active
@param plugin_name: Plugin name to check.
@return: True or False
'''
return plugin_name in self._active_plugins
def discover(self):
'''
Search every registered plugin directory
and collect the valid plugin names.
'''
for dir_name in self._plugins_by_dir:
for file_name in self.list_plugins(dir_name):
plugin_name = file_name
if not plugin_name in self._found_plugins:
self._found_plugins.append(plugin_name)
self._plugins_by_dir[dir_name].append(plugin_name)
def _load_module(self, module, klassname, metadata, dir_name):
old_syspath = copy.copy(sys.path)
try:
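# Temporarily add the plugin directory near the front of sys.path so that
# __import__ can resolve the plugin module; the original path list is
# restored in the finally block below.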
sys.path.insert(1, dir_name)
module = __import__(module, globals(), locals(), [])
klass = getattr(module, klassname)
#Instantiate the plugin
plugin_instance = klass(self._service_locator, metadata=metadata)
#return the plugin instance
return plugin_instance
except(ImportError, AttributeError) as reason:
raise PluginManagerException('Error loading "%s": %s' %
(module, reason))
finally:
sys.path = old_syspath
return None
def load(self, plugin_name, dir_name):
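# Load a single plugin: read its .plugin JSON descriptor, import the
# declared module/class, instantiate it with the service locator, call its
# initialize() hook and register it in _active_plugins. Any failure is
# logged and its traceback stored in self._errors.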
global PLUGIN_EXTENSION
if plugin_name in self._active_plugins:
return
for dir_name, plugin_list in list(self._plugins_by_dir.items()):
if plugin_name in plugin_list:
ext = PLUGIN_EXTENSION
plugin_filename = os.path.join(dir_name, plugin_name)
plugin_structure = json_manager.read_json(plugin_filename)
plugin_structure['name'] = plugin_name.replace(ext, '')
module = plugin_structure.get('module', None)
klassname = plugin_structure.get('class', None)
if module is not None and klassname is not None:
try:
plugin_instance = self._load_module(module,
klassname, plugin_structure, dir_name)
#set a get_plugin method to get the reference to other plugins
#setattr(plugin_instance,'get_plugin',self.__getitem__)
#call a special method *initialize* in the plugin!
plugin_instance.metadata = plugin_structure
logger.info("Calling initialize (%s)", plugin_name)
plugin_instance.initialize()
#tuple (instance, metadata)
plugin_metadata = (plugin_instance, plugin_structure)
self._active_plugins[plugin_name] = plugin_metadata
except (PluginManagerException, Exception) as reason:
logger.error("Not instanciated (%s): %s", plugin_name,
reason)
#remove the plugin because has errors
self._found_plugins.remove(plugin_name)
traceback_msg = traceback.format_exc()
plugin_name = plugin_name.replace(ext, '')
#add the traceback to errors
self._add_error(plugin_name, traceback_msg)
else:
logger.info("Successfuly initialized (%s)",
plugin_name)
def load_all(self):
for dir, pl in list(self._plugins_by_dir.items()):
#Copy the list because we may REMOVE items while iterating!
found_plugins_aux = copy.copy(pl)
for plugin_name in found_plugins_aux:
self.load(plugin_name, dir)
def load_all_external(self, plugin_path):
#Copy the list because we may REMOVE items while iterating!
found_plugins_aux = copy.copy(self._found_plugins)
for plugin_name in found_plugins_aux:
self.load(plugin_name, plugin_path)
def unload(self, plugin_name):
try:
plugin_object = self._active_plugins[plugin_name][0]
#call a special method *finish* in the plugin!
plugin_object.finish()
del self._active_plugins[plugin_name]
except Exception as reason:
logger.error("Finishing plugin (%s): %s", plugin_name, reason)
else:
logger.info("Successfuly finished (%s)", plugin_name)
def unload_all(self):
#Copy the list because we may REMOVE items while iterating!
active_plugins_aux = copy.copy(self._active_plugins)
for plugin_name in active_plugins_aux:
self.unload(plugin_name)
def shutdown(self):
self.unload_all()
def get_availables_services(self):
"""
Return all available services.
"""
return self._service_locator.get_availables_services()
def _add_error(self, plugin_name, traceback_msg):
self._errors.append((plugin_name, traceback_msg))
@property
def errors(self):
"""
Return the list of (plugin_name, traceback) error tuples.
"""
return self._errors
def _availables_plugins(url):
"""
Return the available plugins from a URL on the NINJA-IDE web page.
"""
try:
descriptor = urlopen(url)
plugins = json_manager.read_json_from_stream(descriptor)
return plugins
except URLError:
return {}
def available_oficial_plugins():
'''
Returns a dict with OFFICIAL available plugins from the NINJA-IDE web page
'''
return _availables_plugins(resources.PLUGINS_WEB)
def available_community_plugins():
'''
Returns a dict with COMMUNITY available plugins from the NINJA-IDE web page
'''
return _availables_plugins(resources.PLUGINS_COMMUNITY)
def local_plugins():
'''
Returns the local plugins
'''
if not os.path.isfile(resources.PLUGINS_DESCRIPTOR):
return []
plugins = json_manager.read_json(resources.PLUGINS_DESCRIPTOR)
return plugins
def __get_all_plugin_descriptors():
'''
Returns all the .plugin files
'''
global PLUGIN_EXTENSION
return [pf for pf in os.listdir(resources.PLUGINS)
if pf.endswith(PLUGIN_EXTENSION)]
def download_plugin(file_):
'''
Download a plugin specified by file_
'''
global PLUGIN_EXTENSION
#get all the .plugin files in local filesystem
plugins_installed_before = set(__get_all_plugin_descriptors())
#download the plugin
fileName = os.path.join(resources.PLUGINS, os.path.basename(file_))
content = urlopen(file_)
f = open(fileName, 'wb')
f.write(content.read())
f.close()
#create the zip
zipFile = zipfile.ZipFile(fileName, 'r')
zipFile.extractall(resources.PLUGINS)
zipFile.close()
#clean up the environment
os.remove(fileName)
#get the name of the last installed plugin
plugins_installed_after = set(__get_all_plugin_descriptors())
#using set operations get the difference that is the new plugin
new_plugin = (plugins_installed_after - plugins_installed_before).pop()
return new_plugin
def manual_install(file_):
"""Copy zip file and install."""
global PLUGIN_EXTENSION
#get all the .plugin files in local filesystem
plugins_installed_before = set(__get_all_plugin_descriptors())
#copy the plugin
fileName = os.path.join(resources.PLUGINS, os.path.basename(file_))
shutil.copyfile(file_, fileName)
#extract the zip
zipFile = zipfile.ZipFile(fileName, 'r')
zipFile.extractall(resources.PLUGINS)
zipFile.close()
#clean up the environment
os.remove(fileName)
#get the name of the last installed plugin
plugins_installed_after = set(__get_all_plugin_descriptors())
#using set operations get the difference that is the new plugin
new_plugin = (plugins_installed_after - plugins_installed_before).pop()
return new_plugin
def has_dependencies(plug):
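# A plugin declares extra Python dependencies by shipping a
# requirements.txt inside its module directory; when that file exists this
# returns (True, "pip install -r <path>"), otherwise (False, None).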
global REQUIREMENTS, COMMAND_FOR_PIP_INSTALL
plugin_name = plug[0]
structure = []
if os.path.isfile(resources.PLUGINS_DESCRIPTOR):
structure = json_manager.read_json(resources.PLUGINS_DESCRIPTOR)
PLUGINS = resources.PLUGINS
for p in structure:
if p['name'] == plugin_name:
pd_file = os.path.join(PLUGINS, p['plugin-descriptor'])
p_json = json_manager.read_json(pd_file)
module = p_json.get('module')
#plugin_module/requirements.txt
req_file = os.path.join(os.path.join(PLUGINS, module),
REQUIREMENTS)
if os.path.isfile(req_file):
return (True, COMMAND_FOR_PIP_INSTALL % req_file)
#the plugin was found but no requirement then break!
break
return (False, None)
def update_local_plugin_descriptor(plugins):
'''
updates the local plugin description
The description.json file holds the information about the plugins
downloaded with NINJA-IDE
This is a way to track the versions of the plugins
'''
structure = []
if os.path.isfile(resources.PLUGINS_DESCRIPTOR):
structure = json_manager.read_json(resources.PLUGINS_DESCRIPTOR)
for plug_list in plugins:
#create the plugin data
plug = {}
plug['name'] = plug_list[0]
plug['version'] = plug_list[1]
plug['description'] = plug_list[2]
plug['authors'] = plug_list[3]
plug['home'] = plug_list[4]
plug['download'] = plug_list[5]
plug['plugin-descriptor'] = plug_list[6]
#append the plugin data
structure.append(plug)
json_manager.write_json(structure, resources.PLUGINS_DESCRIPTOR)
def uninstall_plugin(plug):
"""
Uninstall the given plugin
"""
plugin_name = plug[0]
structure = []
if os.path.isfile(resources.PLUGINS_DESCRIPTOR):
structure = json_manager.read_json(resources.PLUGINS_DESCRIPTOR)
#copy the structure because we iterate and remove at the same time
structure_aux = copy.copy(structure)
for plugin in structure_aux:
if plugin["name"] == plugin_name:
fileName = plugin["plugin-descriptor"]
structure.remove(plugin)
break
#open <plugin>.plugin file and get the module to remove
fileName = os.path.join(resources.PLUGINS, fileName)
plugin = json_manager.read_json(fileName)
module = plugin.get('module')
if module:
pluginDir = os.path.join(resources.PLUGINS, module)
folders = [pluginDir]
for root, dirs, files in os.walk(pluginDir):
pluginFiles = [os.path.join(root, f) for f in files]
#remove all files
list(map(os.remove, pluginFiles))
#collect subfolders
folders += [os.path.join(root, d) for d in dirs]
folders.reverse()
for f in folders:
if os.path.isdir(f):
os.removedirs(f)
#remove the plugin_name.plugin file
os.remove(fileName)
#write the new info
json_manager.write_json(structure, resources.PLUGINS_DESCRIPTOR)
###############################################################################
# Module Test
###############################################################################
if __name__ == '__main__':
folders = resources.PLUGINS
services = {}
sl = ServiceLocator(services)
pm = PluginManager(folders, sl)
#There are no plugins yet... let's discover them
pm.discover()
logger.info("listing plugins names...")
for p in pm:
print(p)
logger.info("Activating plugins...")
pm.load_all()
logger.info("Plugins already actives...")
logger.info(pm.get_active_plugins())
| gpl-3.0 | -6,938,131,261,176,964,000 | 32.174359 | 79 | 0.599629 | false |
rodrigolucianocosta/ControleEstoque | rOne/Storage101/django-localflavor/django-localflavor-1.3/localflavor/lv/forms.py | 4 | 2569 | from __future__ import unicode_literals
import re
from datetime import date
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import Field, Select
from django.utils.translation import ugettext_lazy as _
from .lv_choices import MUNICIPALITY_CHOICES
zipcode = re.compile(r'^(LV\s?-\s?)?(?P<code>[1-5]\d{3})$', re.IGNORECASE)
idcode = re.compile(r'^(\d\d)(\d\d)(\d\d)-([0-2])(?:\d{3})(\d)$')
class LVPostalCodeField(Field):
"""
A form field that validates and normalizes Latvian postal codes.
Latvian postal codes are accepted in the following forms:
* XXXX
* LV-XXXX
"""
default_error_messages = {
'invalid': _('Enter a postal code in the format XXXX or LV-XXXX.'),
}
def clean(self, value):
value = super(LVPostalCodeField, self).clean(value)
if value in EMPTY_VALUES:
return ''
match = re.match(zipcode, value)
if not match:
raise ValidationError(self.error_messages['invalid'])
return 'LV-' + match.group('code')
class LVMunicipalitySelect(Select):
"""A select field of Latvian municipalities."""
def __init__(self, attrs=None):
super(LVMunicipalitySelect, self).__init__(attrs, choices=MUNICIPALITY_CHOICES)
class LVPersonalCodeField(Field):
"""A form field that validates input as a Latvian personal code."""
default_error_messages = {
'invalid_format': _('Enter a Latvian personal code in format XXXXXX-XXXXX.'),
'invalid': _('Enter a valid Latvian personal code.'),
}
@staticmethod
def lv_checksum(value):
"""Takes a string of 10 digits as input, returns check digit."""
multipliers = (1, 6, 3, 7, 9, 10, 5, 8, 4, 2)
check = sum(mult * int(c) for mult, c in zip(multipliers, value))
return ((1 - check) % 11) % 10
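# Worked example (digits chosen only to illustrate the arithmetic, not a
# real personal code): for '0123456789' the weighted sum is
# 0*1 + 1*6 + 2*3 + 3*7 + 4*9 + 5*10 + 6*5 + 7*8 + 8*4 + 9*2 = 255,
# and ((1 - 255) % 11) % 10 == 0, so lv_checksum('0123456789') returns 0.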
def clean(self, value):
super(LVPersonalCodeField, self).clean(value)
if value in EMPTY_VALUES:
return ''
match = re.match(idcode, value)
if not match:
raise ValidationError(self.error_messages['invalid_format'])
day, month, year, century, check = map(int, match.groups())
if check != self.lv_checksum(value[0:6] + value[7:11]):
raise ValidationError(self.error_messages['invalid'])
year += 1800 + 100 * century
try:
date(year, month, day)
except ValueError:
raise ValidationError(self.error_messages['invalid'])
return value
| gpl-3.0 | -5,201,829,318,750,747,000 | 29.583333 | 87 | 0.625535 | false |
Raekkeri/gargoyle | runtests.py | 3 | 2012 | #!/usr/bin/env python
"""
runtests
~~~~~~~~
:copyright: (c) 2010 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
import sys
from django.conf import settings
from optparse import OptionParser
if not settings.configured:
settings.configure(
DATABASE_ENGINE='sqlite3',
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3',
},
},
INSTALLED_APPS=[
'django.contrib.auth',
'django.contrib.admin',
'django.contrib.sessions',
'django.contrib.sites',
# Included to fix Disqus' test Django which solves IntegrityMessage case
'django.contrib.contenttypes',
'gargoyle',
'south',
'tests',
],
ROOT_URLCONF='',
DEBUG=False,
TEMPLATE_DEBUG=True,
GARGOYLE_SWITCH_DEFAULTS={
'active_by_default': {
'is_active': True,
'label': 'Default Active',
'description': 'When you want the newness',
},
'inactive_by_default': {
'is_active': False,
'label': 'Default Inactive',
'description': 'Controls the funkiness.',
},
},
SITE_ID=1,
)
from django_nose import NoseTestSuiteRunner
def runtests(*test_args, **kwargs):
if 'south' in settings.INSTALLED_APPS:
from south.management.commands import patch_for_test_db_setup
patch_for_test_db_setup()
if not test_args:
test_args = ['tests']
test_runner = NoseTestSuiteRunner(**kwargs)
failures = test_runner.run_tests(test_args)
sys.exit(failures)
if __name__ == '__main__':
parser = OptionParser()
parser.add_option('--verbosity', dest='verbosity', action='store', default=1, type=int)
parser.add_options(NoseTestSuiteRunner.options)
(options, args) = parser.parse_args()
runtests(*args, **options.__dict__)
| apache-2.0 | 865,979,918,716,822,800 | 25.12987 | 91 | 0.563618 | false |
pombreda/iris-panel | iris/core/migrations/0003_backup_packages.py | 7 | 15369 | # -*- coding: utf-8 -*-
# This file is part of IRIS: Infrastructure and Release Information System
#
# Copyright (C) 2013-2015 Intel Corporation
#
# IRIS is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# version 2.0 as published by the Free Software Foundation.
#pylint: skip-file
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
# Note: Remember to use orm['appname.ModelName'] rather than "from appname.models..."
Backup = orm['core.PackageBackup']
to_be_add = [Backup(name=pack.name,
pid=pack.id,
tid=pack.gittree_id)
for pack in orm['core.Package'].objects.all()]
Backup.objects.bulk_create(to_be_add)
def backwards(self, orm):
"Write your backwards methods here."
orm['core.PackageBackup'].objects.all().delete()
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '225'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.domain': {
'Meta': {'object_name': 'Domain'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'core.domainrole': {
'Meta': {'object_name': 'DomainRole', '_ormbases': [u'auth.Group']},
'domain': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'role_set'", 'to': "orm['core.Domain']"}),
u'group_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.Group']", 'unique': 'True', 'primary_key': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'})
},
'core.gittree': {
'Meta': {'object_name': 'GitTree'},
'gitpath': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'licenses': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['core.License']", 'symmetrical': 'False'}),
'subdomain': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.SubDomain']"})
},
'core.gittreerole': {
'Meta': {'object_name': 'GitTreeRole', '_ormbases': [u'auth.Group']},
'gittree': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'role_set'", 'to': "orm['core.GitTree']"}),
u'group_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.Group']", 'unique': 'True', 'primary_key': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'})
},
'core.image': {
'Meta': {'object_name': 'Image'},
'arch': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.TextField', [], {}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Product']"}),
'target': ('django.db.models.fields.TextField', [], {})
},
'core.imagebuild': {
'Meta': {'object_name': 'ImageBuild'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Image']"}),
'log': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Log']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'name': ('django.db.models.fields.TextField', [], {}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '8'})
},
'core.license': {
'Meta': {'object_name': 'License'},
'fullname': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'shortname': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'text': ('django.db.models.fields.TextField', [], {})
},
'core.log': {
'Meta': {'object_name': 'Log'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'core.package': {
'Meta': {'unique_together': "(('name', 'gittree'),)", 'object_name': 'Package'},
'gittree': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.GitTree']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
'core.packagebackup': {
'Meta': {'object_name': 'PackageBackup'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isdel': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'pid': ('django.db.models.fields.IntegerField', [], {}),
'tid': ('django.db.models.fields.IntegerField', [], {})
},
'core.packagebuild': {
'Meta': {'object_name': 'PackageBuild'},
'arch': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'log': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Log']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'package': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Package']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
'target': ('django.db.models.fields.TextField', [], {})
},
'core.product': {
'Meta': {'object_name': 'Product'},
'description': ('django.db.models.fields.TextField', [], {}),
'gittrees': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['core.GitTree']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
'core.productrole': {
'Meta': {'object_name': 'ProductRole', '_ormbases': [u'auth.Group']},
u'group_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.Group']", 'unique': 'True', 'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Product']"}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'})
},
'core.subdomain': {
'Meta': {'unique_together': "(('name', 'domain'),)", 'object_name': 'SubDomain'},
'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Domain']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
'core.subdomainrole': {
'Meta': {'object_name': 'SubDomainRole', '_ormbases': [u'auth.Group']},
u'group_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.Group']", 'unique': 'True', 'primary_key': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'subdomain': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.SubDomain']"})
},
'core.submission': {
'Meta': {'object_name': 'Submission'},
'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'commit': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'gittree': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['core.GitTree']", 'symmetrical': 'False', 'blank': 'True'}),
'ibuilds': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['core.ImageBuild']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'db_index': 'True'}),
'pbuilds': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['core.PackageBuild']", 'symmetrical': 'False', 'blank': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Product']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '16', 'db_index': 'True'}),
'submitters': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.User']", 'symmetrical': 'False'}),
'testresults': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['core.TestResult']", 'symmetrical': 'False', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'core.submissiongroup': {
'Meta': {'object_name': 'SubmissionGroup'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'db_index': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Product']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'submissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['core.Submission']", 'symmetrical': 'False'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'core.testresult': {
'Meta': {'object_name': 'TestResult'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'log': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Log']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'name': ('django.db.models.fields.TextField', [], {}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '16'})
},
'core.userparty': {
'Meta': {'object_name': 'UserParty', '_ormbases': [u'auth.Group']},
u'group_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.Group']", 'unique': 'True', 'primary_key': 'True'}),
'party': ('django.db.models.fields.CharField', [], {'max_length': '15'})
},
'core.userprofile': {
'Meta': {'object_name': 'UserProfile'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['core']
symmetrical = True
| gpl-2.0 | -3,501,250,292,748,019,700 | 70.483721 | 195 | 0.542456 | false |
Phoenix1369/site | judge/views/organization.py | 1 | 13395 | from itertools import chain
from django import forms
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.core.cache import cache
from django.core.cache.utils import make_template_fragment_key
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.db import transaction
from django.db.models import Count, Max
from django.forms import Form, modelformset_factory
from django.http import HttpResponseRedirect, Http404
from django.shortcuts import get_object_or_404
from django.utils.translation import ugettext as _, ugettext_lazy, ungettext
from django.views.generic import DetailView, ListView, View, UpdateView, FormView
from django.views.generic.detail import SingleObjectMixin, SingleObjectTemplateResponseMixin
from reversion import revisions
from judge.forms import EditOrganizationForm
from judge.models import Organization, OrganizationRequest, Profile
from judge.utils.ranker import ranker
from judge.utils.views import generic_message, TitleMixin
__all__ = ['OrganizationList', 'OrganizationHome', 'OrganizationUsers', 'OrganizationMembershipChange',
'JoinOrganization', 'LeaveOrganization', 'EditOrganization', 'RequestJoinOrganization',
'OrganizationRequestDetail', 'OrganizationRequestView', 'OrganizationRequestLog',
'KickUserWidgetView']
class OrganizationMixin(object):
context_object_name = 'organization'
model = Organization
slug_field = 'key'
slug_url_kwarg = 'key'
def dispatch(self, request, *args, **kwargs):
try:
return super(OrganizationMixin, self).dispatch(request, *args, **kwargs)
except Http404:
key = kwargs.get(self.slug_url_kwarg, None)
if key:
return generic_message(request, _('No such organization'),
_('Could not find an organization with the key "%s".') % key)
else:
return generic_message(request, _('No such organization'),
_('Could not find such organization.'))
def can_edit_organization(self, org=None):
if org is None:
org = self.object
if not self.request.user.is_authenticated:
return False
profile_id = self.request.user.profile.id
return org.admins.filter(id=profile_id).exists() or org.registrant_id == profile_id
class OrganizationList(TitleMixin, ListView):
model = Organization
context_object_name = 'organizations'
template_name = 'organization/list.jade'
title = ugettext_lazy('Organizations')
class OrganizationHome(OrganizationMixin, DetailView):
template_name = 'organization/home.jade'
def get_context_data(self, **kwargs):
context = super(OrganizationHome, self).get_context_data(**kwargs)
context['title'] = self.object.name
context['can_edit'] = self.can_edit_organization()
return context
class OrganizationUsers(OrganizationMixin, DetailView):
template_name = 'organization/users.jade'
def get_context_data(self, **kwargs):
context = super(OrganizationUsers, self).get_context_data(**kwargs)
context['title'] = _('%s Members') % self.object.name
context['users'] = ranker(chain(*[
i.select_related('user').defer('about') for i in (
self.object.members.filter(submission__points__gt=0).order_by('-performance_points')
.annotate(problems=Count('submission__problem', distinct=True)),
self.object.members.annotate(problems=Max('submission__points')).filter(problems=0),
self.object.members.annotate(problems=Count('submission__problem', distinct=True)).filter(problems=0),
)
]))
context['partial'] = True
context['is_admin'] = self.can_edit_organization()
context['kick_url'] = reverse('organization_user_kick', args=[self.object.key])
return context
class OrganizationMembershipChange(LoginRequiredMixin, OrganizationMixin, SingleObjectMixin, View):
def get(self, request, *args, **kwargs):
org = self.get_object()
response = self.handle(request, org, request.user.profile)
if response is not None:
return response
return HttpResponseRedirect(reverse('organization_home', args=(org.key,)))
def handle(self, request, org, profile):
raise NotImplementedError()
class JoinOrganization(OrganizationMembershipChange):
def handle(self, request, org, profile):
if profile.organizations.filter(id=org.id).exists():
return generic_message(request, _('Joining organization'), _('You are already in the organization.'))
if not org.is_open:
return generic_message(request, _('Joining organization'), _('This organization is not open.'))
profile.organizations.add(org)
profile.save()
cache.delete(make_template_fragment_key('org_member_count', (org.id,)))
class LeaveOrganization(OrganizationMembershipChange):
def handle(self, request, org, profile):
if not profile.organizations.filter(id=org.id).exists():
return generic_message(request, _('Leaving organization'), _('You are not in "%s".') % org.key)
profile.organizations.remove(org)
cache.delete(make_template_fragment_key('org_member_count', (org.id,)))
class OrganizationRequestForm(Form):
reason = forms.CharField(widget=forms.Textarea)
class RequestJoinOrganization(LoginRequiredMixin, SingleObjectMixin, FormView):
model = Organization
slug_field = 'key'
slug_url_kwarg = 'key'
template_name = 'organization/requests/request.jade'
form_class = OrganizationRequestForm
def dispatch(self, request, *args, **kwargs):
self.object = self.get_object()
return super(RequestJoinOrganization, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(RequestJoinOrganization, self).get_context_data(**kwargs)
if self.object.is_open:
raise Http404()
context['title'] = _('Request to join %s') % self.object.name
return context
def form_valid(self, form):
request = OrganizationRequest()
request.organization = self.get_object()
request.user = self.request.user.profile
request.reason = form.cleaned_data['reason']
request.state = 'P'
request.save()
return HttpResponseRedirect(reverse('request_organization_detail', args=(request.organization.key, request.id)))
class OrganizationRequestDetail(LoginRequiredMixin, TitleMixin, DetailView):
model = OrganizationRequest
template_name = 'organization/requests/detail.jade'
title = ugettext_lazy('Join request detail')
def get_object(self, queryset=None):
object = super(OrganizationRequestDetail, self).get_object(queryset)
profile = self.request.user.profile
if object.user_id != profile.id and not object.organization.admins.filter(id=profile.id).exists():
raise PermissionDenied()
return object
OrganizationRequestFormSet = modelformset_factory(
OrganizationRequest, extra=0, fields=('state',), can_delete=True
)
class OrganizationRequestBaseView(LoginRequiredMixin, SingleObjectTemplateResponseMixin, SingleObjectMixin, View):
model = Organization
slug_field = 'key'
slug_url_kwarg = 'key'
tab = None
def get_object(self, queryset=None):
organization = super(OrganizationRequestBaseView, self).get_object(queryset)
if not organization.admins.filter(id=self.request.user.profile.id).exists():
raise PermissionDenied()
return organization
def get_context_data(self, **kwargs):
context = super(OrganizationRequestBaseView, self).get_context_data(**kwargs)
context['title'] = _('Managing join requests for %s') % self.object.name
context['tab'] = self.tab
return context
class OrganizationRequestView(OrganizationRequestBaseView):
template_name = 'organization/requests/pending.jade'
tab = 'pending'
def get_context_data(self, **kwargs):
context = super(OrganizationRequestView, self).get_context_data(**kwargs)
context['formset'] = self.formset
return context
def get(self, request, *args, **kwargs):
self.object = self.get_object()
self.formset = OrganizationRequestFormSet(
queryset=OrganizationRequest.objects.filter(state='P', organization=self.object)
)
context = self.get_context_data(object=self.object)
return self.render_to_response(context)
def post(self, request, *args, **kwargs):
self.object = organization = self.get_object()
self.formset = formset = OrganizationRequestFormSet(request.POST, request.FILES)
if formset.is_valid():
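# Enforce the membership cap: count how many non-deleted forms in this
# batch are marked 'A' (approved) and reject the whole batch if that
# exceeds the organization's remaining free slots.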
if organization.slots is not None:
deleted_set = set(formset.deleted_forms)
to_approve = sum(form.cleaned_data['state'] == 'A' for form in formset.forms if form not in deleted_set)
can_add = organization.slots - organization.members.count()
if to_approve > can_add:
messages.error(request, _('Your organization can only receive %d more members. '
'You cannot approve %d users.') % (can_add, to_approve))
return self.render_to_response(self.get_context_data(object=organization))
approved, rejected = 0, 0
for obj in formset.save():
if obj.state == 'A':
obj.user.organizations.add(obj.organization)
approved += 1
elif obj.state == 'R':
rejected += 1
messages.success(request,
ungettext('Approved %d user.', 'Approved %d users.', approved) % approved + '\n' +
ungettext('Rejected %d user.', 'Rejected %d users.', rejected) % rejected)
return HttpResponseRedirect(request.get_full_path())
context = self.get_context_data(object=organization)
return self.render_to_response(context)
put = post
class OrganizationRequestLog(OrganizationRequestBaseView):
states = ('A', 'R')
tab = 'log'
template_name = 'organization/requests/log.jade'
def get(self, request, *args, **kwargs):
self.object = self.get_object()
context = self.get_context_data(object=self.object)
return self.render_to_response(context)
def get_context_data(self, **kwargs):
context = super(OrganizationRequestLog, self).get_context_data(**kwargs)
context['requests'] = self.object.requests.filter(state__in=self.states)
return context
class EditOrganization(LoginRequiredMixin, TitleMixin, OrganizationMixin, UpdateView):
template_name = 'organization/edit.jade'
model = Organization
form_class = EditOrganizationForm
def get_title(self):
return _('Editing %s') % self.object.name
def get_object(self, queryset=None):
object = super(EditOrganization, self).get_object()
if not self.can_edit_organization(object):
raise PermissionDenied()
return object
def get_form(self, form_class=None):
form = super(EditOrganization, self).get_form(form_class)
form.fields['admins'].queryset = self.object.members.all()
return form
def form_valid(self, form):
with transaction.atomic(), revisions.create_revision():
revisions.set_comment(_('Edited from site'))
revisions.set_user(self.request.user)
return super(EditOrganization, self).form_valid(form)
def dispatch(self, request, *args, **kwargs):
try:
return super(EditOrganization, self).dispatch(request, *args, **kwargs)
except PermissionDenied:
return generic_message(request, _("Can't edit organization"),
_('You are not allowed to edit this organization.'), status=403)
class KickUserWidgetView(LoginRequiredMixin, OrganizationMixin, View):
def post(self, request, *args, **kwargs):
organization = get_object_or_404(Organization, key=kwargs['key'])
if not self.can_edit_organization(organization):
return generic_message(request, _("Can't edit organization"),
_('You are not allowed to kick people from this organization.'), status=403)
try:
user = Profile.objects.get(id=request.POST.get('user', None))
except Profile.DoesNotExist:
return generic_message(request, _("Can't kick user"),
_('The user you are trying to kick does not exist!'), status=400)
if not organization.members.filter(id=user.id).exists():
return generic_message(request, _("Can't kick user"),
_('The user you are trying to kick is not in organization: %s.') %
organization.name, status=400)
organization.members.remove(user)
return HttpResponseRedirect(reverse('organization_users', args=[organization.key]))
| agpl-3.0 | -3,961,480,037,890,479,600 | 42.209677 | 120 | 0.657857 | false |
shoopio/shoop | shuup_tests/simple_cms/test_page_delete.py | 1 | 1846 | # This file is part of Shuup.
#
# Copyright (c) 2012-2019, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
import json
import pytest
from django.core.urlresolvers import reverse
from shuup.simple_cms.admin_module.views import PageDeleteView, PageListView
from shuup.simple_cms.models import Page
from shuup.testing.factories import get_default_shop
from shuup.testing.utils import apply_request_middleware
from shuup_tests.simple_cms.utils import create_page
@pytest.mark.django_db
def test_page_delete(rf, admin_user):
request = apply_request_middleware(rf.post("/"), user=admin_user)
page = create_page(url="bacon", shop=get_default_shop())
assert Page.objects.filter(pk=page.pk).not_deleted().exists() is True
delete_view = PageDeleteView.as_view()
response = delete_view(request, **{"pk": page.pk})
assert response.status_code == 302
assert response.url == reverse("shuup_admin:simple_cms.page.list")
assert Page.objects.filter(pk=page.pk).not_deleted().exists() is False
page_two = create_page(url="bacon", shop=get_default_shop())
assert Page.objects.filter(pk=page_two.pk).exists()
@pytest.mark.django_db
def test_ensure_deleted_inlist(rf, admin_user):
page = create_page(url="bacon", shop=get_default_shop())
list_view = PageListView.as_view()
request = apply_request_middleware(rf.get("/", {"jq": json.dumps({"perPage": 100, "page": 1})}), user=admin_user)
response = list_view(request)
data = json.loads(response.content.decode("utf-8"))
assert data["pagination"]["nItems"] == 1
page.soft_delete()
response = list_view(request)
data = json.loads(response.content.decode("utf-8"))
assert data["pagination"]["nItems"] == 0
| agpl-3.0 | -3,763,854,880,133,976,000 | 35.92 | 117 | 0.714518 | false |
akesandgren/easybuild-easyblocks | easybuild/easyblocks/s/shrimp.py | 3 | 3238 | ##
# Copyright 2009-2021 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for SHRiMP, implemented as an easyblock
@author: Kenneth Hoste (Ghent University)
"""
import glob
import os
import shutil
import easybuild.tools.environment as env
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.tools.build_log import EasyBuildError
class EB_SHRiMP(ConfigureMake):
"""Support for building SHRiMP."""
def configure_step(self):
"""Add openmp compilation flag to CXX_FLAGS."""
cxxflags = os.getenv('CXXFLAGS')
env.setvar('CXXFLAGS', "%s %s" % (cxxflags, self.toolchain.get_flag('openmp')))
def install_step(self):
"""Install SHRiMP by copying files to install dir, and fix permissions."""
try:
for d in ["bin", "utils"]:
shutil.copytree(d, os.path.join(self.installdir, d))
cwd = os.getcwd()
os.chdir(os.path.join(self.installdir, 'utils'))
for f in glob.glob("*.py"):
self.log.info("Fixing permissions of %s in utils" % f)
os.chmod(f, 0o755)
os.chdir(cwd)
except OSError as err:
raise EasyBuildError("Failed to copy files to install dir: %s", err)
def sanity_check_step(self):
"""Custom sanity check for SHRiMP."""
custom_paths = {
'files': ['bin/%s' % x for x in ['fasta2fastq', 'gmapper', 'mergesam',
'prettyprint', 'probcalc', 'probcalc_mp',
'shrimp2sam', 'shrimp_var']],
'dirs': ['utils'],
}
super(EB_SHRiMP, self).sanity_check_step(custom_paths=custom_paths)
def make_module_req_guess(self):
"""Add both 'bin' and 'utils' directories to PATH."""
guesses = super(EB_SHRiMP, self).make_module_req_guess()
guesses.update({'PATH': ['bin', 'utils']})
return guesses
def make_module_extra(self):
"""Set SHRIMP_FOLDER environment variable in module."""
txt = super(EB_SHRiMP, self).make_module_extra()
txt += self.module_generator.set_environment('SHRIMP_FOLDER', self.installdir)
return txt
| gpl-2.0 | 8,436,672,497,839,907,000 | 32.729167 | 96 | 0.64021 | false |
adamrp/qiime | tests/test_adjust_seq_orientation.py | 15 | 2314 | #!/usr/bin/env python
# File created on 07 Oct 2009.
from __future__ import division
__author__ = "Greg Caporaso"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["Greg Caporaso"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Greg Caporaso"
__email__ = "[email protected]"
from unittest import TestCase, main
from skbio.parse.sequences import parse_fasta
from qiime.adjust_seq_orientation import rc_fasta_lines, null_seq_desc_mapper,\
append_rc
class AdjustSeqOrientationTests(TestCase):
""" """
def setUp(self):
""" """
self.fasta_lines1 = fasta_lines1.split('\n')
self.fasta_lines1_mixed_case = fasta_lines1_mixed_case.split('\n')
self.fasta_lines1_exp = list(parse_fasta(
fasta_lines1_exp.split('\n')))
self.fasta_lines1_mixed_case_exp = list(parse_fasta(
fasta_lines1_mixed_case_exp.split('\n')))
self.fasta_lines1_exp_null_desc_mapper = list(parse_fasta(
fasta_lines1_exp_null_desc_mapper.split('\n')))
def test_rc_fasta_lines(self):
"""rc_fasta_lines: functions as expected w/ seq_id mapping
"""
self.assertEqual(list(rc_fasta_lines(self.fasta_lines1, append_rc)),
self.fasta_lines1_exp)
def test_rc_fasta_lines_mixed_case(self):
"""rc_fasta_lines: functions with mixed cases in sequences
"""
self.assertEqual(list(
rc_fasta_lines(self.fasta_lines1_mixed_case, append_rc)),
self.fasta_lines1_mixed_case_exp)
def test_rc_fasta_lines_leave_seq_desc(self):
"""rc_fasta_lines: functions as expected w/o seq_id mapping
"""
self.assertEqual(list(
rc_fasta_lines(self.fasta_lines1, null_seq_desc_mapper)),
self.fasta_lines1_exp_null_desc_mapper)
fasta_lines1 = """>s1 some description
AAATGGCGCGCG
>s2
TTATATCCGC
"""
fasta_lines1_mixed_case = """>s1 some description
aaatGGcgcgcg
>s2
ttatatccgc
"""
fasta_lines1_exp = """>s1 some description RC
CGCGCGCCATTT
>s2 RC
GCGGATATAA
"""
fasta_lines1_mixed_case_exp = """>s1 some description RC
CGCGCGCCATTT
>s2 RC
GCGGATATAA
"""
fasta_lines1_exp_null_desc_mapper = """>s1 some description
CGCGCGCCATTT
>s2
GCGGATATAA
"""
if __name__ == "__main__":
main()
| gpl-2.0 | -7,614,032,355,520,860,000 | 25 | 79 | 0.641746 | false |
sujithvm/red-alert | src/RIP_SNIP_curve.py | 3 | 3153 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
def poly_fit(x,y,deg):
#POLYNOMIAL FIT
# calculate polynomial
z = np.polyfit(x, y, deg)
f = np.poly1d(z)
# calculate new x's and y's
x_new = np.linspace(np.amin(x), np.amax(x), 50)
y_new = f(x_new)
plt.plot(x,y,'o', x_new, y_new)
plt.xlim([np.amin(x), np.amax(x) ])
plt.legend()
plt.show()
def func(x, a, b, c):
return a * np.exp(-b * x) + c
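# Note: curvefit() below fits this three-parameter exponential decay model to
# noisy samples generated from the model itself via scipy.optimize.curve_fit.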
def curvefit():
x = np.linspace(0,4,50)
y = func(x, 2.5, 1.3, 0.5)
yn = y + 0.2*np.random.normal(size=len(x))
popt, pcov = curve_fit(func, x, yn)
plt.figure()
plt.plot(x, yn, 'ko', label="Original Noised Data")
plt.plot(x, func(x, *popt), 'r-', label="Fitted Curve")
plt.legend()
plt.show()
def scatter_plot(journals,SNIP_year,IPP_year):
journals.plot(kind='scatter', x=SNIP_year,y=IPP_year)
plt.legend()
plt.show()
list_of_cols = [1,2,38,40,41,43,44,46,47,49,50,52,53] #2009-2014
list_of_cols = [1,7,8,10,11,13,14,16,17,19,20,22,23,25,26,28,29,31,32,34,35,38,40,41,43,44,46,47,49,50,52,53] #1999-2014
#SNIP = pd.read_excel(io="../data/journal_SNIP_values.xlsx",parse_cols=list_of_cols, skip_footer=0)
SNIP = pd.read_csv("../data/journal_SNIP_values.csv")
SNIP = SNIP.fillna(0) # removing all np.nan values because (np.nan == np.nan) is FALSE
i = 0
jnames = []
journals = pd.DataFrame()
for index, row in SNIP.iterrows():
if(1):
#if ( (row['2010 SNIP'] != 0) and (row['2011 SNIP'] != 0) and (row['2012 SNIP'] != 0) and (row['2013 SNIP'] != 0) and (row['2014 SNIP'] != 0) ) : #checking if that journal has SNIP values
#print "[DEBUG]" + row['Source Title']
jnames.append(row['Source Title'])
journals = journals.append( SNIP[SNIP['Source Title'] == row['Source Title'] ], ignore_index = True )
i = i+ 1
print i
#print journals
#journals = journals.rename(columns= lambda x: x.replace('SNIP', '')) #removing 'SNIP' from 'XXXX SNIP'
#journals = journals.rename(columns= lambda x: x.replace('IPP', '')) #removing 'IPP' from 'XXXX IPP'
#journals.drop(journals.columns[[1]], inplace=True, axis=1) # removing print ISSN column
#journals = pd.melt(journals, id_vars='Source Title',var_name="SNIP Year", value_name="Value") # converting columns to rows
#journals.drop(journals.columns[[1]], inplace=True, axis=1) # removing print ISSN column
journals.drop(journals.columns[[0]],inplace=True,axis=1) #removing Source Title column
print journals
journals = pd.DataFrame(journals,dtype=float) # converting 'SNIP Year','Value' column into float type
journals.to_csv("../data/SNIP_IPP_1994to2014_values")
'''
plt.plot(SNIP['2014 SNIP'],SNIP['2014 IPP'],'ro')
plt.plot(SNIP['2013 SNIP'],SNIP['2013 IPP'],'bo')
plt.plot(SNIP['2012 SNIP'],SNIP['2012 IPP'],'go')
plt.plot(SNIP['2011 SNIP'],SNIP['2011 IPP'],'yo')
plt.plot(SNIP['2010 SNIP'],SNIP['2010 IPP'],'bo')
plt.axis([-5, 40, -5, 40])
plt.legend()
plt.show()
'''
#poly_fit(x,y,4)
#scatter_plot(df,'2011 SNIP','2011 IPP')
#curvefit()
#journals.to_csv("../data/journal_SNIP_info.csv",columns = (1,2))
| mit | 1,565,293,112,962,755,300 | 29.61165 | 191 | 0.639708 | false |
zhengwsh/InplusTrader_Linux | rqalpha/examples/test_pt.py | 5 | 1292 | import time
import talib
# Write any initialization logic in this method. The context object will be
# passed between all the methods of your algorithm strategy.
def init(context):
context.s1 = "000001.XSHE"
    # Set the parameters used by this strategy; they can be accessed anywhere
    # in the strategy. This strategy uses long and short moving averages, so
    # we define the long and short windows here; when tuning for the best
    # windows, only these values need to be changed.
context.SHORTPERIOD = 20
context.LONGPERIOD = 120
context.count = 0
print("init")
def before_trading(context):
print("before_trading", context.count)
time.sleep(1)
# Data updates for the securities you selected will trigger this logic, e.g.
# daily or minute historical data slices, or real-time data slice updates.
def handle_bar(context, bar_dict):
print("handle_bar", context.count)
context.count += 1
print(context.count, bar_dict["000001.XSHE"].close)
print(context.count, bar_dict["000001.XSHG"].close)
print(current_snapshot("000001.XSHE").last)
print(current_snapshot("000001.XSHG").last)
order_shares("000001.XSHE", 100)
order_shares("000001.XSHE", -100)
print(context.portfolio)
print(context.portfolio.positions)
def after_trading(context):
print("after_trading", context.count)
| mit | -1,169,475,229,002,547,700 | 22.756098 | 83 | 0.717659 | false |
openstack/cinder | cinder/volume/drivers/solidfire.py | 2 | 130312 | # All Rights Reserved.
# Copyright 2013 SolidFire Inc
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import inspect
import json
import math
import re
import socket
import string
import time
import warnings
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import excutils
from oslo_utils import timeutils
from oslo_utils import units
import requests
import six
from cinder import context
from cinder import exception
from cinder.i18n import _
from cinder import interface
from cinder.objects import fields
from cinder import utils
from cinder.volume import configuration
from cinder.volume.drivers.san import san
from cinder.volume import qos_specs
from cinder.volume.targets import iscsi as iscsi_driver
from cinder.volume import volume_types
from cinder.volume import volume_utils
LOG = logging.getLogger(__name__)
sf_opts = [
cfg.BoolOpt('sf_emulate_512',
default=True,
help='Set 512 byte emulation on volume creation; '),
cfg.BoolOpt('sf_allow_tenant_qos',
default=False,
help='Allow tenants to specify QOS on create'),
cfg.StrOpt('sf_account_prefix',
help='Create SolidFire accounts with this prefix. Any string '
'can be used here, but the string \"hostname\" is special '
'and will create a prefix using the cinder node hostname '
'(previous default behavior). The default is NO prefix.'),
cfg.StrOpt('sf_volume_prefix',
default='UUID-',
help='Create SolidFire volumes with this prefix. Volume names '
'are of the form <sf_volume_prefix><cinder-volume-id>. '
'The default is to use a prefix of \'UUID-\'.'),
cfg.StrOpt('sf_svip',
help='Overrides default cluster SVIP with the one specified. '
                    'This is required for deployments that have implemented '
'the use of VLANs for iSCSI networks in their cloud.'),
cfg.PortOpt('sf_api_port',
default=443,
help='SolidFire API port. Useful if the device api is behind '
'a proxy on a different port.'),
cfg.BoolOpt('sf_enable_vag',
default=False,
help='Utilize volume access groups on a per-tenant basis.'),
cfg.StrOpt('sf_provisioning_calc',
default='maxProvisionedSpace',
choices=['maxProvisionedSpace', 'usedSpace'],
help='Change how SolidFire reports used space and '
'provisioning calculations. If this parameter is set to '
'\'usedSpace\', the driver will report correct '
'values as expected by Cinder '
'thin provisioning.'),
cfg.IntOpt('sf_cluster_pairing_timeout',
default=60,
min=3,
help='Sets time in seconds to wait for clusters to complete '
'pairing.'),
cfg.IntOpt('sf_volume_pairing_timeout',
default=3600,
min=30,
help='Sets time in seconds to wait for a migrating volume to '
'complete pairing and sync.'),
cfg.IntOpt('sf_api_request_timeout',
default=30,
min=30,
help='Sets time in seconds to wait for an api request to '
'complete.'),
cfg.IntOpt('sf_volume_clone_timeout',
default=600,
min=60,
help='Sets time in seconds to wait for a clone of a volume or '
'snapshot to complete.'
),
cfg.IntOpt('sf_volume_create_timeout',
default=60,
min=30,
help='Sets time in seconds to wait for a create volume '
'operation to complete.')]
CONF = cfg.CONF
CONF.register_opts(sf_opts, group=configuration.SHARED_CONF_GROUP)
# SolidFire API Error Constants
xExceededLimit = 'xExceededLimit'
xAlreadyInVolumeAccessGroup = 'xAlreadyInVolumeAccessGroup'
xVolumeAccessGroupIDDoesNotExist = 'xVolumeAccessGroupIDDoesNotExist'
xNotInVolumeAccessGroup = 'xNotInVolumeAccessGroup'
class SolidFireAPIException(exception.VolumeBackendAPIException):
message = _("Bad response from SolidFire API")
class SolidFireDriverException(exception.VolumeDriverException):
message = _("SolidFire Cinder Driver exception")
class SolidFireAPIDataException(SolidFireAPIException):
message = _("Error in SolidFire API response: data=%(data)s")
class SolidFireAccountNotFound(SolidFireDriverException):
message = _("Unable to locate account %(account_name)s in "
"SolidFire cluster")
class SolidFireVolumeNotFound(SolidFireDriverException):
message = _("Unable to locate volume id %(volume_id)s in "
"SolidFire cluster")
class SolidFireRetryableException(exception.VolumeBackendAPIException):
message = _("Retryable SolidFire Exception encountered")
class SolidFireReplicationPairingError(exception.VolumeBackendAPIException):
message = _("Error on SF Keys")
class SolidFireDataSyncTimeoutError(exception.VolumeBackendAPIException):
message = _("Data sync volumes timed out")
class SolidFireDuplicateVolumeNames(SolidFireDriverException):
message = _("Volume name [%(vol_name)s] already exists "
"in the SolidFire backend.")
def retry(exc_tuple, tries=5, delay=1, backoff=2):
def retry_dec(f):
@six.wraps(f)
def func_retry(*args, **kwargs):
_tries, _delay = tries, delay
while _tries > 1:
try:
return f(*args, **kwargs)
except exc_tuple:
time.sleep(_delay)
_tries -= 1
_delay *= backoff
LOG.debug('Retrying %(args)s, %(tries)s attempts '
'remaining...',
{'args': args, 'tries': _tries})
# NOTE(jdg): Don't log the params passed here
# some cmds like createAccount will have sensitive
# info in the params, grab only the second tuple
# which should be the Method
msg = (_('Retry count exceeded for command: %s') %
(args[1],))
LOG.error(msg)
raise SolidFireAPIException(message=msg)
return func_retry
return retry_dec
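# Usage note: `retry` is applied below to `_issue_api_request` as
# `@retry(retry_exc_tuple, tries=6)`. Each failed attempt sleeps `delay`
# seconds and then multiplies the delay by `backoff`, so with tries=6,
# delay=1 and backoff=2 the driver makes five attempts, sleeping 1, 2, 4, 8
# and 16 seconds after successive failures before raising
# SolidFireAPIException.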
def locked_image_id_operation(f, external=False):
def lvo_inner1(inst, *args, **kwargs):
lock_tag = inst.driver_prefix
call_args = inspect.getcallargs(f, inst, *args, **kwargs)
if call_args.get('image_meta'):
image_id = call_args['image_meta']['id']
else:
err_msg = _('The decorated method must accept image_meta.')
raise exception.VolumeBackendAPIException(data=err_msg)
@utils.synchronized('%s-%s' % (lock_tag, image_id),
external=external)
def lvo_inner2():
return f(inst, *args, **kwargs)
return lvo_inner2()
return lvo_inner1
def locked_source_id_operation(f, external=False):
def lvo_inner1(inst, *args, **kwargs):
lock_tag = inst.driver_prefix
call_args = inspect.getcallargs(f, inst, *args, **kwargs)
src_arg = call_args.get('source', None)
if src_arg and src_arg.get('id', None):
source_id = call_args['source']['id']
else:
err_msg = _('The decorated method must accept src_uuid.')
raise exception.VolumeBackendAPIException(message=err_msg)
@utils.synchronized('%s-%s' % (lock_tag, source_id),
external=external)
def lvo_inner2():
return f(inst, *args, **kwargs)
return lvo_inner2()
return lvo_inner1
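# Note: these two decorators serialize operations per image id / source
# volume id via utils.synchronized; `locked_image_id_operation` is applied to
# `clone_image` further below so concurrent clones of the same image do not
# race each other.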
@interface.volumedriver
class SolidFireDriver(san.SanISCSIDriver):
"""OpenStack driver to enable SolidFire cluster.
.. code-block:: default
Version history:
1.0 - Initial driver
1.1 - Refactor, clone support, qos by type and minor bug fixes
1.2 - Add xfr and retype support
1.2.1 - Add export/import support
1.2.2 - Catch VolumeNotFound on accept xfr
2.0.0 - Move from httplib to requests
2.0.1 - Implement SolidFire Snapshots
2.0.2 - Implement secondary account
2.0.3 - Implement cluster pairing
2.0.4 - Implement volume replication
2.0.5 - Try and deal with the stupid retry/clear issues from objects
and tflow
2.0.6 - Add a lock decorator around the clone_image method
2.0.7 - Add scaled IOPS
2.0.8 - Add active status filter to get volume ops
2.0.9 - Always purge on delete volume
2.0.10 - Add response to debug on retryable errors
2.0.11 - Add ability to failback replicating volumes
2.0.12 - Fix bug #1744005
2.0.14 - Fix bug #1782588 qos settings on extend
2.0.15 - Fix bug #1834013 NetApp SolidFire replication errors
2.0.16 - Add options for replication mode (Async, Sync and
SnapshotsOnly)
2.0.17 - Fix bug #1859653 SolidFire fails to failback when volume
service is restarted
2.1.0 - Add Cinder Active/Active support
- Enable Active/Active support flag
- Implement Active/Active replication support
2.2.0 - Add storage assisted volume migration support
2.2.1 - Fix bug #1891914 fix error on cluster workload rebalancing
by adding xNotPrimary to the retryable exception list
2.2.2 - Fix bug #1896112 SolidFire Driver creates duplicate volume
when API response is lost
"""
VERSION = '2.2.2'
SUPPORTS_ACTIVE_ACTIVE = True
# ThirdPartySystems wiki page
CI_WIKI_NAME = "NetApp_SolidFire_CI"
driver_prefix = 'solidfire'
sf_qos_dict = {'slow': {'minIOPS': 100,
'maxIOPS': 200,
'burstIOPS': 200},
'medium': {'minIOPS': 200,
'maxIOPS': 400,
'burstIOPS': 400},
'fast': {'minIOPS': 500,
'maxIOPS': 1000,
'burstIOPS': 1000},
'performant': {'minIOPS': 2000,
'maxIOPS': 4000,
'burstIOPS': 4000},
'off': None}
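    # Note: these presets are selected per volume via the 'sf-qos' metadata
    # key when sf_allow_tenant_qos is enabled (see _set_qos_presets below).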
sf_qos_keys = ['minIOPS', 'maxIOPS', 'burstIOPS']
sf_scale_qos_keys = ['scaledIOPS', 'scaleMin', 'scaleMax', 'scaleBurst']
sf_iops_lim_min = {'minIOPS': 100, 'maxIOPS': 100, 'burstIOPS': 100}
sf_iops_lim_max = {'minIOPS': 15000,
'maxIOPS': 200000,
'burstIOPS': 200000}
cluster_stats = {}
retry_exc_tuple = (SolidFireRetryableException,
requests.exceptions.ConnectionError)
retryable_errors = ['xDBVersionMismatch',
'xMaxSnapshotsPerVolumeExceeded',
'xMaxClonesPerVolumeExceeded',
'xMaxSnapshotsPerNodeExceeded',
'xMaxClonesPerNodeExceeded',
'xSliceNotRegistered',
'xNotReadyForIO',
'xNotPrimary']
def __init__(self, *args, **kwargs):
super(SolidFireDriver, self).__init__(*args, **kwargs)
self.failed_over_id = kwargs.get('active_backend_id', None)
self.replication_status = kwargs.get('replication_status', "na")
self.configuration.append_config_values(sf_opts)
self.template_account_id = None
self.max_volumes_per_account = 1990
self.volume_map = {}
self.cluster_pairs = []
self.replication_enabled = False
self.failed_over = False
self.verify_ssl = self.configuration.driver_ssl_cert_verify
self.target_driver = SolidFireISCSI(solidfire_driver=self,
configuration=self.configuration)
self._check_replication_configs()
# If we're failed over, we need to parse things out and set the active
# cluster appropriately
if self.failed_over_id:
LOG.info("Running on failed-over mode. "
"Active backend-id: %s", self.failed_over_id)
repl_target = self.configuration.get('replication_device', [])
if not repl_target:
LOG.error('Failed to initialize SolidFire driver to '
'a remote cluster specified at id: %s',
self.failed_over_id)
raise SolidFireDriverException
remote_endpoint = self._build_repl_endpoint_info(
**repl_target[0])
self.active_cluster = self._create_cluster_reference(
remote_endpoint)
self.failed_over = True
self.replication_enabled = True
else:
self.active_cluster = self._create_cluster_reference()
if self.configuration.replication_device:
self._set_cluster_pairs()
self.replication_enabled = True
LOG.debug("Active cluster: %s", self.active_cluster)
# NOTE(jdg): This works even in a failed over state, because what we
# do is use self.active_cluster in issue_api_request so by default we
# always use the currently active cluster, override that by providing
# an endpoint to issue_api_request if needed
try:
self._update_cluster_status()
except SolidFireAPIException:
pass
@classmethod
def get_driver_options(cls):
additional_opts = cls._get_oslo_driver_opts(
'san_ip', 'san_login', 'san_password', 'driver_ssl_cert_verify',
'replication_device', 'reserved_percentage',
'max_over_subscription_ratio')
return sf_opts + additional_opts
def _init_vendor_properties(self):
properties = {}
self._set_property(
properties,
"solidfire:replication_mode",
"Replication mode",
_("Specifies replication mode."),
"string",
enum=["Async", "Sync", "SnapshotsOnly"])
return properties, 'solidfire'
def __getattr__(self, attr):
if hasattr(self.target_driver, attr):
return getattr(self.target_driver, attr)
else:
msg = _('Attribute: %s not found.') % attr
raise NotImplementedError(msg)
def _get_remote_info_by_id(self, backend_id):
remote_info = None
for rd in self.configuration.get('replication_device', []):
if rd.get('backend_id', None) == backend_id:
remote_endpoint = self._build_endpoint_info(**rd)
remote_info = self._get_cluster_info(remote_endpoint)
remote_info['endpoint'] = remote_endpoint
if not remote_info['endpoint']['svip']:
remote_info['endpoint']['svip'] = (
remote_info['svip'] + ':3260')
return remote_info
def _create_remote_pairing(self, remote_device):
try:
pairing_info = self._issue_api_request('StartClusterPairing',
{}, version='8.0')['result']
pair_id = self._issue_api_request(
'CompleteClusterPairing',
{'clusterPairingKey': pairing_info['clusterPairingKey']},
version='8.0',
endpoint=remote_device['endpoint'])['result']['clusterPairID']
except SolidFireAPIException as ex:
if 'xPairingAlreadyExists' in ex.msg:
LOG.debug('Pairing already exists during init.')
else:
with excutils.save_and_reraise_exception():
LOG.error('Cluster pairing failed: %s', ex.msg)
LOG.debug('Initialized Cluster pair with ID: %s', pair_id)
return pair_id
def _get_cluster_info(self, remote_endpoint):
try:
return self._issue_api_request(
'GetClusterInfo', {},
endpoint=remote_endpoint)['result']['clusterInfo']
except SolidFireAPIException:
msg = _("Replication device is unreachable!")
LOG.exception(msg)
raise
def _check_replication_configs(self):
repl_configs = self.configuration.replication_device
if not repl_configs:
return
# We only support one replication target. Checking if the user is
# trying to add more than one;
if len(repl_configs) > 1:
msg = _("SolidFire driver only supports one replication target "
"device.")
LOG.error(msg)
raise SolidFireDriverException(msg)
repl_configs = repl_configs[0]
# Check if the user is not using the same MVIP as source
# and replication target.
if repl_configs['mvip'] == self.configuration.san_ip:
msg = _("Source mvip cannot be the same "
"as the replication target.")
LOG.error(msg)
raise SolidFireDriverException(msg)
def _set_cluster_pairs(self):
repl_configs = self.configuration.replication_device[0]
remote_endpoint = self._build_repl_endpoint_info(**repl_configs)
remote_cluster = self._create_cluster_reference(remote_endpoint)
remote_cluster['backend_id'] = repl_configs['backend_id']
cluster_pair = self._get_or_create_cluster_pairing(
remote_cluster, check_connected=True)
remote_cluster['clusterPairID'] = cluster_pair['clusterPairID']
if self.cluster_pairs:
self.cluster_pairs.clear()
self.cluster_pairs.append(remote_cluster)
def _get_cluster_pair(self, remote_cluster):
existing_pairs = self._issue_api_request(
'ListClusterPairs', {}, version='8.0')['result']['clusterPairs']
LOG.debug("Existing cluster pairs: %s", existing_pairs)
remote_pair = None
for ep in existing_pairs:
if remote_cluster['mvip'] == ep['mvip']:
remote_pair = ep
LOG.debug("Found remote pair: %s", remote_pair)
break
return remote_pair
def _get_or_create_cluster_pairing(self, remote_cluster,
check_connected=False):
# FIXME(sfernand): We check for pairs only in the remote cluster.
# This is an issue if a pair exists only in destination cluster.
remote_pair = self._get_cluster_pair(remote_cluster)
if not remote_pair:
LOG.debug("Setting up new cluster pairs.")
self._create_remote_pairing(remote_cluster)
remote_pair = self._get_cluster_pair(remote_cluster)
if check_connected:
if not remote_pair:
msg = _("Cluster pair not found for cluster [%s]",
remote_cluster['mvip'])
raise SolidFireReplicationPairingError(message=msg)
if remote_pair['status'] == 'Connected':
return remote_pair
def _wait_cluster_pairing_connected():
pair = self._get_cluster_pair(remote_cluster)
if pair and pair['status'] == 'Connected':
raise loopingcall.LoopingCallDone(pair)
try:
timer = loopingcall.FixedIntervalWithTimeoutLoopingCall(
_wait_cluster_pairing_connected)
remote_pair = timer.start(
interval=3,
timeout=self.configuration.sf_cluster_pairing_timeout) \
.wait()
except loopingcall.LoopingCallTimeOut:
msg = _("Cluster pair not found or in an invalid state.")
raise SolidFireReplicationPairingError(message=msg)
return remote_pair
def _create_cluster_reference(self, endpoint=None):
cluster_ref = {}
cluster_ref['endpoint'] = endpoint
if not endpoint:
cluster_ref['endpoint'] = self._build_endpoint_info()
cluster_info = (self._issue_api_request(
'GetClusterInfo', {}, endpoint=cluster_ref['endpoint'])
['result']['clusterInfo'])
for k, v in cluster_info.items():
cluster_ref[k] = v
# Add a couple extra things that are handy for us
cluster_ref['clusterAPIVersion'] = (
self._issue_api_request('GetClusterVersionInfo',
{}, endpoint=cluster_ref['endpoint'])
['result']['clusterAPIVersion'])
# NOTE(sfernand): If a custom svip is configured, we update the
# default storage ip to the configuration value.
# Otherwise, we update endpoint info with the default storage ip
# retrieved from GetClusterInfo API call.
svip = cluster_ref['endpoint'].get('svip')
if not svip:
svip = cluster_ref['svip']
if ':' not in svip:
svip += ':3260'
cluster_ref['svip'] = svip
cluster_ref['endpoint']['svip'] = svip
return cluster_ref
def _set_active_cluster(self, endpoint=None):
if not endpoint:
self.active_cluster['endpoint'] = self._build_endpoint_info()
else:
self.active_cluster['endpoint'] = endpoint
for k, v in self._issue_api_request(
'GetClusterInfo',
{})['result']['clusterInfo'].items():
self.active_cluster[k] = v
# Add a couple extra things that are handy for us
self.active_cluster['clusterAPIVersion'] = (
self._issue_api_request('GetClusterVersionInfo',
{})['result']['clusterAPIVersion'])
if self.configuration.get('sf_svip', None):
self.active_cluster['svip'] = (
self.configuration.get('sf_svip'))
def _create_provider_id_string(self,
resource_id,
account_or_vol_id):
# NOTE(jdg): We use the same format, but in the case
# of snapshots, we don't have an account id, we instead
# swap that with the parent volume id
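        # Illustrative result (values assumed): a SolidFire volumeID of 341
        # on accountID 12 in a cluster with uuid 'abcd-1234' yields
        # "341 12 abcd-1234"; _get_sfvol_by_cinder_vref() later splits this
        # string back into its three parts.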
return "%s %s %s" % (resource_id,
account_or_vol_id,
self.active_cluster['uuid'])
def _init_snapshot_mappings(self, srefs):
updates = []
sf_snaps = self._issue_api_request(
'ListSnapshots', {}, version='6.0')['result']['snapshots']
for s in srefs:
seek_name = '%s%s' % (self.configuration.sf_volume_prefix, s['id'])
sfsnap = next(
(ss for ss in sf_snaps if ss['name'] == seek_name), None)
if sfsnap:
id_string = self._create_provider_id_string(
sfsnap['snapshotID'],
sfsnap['volumeID'])
if s.get('provider_id') != id_string:
updates.append(
{'id': s['id'],
'provider_id': id_string})
return updates
def _init_volume_mappings(self, vrefs):
updates = []
sf_vols = self._issue_api_request('ListActiveVolumes',
{})['result']['volumes']
self.volume_map = {}
for v in vrefs:
seek_name = '%s%s' % (self.configuration.sf_volume_prefix, v['id'])
sfvol = next(
(sv for sv in sf_vols if sv['name'] == seek_name), None)
if sfvol:
if v.get('provider_id', 'nil') != sfvol['volumeID']:
updates.append(
{'id': v['id'],
'provider_id': self._create_provider_id_string(
sfvol['volumeID'], sfvol['accountID'])})
return updates
def update_provider_info(self, vrefs, snaprefs):
volume_updates = self._init_volume_mappings(vrefs)
snapshot_updates = self._init_snapshot_mappings(snaprefs)
return (volume_updates, snapshot_updates)
def _build_repl_endpoint_info(self, **repl_device):
endpoint = {
'mvip': repl_device.get('mvip'),
'login': repl_device.get('login'),
'passwd': repl_device.get('password'),
'port': repl_device.get('port', 443),
'url': 'https://%s:%s' % (repl_device.get('mvip'),
repl_device.get('port', 443)),
'svip': repl_device.get('svip')
}
return endpoint
def _build_endpoint_info(self, backend_conf=None, **kwargs):
endpoint = {}
if not backend_conf:
backend_conf = self.configuration
# NOTE(jdg): We default to the primary cluster config settings
# but always check to see if desired settings were passed in
# to handle things like replication targets with unique settings
endpoint['mvip'] = (
kwargs.get('mvip', backend_conf.san_ip))
endpoint['login'] = (
kwargs.get('login', backend_conf.san_login))
endpoint['passwd'] = (
kwargs.get('password', backend_conf.san_password))
endpoint['port'] = (
kwargs.get(('port'), backend_conf.sf_api_port))
sanitized_mvip = volume_utils.sanitize_host(endpoint['mvip'])
endpoint['url'] = 'https://%s:%s' % (sanitized_mvip,
endpoint['port'])
endpoint['svip'] = kwargs.get('svip', backend_conf.sf_svip)
if not endpoint.get('mvip', None) and kwargs.get('backend_id', None):
endpoint['mvip'] = kwargs.get('backend_id')
return endpoint
@retry(retry_exc_tuple, tries=6)
def _issue_api_request(self, method, params, version='1.0',
endpoint=None, timeout=None):
if params is None:
params = {}
if endpoint is None:
endpoint = self.active_cluster['endpoint']
if not timeout:
timeout = self.configuration.sf_api_request_timeout
payload = {'method': method, 'params': params}
url = '%s/json-rpc/%s/' % (endpoint['url'], version)
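        # Illustrative request shape (values assumed): a call such as
        # _issue_api_request('GetClusterInfo', {}) POSTs the JSON body
        # {"method": "GetClusterInfo", "params": {}} to
        # https://<mvip>:<port>/json-rpc/1.0/ (the default version),
        # authenticating with the endpoint's login/passwd via HTTP basic auth.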
with warnings.catch_warnings():
warnings.simplefilter(
"ignore",
requests.packages.urllib3.exceptions.InsecureRequestWarning)
req = requests.post(url,
data=json.dumps(payload),
auth=(endpoint['login'], endpoint['passwd']),
verify=self.verify_ssl,
timeout=timeout)
response = req.json()
req.close()
if (('error' in response) and
(response['error']['name'] in self.retryable_errors)):
msg = ('Retryable error (%s) encountered during '
'SolidFire API call.' % response['error']['name'])
LOG.debug(msg)
LOG.debug("API response: %s", response)
raise SolidFireRetryableException(message=msg)
if (('error' in response) and
response['error']['name'] == 'xInvalidPairingKey'):
LOG.debug("Error on volume pairing")
raise SolidFireReplicationPairingError
if 'error' in response:
msg = _('API response: %s') % response
raise SolidFireAPIException(msg)
return response
def _get_volumes_by_sfaccount(self, account_id, endpoint=None):
"""Get all volumes on cluster for specified account."""
params = {'accountID': account_id}
return self._issue_api_request(
'ListVolumesForAccount',
params,
endpoint=endpoint)['result']['volumes']
def _get_volumes_for_account(self, sf_account_id, cinder_uuid=None,
endpoint=None):
# ListVolumesForAccount gives both Active and Deleted
# we require the solidfire accountID, uuid of volume
# is optional
vols = self._get_volumes_by_sfaccount(sf_account_id, endpoint=endpoint)
if cinder_uuid:
vlist = [v for v in vols if
cinder_uuid in v['name']]
else:
vlist = [v for v in vols]
vlist = sorted(vlist, key=lambda k: k['volumeID'])
return vlist
def _get_sfvol_by_cinder_vref(self, vref):
# sfvols is one or more element objects returned from a list call
# sfvol is the single volume object that will be returned or it will
# be None
sfvols = None
sfvol = None
provider_id = vref.get('provider_id', None)
if provider_id:
try:
sf_vid, sf_aid, sf_cluster_id = provider_id.split(' ')
except ValueError:
LOG.warning("Invalid provider_id entry for volume: %s",
vref.id)
else:
# So there shouldn't be any clusters out in the field that are
                # running Element < 8.0, but just in case we'll use a try
# block here and fall back to the old methods just to be safe
try:
sfvol = self._issue_api_request(
'ListVolumes',
{'startVolumeID': sf_vid,
'limit': 1},
version='8.0')['result']['volumes'][0]
# Bug 1782373 validate the list returned has what we asked
# for, check if there was no match
if sfvol['volumeID'] != int(sf_vid):
sfvol = None
except Exception:
pass
if not sfvol:
LOG.info("Failed to find volume by provider_id, "
"attempting ListForAccount")
for account in self._get_sfaccounts_for_tenant(vref.project_id):
sfvols = self._issue_api_request(
'ListVolumesForAccount',
{'accountID': account['accountID']})['result']['volumes']
                # Bug 1782373: match the single vref.id in case there is no
                # provider_id, as the above call returns a list for the account
for sfv in sfvols:
if sfv['attributes'].get('uuid', None) == vref.id:
sfvol = sfv
break
return sfvol
def _get_sfaccount_by_name(self, sf_account_name, endpoint=None):
"""Get SolidFire account object by name."""
sfaccount = None
params = {'username': sf_account_name}
try:
data = self._issue_api_request('GetAccountByName',
params,
endpoint=endpoint)
if 'result' in data and 'account' in data['result']:
LOG.debug('Found solidfire account: %s', sf_account_name)
sfaccount = data['result']['account']
except SolidFireAPIException as ex:
if 'xUnknownAccount' in ex.msg:
return sfaccount
else:
raise
return sfaccount
def _get_sf_account_name(self, project_id):
"""Build the SolidFire account name to use."""
prefix = self.configuration.sf_account_prefix or ''
if prefix == 'hostname':
prefix = socket.gethostname()
return '%s%s%s' % (prefix, '-' if prefix else '', project_id)
def _get_sfaccount(self, project_id):
sf_account_name = self._get_sf_account_name(project_id)
sfaccount = self._get_sfaccount_by_name(sf_account_name)
if sfaccount is None:
raise SolidFireAccountNotFound(
account_name=sf_account_name)
return sfaccount
def _create_sfaccount(self, sf_account_name, endpoint=None):
"""Create account on SolidFire device if it doesn't already exist.
We're first going to check if the account already exists, if it does
just return it. If not, then create it.
"""
sfaccount = self._get_sfaccount_by_name(sf_account_name,
endpoint=endpoint)
if sfaccount is None:
LOG.debug('solidfire account: %s does not exist, create it...',
sf_account_name)
chap_secret = self._generate_random_string(12)
params = {'username': sf_account_name,
'initiatorSecret': chap_secret,
'targetSecret': chap_secret,
'attributes': {}}
self._issue_api_request('AddAccount', params,
endpoint=endpoint)
sfaccount = self._get_sfaccount_by_name(sf_account_name,
endpoint=endpoint)
return sfaccount
def _generate_random_string(self, length):
"""Generates random_string to use for CHAP password."""
return volume_utils.generate_password(
length=length,
symbolgroups=(string.ascii_uppercase + string.digits))
def _build_connection_info(self, sfaccount, vol, endpoint=None):
"""Gets the connection info for specified account and volume."""
if endpoint:
iscsi_portal = endpoint['svip']
else:
iscsi_portal = self.active_cluster['svip']
if ':' not in iscsi_portal:
iscsi_portal += ':3260'
chap_secret = sfaccount['targetSecret']
vol_id = vol['volumeID']
iqn = vol['iqn']
conn_info = {
# NOTE(john-griffith): SF volumes are always at lun 0
'provider_location': ('%s %s %s' % (iscsi_portal, iqn, 0)),
'provider_auth': ('CHAP %s %s' % (sfaccount['username'],
chap_secret))
}
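        # Illustrative shape (values assumed): provider_location ends up as
        # "<svip>:3260 <iqn> 0" and provider_auth as
        # "CHAP <sf-account-name> <chap-secret>".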
if not self.configuration.sf_emulate_512:
conn_info['provider_geometry'] = ('%s %s' % (4096, 4096))
conn_info['provider_id'] = (
self._create_provider_id_string(vol_id, sfaccount['accountID']))
return conn_info
def _get_model_info(self, sfaccount, sf_volume_id, endpoint=None):
volume = None
volume_list = self._get_volumes_by_sfaccount(
sfaccount['accountID'], endpoint=endpoint)
for v in volume_list:
if v['volumeID'] == sf_volume_id:
volume = v
break
if not volume:
LOG.error('Failed to retrieve volume SolidFire-'
'ID: %s in get_by_account!', sf_volume_id)
raise exception.VolumeNotFound(volume_id=sf_volume_id)
model_update = self._build_connection_info(sfaccount, volume,
endpoint=endpoint)
return model_update
def _snapshot_discovery(self, src_uuid, params, vref):
# NOTE(jdg): First check the SF snapshots
# if we don't find a snap by the given name, just move on to check
# volumes. This may be a running system that was updated from
# before we did snapshots, so need to check both
is_clone = False
sf_vol = None
snap_name = '%s%s' % (self.configuration.sf_volume_prefix, src_uuid)
snaps = self._get_sf_snapshots()
snap = next((s for s in snaps if s["name"] == snap_name), None)
if snap:
params['snapshotID'] = int(snap['snapshotID'])
params['volumeID'] = int(snap['volumeID'])
params['newSize'] = int(vref['size'] * units.Gi)
else:
sf_vol = self._get_sf_volume(src_uuid)
if sf_vol is None:
raise exception.VolumeNotFound(volume_id=src_uuid)
params['volumeID'] = int(sf_vol['volumeID'])
params['newSize'] = int(vref['size'] * units.Gi)
is_clone = True
return params, is_clone, sf_vol
def _do_clone_volume(self, src_uuid,
vref, sf_src_snap=None):
"""Create a clone of an existing volume or snapshot."""
LOG.debug("Creating cloned volume from vol %(src)s to %(dst)s.",
{'src': src_uuid, 'dst': vref.id})
sf_account = self._get_create_account(vref['project_id'])
params = {'name': '%(prefix)s%(id)s' %
{'prefix': self.configuration.sf_volume_prefix,
'id': vref['id']},
'newAccountID': sf_account['accountID']}
is_clone = False
if sf_src_snap:
# In some scenarios we are passed the snapshot information that we
# are supposed to clone.
params['snapshotID'] = sf_src_snap['snapshotID']
params['volumeID'] = sf_src_snap['volumeID']
params['newSize'] = int(vref['size'] * units.Gi)
else:
params, is_clone, sf_src_vol = self._snapshot_discovery(
src_uuid, params, vref)
data = self._issue_api_request('CloneVolume', params, version='6.0')
if (('result' not in data) or ('volumeID' not in data['result'])):
msg = _("API response: %s") % data
raise SolidFireAPIException(msg)
sf_cloned_id = data['result']['volumeID']
# NOTE(jdg): all attributes are copied via clone, need to do an update
# to set any that were provided
params = self._get_default_volume_params(vref, is_clone=is_clone)
params['volumeID'] = sf_cloned_id
data = self._issue_api_request('ModifyVolume', params)
def _wait_volume_is_active():
try:
model_info = self._get_model_info(sf_account, sf_cloned_id)
if model_info:
raise loopingcall.LoopingCallDone(model_info)
except exception.VolumeNotFound:
LOG.debug('Waiting for cloned volume [%s] - [%s] to become '
'active', sf_cloned_id, vref.id)
pass
try:
timer = loopingcall.FixedIntervalWithTimeoutLoopingCall(
_wait_volume_is_active)
model_update = timer.start(
interval=1,
timeout=self.configuration.sf_volume_clone_timeout).wait()
except loopingcall.LoopingCallTimeOut:
msg = _('Failed to get model update from clone [%s] - [%s]' %
(sf_cloned_id, vref.id))
LOG.error(msg)
raise SolidFireAPIException(msg)
rep_settings = self._retrieve_replication_settings(vref)
if self.replication_enabled and rep_settings:
try:
vref['volumeID'] = sf_cloned_id
rep_updates = self._replicate_volume(
vref, params, sf_account, rep_settings)
model_update.update(rep_updates)
except SolidFireDriverException:
with excutils.save_and_reraise_exception():
self._issue_api_request('DeleteVolume',
{'volumeID': sf_cloned_id})
self._issue_api_request('PurgeDeletedVolume',
{'volumeID': sf_cloned_id})
# Increment the usage count, just for data collection
# We're only doing this for clones, not create_from snaps
if is_clone:
data = self._update_attributes(sf_src_vol)
return (data, sf_account, model_update)
def _update_attributes(self, sf_vol):
cloned_count = sf_vol['attributes'].get('cloned_count', 0)
cloned_count += 1
attributes = sf_vol['attributes']
attributes['cloned_count'] = cloned_count
params = {'volumeID': int(sf_vol['volumeID'])}
params['attributes'] = attributes
return self._issue_api_request('ModifyVolume', params)
def _list_volumes_by_name(self, sf_volume_name):
params = {'volumeName': sf_volume_name}
return self._issue_api_request(
'ListVolumes', params, version='8.0')['result']['volumes']
def _wait_volume_is_active(self, sf_volume_name):
def _wait():
volumes = self._list_volumes_by_name(sf_volume_name)
if volumes:
LOG.debug("Found Volume [%s] in SolidFire backend. "
"Current status is [%s].",
sf_volume_name, volumes[0]['status'])
if volumes[0]['status'] == 'active':
raise loopingcall.LoopingCallDone(volumes[0])
try:
timer = loopingcall.FixedIntervalWithTimeoutLoopingCall(
_wait)
sf_volume = (timer.start(
interval=1,
timeout=self.configuration.sf_volume_create_timeout).wait())
return sf_volume
except loopingcall.LoopingCallTimeOut:
msg = ("Timeout while waiting volume [%s] "
"to be in active state." % sf_volume_name)
LOG.error(msg)
raise SolidFireAPIException(msg)
def _do_volume_create(self, sf_account, params, endpoint=None):
sf_volume_name = params['name']
volumes_found = self._list_volumes_by_name(sf_volume_name)
if volumes_found:
raise SolidFireDuplicateVolumeNames(vol_name=sf_volume_name)
sf_volid = None
try:
params['accountID'] = sf_account['accountID']
response = self._issue_api_request(
'CreateVolume', params, endpoint=endpoint)
sf_volid = response['result']['volumeID']
except requests.exceptions.ReadTimeout:
LOG.debug("Read Timeout exception caught while creating "
"volume [%s].", sf_volume_name)
# Check if volume was created for the given name,
# in case the backend has processed the request but failed
# to deliver the response before api request timeout.
volume_created = self._wait_volume_is_active(sf_volume_name)
sf_volid = volume_created['volumeID']
return self._get_model_info(sf_account, sf_volid, endpoint=endpoint)
def _do_snapshot_create(self, params):
model_update = {}
snapshot_id = self._issue_api_request(
'CreateSnapshot', params, version='6.0')['result']['snapshotID']
snaps = self._get_sf_snapshots()
snap = (
next((s for s in snaps if int(s["snapshotID"]) ==
int(snapshot_id)), None))
model_update['provider_id'] = (
self._create_provider_id_string(snap['snapshotID'],
snap['volumeID']))
return model_update
def _set_qos_presets(self, volume):
qos = {}
valid_presets = self.sf_qos_dict.keys()
# First look to see if they included a preset
presets = [i.value for i in volume.get('volume_metadata')
if i.key == 'sf-qos' and i.value in valid_presets]
if len(presets) > 0:
if len(presets) > 1:
LOG.warning('More than one valid preset was '
'detected, using %s', presets[0])
qos = self.sf_qos_dict[presets[0]]
else:
# look for explicit settings
for i in volume.get('volume_metadata'):
if i.key in self.sf_qos_keys:
qos[i.key] = int(i.value)
return qos
def _extract_sf_attributes_from_extra_specs(self, type_id):
# This will do a 1:1 copy of the extra spec keys that
        # include the SolidFire delimiter into a Volume attribute
# K/V pair
ctxt = context.get_admin_context()
volume_type = volume_types.get_volume_type(ctxt, type_id)
specs = volume_type.get('extra_specs')
sf_keys = []
for key, value in specs.items():
if "SFAttribute:" in key:
fields = key.split(':')
sf_keys.append({fields[1]: value})
return sf_keys
def _set_qos_by_volume_type(self, ctxt, type_id, vol_size):
qos = {}
scale_qos = {}
volume_type = volume_types.get_volume_type(ctxt, type_id)
qos_specs_id = volume_type.get('qos_specs_id')
specs = volume_type.get('extra_specs')
# NOTE(jdg): We prefer the qos_specs association
# and over-ride any existing
# extra-specs settings if present
if qos_specs_id is not None:
# Policy changes require admin context to get QoS specs
# at the object layer (base:get_by_id), we can either
# explicitly promote here, or pass in a context of None
# and let the qos_specs api get an admin context for us
# personally I prefer explicit, so here ya go.
admin_ctxt = context.get_admin_context()
kvs = qos_specs.get_qos_specs(admin_ctxt, qos_specs_id)['specs']
else:
kvs = specs
for key, value in kvs.items():
if ':' in key:
fields = key.split(':')
key = fields[1]
if key in self.sf_qos_keys:
qos[key] = int(value)
if key in self.sf_scale_qos_keys:
scale_qos[key] = value
# look for the 'scaledIOPS' key and scale QoS if set
if 'scaledIOPS' in scale_qos:
scale_qos.pop('scaledIOPS')
for key, value in scale_qos.items():
if key == 'scaleMin':
qos['minIOPS'] = (qos['minIOPS'] +
(int(value) * (vol_size - 1)))
elif key == 'scaleMax':
qos['maxIOPS'] = (qos['maxIOPS'] +
(int(value) * (vol_size - 1)))
elif key == 'scaleBurst':
qos['burstIOPS'] = (qos['burstIOPS'] +
(int(value) * (vol_size - 1)))
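        # Worked example (values assumed): with a volume-type QoS of
        # minIOPS=100 and scaleMin=50, a 5 GB volume gets
        # 100 + 50 * (5 - 1) = 300 minIOPS before the capping below.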
# Cap the IOPS values at their limits
capped = False
for key, value in qos.items():
if value > self.sf_iops_lim_max[key]:
qos[key] = self.sf_iops_lim_max[key]
capped = True
if value < self.sf_iops_lim_min[key]:
qos[key] = self.sf_iops_lim_min[key]
capped = True
if capped:
LOG.debug("A SolidFire QoS value was capped at the defined limits")
# Check that minIOPS <= maxIOPS <= burstIOPS
if (qos.get('minIOPS', 0) > qos.get('maxIOPS', 0) or
qos.get('maxIOPS', 0) > qos.get('burstIOPS', 0)):
msg = (_("Scaled QoS error. Must be minIOPS <= maxIOPS <= "
"burstIOPS. Currently: Min: %(min)s, Max: "
"%(max)s, Burst: %(burst)s.") %
{"min": qos['minIOPS'],
"max": qos['maxIOPS'],
"burst": qos['burstIOPS']})
raise exception.InvalidQoSSpecs(reason=msg)
return qos
def _get_sf_volume(self, uuid, params=None, endpoint=None):
if params:
vols = [v for v in self._issue_api_request(
'ListVolumesForAccount',
params)['result']['volumes'] if v['status'] == "active"]
else:
vols = self._issue_api_request(
'ListActiveVolumes', params,
endpoint=endpoint)['result']['volumes']
found_count = 0
sf_volref = None
for v in vols:
# NOTE(jdg): In the case of "name" we can't
# update that on manage/import, so we use
# the uuid attribute
meta = v.get('attributes')
alt_id = ''
if meta:
alt_id = meta.get('uuid', '')
if uuid in v['name'] or uuid in alt_id:
found_count += 1
sf_volref = v
LOG.debug("Mapped SolidFire volumeID %(volume_id)s "
"to cinder ID %(uuid)s.",
{'volume_id': v['volumeID'], 'uuid': uuid})
if found_count == 0:
# NOTE(jdg): Previously we would raise here, but there are cases
# where this might be a cleanup for a failed delete.
# Until we get better states we'll just log an error
LOG.error("Volume %s, not found on SF Cluster.", uuid)
if found_count > 1:
LOG.error("Found %(count)s volumes mapped to id: %(uuid)s.",
{'count': found_count,
'uuid': uuid})
raise SolidFireDuplicateVolumeNames(vol_name=uuid)
return sf_volref
def _get_sf_snapshots(self, sf_volid=None):
params = {}
if sf_volid:
params = {'volumeID': sf_volid}
return self._issue_api_request(
'ListSnapshots', params, version='6.0')['result']['snapshots']
def _get_sfaccounts_for_tenant(self, cinder_project_id, endpoint=None):
accounts = self._issue_api_request(
'ListAccounts', {}, endpoint=endpoint)['result']['accounts']
# Note(jdg): On SF we map account-name to OpenStack's tenant ID
# we use tenantID in here to get secondaries that might exist
# Also: we expect this to be sorted, so we get the primary first
# in the list
return sorted([acc for acc in accounts
if self._get_sf_account_name(cinder_project_id) in
acc['username']],
key=lambda k: k['accountID'])
def _get_all_active_volumes(self, cinder_uuid=None):
params = {}
volumes = self._issue_api_request('ListActiveVolumes',
params)['result']['volumes']
if cinder_uuid:
vols = ([v for v in volumes if
cinder_uuid in v.name])
else:
vols = [v for v in volumes]
return vols
def _get_all_deleted_volumes(self, cinder_uuid=None):
params = {}
vols = self._issue_api_request('ListDeletedVolumes',
params)['result']['volumes']
if cinder_uuid:
deleted_vols = ([v for v in vols if
cinder_uuid in v['name']])
else:
deleted_vols = [v for v in vols]
return deleted_vols
def _get_account_create_availability(self, accounts, endpoint=None):
# we'll check both the primary and the secondary
# if it exists and return whichever one has count
# available.
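        # Note: SolidFire caps volumes per account, so once the primary
        # tenant account reaches max_volumes_per_account (1990 here) a
        # secondary account named '<primary-username>_' is created and used
        # for overflow.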
for acc in accounts:
if len(self._get_volumes_for_account(
acc['accountID'],
endpoint=endpoint)) < self.max_volumes_per_account:
return acc
if len(accounts) == 1:
sfaccount = self._create_sfaccount(accounts[0]['username'] + '_',
endpoint=endpoint)
return sfaccount
return None
def _get_create_account(self, proj_id, endpoint=None):
# Retrieve SolidFire accountID to be used for creating volumes.
sf_accounts = self._get_sfaccounts_for_tenant(
proj_id, endpoint=endpoint)
if not sf_accounts:
sf_account_name = self._get_sf_account_name(proj_id)
sf_account = self._create_sfaccount(
sf_account_name, endpoint=endpoint)
else:
# Check availability for creates
sf_account = self._get_account_create_availability(
sf_accounts, endpoint=endpoint)
if not sf_account:
msg = _('Volumes/account exceeded on both primary and '
'secondary SolidFire accounts.')
raise SolidFireDriverException(msg)
return sf_account
def _create_vag(self, iqn, vol_id=None):
"""Create a volume access group(vag).
Returns the vag_id.
"""
vag_name = re.sub('[^0-9a-zA-Z]+', '-', iqn)
params = {'name': vag_name,
'initiators': [iqn],
'volumes': [vol_id],
'attributes': {'openstack': True}}
try:
result = self._issue_api_request('CreateVolumeAccessGroup',
params,
version='7.0')
return result['result']['volumeAccessGroupID']
except SolidFireAPIException as error:
if xExceededLimit in error.msg:
if iqn in error.msg:
# Initiator double registered.
return self._safe_create_vag(iqn, vol_id)
else:
# VAG limit reached. Purge and start over.
self._purge_vags()
return self._safe_create_vag(iqn, vol_id)
else:
raise
def _safe_create_vag(self, iqn, vol_id=None):
# Potential race condition with simultaneous volume attaches to the
# same host. To help avoid this, VAG creation makes a best attempt at
# finding and using an existing VAG.
vags = self._get_vags_by_name(iqn)
if vags:
# Filter through the vags and find the one with matching initiator
vag = next((v for v in vags if iqn in v['initiators']), None)
if vag:
return vag['volumeAccessGroupID']
else:
# No matches, use the first result, add initiator IQN.
vag_id = vags[0]['volumeAccessGroupID']
return self._add_initiator_to_vag(iqn, vag_id)
return self._create_vag(iqn, vol_id)
def _base_get_vags(self):
params = {}
vags = self._issue_api_request(
'ListVolumeAccessGroups',
params,
version='7.0')['result']['volumeAccessGroups']
return vags
def _get_vags_by_name(self, iqn):
"""Retrieve SolidFire volume access group objects by name.
Returns an array of vags with a matching name value.
Returns an empty array if there are no matches.
"""
vags = self._base_get_vags()
vag_name = re.sub('[^0-9a-zA-Z]+', '-', iqn)
matching_vags = [vag for vag in vags if vag['name'] == vag_name]
return matching_vags
def _get_vags_by_volume(self, vol_id):
params = {"volumeID": vol_id}
vags = self._issue_api_request(
'GetVolumeStats',
params)['result']['volumeStats']['volumeAccessGroups']
return vags
def _add_initiator_to_vag(self, iqn, vag_id):
# Added a vag_id return as there is a chance that we might have to
# create a new VAG if our target VAG is deleted underneath us.
params = {"initiators": [iqn],
"volumeAccessGroupID": vag_id}
try:
self._issue_api_request('AddInitiatorsToVolumeAccessGroup',
params,
version='7.0')
return vag_id
except SolidFireAPIException as error:
if xAlreadyInVolumeAccessGroup in error.msg:
return vag_id
elif xVolumeAccessGroupIDDoesNotExist in error.msg:
# No locking means sometimes a VAG can be removed by a parallel
# volume detach against the same host.
return self._safe_create_vag(iqn)
else:
raise
def _add_volume_to_vag(self, vol_id, iqn, vag_id):
# Added a vag_id return to be consistent with add_initiator_to_vag. It
# isn't necessary but may be helpful in the future.
params = {"volumeAccessGroupID": vag_id,
"volumes": [vol_id]}
try:
self._issue_api_request('AddVolumesToVolumeAccessGroup',
params,
version='7.0')
return vag_id
except SolidFireAPIException as error:
if xAlreadyInVolumeAccessGroup in error.msg:
return vag_id
elif xVolumeAccessGroupIDDoesNotExist in error.msg:
return self._safe_create_vag(iqn, vol_id)
else:
raise
def _remove_volume_from_vag(self, vol_id, vag_id):
params = {"volumeAccessGroupID": vag_id,
"volumes": [vol_id]}
try:
self._issue_api_request('RemoveVolumesFromVolumeAccessGroup',
params,
version='7.0')
except SolidFireAPIException as error:
if xNotInVolumeAccessGroup in error.msg:
pass
elif xVolumeAccessGroupIDDoesNotExist in error.msg:
pass
else:
raise
def _remove_volume_from_vags(self, vol_id):
# Due to all sorts of uncertainty around multiattach, on volume
# deletion we make a best attempt at removing the vol_id from VAGs.
vags = self._get_vags_by_volume(vol_id)
for vag in vags:
self._remove_volume_from_vag(vol_id, vag['volumeAccessGroupID'])
def _remove_vag(self, vag_id):
params = {"volumeAccessGroupID": vag_id}
try:
self._issue_api_request('DeleteVolumeAccessGroup',
params,
version='7.0')
except SolidFireAPIException as error:
if xVolumeAccessGroupIDDoesNotExist not in error.msg:
raise
def _purge_vags(self, limit=10):
        # Purge up to `limit` VAGs that have no active volumes, no
        # initiators, no deleted volumes, and that carry an OpenStack
        # attribute. Purge the oldest VAGs first.
vags = self._base_get_vags()
targets = [v for v in vags if v['volumes'] == [] and
v['initiators'] == [] and
v['deletedVolumes'] == [] and
v['attributes'].get('openstack')]
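        # Assumption: volumeAccessGroupIDs are assigned sequentially by the
        # cluster, so sorting by ID ascending approximates oldest-first.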
sorted_targets = sorted(targets,
key=lambda k: k['volumeAccessGroupID'])
for vag in sorted_targets[:limit]:
self._remove_vag(vag['volumeAccessGroupID'])
@locked_image_id_operation
def clone_image(self, context,
volume, image_location,
image_meta, image_service):
"""Clone an existing image volume."""
public = False
# NOTE(jdg): Glance V2 moved from is_public to visibility
# so we check both, as we don't necessarily know or want
# to care which we're using. Will need to look at
# future handling of things like shared and community
# but for now, it's owner or public and that's it
visibility = image_meta.get('visibility', None)
if visibility and visibility == 'public':
public = True
elif image_meta.get('is_public', False):
public = True
else:
if image_meta['owner'] == volume['project_id']:
public = True
if not public:
LOG.warning("Requested image is not "
"accessible by current Tenant.")
return None, False
        # If we don't have the image-volume to clone from, return failure;
        # the Cinder driver will then create the source for the clone first.
try:
(data, sfaccount, model) = self._do_clone_volume(image_meta['id'],
volume)
except exception.VolumeNotFound:
return None, False
return model, True
# extended_size > 0 when we are extending a volume
def _retrieve_qos_setting(self, volume, extended_size=0):
qos = {}
if (self.configuration.sf_allow_tenant_qos and
volume.get('volume_metadata') is not None):
qos = self._set_qos_presets(volume)
ctxt = context.get_admin_context()
type_id = volume.get('volume_type_id', None)
if type_id is not None:
qos = self._set_qos_by_volume_type(ctxt, type_id,
extended_size if extended_size
> 0 else volume.get('size'))
return qos
def _get_default_volume_params(self, volume, sf_account=None,
is_clone=False):
if not sf_account:
sf_account = self._get_create_account(volume.project_id)
qos = self._retrieve_qos_setting(volume)
create_time = volume.created_at.isoformat()
attributes = {
'uuid': volume.id,
'is_clone': is_clone,
'created_at': create_time,
'cinder-name': volume.get('display_name', "")
}
if volume.volume_type_id:
for attr in self._extract_sf_attributes_from_extra_specs(
volume.volume_type_id):
for k, v in attr.items():
attributes[k] = v
vol_name = '%s%s' % (self.configuration.sf_volume_prefix, volume.id)
params = {'name': vol_name,
'accountID': sf_account['accountID'],
'sliceCount': 1,
'totalSize': int(volume.size * units.Gi),
'enable512e': self.configuration.sf_emulate_512,
'attributes': attributes,
'qos': qos}
return params
def create_volume(self, volume):
"""Create volume on SolidFire device.
        The account is where CHAP settings are derived from; the volume is
        created and exported under it. Note that the new volume is
        immediately ready for use.
        One caveat here is that an existing user account must be specified
        in the API call to create a new volume. We use a fixed algorithm to
        determine the account info based on the passed-in Cinder volume
        object: first we check whether the account already exists (and use
        it), and if it does not, we go ahead and create it.
"""
sf_account = self._get_create_account(volume['project_id'])
params = self._get_default_volume_params(volume, sf_account)
# NOTE(jdg): Check if we're a migration tgt, if so
# use the old volume-id here for the SF Name
migration_status = volume.get('migration_status', None)
if migration_status and 'target' in migration_status:
k, v = migration_status.split(':')
vname = '%s%s' % (self.configuration.sf_volume_prefix, v)
params['name'] = vname
params['attributes']['migration_uuid'] = volume['id']
params['attributes']['uuid'] = v
model_update = self._do_volume_create(sf_account, params)
try:
rep_settings = self._retrieve_replication_settings(volume)
if self.replication_enabled and rep_settings:
volume['volumeID'] = (
int(model_update['provider_id'].split()[0]))
rep_updates = self._replicate_volume(volume, params,
sf_account, rep_settings)
if rep_updates:
model_update.update(rep_updates)
except SolidFireAPIException:
            # NOTE(jdg): Something went wrong after the source create. Due to
            # the way TFLOW works and its insistence on retrying the same
            # command over and over, coupled with the fact that the
            # introduction of objects now sets host to None on failures, we'd
            # end up with an orphaned volume on the backend for every one of
            # these segments that fails, for n retries. Sad Sad Panda!! We'll
            # just clean up ourselves until we can get a general fix in
            # Cinder further up the line.
with excutils.save_and_reraise_exception():
sf_volid = int(model_update['provider_id'].split()[0])
self._issue_api_request('DeleteVolume', {'volumeID': sf_volid})
self._issue_api_request('PurgeDeletedVolume',
{'volumeID': sf_volid})
return model_update
def _retrieve_replication_settings(self, volume):
rep_data = "Async"
ctxt = context.get_admin_context()
type_id = volume.get('volume_type_id', None)
if type_id is not None:
rep_data = self._set_rep_by_volume_type(ctxt, type_id)
return rep_data
def _set_rep_by_volume_type(self, ctxt, type_id):
rep_modes = ['Async', 'Sync', 'SnapshotsOnly']
rep_opts = {}
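        # Illustrative example of the extra specs this method looks for on a
        # volume type (a mode outside rep_modes falls back to 'Async' below):
        #   {'replication_enabled': '<is> True',
        #    'solidfire:replication_mode': 'Sync'}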
type_ref = volume_types.get_volume_type(ctxt, type_id)
specs = type_ref.get('extra_specs')
if specs.get('replication_enabled', "") == "<is> True":
if specs.get('solidfire:replication_mode') in rep_modes:
rep_opts['rep_type'] = specs.get('solidfire:replication_mode')
else:
rep_opts['rep_type'] = 'Async'
return rep_opts
def _create_volume_pairing(self, volume, dst_volume, tgt_cluster):
src_sf_volid = int(volume['provider_id'].split()[0])
dst_sf_volid = int(dst_volume['provider_id'].split()[0])
@retry(SolidFireReplicationPairingError, tries=6)
def _pair_volumes():
rep_type = "Sync"
# Enable volume pairing
LOG.debug("Starting pairing source volume ID: %s",
src_sf_volid)
# Make sure we split any pair the volume has
params = {
'volumeID': src_sf_volid,
'mode': rep_type
}
self._issue_api_request('RemoveVolumePair', params, '8.0')
rep_key = self._issue_api_request(
'StartVolumePairing', params,
'8.0')['result']['volumePairingKey']
LOG.debug("Volume pairing started on source: "
"%(endpoint)s",
{'endpoint': tgt_cluster['endpoint']['url']})
params = {
'volumeID': dst_sf_volid,
'volumePairingKey': rep_key
}
self._issue_api_request('CompleteVolumePairing',
params,
'8.0',
endpoint=tgt_cluster['endpoint'])
LOG.debug("Volume pairing completed on destination: "
"%(endpoint)s",
{'endpoint': tgt_cluster['endpoint']['url']})
_pair_volumes()
def _replicate_volume(self, volume, params,
parent_sfaccount, rep_info):
updates = {}
rep_success_status = fields.ReplicationStatus.ENABLED
# NOTE(erlon): Right now we only support 1 remote target so, we always
# get cluster_pairs[0]
tgt_endpoint = self.cluster_pairs[0]['endpoint']
LOG.debug("Replicating volume on remote cluster: %(tgt)s\n params: "
"%(params)s", {'tgt': tgt_endpoint, 'params': params})
params['username'] = self._get_sf_account_name(volume['project_id'])
try:
params['initiatorSecret'] = parent_sfaccount['initiatorSecret']
params['targetSecret'] = parent_sfaccount['targetSecret']
self._issue_api_request(
'AddAccount',
params,
endpoint=tgt_endpoint)['result']['accountID']
except SolidFireAPIException as ex:
if 'xDuplicateUsername' not in ex.msg:
raise
remote_account = (
self._get_sfaccount_by_name(params['username'],
endpoint=tgt_endpoint))
# Create the volume on the remote cluster w/same params as original
params['accountID'] = remote_account['accountID']
LOG.debug("Create remote volume on: %(endpoint)s with account: "
"%(account)s",
{'endpoint': tgt_endpoint['url'], 'account': remote_account})
model_update = self._do_volume_create(
remote_account, params, endpoint=tgt_endpoint)
tgt_sfid = int(model_update['provider_id'].split()[0])
params = {'volumeID': tgt_sfid, 'access': 'replicationTarget'}
self._issue_api_request('ModifyVolume',
params,
'8.0',
endpoint=tgt_endpoint)
        # NOTE(erlon): For some reason the SF cluster randomly fails the
# replication of volumes. The generated keys are deemed invalid by the
# target backend. When that happens, we re-start the volume pairing
# process.
@retry(SolidFireReplicationPairingError, tries=6)
def _pair_volumes():
# Enable volume pairing
LOG.debug("Start volume pairing on volume ID: %s",
volume['volumeID'])
            # Make sure we split any pair the volume has
params = {'volumeID': volume['volumeID'],
'mode': rep_info['rep_type']}
self._issue_api_request('RemoveVolumePair', params, '8.0')
rep_key = self._issue_api_request(
'StartVolumePairing', params,
'8.0')['result']['volumePairingKey']
params = {'volumeID': tgt_sfid,
'volumePairingKey': rep_key}
LOG.debug("Sending issue CompleteVolumePairing request on remote: "
"%(endpoint)s, %(parameters)s",
{'endpoint': tgt_endpoint['url'], 'parameters': params})
self._issue_api_request('CompleteVolumePairing',
params,
'8.0',
endpoint=tgt_endpoint)
try:
_pair_volumes()
except SolidFireAPIException:
with excutils.save_and_reraise_exception():
params = {'volumeID': tgt_sfid}
LOG.debug("Error pairing volume on remote cluster. Rolling "
"back and deleting volume %(vol)s at cluster "
"%(cluster)s.",
{'vol': tgt_sfid, 'cluster': tgt_endpoint})
self._issue_api_request('DeleteVolume', params,
endpoint=tgt_endpoint)
self._issue_api_request('PurgeDeletedVolume', params,
endpoint=tgt_endpoint)
updates['replication_status'] = rep_success_status
LOG.debug("Completed volume pairing.")
return updates
def _disable_replication(self, volume):
updates = {}
tgt_endpoint = self.cluster_pairs[0]['endpoint']
sfvol = self._get_sfvol_by_cinder_vref(volume)
if len(sfvol['volumePairs']) != 1:
LOG.warning("Trying to disable replication on volume %s but "
"volume does not have pairs.", volume.id)
updates['replication_status'] = fields.ReplicationStatus.DISABLED
return updates
params = {'volumeID': sfvol['volumeID']}
self._issue_api_request('RemoveVolumePair', params, '8.0')
remote_sfid = sfvol['volumePairs'][0]['remoteVolumeID']
params = {'volumeID': remote_sfid}
self._issue_api_request('RemoveVolumePair',
params, '8.0', endpoint=tgt_endpoint)
self._issue_api_request('DeleteVolume', params,
endpoint=tgt_endpoint)
self._issue_api_request('PurgeDeletedVolume', params,
endpoint=tgt_endpoint)
updates['replication_status'] = fields.ReplicationStatus.DISABLED
return updates
@locked_source_id_operation
def create_cloned_volume(self, volume, source):
"""Create a clone of an existing volume."""
(_data, _sfaccount, model) = self._do_clone_volume(
source['id'],
volume)
return model
def delete_volume(self, volume):
"""Delete SolidFire Volume from device.
        SolidFire allows multiple volumes with the same name; the
        volumeID is what is guaranteed to be unique.
"""
sf_vol = self._get_sfvol_by_cinder_vref(volume)
if sf_vol is not None:
for vp in sf_vol.get('volumePairs', []):
LOG.debug("Deleting paired volume on remote cluster...")
pair_id = vp['clusterPairID']
for cluster in self.cluster_pairs:
if cluster['clusterPairID'] == pair_id:
params = {'volumeID': vp['remoteVolumeID']}
LOG.debug("Issue Delete request on cluster: "
"%(remote)s with params: %(parameters)s",
{'remote': cluster['endpoint']['url'],
'parameters': params})
self._issue_api_request('DeleteVolume', params,
endpoint=cluster['endpoint'])
self._issue_api_request('PurgeDeletedVolume', params,
endpoint=cluster['endpoint'])
# The multiattach volumes are only removed from the VAG on
# deletion.
if volume.get('multiattach'):
self._remove_volume_from_vags(sf_vol['volumeID'])
if sf_vol['status'] == 'active':
params = {'volumeID': sf_vol['volumeID']}
self._issue_api_request('DeleteVolume', params)
self._issue_api_request('PurgeDeletedVolume', params)
else:
LOG.error("Volume ID %s was not found on "
"the SolidFire Cluster while attempting "
"delete_volume operation!", volume['id'])
def delete_snapshot(self, snapshot):
"""Delete the specified snapshot from the SolidFire cluster."""
sf_snap_name = '%s%s' % (self.configuration.sf_volume_prefix,
snapshot['id'])
accounts = self._get_sfaccounts_for_tenant(snapshot['project_id'])
snap = None
for acct in accounts:
params = {'accountID': acct['accountID']}
sf_vol = self._get_sf_volume(snapshot['volume_id'], params)
if sf_vol:
sf_snaps = self._get_sf_snapshots(sf_vol['volumeID'])
snap = next((s for s in sf_snaps if s["name"] == sf_snap_name),
None)
if snap:
params = {'snapshotID': snap['snapshotID']}
self._issue_api_request('DeleteSnapshot',
params,
version='6.0')
return
LOG.warning(
"Snapshot %s not found, old style clones may not be deleted.",
snapshot.id)
def create_snapshot(self, snapshot):
sfaccount = self._get_sfaccount(snapshot['project_id'])
if sfaccount is None:
LOG.error("Account for Volume ID %s was not found on "
"the SolidFire Cluster while attempting "
"create_snapshot operation!", snapshot['volume_id'])
params = {'accountID': sfaccount['accountID']}
sf_vol = self._get_sf_volume(snapshot['volume_id'], params)
if sf_vol is None:
raise exception.VolumeNotFound(volume_id=snapshot['volume_id'])
params = {'volumeID': sf_vol['volumeID'],
'name': '%s%s' % (self.configuration.sf_volume_prefix,
snapshot['id'])}
rep_settings = self._retrieve_replication_settings(snapshot.volume)
if self.replication_enabled and rep_settings:
params['enableRemoteReplication'] = True
return self._do_snapshot_create(params)
@locked_source_id_operation
def create_volume_from_snapshot(self, volume, source):
"""Create a volume from the specified snapshot."""
if source.get('group_snapshot_id'):
# We're creating a volume from a snapshot that resulted from a
# consistency group snapshot. Because of the way that SolidFire
# creates cgsnaps, we have to search for the correct snapshot.
group_snapshot_id = source.get('group_snapshot_id')
snapshot_id = source.get('volume_id')
sf_name = self.configuration.sf_volume_prefix + group_snapshot_id
sf_group_snap = self._get_group_snapshot_by_name(sf_name)
return self._create_clone_from_sf_snapshot(snapshot_id,
group_snapshot_id,
sf_group_snap,
volume)
(_data, _sfaccount, model) = self._do_clone_volume(
source['id'],
volume)
return model
# Consistency group helpers
def _sf_create_group_snapshot(self, name, sf_volumes):
# Group snapshot is our version of a consistency group snapshot.
vol_ids = [vol['volumeID'] for vol in sf_volumes]
params = {'name': name,
'volumes': vol_ids}
snapshot_id = self._issue_api_request('CreateGroupSnapshot',
params,
version='7.0')
return snapshot_id['result']
def _group_snapshot_creator(self, gsnap_name, src_vol_ids):
# Common helper that takes in an array of OpenStack Volume UUIDs and
# creates a SolidFire group snapshot with them.
vol_names = [self.configuration.sf_volume_prefix + vol_id
for vol_id in src_vol_ids]
active_sf_vols = self._get_all_active_volumes()
target_vols = [vol for vol in active_sf_vols
if vol['name'] in vol_names]
if len(src_vol_ids) != len(target_vols):
msg = (_("Retrieved a different amount of SolidFire volumes for "
"the provided Cinder volumes. Retrieved: %(ret)s "
"Desired: %(des)s") % {"ret": len(target_vols),
"des": len(src_vol_ids)})
raise SolidFireDriverException(msg)
result = self._sf_create_group_snapshot(gsnap_name, target_vols)
return result
def _create_temp_group_snapshot(self, source_cg, source_vols):
# Take a temporary snapshot to create the volumes for a new
# consistency group.
gsnap_name = ("%(prefix)s%(id)s-tmp" %
{"prefix": self.configuration.sf_volume_prefix,
"id": source_cg['id']})
vol_ids = [vol['id'] for vol in source_vols]
self._group_snapshot_creator(gsnap_name, vol_ids)
return gsnap_name
def _list_group_snapshots(self):
result = self._issue_api_request('ListGroupSnapshots',
{},
version='7.0')
return result['result']['groupSnapshots']
def _get_group_snapshot_by_name(self, name):
target_snaps = self._list_group_snapshots()
target = next((snap for snap in target_snaps
if snap['name'] == name), None)
return target
def _delete_group_snapshot(self, gsnapid):
params = {'groupSnapshotID': gsnapid}
self._issue_api_request('DeleteGroupSnapshot',
params,
version='7.0')
def _delete_cgsnapshot_by_name(self, snap_name):
# Common function used to find and delete a snapshot.
target = self._get_group_snapshot_by_name(snap_name)
if not target:
msg = _("Failed to find group snapshot named: %s") % snap_name
raise SolidFireDriverException(msg)
self._delete_group_snapshot(target['groupSnapshotID'])
def _find_linked_snapshot(self, target_uuid, group_snap):
        # Group snapshots give each member snapshot the group snapshot's
        # name, so we have to trawl through the group's members to find the
        # SolidFire snapshot whose volumeID matches the SolidFire volume
        # backing the Cinder snapshot's source volume.
source_vol = self._get_sf_volume(target_uuid)
target_snap = next((sn for sn in group_snap['members']
if sn['volumeID'] == source_vol['volumeID']), None)
return target_snap
def _create_clone_from_sf_snapshot(self, target_uuid, src_uuid,
sf_group_snap, vol):
# Find the correct SolidFire backing snapshot.
sf_src_snap = self._find_linked_snapshot(target_uuid,
sf_group_snap)
_data, _sfaccount, model = self._do_clone_volume(src_uuid,
vol,
sf_src_snap)
model['id'] = vol['id']
model['status'] = 'available'
return model
def _map_sf_volumes(self, cinder_volumes, endpoint=None):
"""Get a list of SolidFire volumes.
        Builds a list of SolidFire volumes that match the given Cinder
        volume IDs, and adds a 'cinder_id' key to each entry so it can be
        matched back to Cinder.
"""
vols = self._issue_api_request(
'ListActiveVolumes', {},
endpoint=endpoint)['result']['volumes']
# FIXME(erlon): When we fetch only for the volume name, we miss
        # volumes that were brought to Cinder via cinder-manage.
vlist = (
[sfvol for sfvol in vols for cv in cinder_volumes if cv['id'] in
sfvol['name']])
for v in vlist:
v['cinder_id'] = v['name'].split(
self.configuration.sf_volume_prefix)[1]
return vlist
# Generic Volume Groups.
def create_group(self, ctxt, group):
# SolidFire does not have the concept of volume groups. We're going to
# play along with the group song and dance. There will be a lot of
# no-ops because of this.
if volume_utils.is_group_a_cg_snapshot_type(group):
return {'status': fields.GroupStatus.AVAILABLE}
# Blatantly ripping off this pattern from other drivers.
raise NotImplementedError()
def create_group_from_src(self, ctxt, group, volumes, group_snapshots=None,
snapshots=None, source_group=None,
source_vols=None):
# At this point this is just a pass-through.
if volume_utils.is_group_a_cg_snapshot_type(group):
return self._create_consistencygroup_from_src(
ctxt,
group,
volumes,
group_snapshots,
snapshots,
source_group,
source_vols)
# Default implementation handles other scenarios.
raise NotImplementedError()
def create_group_snapshot(self, ctxt, group_snapshot, snapshots):
# This is a pass-through to the old consistency group stuff.
if volume_utils.is_group_a_cg_snapshot_type(group_snapshot):
return self._create_cgsnapshot(ctxt, group_snapshot, snapshots)
# Default implementation handles other scenarios.
raise NotImplementedError()
def delete_group(self, ctxt, group, volumes):
# Delete a volume group. SolidFire does not track volume groups,
# however we do need to actually remove the member volumes of the
# group. Right now only consistent volume groups are supported.
if volume_utils.is_group_a_cg_snapshot_type(group):
return self._delete_consistencygroup(ctxt, group, volumes)
# Default implementation handles other scenarios.
raise NotImplementedError()
def update_group(self, ctxt, group, add_volumes=None, remove_volumes=None):
# Regarding consistency groups SolidFire does not track volumes, so
# this is a no-op. In the future with replicated volume groups this
# might actually do something.
if volume_utils.is_group_a_cg_snapshot_type(group):
return self._update_consistencygroup(ctxt,
group,
add_volumes,
remove_volumes)
# Default implementation handles other scenarios.
raise NotImplementedError()
def _create_consistencygroup_from_src(self, ctxt, group, volumes,
cgsnapshot, snapshots,
source_cg, source_vols):
if cgsnapshot and snapshots:
sf_name = self.configuration.sf_volume_prefix + cgsnapshot['id']
sf_group_snap = self._get_group_snapshot_by_name(sf_name)
# Go about creating volumes from provided snaps.
vol_models = []
for vol, snap in zip(volumes, snapshots):
vol_models.append(self._create_clone_from_sf_snapshot(
snap['volume_id'],
snap['id'],
sf_group_snap,
vol))
return ({'status': fields.GroupStatus.AVAILABLE},
vol_models)
elif source_cg and source_vols:
# Create temporary group snapshot.
gsnap_name = self._create_temp_group_snapshot(source_cg,
source_vols)
try:
sf_group_snap = self._get_group_snapshot_by_name(gsnap_name)
# For each temporary snapshot clone the volume.
vol_models = []
for vol in volumes:
vol_models.append(self._create_clone_from_sf_snapshot(
vol['source_volid'],
vol['source_volid'],
sf_group_snap,
vol))
finally:
self._delete_cgsnapshot_by_name(gsnap_name)
return {'status': fields.GroupStatus.AVAILABLE}, vol_models
def _create_cgsnapshot(self, ctxt, cgsnapshot, snapshots):
vol_ids = [snapshot['volume_id'] for snapshot in snapshots]
vol_names = [self.configuration.sf_volume_prefix + vol_id
for vol_id in vol_ids]
active_sf_vols = self._get_all_active_volumes()
target_vols = [vol for vol in active_sf_vols
if vol['name'] in vol_names]
if len(snapshots) != len(target_vols):
msg = (_("Retrieved a different amount of SolidFire volumes for "
"the provided Cinder snapshots. Retrieved: %(ret)s "
"Desired: %(des)s") % {"ret": len(target_vols),
"des": len(snapshots)})
raise SolidFireDriverException(msg)
snap_name = self.configuration.sf_volume_prefix + cgsnapshot['id']
self._sf_create_group_snapshot(snap_name, target_vols)
return None, None
def _update_consistencygroup(self, context, group,
add_volumes=None, remove_volumes=None):
# Similar to create_consistencygroup, SolidFire's lack of a consistency
# group object means there is nothing to update on the cluster.
return None, None, None
def _delete_cgsnapshot(self, ctxt, cgsnapshot, snapshots):
snap_name = self.configuration.sf_volume_prefix + cgsnapshot['id']
self._delete_cgsnapshot_by_name(snap_name)
return None, None
def delete_group_snapshot(self, context, group_snapshot, snapshots):
if volume_utils.is_group_a_cg_snapshot_type(group_snapshot):
return self._delete_cgsnapshot(context, group_snapshot, snapshots)
# Default implementation handles other scenarios.
raise NotImplementedError()
def _delete_consistencygroup(self, ctxt, group, volumes):
# TODO(chris_morrell): exception handling and return correctly updated
# volume_models.
for vol in volumes:
self.delete_volume(vol)
return None, None
def get_volume_stats(self, refresh=False):
"""Get volume status.
        If 'refresh' is True, run the update first. The name is a bit
        misleading, as the majority of the data here is cluster data.
"""
if refresh:
try:
self._update_cluster_status()
except SolidFireAPIException:
pass
LOG.debug("SolidFire cluster_stats: %s", self.cluster_stats)
return self.cluster_stats
def extend_volume(self, volume, new_size):
"""Extend an existing volume."""
sfaccount = self._get_sfaccount(volume['project_id'])
params = {'accountID': sfaccount['accountID']}
sf_vol = self._get_sf_volume(volume['id'], params)
if sf_vol is None:
LOG.error("Volume ID %s was not found on "
"the SolidFire Cluster while attempting "
"extend_volume operation!", volume['id'])
raise exception.VolumeNotFound(volume_id=volume['id'])
qos = self._retrieve_qos_setting(volume, new_size)
params = {
'volumeID': sf_vol['volumeID'],
'totalSize': int(new_size * units.Gi),
'qos': qos
}
self._issue_api_request('ModifyVolume',
params, version='5.0')
rep_settings = self._retrieve_replication_settings(volume)
if self.replication_enabled and rep_settings:
if len(sf_vol['volumePairs']) != 1:
LOG.error("Can't find remote pair while extending the "
"volume or multiple replication pairs found!")
raise exception.VolumeNotFound(volume_id=volume['id'])
tgt_endpoint = self.cluster_pairs[0]['endpoint']
target_vol_id = sf_vol['volumePairs'][0]['remoteVolumeID']
params2 = params.copy()
params2['volumeID'] = target_vol_id
self._issue_api_request('ModifyVolume',
params2, version='5.0',
endpoint=tgt_endpoint)
def _get_provisioned_capacity_iops(self):
response = self._issue_api_request('ListVolumes', {}, version='8.0')
volumes = response['result']['volumes']
LOG.debug("%s volumes present in cluster", len(volumes))
provisioned_cap = 0
provisioned_iops = 0
for vol in volumes:
provisioned_cap += vol['totalSize']
provisioned_iops += vol['qos']['minIOPS']
return provisioned_cap, provisioned_iops
def _update_cluster_status(self):
"""Retrieve status info for the Cluster."""
params = {}
data = {}
backend_name = self.configuration.safe_get('volume_backend_name')
data["volume_backend_name"] = backend_name or self.__class__.__name__
data["vendor_name"] = 'SolidFire Inc'
data["driver_version"] = self.VERSION
data["storage_protocol"] = 'iSCSI'
data['consistencygroup_support'] = True
data['consistent_group_snapshot_enabled'] = True
data['replication_enabled'] = self.replication_enabled
if self.replication_enabled:
data['replication'] = 'enabled'
data['active_cluster_mvip'] = self.active_cluster['mvip']
data['reserved_percentage'] = self.configuration.reserved_percentage
data['QoS_support'] = True
data['multiattach'] = True
try:
results = self._issue_api_request('GetClusterCapacity', params,
version='8.0')
except SolidFireAPIException:
data['total_capacity_gb'] = 0
data['free_capacity_gb'] = 0
self.cluster_stats = data
return
results = results['result']['clusterCapacity']
prov_cap, prov_iops = self._get_provisioned_capacity_iops()
if self.configuration.sf_provisioning_calc == 'usedSpace':
free_capacity = (
results['maxUsedSpace'] - results['usedSpace'])
data['total_capacity_gb'] = results['maxUsedSpace'] / units.Gi
data['thin_provisioning_support'] = True
data['provisioned_capacity_gb'] = prov_cap / units.Gi
data['max_over_subscription_ratio'] = (
self.configuration.max_over_subscription_ratio
)
else:
free_capacity = (
results['maxProvisionedSpace'] - results['usedSpace'])
data['total_capacity_gb'] = (
results['maxProvisionedSpace'] / units.Gi)
data['free_capacity_gb'] = float(free_capacity / units.Gi)
if (results['uniqueBlocksUsedSpace'] == 0 or
results['uniqueBlocks'] == 0 or
results['zeroBlocks'] == 0 or
results['nonZeroBlocks'] == 0):
data['compression_percent'] = 100
data['deduplication_percent'] = 100
data['thin_provision_percent'] = 100
else:
data['compression_percent'] = (
(float(results['uniqueBlocks'] * 4096) /
results['uniqueBlocksUsedSpace']) * 100)
data['deduplication_percent'] = (
float(results['nonZeroBlocks'] /
results['uniqueBlocks']) * 100)
data['thin_provision_percent'] = (
(float(results['nonZeroBlocks'] + results['zeroBlocks']) /
results['nonZeroBlocks']) * 100)
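        # Worked example with made-up numbers: 600,000 nonZeroBlocks,
        # 200,000 zeroBlocks, 300,000 uniqueBlocks and 614,400,000 bytes of
        # uniqueBlocksUsedSpace yield compression_percent = 200,
        # deduplication_percent = 200 and thin_provision_percent ~= 133.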
data['provisioned_iops'] = prov_iops
data['current_iops'] = results['currentIOPS']
data['average_iops'] = results['averageIOPS']
data['max_iops'] = results['maxIOPS']
data['peak_iops'] = results['peakIOPS']
data['shared_targets'] = False
self.cluster_stats = data
def initialize_connection(self, volume, connector):
"""Initialize the connection and return connection info.
Optionally checks and utilizes volume access groups.
"""
properties = self._sf_initialize_connection(volume, connector)
properties['data']['discard'] = True
return properties
def attach_volume(self, context, volume,
instance_uuid, host_name,
mountpoint):
sfaccount = self._get_sfaccount(volume['project_id'])
params = {'accountID': sfaccount['accountID']}
        # When retyping an attached volume, the volume id appears as a
        # target in 'migration_status'; otherwise it is None.
migration_status = volume.get('migration_status')
if migration_status and 'target' in migration_status:
__, vol_id = migration_status.split(':')
else:
vol_id = volume['id']
sf_vol = self._get_sf_volume(vol_id, params)
if sf_vol is None:
LOG.error("Volume ID %s was not found on "
"the SolidFire Cluster while attempting "
"attach_volume operation!", volume['id'])
raise exception.VolumeNotFound(volume_id=volume['id'])
attributes = sf_vol['attributes']
attributes['attach_time'] = volume.get('attach_time', None)
attributes['attached_to'] = instance_uuid
params = {
'volumeID': sf_vol['volumeID'],
'attributes': attributes
}
self._issue_api_request('ModifyVolume', params)
def terminate_connection(self, volume, properties, force):
return self._sf_terminate_connection(volume,
properties,
force)
def detach_volume(self, context, volume, attachment=None):
sfaccount = self._get_sfaccount(volume['project_id'])
params = {'accountID': sfaccount['accountID']}
sf_vol = self._get_sf_volume(volume['id'], params)
if sf_vol is None:
LOG.error("Volume ID %s was not found on "
"the SolidFire Cluster while attempting "
"detach_volume operation!", volume['id'])
raise exception.VolumeNotFound(volume_id=volume['id'])
attributes = sf_vol['attributes']
attributes['attach_time'] = None
attributes['attached_to'] = None
params = {
'volumeID': sf_vol['volumeID'],
'attributes': attributes
}
self._issue_api_request('ModifyVolume', params)
def accept_transfer(self, context, volume,
new_user, new_project):
sfaccount = self._get_sfaccount(volume['project_id'])
params = {'accountID': sfaccount['accountID']}
sf_vol = self._get_sf_volume(volume['id'], params)
if sf_vol is None:
LOG.error("Volume ID %s was not found on "
"the SolidFire Cluster while attempting "
"accept_transfer operation!", volume['id'])
raise exception.VolumeNotFound(volume_id=volume['id'])
if new_project != volume['project_id']:
# do a create_sfaccount here as this tenant
# may not exist on the cluster yet
sfaccount = self._get_create_account(new_project)
params = {
'volumeID': sf_vol['volumeID'],
'accountID': sfaccount['accountID']
}
self._issue_api_request('ModifyVolume',
params, version='5.0')
volume['project_id'] = new_project
volume['user_id'] = new_user
return self.target_driver.ensure_export(context, volume, None)
def _setup_intercluster_volume_migration(self, src_volume,
dst_cluster_ref):
LOG.info("Setting up cluster migration for volume [%s]",
src_volume.name)
        # We should be able to roll back in case something goes wrong
def _do_migrate_setup_rollback(src_sf_volume_id, dst_sf_volume_id):
# Removing volume pair in source cluster
params = {'volumeID': src_sf_volume_id}
self._issue_api_request('RemoveVolumePair', params, '8.0')
# Removing volume pair in destination cluster
params = {'volumeID': dst_sf_volume_id}
self._issue_api_request('RemoveVolumePair', params, '8.0',
endpoint=dst_cluster_ref["endpoint"])
# Destination volume should also be removed.
self._issue_api_request('DeleteVolume', params,
endpoint=dst_cluster_ref["endpoint"])
self._issue_api_request('PurgeDeletedVolume', params,
endpoint=dst_cluster_ref["endpoint"])
self._get_or_create_cluster_pairing(
dst_cluster_ref, check_connected=True)
dst_sf_account = self._get_create_account(
src_volume['project_id'], endpoint=dst_cluster_ref['endpoint'])
LOG.debug("Destination account is [%s]", dst_sf_account["username"])
params = self._get_default_volume_params(src_volume, dst_sf_account)
dst_volume = self._do_volume_create(
dst_sf_account, params, endpoint=dst_cluster_ref['endpoint'])
try:
self._create_volume_pairing(
src_volume, dst_volume, dst_cluster_ref)
except SolidFireReplicationPairingError:
with excutils.save_and_reraise_exception():
dst_sf_volid = int(dst_volume['provider_id'].split()[0])
src_sf_volid = int(src_volume['provider_id'].split()[0])
LOG.debug("Error pairing volume on remote cluster. Rolling "
"back and deleting volume %(vol)s at cluster "
"%(cluster)s.",
{'vol': dst_sf_volid,
'cluster': dst_cluster_ref['mvip']})
_do_migrate_setup_rollback(src_sf_volid, dst_sf_volid)
return dst_volume
def _do_intercluster_volume_migration_data_sync(self, src_volume,
src_sf_account,
dst_sf_volume_id,
dst_cluster_ref):
params = {'volumeID': dst_sf_volume_id, 'access': 'replicationTarget'}
self._issue_api_request('ModifyVolume',
params,
'8.0',
endpoint=dst_cluster_ref['endpoint'])
def _wait_sync_completed():
vol_params = None
if src_sf_account:
vol_params = {'accountID': src_sf_account['accountID']}
sf_vol = self._get_sf_volume(src_volume.id, vol_params)
state = sf_vol['volumePairs'][0]['remoteReplication']['state']
if state == 'Active':
raise loopingcall.LoopingCallDone(sf_vol)
LOG.debug("Waiting volume data to sync. "
"Replication state is [%s]", state)
try:
timer = loopingcall.FixedIntervalWithTimeoutLoopingCall(
_wait_sync_completed)
timer.start(
interval=30,
timeout=self.configuration.sf_volume_pairing_timeout).wait()
except loopingcall.LoopingCallTimeOut:
msg = _("Timeout waiting volumes to sync.")
raise SolidFireDataSyncTimeoutError(reason=msg)
self._do_intercluster_volume_migration_complete_data_sync(
dst_sf_volume_id, dst_cluster_ref)
def _do_intercluster_volume_migration_complete_data_sync(self,
sf_volume_id,
cluster_ref):
params = {'volumeID': sf_volume_id, 'access': 'readWrite'}
self._issue_api_request('ModifyVolume',
params,
'8.0',
endpoint=cluster_ref['endpoint'])
def _cleanup_intercluster_volume_migration(self, src_volume,
dst_sf_volume_id,
dst_cluster_ref):
src_sf_volume_id = int(src_volume['provider_id'].split()[0])
# Removing volume pair in destination cluster
params = {'volumeID': dst_sf_volume_id}
self._issue_api_request('RemoveVolumePair', params, '8.0',
endpoint=dst_cluster_ref["endpoint"])
# Removing volume pair in source cluster
params = {'volumeID': src_sf_volume_id}
self._issue_api_request('RemoveVolumePair', params, '8.0')
# Destination volume should also be removed.
self._issue_api_request('DeleteVolume', params)
self._issue_api_request('PurgeDeletedVolume', params)
def _do_intercluster_volume_migration(self, volume, host, dst_config):
LOG.debug("Start migrating volume [%(name)s] to cluster [%(cluster)s]",
{"name": volume.name, "cluster": host["host"]})
dst_endpoint = self._build_endpoint_info(backend_conf=dst_config)
LOG.debug("Destination cluster mvip is [%s]", dst_endpoint["mvip"])
dst_cluster_ref = self._create_cluster_reference(dst_endpoint)
LOG.debug("Destination cluster reference created. API version is [%s]",
dst_cluster_ref["clusterAPIVersion"])
dst_volume = self._setup_intercluster_volume_migration(
volume, dst_cluster_ref)
dst_sf_volume_id = int(dst_volume["provider_id"].split()[0])
# FIXME(sfernand): should pass src account to improve performance
self._do_intercluster_volume_migration_data_sync(
volume, None, dst_sf_volume_id, dst_cluster_ref)
self._cleanup_intercluster_volume_migration(
volume, dst_sf_volume_id, dst_cluster_ref)
return dst_volume
def migrate_volume(self, ctxt, volume, host):
"""Migrate a SolidFire volume to the specified host/backend"""
LOG.info("Migrate volume %(vol_id)s to %(host)s.",
{"vol_id": volume.id, "host": host["host"]})
if volume.status != fields.VolumeStatus.AVAILABLE:
msg = _("Volume status must be 'available' to execute "
"storage assisted migration.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
if volume.is_replicated():
msg = _("Migration of replicated volumes is not allowed.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
src_backend = volume_utils.extract_host(
volume.host, "backend").split("@")[1]
dst_backend = volume_utils.extract_host(
host["host"], "backend").split("@")[1]
if src_backend == dst_backend:
LOG.info("Same backend, nothing to do.")
return True, {}
try:
dst_config = volume_utils.get_backend_configuration(
dst_backend, self.get_driver_options())
except exception.ConfigNotFound:
msg = _("Destination backend config not found. Check if "
"destination backend stanza is properly configured in "
"cinder.conf, or add parameter --force-host-copy True "
"to perform host-assisted migration.")
raise exception.VolumeMigrationFailed(reason=msg)
if self.active_cluster['mvip'] == dst_config.san_ip:
LOG.info("Same cluster, nothing to do.")
return True, {}
else:
LOG.info("Source and destination clusters are different. "
"A cluster migration will be performed.")
LOG.debug("Active cluster: [%(active)s], "
"Destination: [%(dst)s]",
{"active": self.active_cluster['mvip'],
"dst": dst_config.san_ip})
updates = self._do_intercluster_volume_migration(volume, host,
dst_config)
LOG.info("Successfully migrated volume %(vol_id)s to %(host)s.",
{"vol_id": volume.id, "host": host["host"]})
return True, updates
def retype(self, ctxt, volume, new_type, diff, host):
"""Convert the volume to be of the new type.
Returns a boolean indicating whether the retype occurred and a dict
with the updates on the volume.
:param ctxt: Context
:param volume: A dictionary describing the volume to migrate
:param new_type: A dictionary describing the volume type to convert to
:param diff: A dictionary with the difference between the two types
:param host: A dictionary describing the host to migrate to, where
host['host'] is its name, and host['capabilities'] is a
dictionary of its reported capabilities (Not Used).
"""
model_update = {}
LOG.debug("Retyping volume %(vol)s to new type %(type)s",
{'vol': volume.id, 'type': new_type})
sfaccount = self._get_sfaccount(volume['project_id'])
params = {'accountID': sfaccount['accountID']}
sf_vol = self._get_sf_volume(volume['id'], params)
if sf_vol is None:
raise exception.VolumeNotFound(volume_id=volume['id'])
if self.replication_enabled:
ctxt = context.get_admin_context()
src_rep_type = self._set_rep_by_volume_type(
ctxt, volume.volume_type_id)
dst_rep_type = self._set_rep_by_volume_type(ctxt, new_type['id'])
if src_rep_type != dst_rep_type:
if dst_rep_type:
rep_settings = self._retrieve_replication_settings(volume)
rep_params = self._get_default_volume_params(volume)
volume['volumeID'] = (
int(volume.provider_id.split()[0]))
rep_updates = self._replicate_volume(volume, rep_params,
sfaccount,
rep_settings)
else:
rep_updates = self._disable_replication(volume)
if rep_updates:
model_update.update(rep_updates)
attributes = sf_vol['attributes']
attributes['retyped_at'] = timeutils.utcnow().isoformat()
params = {'volumeID': sf_vol['volumeID'], 'attributes': attributes}
qos = self._set_qos_by_volume_type(ctxt, new_type['id'],
volume.get('size'))
if qos:
params['qos'] = qos
self._issue_api_request('ModifyVolume', params)
return True, model_update
def manage_existing(self, volume, external_ref):
"""Manages an existing SolidFire Volume (import to Cinder).
        Renames the volume to match the name Cinder expects for managed
        volumes. QoS, emulation, account/tenant and replication settings
        also need to be taken into account.
"""
sfid = external_ref.get('source-id', None)
sfname = external_ref.get('name', None)
LOG.debug("Managing volume %(id)s to ref %(ref)s",
{'id': volume.id, 'ref': external_ref})
if sfid is None:
raise SolidFireAPIException(_("Manage existing volume "
"requires 'source-id'."))
# First get the volume on the SF cluster (MUST be active)
params = {'startVolumeID': sfid,
'limit': 1}
vols = self._issue_api_request(
'ListActiveVolumes', params)['result']['volumes']
sf_ref = vols[0]
sfaccount = self._get_create_account(volume['project_id'])
import_time = volume['created_at'].isoformat()
attributes = {'uuid': volume['id'],
'is_clone': 'False',
'os_imported_at': import_time,
'old_name': sfname}
params = self._get_default_volume_params(volume)
params['volumeID'] = sf_ref['volumeID']
params['attributes'] = attributes
params.pop('totalSize')
self._issue_api_request('ModifyVolume',
params, version='5.0')
try:
rep_updates = {}
rep_settings = self._retrieve_replication_settings(volume)
if self.replication_enabled and rep_settings:
if len(sf_ref['volumePairs']) != 0:
msg = _("Not possible to manage a volume with "
"replicated pair! Please split the volume pairs.")
LOG.error(msg)
raise SolidFireDriverException(msg)
else:
params = self._get_default_volume_params(volume)
params['volumeID'] = sf_ref['volumeID']
volume['volumeID'] = sf_ref['volumeID']
params['totalSize'] = sf_ref['totalSize']
rep_updates = self._replicate_volume(
volume, params, sfaccount, rep_settings)
except Exception:
with excutils.save_and_reraise_exception():
                # If replication fails mid-process, we need to restore the
                # volume properties to the way they were before.
LOG.error("Error trying to replicate volume %s",
volume.id)
params = {'volumeID': sf_ref['volumeID']}
params['attributes'] = sf_ref['attributes']
self._issue_api_request('ModifyVolume',
params, version='5.0')
model_update = self._get_model_info(sfaccount, sf_ref['volumeID'])
model_update.update(rep_updates)
return model_update
def manage_existing_get_size(self, volume, external_ref):
"""Return size of an existing LV for manage_existing.
existing_ref is a dictionary of the form:
{'name': <name of existing volume on SF Cluster>}
"""
sfid = external_ref.get('source-id', None)
if sfid is None:
raise SolidFireAPIException(_("Manage existing get size "
"requires 'id'."))
params = {'startVolumeID': int(sfid),
'limit': 1}
vols = self._issue_api_request(
'ListActiveVolumes', params)['result']['volumes']
if len(vols) != 1:
msg = _("Provided volume id does not exist on SolidFire backend.")
raise SolidFireDriverException(msg)
return int(math.ceil(float(vols[0]['totalSize']) / units.Gi))
def unmanage(self, volume):
"""Mark SolidFire Volume as unmanaged (export from Cinder)."""
sfaccount = self._get_sfaccount(volume['project_id'])
if sfaccount is None:
LOG.error("Account for Volume ID %s was not found on "
"the SolidFire Cluster while attempting "
"unmanage operation!", volume['id'])
raise SolidFireAPIException(_("Failed to find account "
"for volume."))
params = {'accountID': sfaccount['accountID']}
sf_vol = self._get_sf_volume(volume['id'], params)
if sf_vol is None:
raise exception.VolumeNotFound(volume_id=volume['id'])
export_time = timeutils.utcnow().isoformat()
attributes = sf_vol['attributes']
attributes['os_exported_at'] = export_time
params = {'volumeID': int(sf_vol['volumeID']),
'attributes': attributes}
self._issue_api_request('ModifyVolume',
params, version='5.0')
def _failover_volume(self, tgt_vol, tgt_cluster, src_vol=None):
"""Modify remote volume to R/W mode."""
if src_vol:
# Put the src in tgt mode assuming it's still available
# catch the exception if the cluster isn't available and
# continue on
params = {'volumeID': src_vol['volumeID'],
'access': 'replicationTarget'}
try:
self._issue_api_request('ModifyVolume', params)
except SolidFireAPIException:
# FIXME
pass
# Now call out to the remote and make the tgt our new src
params = {'volumeID': tgt_vol['volumeID'],
'access': 'readWrite'}
self._issue_api_request('ModifyVolume', params,
endpoint=tgt_cluster['endpoint'])
def failover(self, context, volumes, secondary_id=None, groups=None):
"""Failover to replication target.
        In order to do a failback, you MUST specify the original/default
        cluster using the secondary_id option. You can do this simply by
        specifying:
`secondary_id=default`
"""
remote = None
failback = False
volume_updates = []
if not self.replication_enabled:
LOG.error("SolidFire driver received failover_host "
"request, however replication is NOT "
"enabled.")
raise exception.UnableToFailOver(reason=_("Failover requested "
"on non replicated "
"backend."))
# NOTE(erlon): For now we only support one replication target device.
# So, there are two cases we have to deal with here:
        # 1. The caller specified a backend target to fail over to (this
        #    must be the backend_id as defined in replication_device; any
        #    other value will raise an error). If the user does not specify
        #    anything, we also fall into this case.
# 2. Caller wants to failback and therefore sets backend_id=default.
secondary_id = secondary_id.lower() if secondary_id else None
if secondary_id == "default" and not self.failed_over:
msg = _("SolidFire driver received failover_host "
"specifying failback to default, the "
"host however is not in `failed_over` "
"state.")
raise exception.InvalidReplicationTarget(msg)
elif secondary_id == "default" and self.failed_over:
LOG.info("Failing back to primary cluster.")
remote = self._create_cluster_reference()
failback = True
else:
repl_configs = self.configuration.replication_device[0]
if secondary_id and repl_configs['backend_id'] != secondary_id:
msg = _("Replication id (%s) does not match the configured "
"one in cinder.conf.") % secondary_id
raise exception.InvalidReplicationTarget(msg)
LOG.info("Failing over to secondary cluster %s.", secondary_id)
remote = self.cluster_pairs[0]
LOG.debug("Target cluster to failover: %s.",
{'name': remote['name'],
'mvip': remote['mvip'],
'clusterAPIVersion': remote['clusterAPIVersion']})
target_vols = self._map_sf_volumes(volumes,
endpoint=remote['endpoint'])
LOG.debug("Total Cinder volumes found in target: %d",
len(target_vols))
primary_vols = None
try:
primary_vols = self._map_sf_volumes(volumes)
LOG.debug("Total Cinder volumes found in primary cluster: %d",
len(primary_vols))
except SolidFireAPIException:
# API Request failed on source. Failover/failback will skip next
# calls to it.
pass
for v in volumes:
if v['status'] == "error":
LOG.debug("Skipping operation for Volume %s as it is "
"on error state.", v['id'])
continue
target_vlist = [sfv for sfv in target_vols
if sfv['cinder_id'] == v['id']]
if len(target_vlist) > 0:
target_vol = target_vlist[0]
if primary_vols:
vols = [sfv for sfv in primary_vols
if sfv['cinder_id'] == v['id']]
if not vols:
LOG.error("SolidFire driver cannot proceed. "
"Could not find volume %s in "
"back-end storage.", v['id'])
raise exception.UnableToFailOver(
reason=_("Cannot find cinder volume in "
"back-end storage."))
# Have at least one cinder volume in storage
primary_vol = vols[0]
else:
primary_vol = None
LOG.info('Failing-over volume %s.', v.id)
LOG.debug('Target vol: %s',
{'access': target_vol['access'],
'accountID': target_vol['accountID'],
'name': target_vol['name'],
'status': target_vol['status'],
'volumeID': target_vol['volumeID']})
LOG.debug('Primary vol: %s',
{'access': primary_vol['access'],
'accountID': primary_vol['accountID'],
'name': primary_vol['name'],
'status': primary_vol['status'],
'volumeID': primary_vol['volumeID']})
try:
self._failover_volume(target_vol, remote, primary_vol)
sf_account = self._get_create_account(
v.project_id, endpoint=remote['endpoint'])
LOG.debug("Target account: %s", sf_account['accountID'])
conn_info = self._build_connection_info(
sf_account, target_vol, endpoint=remote['endpoint'])
# volume status defaults to failed-over
replication_status = 'failed-over'
# in case of a failback, volume status must be reset to its
# original state
if failback:
replication_status = 'enabled'
vol_updates = {
'volume_id': v['id'],
'updates': {
'replication_status': replication_status
}
}
vol_updates['updates'].update(conn_info)
volume_updates.append(vol_updates)
except Exception:
volume_updates.append({'volume_id': v['id'],
'updates': {'status': 'error', }})
LOG.exception("Error trying to failover volume %s.",
v['id'])
else:
volume_updates.append({'volume_id': v['id'],
'updates': {'status': 'error', }})
return '' if failback else remote['backend_id'], volume_updates, []
def failover_completed(self, context, active_backend_id=None):
"""Update volume node when `failover` is completed.
Expects the following scenarios:
1) active_backend_id='' when failing back
2) active_backend_id=<secondary_backend_id> when failing over
3) When `failover` raises an Exception, this will be called
with the previous active_backend_id (Will be empty string
in case backend wasn't in failed-over state).
"""
if not active_backend_id:
LOG.info("Failback completed. "
"Switching active cluster back to default.")
self.active_cluster = self._create_cluster_reference()
self.failed_over = False
# Recreating cluster pairs after a successful failback
if self.configuration.replication_device:
self._set_cluster_pairs()
self.replication_enabled = True
else:
LOG.info("Failover completed. "
"Switching active cluster to %s.", active_backend_id)
self.active_cluster = self.cluster_pairs[0]
self.failed_over = True
def failover_host(self, context, volumes, secondary_id=None, groups=None):
"""Failover to replication target in non-clustered deployment."""
active_cluster_id, volume_updates, group_updates = (
self.failover(context, volumes, secondary_id, groups))
self.failover_completed(context, active_cluster_id)
return active_cluster_id, volume_updates, group_updates
def freeze_backend(self, context):
"""Freeze backend notification."""
pass
def thaw_backend(self, context):
"""Thaw backend notification."""
pass
def revert_to_snapshot(self, context, volume, snapshot):
"""Revert a volume to a given snapshot."""
sfaccount = self._get_sfaccount(volume.project_id)
params = {'accountID': sfaccount['accountID']}
sf_vol = self._get_sf_volume(volume.id, params)
if sf_vol is None:
LOG.error("Volume ID %s was not found on "
"the SolidFire Cluster while attempting "
"revert_to_snapshot operation!", volume.id)
raise exception.VolumeNotFound(volume_id=volume['id'])
params['volumeID'] = sf_vol['volumeID']
sf_snap_name = '%s%s' % (self.configuration.sf_volume_prefix,
snapshot.id)
sf_snaps = self._get_sf_snapshots(sf_vol['volumeID'])
snap = next((s for s in sf_snaps if s["name"] == sf_snap_name),
None)
if not snap:
LOG.error("Snapshot ID %s was not found on "
"the SolidFire Cluster while attempting "
"revert_to_snapshot operation!", snapshot.id)
raise exception.VolumeSnapshotNotFound(volume_id=volume.id)
params['snapshotID'] = snap['snapshotID']
params['saveCurrentState'] = 'false'
self._issue_api_request('RollbackToSnapshot',
params,
version='6.0')
class SolidFireISCSI(iscsi_driver.SanISCSITarget):
def __init__(self, *args, **kwargs):
super(SolidFireISCSI, self).__init__(*args, **kwargs)
self.sf_driver = kwargs.get('solidfire_driver')
def __getattr__(self, attr):
if hasattr(self.sf_driver, attr):
return getattr(self.sf_driver, attr)
else:
msg = _('Attribute: %s not found.') % attr
raise NotImplementedError(msg)
def _do_iscsi_export(self, volume):
sfaccount = self._get_sfaccount(volume['project_id'])
model_update = {}
model_update['provider_auth'] = ('CHAP %s %s'
% (sfaccount['username'],
sfaccount['targetSecret']))
return model_update
def create_export(self, context, volume, volume_path):
return self._do_iscsi_export(volume)
def ensure_export(self, context, volume, volume_path):
try:
return self._do_iscsi_export(volume)
except SolidFireAPIException:
return None
# Following are abc's that we make sure are caught and
# paid attention to. In our case we don't use them
# so just stub them out here.
def remove_export(self, context, volume):
pass
def terminate_connection(self, volume, connector, **kwargs):
pass
def _sf_initialize_connection(self, volume, connector):
"""Initialize the connection and return connection info.
Optionally checks and utilizes volume access groups.
"""
if self.configuration.sf_enable_vag:
iqn = connector['initiator']
provider_id = volume['provider_id']
vol_id = int(provider_id.split()[0])
# safe_create_vag may opt to reuse vs create a vag, so we need to
# add our vol_id.
vag_id = self._safe_create_vag(iqn, vol_id)
self._add_volume_to_vag(vol_id, iqn, vag_id)
# Continue along with default behavior
return super(SolidFireISCSI, self).initialize_connection(volume,
connector)
def _sf_terminate_connection(self, volume, properties, force):
"""Terminate the volume connection.
Optionally remove volume from volume access group.
If the VAG is empty then the VAG is also removed.
"""
if self.configuration.sf_enable_vag:
provider_id = volume['provider_id']
vol_id = int(provider_id.split()[0])
if properties:
iqn = properties['initiator']
vag = self._get_vags_by_name(iqn)
if vag and not volume['multiattach']:
# Multiattach causes problems with removing volumes from
# VAGs.
# Compromise solution for now is to remove multiattach
# volumes from VAGs during volume deletion.
vag = vag[0]
vag_id = vag['volumeAccessGroupID']
if [vol_id] == vag['volumes']:
self._remove_vag(vag_id)
elif vol_id in vag['volumes']:
self._remove_volume_from_vag(vol_id, vag_id)
else:
self._remove_volume_from_vags(vol_id)
return super(SolidFireISCSI, self).terminate_connection(volume,
properties,
force=force)
| apache-2.0 | 6,493,206,346,257,849,000 | 41.267921 | 79 | 0.544263 | false |
rnicoll/bitcoin | test/functional/feature_taproot.py | 8 | 87479 | #!/usr/bin/env python3
# Copyright (c) 2019-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Test Taproot softfork (BIPs 340-342)
from test_framework.blocktools import (
create_coinbase,
create_block,
add_witness_commitment,
MAX_BLOCK_SIGOPS_WEIGHT,
NORMAL_GBT_REQUEST_PARAMS,
WITNESS_SCALE_FACTOR,
)
from test_framework.messages import (
COutPoint,
CTransaction,
CTxIn,
CTxInWitness,
CTxOut,
ToHex,
)
from test_framework.script import (
ANNEX_TAG,
CScript,
CScriptNum,
CScriptOp,
LEAF_VERSION_TAPSCRIPT,
LegacySignatureHash,
LOCKTIME_THRESHOLD,
MAX_SCRIPT_ELEMENT_SIZE,
OP_0,
OP_1,
OP_2,
OP_3,
OP_4,
OP_5,
OP_6,
OP_7,
OP_8,
OP_9,
OP_10,
OP_11,
OP_12,
OP_16,
OP_2DROP,
OP_2DUP,
OP_CHECKMULTISIG,
OP_CHECKMULTISIGVERIFY,
OP_CHECKSIG,
OP_CHECKSIGADD,
OP_CHECKSIGVERIFY,
OP_CODESEPARATOR,
OP_DROP,
OP_DUP,
OP_ELSE,
OP_ENDIF,
OP_EQUAL,
OP_EQUALVERIFY,
OP_HASH160,
OP_IF,
OP_NOP,
OP_NOT,
OP_NOTIF,
OP_PUSHDATA1,
OP_RETURN,
OP_SWAP,
OP_VERIFY,
SIGHASH_DEFAULT,
SIGHASH_ALL,
SIGHASH_NONE,
SIGHASH_SINGLE,
SIGHASH_ANYONECANPAY,
SegwitV0SignatureHash,
TaprootSignatureHash,
is_op_success,
taproot_construct,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_raises_rpc_error, assert_equal
from test_framework.key import generate_privkey, compute_xonly_pubkey, sign_schnorr, tweak_add_privkey, ECKey
from test_framework.address import (
hash160,
sha256,
)
from collections import OrderedDict, namedtuple
from io import BytesIO
import json
import hashlib
import os
import random
# === Framework for building spending transactions. ===
#
# The computation is represented as a "context" dict, whose entries store potentially-unevaluated expressions that
# refer to lower-level ones. By overwriting these expression, many aspects - both high and low level - of the signing
# process can be overridden.
#
# Specifically, a context object is a dict that maps names to compositions of:
# - values
# - lists of values
# - callables which, when fed the context object as argument, produce any of these
#
# The DEFAULT_CONTEXT object specifies a standard signing process, with many overridable knobs.
#
# The get(ctx, name) function can evaluate a name, and cache its result in the context.
# getter(name) can be used to construct a callable that evaluates name. For example:
#
# ctx1 = {**DEFAULT_CONTEXT, "inputs": [getter("sign"), b'\x01']}
#
# creates a context where the script inputs are a signature plus the bytes 0x01.
#
# override(expr, name1=expr1, name2=expr2, ...) can be used to cause an expression to be evaluated in a selectively
# modified context. For example:
#
# ctx2 = {**DEFAULT_CONTEXT, "sighash": override(default_sighash, hashtype=SIGHASH_DEFAULT)}
#
# creates a context ctx2 where the sighash is modified to use hashtype=SIGHASH_DEFAULT. This differs from
#
# ctx3 = {**DEFAULT_CONTEXT, "hashtype": SIGHASH_DEFAULT}
#
# in that ctx3 will globally use hashtype=SIGHASH_DEFAULT (including in the hashtype byte appended to the signature)
# while ctx2 only uses the modified hashtype inside the sighash calculation.
def deep_eval(ctx, expr):
"""Recursively replace any callables c in expr (including inside lists) with c(ctx)."""
while callable(expr):
expr = expr(ctx)
if isinstance(expr, list):
expr = [deep_eval(ctx, x) for x in expr]
return expr
# Data type to represent fully-evaluated expressions in a context dict (so we can avoid reevaluating them).
Final = namedtuple("Final", "value")
def get(ctx, name):
"""Evaluate name in context ctx."""
assert name in ctx, "Missing '%s' in context" % name
expr = ctx[name]
if not isinstance(expr, Final):
# Evaluate and cache the result.
expr = Final(deep_eval(ctx, expr))
ctx[name] = expr
return expr.value
def getter(name):
"""Return a callable that evaluates name in its passed context."""
return lambda ctx: get(ctx, name)
def override(expr, **kwargs):
"""Return a callable that evaluates expr in a modified context."""
return lambda ctx: deep_eval({**ctx, **kwargs}, expr)
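# The following sketch (added for illustration; not used by the tests below) shows how the helpers above
# compose. The entry names "base", "doubled" and "both" are hypothetical and do not appear in DEFAULT_CONTEXT.
def _context_framework_example():
    """Toy demonstration of lazy evaluation, caching, and override() in a context dict."""
    ctx = {
        "base": 21,
        "doubled": lambda c: get(c, "base") * 2,
        "both": [getter("base"), getter("doubled")],
    }
    # override() evaluates an expression under a selectively modified copy of the context.
    assert deep_eval(dict(ctx), override(getter("doubled"), base=5)) == 10
    # Plain lookups evaluate lazily and cache their results as Final entries in ctx.
    assert get(ctx, "doubled") == 42
    assert get(ctx, "both") == [21, 42]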
# === Implementations for the various default expressions in DEFAULT_CONTEXT ===
def default_hashtype(ctx):
"""Default expression for "hashtype": SIGHASH_DEFAULT for taproot, SIGHASH_ALL otherwise."""
mode = get(ctx, "mode")
if mode == "taproot":
return SIGHASH_DEFAULT
else:
return SIGHASH_ALL
def default_tapleaf(ctx):
"""Default expression for "tapleaf": looking up leaf in tap[2]."""
return get(ctx, "tap").leaves[get(ctx, "leaf")]
def default_script_taproot(ctx):
"""Default expression for "script_taproot": tapleaf.script."""
return get(ctx, "tapleaf").script
def default_leafversion(ctx):
"""Default expression for "leafversion": tapleaf.version"""
return get(ctx, "tapleaf").version
def default_negflag(ctx):
"""Default expression for "negflag": tap.negflag."""
return get(ctx, "tap").negflag
def default_pubkey_internal(ctx):
"""Default expression for "pubkey_internal": tap.internal_pubkey."""
return get(ctx, "tap").internal_pubkey
def default_merklebranch(ctx):
"""Default expression for "merklebranch": tapleaf.merklebranch."""
return get(ctx, "tapleaf").merklebranch
def default_controlblock(ctx):
"""Default expression for "controlblock": combine leafversion, negflag, pubkey_internal, merklebranch."""
return bytes([get(ctx, "leafversion") + get(ctx, "negflag")]) + get(ctx, "pubkey_internal") + get(ctx, "merklebranch")
def default_sighash(ctx):
"""Default expression for "sighash": depending on mode, compute BIP341, BIP143, or legacy sighash."""
tx = get(ctx, "tx")
idx = get(ctx, "idx")
hashtype = get(ctx, "hashtype_actual")
mode = get(ctx, "mode")
if mode == "taproot":
# BIP341 signature hash
utxos = get(ctx, "utxos")
annex = get(ctx, "annex")
if get(ctx, "leaf") is not None:
codeseppos = get(ctx, "codeseppos")
leaf_ver = get(ctx, "leafversion")
script = get(ctx, "script_taproot")
return TaprootSignatureHash(tx, utxos, hashtype, idx, scriptpath=True, script=script, leaf_ver=leaf_ver, codeseparator_pos=codeseppos, annex=annex)
else:
return TaprootSignatureHash(tx, utxos, hashtype, idx, scriptpath=False, annex=annex)
elif mode == "witv0":
# BIP143 signature hash
scriptcode = get(ctx, "scriptcode")
utxos = get(ctx, "utxos")
return SegwitV0SignatureHash(scriptcode, tx, idx, hashtype, utxos[idx].nValue)
else:
# Pre-segwit signature hash
scriptcode = get(ctx, "scriptcode")
return LegacySignatureHash(scriptcode, tx, idx, hashtype)[0]
def default_tweak(ctx):
"""Default expression for "tweak": None if a leaf is specified, tap[0] otherwise."""
if get(ctx, "leaf") is None:
return get(ctx, "tap").tweak
return None
def default_key_tweaked(ctx):
"""Default expression for "key_tweaked": key if tweak is None, tweaked with it otherwise."""
key = get(ctx, "key")
tweak = get(ctx, "tweak")
if tweak is None:
return key
else:
return tweak_add_privkey(key, tweak)
def default_signature(ctx):
"""Default expression for "signature": BIP340 signature or ECDSA signature depending on mode."""
sighash = get(ctx, "sighash")
if get(ctx, "mode") == "taproot":
key = get(ctx, "key_tweaked")
flip_r = get(ctx, "flag_flip_r")
flip_p = get(ctx, "flag_flip_p")
return sign_schnorr(key, sighash, flip_r=flip_r, flip_p=flip_p)
else:
key = get(ctx, "key")
return key.sign_ecdsa(sighash)
def default_hashtype_actual(ctx):
"""Default expression for "hashtype_actual": hashtype, unless mismatching SIGHASH_SINGLE in taproot."""
hashtype = get(ctx, "hashtype")
mode = get(ctx, "mode")
if mode != "taproot":
return hashtype
idx = get(ctx, "idx")
tx = get(ctx, "tx")
if hashtype & 3 == SIGHASH_SINGLE and idx >= len(tx.vout):
return (hashtype & ~3) | SIGHASH_NONE
return hashtype
def default_bytes_hashtype(ctx):
"""Default expression for "bytes_hashtype": bytes([hashtype_actual]) if not 0, b"" otherwise."""
return bytes([x for x in [get(ctx, "hashtype_actual")] if x != 0])
def default_sign(ctx):
"""Default expression for "sign": concatenation of signature and bytes_hashtype."""
return get(ctx, "signature") + get(ctx, "bytes_hashtype")
def default_inputs_keypath(ctx):
"""Default expression for "inputs_keypath": a signature."""
return [get(ctx, "sign")]
def default_witness_taproot(ctx):
"""Default expression for "witness_taproot", consisting of inputs, script, control block, and annex as needed."""
annex = get(ctx, "annex")
suffix_annex = []
if annex is not None:
suffix_annex = [annex]
if get(ctx, "leaf") is None:
return get(ctx, "inputs_keypath") + suffix_annex
else:
return get(ctx, "inputs") + [bytes(get(ctx, "script_taproot")), get(ctx, "controlblock")] + suffix_annex
def default_witness_witv0(ctx):
"""Default expression for "witness_witv0", consisting of inputs and witness script, as needed."""
script = get(ctx, "script_witv0")
inputs = get(ctx, "inputs")
if script is None:
return inputs
else:
return inputs + [script]
def default_witness(ctx):
"""Default expression for "witness", delegating to "witness_taproot" or "witness_witv0" as needed."""
mode = get(ctx, "mode")
if mode == "taproot":
return get(ctx, "witness_taproot")
elif mode == "witv0":
return get(ctx, "witness_witv0")
else:
return []
def default_scriptsig(ctx):
"""Default expression for "scriptsig", consisting of inputs and redeemscript, as needed."""
scriptsig = []
mode = get(ctx, "mode")
if mode == "legacy":
scriptsig = get(ctx, "inputs")
redeemscript = get(ctx, "script_p2sh")
if redeemscript is not None:
scriptsig += [bytes(redeemscript)]
return scriptsig
# The default context object.
DEFAULT_CONTEXT = {
# == The main expressions to evaluate. Only override these for unusual or invalid spends. ==
# The overall witness stack, as a list of bytes objects.
"witness": default_witness,
# The overall scriptsig, as a list of CScript objects (to be concatenated) and bytes objects (to be pushed)
"scriptsig": default_scriptsig,
# == Expressions you'll generally only override for intentionally invalid spends. ==
# The witness stack for spending a taproot output.
"witness_taproot": default_witness_taproot,
# The witness stack for spending a P2WPKH/P2WSH output.
"witness_witv0": default_witness_witv0,
# The script inputs for a taproot key path spend.
"inputs_keypath": default_inputs_keypath,
# The actual hashtype to use (usually equal to hashtype, but in taproot SIGHASH_SINGLE is not always allowed).
"hashtype_actual": default_hashtype_actual,
# The bytes object for a full signature (including hashtype byte, if needed).
"bytes_hashtype": default_bytes_hashtype,
# A full script signature (bytes including hashtype, if needed)
"sign": default_sign,
# An ECDSA or Schnorr signature (excluding hashtype byte).
"signature": default_signature,
# The 32-byte tweaked key (equal to key for script path spends, or key+tweak for key path spends).
"key_tweaked": default_key_tweaked,
# The tweak to use (None for script path spends, the actual tweak for key path spends).
"tweak": default_tweak,
# The sighash value (32 bytes)
"sighash": default_sighash,
# The information about the chosen script path spend (TaprootLeafInfo object).
"tapleaf": default_tapleaf,
# The script to push, and include in the sighash, for a taproot script path spend.
"script_taproot": default_script_taproot,
# The internal pubkey for a taproot script path spend (32 bytes).
"pubkey_internal": default_pubkey_internal,
# The negation flag of the internal pubkey for a taproot script path spend.
"negflag": default_negflag,
# The leaf version to include in the sighash (this does not affect the one in the control block).
"leafversion": default_leafversion,
# The Merkle path to include in the control block for a script path spend.
"merklebranch": default_merklebranch,
# The control block to push for a taproot script path spend.
"controlblock": default_controlblock,
# Whether to produce signatures with invalid P sign (Schnorr signatures only).
"flag_flip_p": False,
# Whether to produce signatures with invalid R sign (Schnorr signatures only).
"flag_flip_r": False,
# == Parameters that can be changed without invalidating, but do have a default: ==
# The hashtype (as an integer).
"hashtype": default_hashtype,
# The annex (only when mode=="taproot").
"annex": None,
# The codeseparator position (only when mode=="taproot").
"codeseppos": -1,
# The redeemscript to add to the scriptSig (if P2SH; None implies not P2SH).
"script_p2sh": None,
    # The script to add to the witness (if P2WSH; None implies P2WPKH).
"script_witv0": None,
# The leaf to use in taproot spends (if script path spend; None implies key path spend).
"leaf": None,
# The input arguments to provide to the executed script
"inputs": [],
# == Parameters to be set before evaluation: ==
# - mode: what spending style to use ("taproot", "witv0", or "legacy").
# - key: the (untweaked) private key to sign with (ECKey object for ECDSA, 32 bytes for Schnorr).
# - tap: the TaprootInfo object (see taproot_construct; needed in mode=="taproot").
# - tx: the transaction to sign.
# - utxos: the UTXOs being spent (needed in mode=="witv0" and mode=="taproot").
# - idx: the input position being signed.
# - scriptcode: the scriptcode to include in legacy and witv0 sighashes.
}
def flatten(lst):
ret = []
for elem in lst:
if isinstance(elem, list):
ret += flatten(elem)
else:
ret.append(elem)
return ret
def spend(tx, idx, utxos, **kwargs):
"""Sign transaction input idx of tx, provided utxos is the list of outputs being spent.
Additional arguments may be provided that override any aspect of the signing process.
See DEFAULT_CONTEXT above for what can be overridden, and what must be provided.
"""
ctx = {**DEFAULT_CONTEXT, "tx":tx, "idx":idx, "utxos":utxos, **kwargs}
def to_script(elem):
"""If fed a CScript, return it; if fed bytes, return a CScript that pushes it."""
if isinstance(elem, CScript):
return elem
else:
return CScript([elem])
scriptsig_list = flatten(get(ctx, "scriptsig"))
scriptsig = CScript(b"".join(bytes(to_script(elem)) for elem in scriptsig_list))
witness_stack = flatten(get(ctx, "witness"))
return (scriptsig, witness_stack)
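# A minimal usage sketch of spend() (added for illustration; never invoked by the tests): producing a taproot
# key path spend for a hypothetical one-input transaction. All variable names here are illustrative only.
def _spend_usage_sketch():
    sec = generate_privkey()
    pub, _ = compute_xonly_pubkey(sec)
    tap = taproot_construct(pub)
    utxos = [CTxOut(20000, tap.scriptPubKey)]
    tx = CTransaction()
    tx.vin = [CTxIn()]
    tx.vout = [CTxOut(19000, CScript([OP_RETURN]))]
    # mode, key, and tap are the "parameters to be set before evaluation" listed in DEFAULT_CONTEXT.
    scriptsig, witness_stack = spend(tx, 0, utxos, mode="taproot", key=sec, tap=tap)
    # A key path spend has an empty scriptSig and a single witness element (the signature).
    return scriptsig, witness_stack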
# === Spender objects ===
#
# Each spender is a tuple of:
# - A scriptPubKey which is to be spent from (CScript)
# - A comment describing the test (string)
# - Whether the spending (on itself) is expected to be standard (bool)
# - A tx-signing lambda returning (scriptsig, witness_stack), taking as inputs:
# - A transaction to sign (CTransaction)
# - An input position (int)
# - The spent UTXOs by this transaction (list of CTxOut)
# - Whether to produce a valid spend (bool)
# - A string with an expected error message for failure case if known
# - The (pre-taproot) sigops weight consumed by a successful spend
# - Whether this spend cannot fail
# - Whether this test demands being placed in a txin with no corresponding txout (for testing SIGHASH_SINGLE behavior)
Spender = namedtuple("Spender", "script,comment,is_standard,sat_function,err_msg,sigops_weight,no_fail,need_vin_vout_mismatch")
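# Hedged sketch (added for illustration; not used here) of how a Spender is consumed when assembling a test
# transaction. It assumes tx.wit.vtxinwit has already been resized to match tx.vin.
def _apply_spender_sketch(spender, tx, idx, utxos, valid=True):
    scriptsig, witness_stack = spender.sat_function(tx, idx, utxos, valid)
    tx.vin[idx].scriptSig = scriptsig
    tx.wit.vtxinwit[idx].scriptWitness.stack = witness_stack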
def make_spender(comment, *, tap=None, witv0=False, script=None, pkh=None, p2sh=False, spk_mutate_pre_p2sh=None, failure=None, standard=True, err_msg=None, sigops_weight=0, need_vin_vout_mismatch=False, **kwargs):
"""Helper for constructing Spender objects using the context signing framework.
* tap: a TaprootInfo object (see taproot_construct), for Taproot spends (cannot be combined with pkh, witv0, or script)
* witv0: boolean indicating the use of witness v0 spending (needs one of script or pkh)
* script: the actual script executed (for bare/P2WSH/P2SH spending)
* pkh: the public key for P2PKH or P2WPKH spending
    * p2sh: whether the output is a P2SH wrapper (this is supported even for Taproot, where it makes the output unencumbered)
    * spk_mutate_pre_p2sh: a callable to be applied to the script (before potentially P2SH-wrapping it)
* failure: a dict of entries to override in the context when intentionally failing to spend (if None, no_fail will be set)
* standard: whether the (valid version of) spending is expected to be standard
* err_msg: a string with an expected error message for failure (or None, if not cared about)
* sigops_weight: the pre-taproot sigops weight consumed by a successful spend
* need_vin_vout_mismatch: whether this test requires being tested in a transaction input that has no corresponding
transaction output.
"""
conf = dict()
# Compute scriptPubKey and set useful defaults based on the inputs.
if witv0:
assert tap is None
conf["mode"] = "witv0"
if pkh is not None:
# P2WPKH
assert script is None
pubkeyhash = hash160(pkh)
spk = CScript([OP_0, pubkeyhash])
conf["scriptcode"] = CScript([OP_DUP, OP_HASH160, pubkeyhash, OP_EQUALVERIFY, OP_CHECKSIG])
conf["script_witv0"] = None
conf["inputs"] = [getter("sign"), pkh]
elif script is not None:
# P2WSH
spk = CScript([OP_0, sha256(script)])
conf["scriptcode"] = script
conf["script_witv0"] = script
else:
assert False
elif tap is None:
conf["mode"] = "legacy"
if pkh is not None:
# P2PKH
assert script is None
pubkeyhash = hash160(pkh)
spk = CScript([OP_DUP, OP_HASH160, pubkeyhash, OP_EQUALVERIFY, OP_CHECKSIG])
conf["scriptcode"] = spk
conf["inputs"] = [getter("sign"), pkh]
elif script is not None:
# bare
spk = script
conf["scriptcode"] = script
else:
assert False
else:
assert script is None
conf["mode"] = "taproot"
conf["tap"] = tap
spk = tap.scriptPubKey
if spk_mutate_pre_p2sh is not None:
spk = spk_mutate_pre_p2sh(spk)
if p2sh:
# P2SH wrapper can be combined with anything else
conf["script_p2sh"] = spk
spk = CScript([OP_HASH160, hash160(spk), OP_EQUAL])
conf = {**conf, **kwargs}
def sat_fn(tx, idx, utxos, valid):
if valid:
return spend(tx, idx, utxos, **conf)
else:
assert failure is not None
return spend(tx, idx, utxos, **{**conf, **failure})
return Spender(script=spk, comment=comment, is_standard=standard, sat_function=sat_fn, err_msg=err_msg, sigops_weight=sigops_weight, no_fail=failure is None, need_vin_vout_mismatch=need_vin_vout_mismatch)
def add_spender(spenders, *args, **kwargs):
"""Make a spender using make_spender, and add it to spenders."""
spenders.append(make_spender(*args, **kwargs))
# === Helpers for the test ===
def random_checksig_style(pubkey):
"""Creates a random CHECKSIG* tapscript that would succeed with only the valid signature on witness stack."""
opcode = random.choice([OP_CHECKSIG, OP_CHECKSIGVERIFY, OP_CHECKSIGADD])
if (opcode == OP_CHECKSIGVERIFY):
ret = CScript([pubkey, opcode, OP_1])
elif (opcode == OP_CHECKSIGADD):
num = random.choice([0, 0x7fffffff, -0x7fffffff])
ret = CScript([num, pubkey, opcode, num + 1, OP_EQUAL])
else:
ret = CScript([pubkey, opcode])
return bytes(ret)
def random_bytes(n):
"""Return a random bytes object of length n."""
return bytes(random.getrandbits(8) for i in range(n))
def bitflipper(expr):
"""Return a callable that evaluates expr and returns it with a random bitflip."""
def fn(ctx):
sub = deep_eval(ctx, expr)
assert isinstance(sub, bytes)
return (int.from_bytes(sub, 'little') ^ (1 << random.randrange(len(sub) * 8))).to_bytes(len(sub), 'little')
return fn
def zero_appender(expr):
"""Return a callable that evaluates expr and returns it with a zero added."""
return lambda ctx: deep_eval(ctx, expr) + b"\x00"
def byte_popper(expr):
"""Return a callable that evaluates expr and returns it with its last byte removed."""
return lambda ctx: deep_eval(ctx, expr)[:-1]
# Expected error strings
ERR_SIG_SIZE = {"err_msg": "Invalid Schnorr signature size"}
ERR_SIG_HASHTYPE = {"err_msg": "Invalid Schnorr signature hash type"}
ERR_SIG_SCHNORR = {"err_msg": "Invalid Schnorr signature"}
ERR_OP_RETURN = {"err_msg": "OP_RETURN was encountered"}
ERR_CONTROLBLOCK_SIZE = {"err_msg": "Invalid Taproot control block size"}
ERR_WITNESS_PROGRAM_MISMATCH = {"err_msg": "Witness program hash mismatch"}
ERR_PUSH_LIMIT = {"err_msg": "Push value size limit exceeded"}
ERR_DISABLED_OPCODE = {"err_msg": "Attempted to use a disabled opcode"}
ERR_TAPSCRIPT_CHECKMULTISIG = {"err_msg": "OP_CHECKMULTISIG(VERIFY) is not available in tapscript"}
ERR_MINIMALIF = {"err_msg": "OP_IF/NOTIF argument must be minimal in tapscript"}
ERR_UNKNOWN_PUBKEY = {"err_msg": "Public key is neither compressed or uncompressed"}
ERR_STACK_SIZE = {"err_msg": "Stack size limit exceeded"}
ERR_CLEANSTACK = {"err_msg": "Stack size must be exactly one after execution"}
ERR_STACK_EMPTY = {"err_msg": "Operation not valid with the current stack size"}
ERR_SIGOPS_RATIO = {"err_msg": "Too much signature validation relative to witness weight"}
ERR_UNDECODABLE = {"err_msg": "Opcode missing or not understood"}
ERR_NO_SUCCESS = {"err_msg": "Script evaluated without error but finished with a false/empty top stack element"}
ERR_EMPTY_WITNESS = {"err_msg": "Witness program was passed an empty witness"}
ERR_CHECKSIGVERIFY = {"err_msg": "Script failed an OP_CHECKSIGVERIFY operation"}
VALID_SIGHASHES_ECDSA = [
SIGHASH_ALL,
SIGHASH_NONE,
SIGHASH_SINGLE,
SIGHASH_ANYONECANPAY + SIGHASH_ALL,
SIGHASH_ANYONECANPAY + SIGHASH_NONE,
SIGHASH_ANYONECANPAY + SIGHASH_SINGLE
]
VALID_SIGHASHES_TAPROOT = [SIGHASH_DEFAULT] + VALID_SIGHASHES_ECDSA
VALID_SIGHASHES_TAPROOT_SINGLE = [
SIGHASH_SINGLE,
SIGHASH_ANYONECANPAY + SIGHASH_SINGLE
]
VALID_SIGHASHES_TAPROOT_NO_SINGLE = [h for h in VALID_SIGHASHES_TAPROOT if h not in VALID_SIGHASHES_TAPROOT_SINGLE]
SIGHASH_BITFLIP = {"failure": {"sighash": bitflipper(default_sighash)}}
SIG_POP_BYTE = {"failure": {"sign": byte_popper(default_sign)}}
SINGLE_SIG = {"inputs": [getter("sign")]}
SIG_ADD_ZERO = {"failure": {"sign": zero_appender(default_sign)}}
DUST_LIMIT = 600
MIN_FEE = 50000
# === Actual test cases ===
def spenders_taproot_active():
"""Return a list of Spenders for testing post-Taproot activation behavior."""
secs = [generate_privkey() for _ in range(8)]
pubs = [compute_xonly_pubkey(sec)[0] for sec in secs]
spenders = []
# == Tests for BIP340 signature validation. ==
    # These are primarily tested through the test vectors implemented in libsecp256k1, and in src/test/key_tests.cpp.
# Some things are tested programmatically as well here.
tap = taproot_construct(pubs[0])
# Test with key with bit flipped.
add_spender(spenders, "sig/key", tap=tap, key=secs[0], failure={"key_tweaked": bitflipper(default_key_tweaked)}, **ERR_SIG_SCHNORR)
# Test with sighash with bit flipped.
add_spender(spenders, "sig/sighash", tap=tap, key=secs[0], failure={"sighash": bitflipper(default_sighash)}, **ERR_SIG_SCHNORR)
# Test with invalid R sign.
add_spender(spenders, "sig/flip_r", tap=tap, key=secs[0], failure={"flag_flip_r": True}, **ERR_SIG_SCHNORR)
# Test with invalid P sign.
add_spender(spenders, "sig/flip_p", tap=tap, key=secs[0], failure={"flag_flip_p": True}, **ERR_SIG_SCHNORR)
# Test with signature with bit flipped.
add_spender(spenders, "sig/bitflip", tap=tap, key=secs[0], failure={"signature": bitflipper(default_signature)}, **ERR_SIG_SCHNORR)
# == Tests for signature hashing ==
# Run all tests once with no annex, and once with a valid random annex.
for annex in [None, lambda _: bytes([ANNEX_TAG]) + random_bytes(random.randrange(0, 250))]:
# Non-empty annex is non-standard
no_annex = annex is None
# Sighash mutation tests (test all sighash combinations)
for hashtype in VALID_SIGHASHES_TAPROOT:
common = {"annex": annex, "hashtype": hashtype, "standard": no_annex}
# Pure pubkey
tap = taproot_construct(pubs[0])
add_spender(spenders, "sighash/purepk", tap=tap, key=secs[0], **common, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
# Pubkey/P2PK script combination
scripts = [("s0", CScript(random_checksig_style(pubs[1])))]
tap = taproot_construct(pubs[0], scripts)
add_spender(spenders, "sighash/keypath_hashtype_%x" % hashtype, tap=tap, key=secs[0], **common, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/scriptpath_hashtype_%x" % hashtype, tap=tap, leaf="s0", key=secs[1], **common, **SINGLE_SIG, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
# Test SIGHASH_SINGLE behavior in combination with mismatching outputs
if hashtype in VALID_SIGHASHES_TAPROOT_SINGLE:
add_spender(spenders, "sighash/keypath_hashtype_mis_%x" % hashtype, tap=tap, key=secs[0], annex=annex, standard=no_annex, hashtype_actual=random.choice(VALID_SIGHASHES_TAPROOT_NO_SINGLE), failure={"hashtype_actual": hashtype}, **ERR_SIG_HASHTYPE, need_vin_vout_mismatch=True)
add_spender(spenders, "sighash/scriptpath_hashtype_mis_%x" % hashtype, tap=tap, leaf="s0", key=secs[1], annex=annex, standard=no_annex, hashtype_actual=random.choice(VALID_SIGHASHES_TAPROOT_NO_SINGLE), **SINGLE_SIG, failure={"hashtype_actual": hashtype}, **ERR_SIG_HASHTYPE, need_vin_vout_mismatch=True)
# Test OP_CODESEPARATOR impact on sighashing.
hashtype = lambda _: random.choice(VALID_SIGHASHES_TAPROOT)
common = {"annex": annex, "hashtype": hashtype, "standard": no_annex}
scripts = [
("pk_codesep", CScript(random_checksig_style(pubs[1]) + bytes([OP_CODESEPARATOR]))), # codesep after checksig
("codesep_pk", CScript(bytes([OP_CODESEPARATOR]) + random_checksig_style(pubs[1]))), # codesep before checksig
("branched_codesep", CScript([random_bytes(random.randrange(511)), OP_DROP, OP_IF, OP_CODESEPARATOR, pubs[0], OP_ELSE, OP_CODESEPARATOR, pubs[1], OP_ENDIF, OP_CHECKSIG])), # branch dependent codesep
]
random.shuffle(scripts)
tap = taproot_construct(pubs[0], scripts)
add_spender(spenders, "sighash/pk_codesep", tap=tap, leaf="pk_codesep", key=secs[1], **common, **SINGLE_SIG, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/codesep_pk", tap=tap, leaf="codesep_pk", key=secs[1], codeseppos=0, **common, **SINGLE_SIG, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/branched_codesep/left", tap=tap, leaf="branched_codesep", key=secs[0], codeseppos=3, **common, inputs=[getter("sign"), b'\x01'], **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/branched_codesep/right", tap=tap, leaf="branched_codesep", key=secs[1], codeseppos=6, **common, inputs=[getter("sign"), b''], **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
# Reusing the scripts above, test that various features affect the sighash.
add_spender(spenders, "sighash/annex", tap=tap, leaf="pk_codesep", key=secs[1], hashtype=hashtype, standard=False, **SINGLE_SIG, annex=bytes([ANNEX_TAG]), failure={"sighash": override(default_sighash, annex=None)}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/script", tap=tap, leaf="pk_codesep", key=secs[1], **common, **SINGLE_SIG, failure={"sighash": override(default_sighash, script_taproot=tap.leaves["codesep_pk"].script)}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/leafver", tap=tap, leaf="pk_codesep", key=secs[1], **common, **SINGLE_SIG, failure={"sighash": override(default_sighash, leafversion=random.choice([x & 0xFE for x in range(0x100) if x & 0xFE != 0xC0]))}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **common, **SINGLE_SIG, failure={"sighash": override(default_sighash, leaf=None)}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/keypath", tap=tap, key=secs[0], **common, failure={"sighash": override(default_sighash, leaf="pk_codesep")}, **ERR_SIG_SCHNORR)
# Test that invalid hashtypes don't work, both in key path and script path spends
hashtype = lambda _: random.choice(VALID_SIGHASHES_TAPROOT)
for invalid_hashtype in [x for x in range(0x100) if x not in VALID_SIGHASHES_TAPROOT]:
add_spender(spenders, "sighash/keypath_unk_hashtype_%x" % invalid_hashtype, tap=tap, key=secs[0], hashtype=hashtype, failure={"hashtype": invalid_hashtype}, **ERR_SIG_HASHTYPE)
add_spender(spenders, "sighash/scriptpath_unk_hashtype_%x" % invalid_hashtype, tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=hashtype, failure={"hashtype": invalid_hashtype}, **ERR_SIG_HASHTYPE)
# Test that hashtype 0 cannot have a hashtype byte, and 1 must have one.
add_spender(spenders, "sighash/hashtype0_byte_keypath", tap=tap, key=secs[0], hashtype=SIGHASH_DEFAULT, failure={"bytes_hashtype": bytes([SIGHASH_DEFAULT])}, **ERR_SIG_HASHTYPE)
add_spender(spenders, "sighash/hashtype0_byte_scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=SIGHASH_DEFAULT, failure={"bytes_hashtype": bytes([SIGHASH_DEFAULT])}, **ERR_SIG_HASHTYPE)
add_spender(spenders, "sighash/hashtype1_byte_keypath", tap=tap, key=secs[0], hashtype=SIGHASH_ALL, failure={"bytes_hashtype": b''}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/hashtype1_byte_scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=SIGHASH_ALL, failure={"bytes_hashtype": b''}, **ERR_SIG_SCHNORR)
# Test that hashtype 0 and hashtype 1 cannot be transmuted into each other.
add_spender(spenders, "sighash/hashtype0to1_keypath", tap=tap, key=secs[0], hashtype=SIGHASH_DEFAULT, failure={"bytes_hashtype": bytes([SIGHASH_ALL])}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/hashtype0to1_scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=SIGHASH_DEFAULT, failure={"bytes_hashtype": bytes([SIGHASH_ALL])}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/hashtype1to0_keypath", tap=tap, key=secs[0], hashtype=SIGHASH_ALL, failure={"bytes_hashtype": b''}, **ERR_SIG_SCHNORR)
add_spender(spenders, "sighash/hashtype1to0_scriptpath", tap=tap, leaf="pk_codesep", key=secs[1], **SINGLE_SIG, hashtype=SIGHASH_ALL, failure={"bytes_hashtype": b''}, **ERR_SIG_SCHNORR)
# Test aspects of signatures with unusual lengths
for hashtype in [SIGHASH_DEFAULT, random.choice(VALID_SIGHASHES_TAPROOT)]:
scripts = [
("csv", CScript([pubs[2], OP_CHECKSIGVERIFY, OP_1])),
("cs_pos", CScript([pubs[2], OP_CHECKSIG])),
("csa_pos", CScript([OP_0, pubs[2], OP_CHECKSIGADD, OP_1, OP_EQUAL])),
("cs_neg", CScript([pubs[2], OP_CHECKSIG, OP_NOT])),
("csa_neg", CScript([OP_2, pubs[2], OP_CHECKSIGADD, OP_2, OP_EQUAL]))
]
random.shuffle(scripts)
tap = taproot_construct(pubs[3], scripts)
# Empty signatures
add_spender(spenders, "siglen/empty_keypath", tap=tap, key=secs[3], hashtype=hashtype, failure={"sign": b""}, **ERR_SIG_SIZE)
add_spender(spenders, "siglen/empty_csv", tap=tap, key=secs[2], leaf="csv", hashtype=hashtype, **SINGLE_SIG, failure={"sign": b""}, **ERR_CHECKSIGVERIFY)
add_spender(spenders, "siglen/empty_cs", tap=tap, key=secs[2], leaf="cs_pos", hashtype=hashtype, **SINGLE_SIG, failure={"sign": b""}, **ERR_NO_SUCCESS)
add_spender(spenders, "siglen/empty_csa", tap=tap, key=secs[2], leaf="csa_pos", hashtype=hashtype, **SINGLE_SIG, failure={"sign": b""}, **ERR_NO_SUCCESS)
add_spender(spenders, "siglen/empty_cs_neg", tap=tap, key=secs[2], leaf="cs_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", failure={"sign": lambda _: random_bytes(random.randrange(1, 63))}, **ERR_SIG_SIZE)
add_spender(spenders, "siglen/empty_csa_neg", tap=tap, key=secs[2], leaf="csa_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", failure={"sign": lambda _: random_bytes(random.randrange(66, 100))}, **ERR_SIG_SIZE)
# Appending a zero byte to signatures invalidates them
add_spender(spenders, "siglen/padzero_keypath", tap=tap, key=secs[3], hashtype=hashtype, **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
add_spender(spenders, "siglen/padzero_csv", tap=tap, key=secs[2], leaf="csv", hashtype=hashtype, **SINGLE_SIG, **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
add_spender(spenders, "siglen/padzero_cs", tap=tap, key=secs[2], leaf="cs_pos", hashtype=hashtype, **SINGLE_SIG, **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
add_spender(spenders, "siglen/padzero_csa", tap=tap, key=secs[2], leaf="csa_pos", hashtype=hashtype, **SINGLE_SIG, **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
add_spender(spenders, "siglen/padzero_cs_neg", tap=tap, key=secs[2], leaf="cs_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
add_spender(spenders, "siglen/padzero_csa_neg", tap=tap, key=secs[2], leaf="csa_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", **SIG_ADD_ZERO, **(ERR_SIG_HASHTYPE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SIZE))
# Removing the last byte from signatures invalidates them
add_spender(spenders, "siglen/popbyte_keypath", tap=tap, key=secs[3], hashtype=hashtype, **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
add_spender(spenders, "siglen/popbyte_csv", tap=tap, key=secs[2], leaf="csv", hashtype=hashtype, **SINGLE_SIG, **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
add_spender(spenders, "siglen/popbyte_cs", tap=tap, key=secs[2], leaf="cs_pos", hashtype=hashtype, **SINGLE_SIG, **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
add_spender(spenders, "siglen/popbyte_csa", tap=tap, key=secs[2], leaf="csa_pos", hashtype=hashtype, **SINGLE_SIG, **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
add_spender(spenders, "siglen/popbyte_cs_neg", tap=tap, key=secs[2], leaf="cs_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
add_spender(spenders, "siglen/popbyte_csa_neg", tap=tap, key=secs[2], leaf="csa_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", **SIG_POP_BYTE, **(ERR_SIG_SIZE if hashtype == SIGHASH_DEFAULT else ERR_SIG_SCHNORR))
# Verify that an invalid signature is not allowed, not even when the CHECKSIG* is expected to fail.
add_spender(spenders, "siglen/invalid_cs_neg", tap=tap, key=secs[2], leaf="cs_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", failure={"sign": default_sign, "sighash": bitflipper(default_sighash)}, **ERR_SIG_SCHNORR)
add_spender(spenders, "siglen/invalid_csa_neg", tap=tap, key=secs[2], leaf="csa_neg", hashtype=hashtype, **SINGLE_SIG, sign=b"", failure={"sign": default_sign, "sighash": bitflipper(default_sighash)}, **ERR_SIG_SCHNORR)
# == Test that BIP341 spending only applies to witness version 1, program length 32, no P2SH ==
for p2sh in [False, True]:
for witver in range(1, 17):
for witlen in [20, 31, 32, 33]:
def mutate(spk):
prog = spk[2:]
assert len(prog) == 32
if witlen < 32:
prog = prog[0:witlen]
elif witlen > 32:
prog += bytes([0 for _ in range(witlen - 32)])
return CScript([CScriptOp.encode_op_n(witver), prog])
scripts = [("s0", CScript([pubs[0], OP_CHECKSIG])), ("dummy", CScript([OP_RETURN]))]
tap = taproot_construct(pubs[1], scripts)
if not p2sh and witver == 1 and witlen == 32:
add_spender(spenders, "applic/keypath", p2sh=p2sh, spk_mutate_pre_p2sh=mutate, tap=tap, key=secs[1], **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR)
add_spender(spenders, "applic/scriptpath", p2sh=p2sh, leaf="s0", spk_mutate_pre_p2sh=mutate, tap=tap, key=secs[0], **SINGLE_SIG, failure={"leaf": "dummy"}, **ERR_OP_RETURN)
else:
add_spender(spenders, "applic/keypath", p2sh=p2sh, spk_mutate_pre_p2sh=mutate, tap=tap, key=secs[1], standard=False)
add_spender(spenders, "applic/scriptpath", p2sh=p2sh, leaf="s0", spk_mutate_pre_p2sh=mutate, tap=tap, key=secs[0], **SINGLE_SIG, standard=False)
# == Test various aspects of BIP341 spending paths ==
# A set of functions that compute the hashing partner in a Merkle tree, designed to exercise
# edge cases. This relies on the taproot_construct feature that a lambda can be passed in
# instead of a subtree, to compute the partner to be hashed with.
PARTNER_MERKLE_FN = [
# Combine with itself
lambda h: h,
# Combine with hash 0
lambda h: bytes([0 for _ in range(32)]),
# Combine with hash 2^256-1
lambda h: bytes([0xff for _ in range(32)]),
# Combine with itself-1 (BE)
lambda h: (int.from_bytes(h, 'big') - 1).to_bytes(32, 'big'),
# Combine with itself+1 (BE)
lambda h: (int.from_bytes(h, 'big') + 1).to_bytes(32, 'big'),
# Combine with itself-1 (LE)
lambda h: (int.from_bytes(h, 'little') - 1).to_bytes(32, 'big'),
# Combine with itself+1 (LE)
lambda h: (int.from_bytes(h, 'little') + 1).to_bytes(32, 'little'),
# Combine with random bitflipped version of self.
lambda h: (int.from_bytes(h, 'little') ^ (1 << random.randrange(256))).to_bytes(32, 'little')
]
    # Start with a tree that has depth 1 for "128deep" and depth 2 for "129deep".
scripts = [("128deep", CScript([pubs[0], OP_CHECKSIG])), [("129deep", CScript([pubs[0], OP_CHECKSIG])), random.choice(PARTNER_MERKLE_FN)]]
# Add 127 nodes on top of that tree, so that "128deep" and "129deep" end up at their designated depths.
for _ in range(127):
scripts = [scripts, random.choice(PARTNER_MERKLE_FN)]
tap = taproot_construct(pubs[0], scripts)
# Test that spends with a depth of 128 work, but 129 doesn't (even with a tree with weird Merkle branches in it).
add_spender(spenders, "spendpath/merklelimit", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"leaf": "129deep"}, **ERR_CONTROLBLOCK_SIZE)
# Test that flipping the negation bit invalidates spends.
add_spender(spenders, "spendpath/negflag", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"negflag": lambda ctx: 1 - default_negflag(ctx)}, **ERR_WITNESS_PROGRAM_MISMATCH)
# Test that bitflips in the Merkle branch invalidate it.
add_spender(spenders, "spendpath/bitflipmerkle", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"merklebranch": bitflipper(default_merklebranch)}, **ERR_WITNESS_PROGRAM_MISMATCH)
# Test that bitflips in the internal pubkey invalidate it.
add_spender(spenders, "spendpath/bitflippubkey", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"pubkey_internal": bitflipper(default_pubkey_internal)}, **ERR_WITNESS_PROGRAM_MISMATCH)
# Test that empty witnesses are invalid.
add_spender(spenders, "spendpath/emptywit", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"witness": []}, **ERR_EMPTY_WITNESS)
# Test that adding garbage to the control block invalidates it.
add_spender(spenders, "spendpath/padlongcontrol", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_controlblock(ctx) + random_bytes(random.randrange(1, 32))}, **ERR_CONTROLBLOCK_SIZE)
# Test that truncating the control block invalidates it.
add_spender(spenders, "spendpath/trunclongcontrol", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_merklebranch(ctx)[0:random.randrange(1, 32)]}, **ERR_CONTROLBLOCK_SIZE)
scripts = [("s", CScript([pubs[0], OP_CHECKSIG]))]
tap = taproot_construct(pubs[1], scripts)
# Test that adding garbage to the control block invalidates it.
add_spender(spenders, "spendpath/padshortcontrol", tap=tap, leaf="s", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_controlblock(ctx) + random_bytes(random.randrange(1, 32))}, **ERR_CONTROLBLOCK_SIZE)
# Test that truncating the control block invalidates it.
add_spender(spenders, "spendpath/truncshortcontrol", tap=tap, leaf="s", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_merklebranch(ctx)[0:random.randrange(1, 32)]}, **ERR_CONTROLBLOCK_SIZE)
    # Test that truncating the control block to 1 byte ("-1 Merkle length") invalidates it.
add_spender(spenders, "spendpath/trunc1shortcontrol", tap=tap, leaf="s", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_merklebranch(ctx)[0:1]}, **ERR_CONTROLBLOCK_SIZE)
# == Test BIP342 edge cases ==
csa_low_val = random.randrange(0, 17) # Within range for OP_n
csa_low_result = csa_low_val + 1
csa_high_val = random.randrange(17, 100) if random.getrandbits(1) else random.randrange(-100, -1) # Outside OP_n range
csa_high_result = csa_high_val + 1
OVERSIZE_NUMBER = 2**31
assert_equal(len(CScriptNum.encode(CScriptNum(OVERSIZE_NUMBER))), 6)
assert_equal(len(CScriptNum.encode(CScriptNum(OVERSIZE_NUMBER-1))), 5)
big_choices = []
big_scriptops = []
for i in range(1000):
r = random.randrange(len(pubs))
big_choices.append(r)
big_scriptops += [pubs[r], OP_CHECKSIGVERIFY]
def big_spend_inputs(ctx):
"""Helper function to construct the script input for t33/t34 below."""
# Instead of signing 999 times, precompute signatures for every (key, hashtype) combination
sigs = {}
for ht in VALID_SIGHASHES_TAPROOT:
for k in range(len(pubs)):
sigs[(k, ht)] = override(default_sign, hashtype=ht, key=secs[k])(ctx)
num = get(ctx, "num")
return [sigs[(big_choices[i], random.choice(VALID_SIGHASHES_TAPROOT))] for i in range(num - 1, -1, -1)]
# Various BIP342 features
scripts = [
# 0) drop stack element and OP_CHECKSIG
("t0", CScript([OP_DROP, pubs[1], OP_CHECKSIG])),
# 1) normal OP_CHECKSIG
("t1", CScript([pubs[1], OP_CHECKSIG])),
# 2) normal OP_CHECKSIGVERIFY
("t2", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_1])),
# 3) Hypothetical OP_CHECKMULTISIG script that takes a single sig as input
("t3", CScript([OP_0, OP_SWAP, OP_1, pubs[1], OP_1, OP_CHECKMULTISIG])),
# 4) Hypothetical OP_CHECKMULTISIGVERIFY script that takes a single sig as input
("t4", CScript([OP_0, OP_SWAP, OP_1, pubs[1], OP_1, OP_CHECKMULTISIGVERIFY, OP_1])),
# 5) OP_IF script that needs a true input
("t5", CScript([OP_IF, pubs[1], OP_CHECKSIG, OP_ELSE, OP_RETURN, OP_ENDIF])),
# 6) OP_NOTIF script that needs a true input
("t6", CScript([OP_NOTIF, OP_RETURN, OP_ELSE, pubs[1], OP_CHECKSIG, OP_ENDIF])),
# 7) OP_CHECKSIG with an empty key
("t7", CScript([OP_0, OP_CHECKSIG])),
# 8) OP_CHECKSIGVERIFY with an empty key
("t8", CScript([OP_0, OP_CHECKSIGVERIFY, OP_1])),
# 9) normal OP_CHECKSIGADD that also ensures return value is correct
("t9", CScript([csa_low_val, pubs[1], OP_CHECKSIGADD, csa_low_result, OP_EQUAL])),
# 10) OP_CHECKSIGADD with empty key
("t10", CScript([csa_low_val, OP_0, OP_CHECKSIGADD, csa_low_result, OP_EQUAL])),
# 11) OP_CHECKSIGADD with missing counter stack element
("t11", CScript([pubs[1], OP_CHECKSIGADD, OP_1, OP_EQUAL])),
# 12) OP_CHECKSIG that needs invalid signature
("t12", CScript([pubs[1], OP_CHECKSIGVERIFY, pubs[0], OP_CHECKSIG, OP_NOT])),
# 13) OP_CHECKSIG with empty key that needs invalid signature
("t13", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_0, OP_CHECKSIG, OP_NOT])),
# 14) OP_CHECKSIGADD that needs invalid signature
("t14", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_0, pubs[0], OP_CHECKSIGADD, OP_NOT])),
# 15) OP_CHECKSIGADD with empty key that needs invalid signature
("t15", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_0, OP_0, OP_CHECKSIGADD, OP_NOT])),
# 16) OP_CHECKSIG with unknown pubkey type
("t16", CScript([OP_1, OP_CHECKSIG])),
# 17) OP_CHECKSIGADD with unknown pubkey type
("t17", CScript([OP_0, OP_1, OP_CHECKSIGADD])),
# 18) OP_CHECKSIGVERIFY with unknown pubkey type
("t18", CScript([OP_1, OP_CHECKSIGVERIFY, OP_1])),
# 19) script longer than 10000 bytes and over 201 non-push opcodes
("t19", CScript([OP_0, OP_0, OP_2DROP] * 10001 + [pubs[1], OP_CHECKSIG])),
# 20) OP_CHECKSIGVERIFY with empty key
("t20", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_0, OP_0, OP_CHECKSIGVERIFY, OP_1])),
# 21) Script that grows the stack to 1000 elements
("t21", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_1] + [OP_DUP] * 999 + [OP_DROP] * 999)),
# 22) Script that grows the stack to 1001 elements
("t22", CScript([pubs[1], OP_CHECKSIGVERIFY, OP_1] + [OP_DUP] * 1000 + [OP_DROP] * 1000)),
# 23) Script that expects an input stack of 1000 elements
("t23", CScript([OP_DROP] * 999 + [pubs[1], OP_CHECKSIG])),
# 24) Script that expects an input stack of 1001 elements
("t24", CScript([OP_DROP] * 1000 + [pubs[1], OP_CHECKSIG])),
# 25) Script that pushes a MAX_SCRIPT_ELEMENT_SIZE-bytes element
("t25", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE), OP_DROP, pubs[1], OP_CHECKSIG])),
# 26) Script that pushes a (MAX_SCRIPT_ELEMENT_SIZE+1)-bytes element
("t26", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP, pubs[1], OP_CHECKSIG])),
# 27) CHECKSIGADD that must fail because numeric argument number is >4 bytes
("t27", CScript([CScriptNum(OVERSIZE_NUMBER), pubs[1], OP_CHECKSIGADD])),
# 28) Pushes random CScriptNum value, checks OP_CHECKSIGADD result
("t28", CScript([csa_high_val, pubs[1], OP_CHECKSIGADD, csa_high_result, OP_EQUAL])),
# 29) CHECKSIGADD that succeeds with proper sig because numeric argument number is <=4 bytes
("t29", CScript([CScriptNum(OVERSIZE_NUMBER-1), pubs[1], OP_CHECKSIGADD])),
# 30) Variant of t1 with "normal" 33-byte pubkey
("t30", CScript([b'\x03' + pubs[1], OP_CHECKSIG])),
# 31) Variant of t2 with "normal" 33-byte pubkey
("t31", CScript([b'\x02' + pubs[1], OP_CHECKSIGVERIFY, OP_1])),
# 32) Variant of t28 with "normal" 33-byte pubkey
("t32", CScript([csa_high_val, b'\x03' + pubs[1], OP_CHECKSIGADD, csa_high_result, OP_EQUAL])),
# 33) 999-of-999 multisig
("t33", CScript(big_scriptops[:1998] + [OP_1])),
# 34) 1000-of-1000 multisig
("t34", CScript(big_scriptops[:2000] + [OP_1])),
# 35) Variant of t9 that uses a non-minimally encoded input arg
("t35", CScript([bytes([csa_low_val]), pubs[1], OP_CHECKSIGADD, csa_low_result, OP_EQUAL])),
# 36) Empty script
("t36", CScript([])),
]
# Add many dummies to test huge trees
for j in range(100000):
scripts.append((None, CScript([OP_RETURN, random.randrange(100000)])))
random.shuffle(scripts)
tap = taproot_construct(pubs[0], scripts)
common = {
"hashtype": hashtype,
"key": secs[1],
"tap": tap,
}
# Test that MAX_SCRIPT_ELEMENT_SIZE byte stack element inputs are valid, but not one more (and 80 bytes is standard but 81 is not).
add_spender(spenders, "tapscript/inputmaxlimit", leaf="t0", **common, standard=False, inputs=[getter("sign"), random_bytes(MAX_SCRIPT_ELEMENT_SIZE)], failure={"inputs": [getter("sign"), random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1)]}, **ERR_PUSH_LIMIT)
add_spender(spenders, "tapscript/input80limit", leaf="t0", **common, inputs=[getter("sign"), random_bytes(80)])
add_spender(spenders, "tapscript/input81limit", leaf="t0", **common, standard=False, inputs=[getter("sign"), random_bytes(81)])
# Test that OP_CHECKMULTISIG and OP_CHECKMULTISIGVERIFY cause failure, but OP_CHECKSIG and OP_CHECKSIGVERIFY work.
add_spender(spenders, "tapscript/disabled_checkmultisig", leaf="t1", **common, **SINGLE_SIG, failure={"leaf": "t3"}, **ERR_TAPSCRIPT_CHECKMULTISIG)
add_spender(spenders, "tapscript/disabled_checkmultisigverify", leaf="t2", **common, **SINGLE_SIG, failure={"leaf": "t4"}, **ERR_TAPSCRIPT_CHECKMULTISIG)
# Test that OP_IF and OP_NOTIF do not accept non-0x01 as truth value (the MINIMALIF rule is consensus in Tapscript)
add_spender(spenders, "tapscript/minimalif", leaf="t5", **common, inputs=[getter("sign"), b'\x01'], failure={"inputs": [getter("sign"), b'\x02']}, **ERR_MINIMALIF)
add_spender(spenders, "tapscript/minimalnotif", leaf="t6", **common, inputs=[getter("sign"), b'\x01'], failure={"inputs": [getter("sign"), b'\x03']}, **ERR_MINIMALIF)
add_spender(spenders, "tapscript/minimalif", leaf="t5", **common, inputs=[getter("sign"), b'\x01'], failure={"inputs": [getter("sign"), b'\x0001']}, **ERR_MINIMALIF)
add_spender(spenders, "tapscript/minimalnotif", leaf="t6", **common, inputs=[getter("sign"), b'\x01'], failure={"inputs": [getter("sign"), b'\x0100']}, **ERR_MINIMALIF)
# Test that 1-byte public keys (which are unknown) are acceptable but nonstandard with unrelated signatures, but 0-byte public keys are not valid.
add_spender(spenders, "tapscript/unkpk/checksig", leaf="t16", standard=False, **common, **SINGLE_SIG, failure={"leaf": "t7"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/unkpk/checksigadd", leaf="t17", standard=False, **common, **SINGLE_SIG, failure={"leaf": "t10"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/unkpk/checksigverify", leaf="t18", standard=False, **common, **SINGLE_SIG, failure={"leaf": "t8"}, **ERR_UNKNOWN_PUBKEY)
# Test that 33-byte public keys (which are unknown) are acceptable but nonstandard with valid signatures, but normal pubkeys are not valid in that case.
add_spender(spenders, "tapscript/oldpk/checksig", leaf="t30", standard=False, **common, **SINGLE_SIG, sighash=bitflipper(default_sighash), failure={"leaf": "t1"}, **ERR_SIG_SCHNORR)
add_spender(spenders, "tapscript/oldpk/checksigadd", leaf="t31", standard=False, **common, **SINGLE_SIG, sighash=bitflipper(default_sighash), failure={"leaf": "t2"}, **ERR_SIG_SCHNORR)
add_spender(spenders, "tapscript/oldpk/checksigverify", leaf="t32", standard=False, **common, **SINGLE_SIG, sighash=bitflipper(default_sighash), failure={"leaf": "t28"}, **ERR_SIG_SCHNORR)
# Test that 0-byte public keys are not acceptable.
add_spender(spenders, "tapscript/emptypk/checksig", leaf="t1", **SINGLE_SIG, **common, failure={"leaf": "t7"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/emptypk/checksigverify", leaf="t2", **SINGLE_SIG, **common, failure={"leaf": "t8"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/emptypk/checksigadd", leaf="t9", **SINGLE_SIG, **common, failure={"leaf": "t10"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/emptypk/checksigadd", leaf="t35", standard=False, **SINGLE_SIG, **common, failure={"leaf": "t10"}, **ERR_UNKNOWN_PUBKEY)
# Test that OP_CHECKSIGADD results are as expected
add_spender(spenders, "tapscript/checksigaddresults", leaf="t28", **SINGLE_SIG, **common, failure={"leaf": "t27"}, err_msg="unknown error")
add_spender(spenders, "tapscript/checksigaddoversize", leaf="t29", **SINGLE_SIG, **common, failure={"leaf": "t27"}, err_msg="unknown error")
# Test that OP_CHECKSIGADD requires 3 stack elements.
add_spender(spenders, "tapscript/checksigadd3args", leaf="t9", **SINGLE_SIG, **common, failure={"leaf": "t11"}, **ERR_STACK_EMPTY)
# Test that empty signatures do not cause script failure in OP_CHECKSIG and OP_CHECKSIGADD (but do fail with empty pubkey, and do fail OP_CHECKSIGVERIFY)
add_spender(spenders, "tapscript/emptysigs/checksig", leaf="t12", **common, inputs=[b'', getter("sign")], failure={"leaf": "t13"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/emptysigs/nochecksigverify", leaf="t12", **common, inputs=[b'', getter("sign")], failure={"leaf": "t20"}, **ERR_UNKNOWN_PUBKEY)
add_spender(spenders, "tapscript/emptysigs/checksigadd", leaf="t14", **common, inputs=[b'', getter("sign")], failure={"leaf": "t15"}, **ERR_UNKNOWN_PUBKEY)
# Test that scripts over 10000 bytes (and over 201 non-push ops) are acceptable.
add_spender(spenders, "tapscript/no10000limit", leaf="t19", **SINGLE_SIG, **common)
# Test that a stack size of 1000 elements is permitted, but 1001 isn't.
add_spender(spenders, "tapscript/1000stack", leaf="t21", **SINGLE_SIG, **common, failure={"leaf": "t22"}, **ERR_STACK_SIZE)
# Test that an input stack size of 1000 elements is permitted, but 1001 isn't.
add_spender(spenders, "tapscript/1000inputs", leaf="t23", **common, inputs=[getter("sign")] + [b'' for _ in range(999)], failure={"leaf": "t24", "inputs": [getter("sign")] + [b'' for _ in range(1000)]}, **ERR_STACK_SIZE)
# Test that pushing a MAX_SCRIPT_ELEMENT_SIZE byte stack element is valid, but one longer is not.
add_spender(spenders, "tapscript/pushmaxlimit", leaf="t25", **common, **SINGLE_SIG, failure={"leaf": "t26"}, **ERR_PUSH_LIMIT)
# Test that 999-of-999 multisig works (but 1000-of-1000 triggers stack size limits)
add_spender(spenders, "tapscript/bigmulti", leaf="t33", **common, inputs=big_spend_inputs, num=999, failure={"leaf": "t34", "num": 1000}, **ERR_STACK_SIZE)
# Test that the CLEANSTACK rule is consensus critical in tapscript
add_spender(spenders, "tapscript/cleanstack", leaf="t36", tap=tap, inputs=[b'\x01'], failure={"inputs": [b'\x01', b'\x01']}, **ERR_CLEANSTACK)
# == Test for sigops ratio limit ==
# Given a number n, and a public key pk, functions that produce a (CScript, sigops). Each script takes as
# input a valid signature with the passed pk followed by a dummy push of bytes that are to be dropped, and
# will execute sigops signature checks.
SIGOPS_RATIO_SCRIPTS = [
        # n OP_CHECKSIGVERIFYs and 1 OP_CHECKSIG.
lambda n, pk: (CScript([OP_DROP, pk] + [OP_2DUP, OP_CHECKSIGVERIFY] * n + [OP_CHECKSIG]), n + 1),
# n OP_CHECKSIGVERIFYs and 1 OP_CHECKSIGADD, but also one unexecuted OP_CHECKSIGVERIFY.
lambda n, pk: (CScript([OP_DROP, pk, OP_0, OP_IF, OP_2DUP, OP_CHECKSIGVERIFY, OP_ENDIF] + [OP_2DUP, OP_CHECKSIGVERIFY] * n + [OP_2, OP_SWAP, OP_CHECKSIGADD, OP_3, OP_EQUAL]), n + 1),
# n OP_CHECKSIGVERIFYs and 1 OP_CHECKSIGADD, but also one unexecuted OP_CHECKSIG.
lambda n, pk: (CScript([random_bytes(220), OP_2DROP, pk, OP_1, OP_NOTIF, OP_2DUP, OP_CHECKSIG, OP_VERIFY, OP_ENDIF] + [OP_2DUP, OP_CHECKSIGVERIFY] * n + [OP_4, OP_SWAP, OP_CHECKSIGADD, OP_5, OP_EQUAL]), n + 1),
        # n OP_CHECKSIGVERIFYs and 1 OP_CHECKSIGADD, but also one unexecuted OP_CHECKSIGADD.
lambda n, pk: (CScript([OP_DROP, pk, OP_1, OP_IF, OP_ELSE, OP_2DUP, OP_6, OP_SWAP, OP_CHECKSIGADD, OP_7, OP_EQUALVERIFY, OP_ENDIF] + [OP_2DUP, OP_CHECKSIGVERIFY] * n + [OP_8, OP_SWAP, OP_CHECKSIGADD, OP_9, OP_EQUAL]), n + 1),
# n+1 OP_CHECKSIGs, but also one OP_CHECKSIG with an empty signature.
lambda n, pk: (CScript([OP_DROP, OP_0, pk, OP_CHECKSIG, OP_NOT, OP_VERIFY, pk] + [OP_2DUP, OP_CHECKSIG, OP_VERIFY] * n + [OP_CHECKSIG]), n + 1),
# n OP_CHECKSIGADDs and 1 OP_CHECKSIG, but also an OP_CHECKSIGADD with an empty signature.
lambda n, pk: (CScript([OP_DROP, OP_0, OP_10, pk, OP_CHECKSIGADD, OP_10, OP_EQUALVERIFY, pk] + [OP_2DUP, OP_16, OP_SWAP, OP_CHECKSIGADD, b'\x11', OP_EQUALVERIFY] * n + [OP_CHECKSIG]), n + 1),
]
for annex in [None, bytes([ANNEX_TAG]) + random_bytes(random.randrange(1000))]:
for hashtype in [SIGHASH_DEFAULT, SIGHASH_ALL]:
for pubkey in [pubs[1], random_bytes(random.choice([x for x in range(2, 81) if x != 32]))]:
for fn_num, fn in enumerate(SIGOPS_RATIO_SCRIPTS):
merkledepth = random.randrange(129)
def predict_sigops_ratio(n, dummy_size):
"""Predict whether spending fn(n, pubkey) with dummy_size will pass the ratio test."""
script, sigops = fn(n, pubkey)
# Predict the size of the witness for a given choice of n
stacklen_size = 1
sig_size = 64 + (hashtype != SIGHASH_DEFAULT)
siglen_size = 1
dummylen_size = 1 + 2 * (dummy_size >= 253)
script_size = len(script)
scriptlen_size = 1 + 2 * (script_size >= 253)
control_size = 33 + 32 * merkledepth
controllen_size = 1 + 2 * (control_size >= 253)
annex_size = 0 if annex is None else len(annex)
annexlen_size = 0 if annex is None else 1 + 2 * (annex_size >= 253)
witsize = stacklen_size + sig_size + siglen_size + dummy_size + dummylen_size + script_size + scriptlen_size + control_size + controllen_size + annex_size + annexlen_size
# sigops ratio test
return witsize + 50 >= 50 * sigops
# Make sure n is high enough that with empty dummy, the script is not valid
n = 0
while predict_sigops_ratio(n, 0):
n += 1
# But allow picking a bit higher still
n += random.randrange(5)
# Now pick dummy size *just* large enough that the overall construction passes
dummylen = 0
while not predict_sigops_ratio(n, dummylen):
dummylen += 1
scripts = [("s", fn(n, pubkey)[0])]
for _ in range(merkledepth):
scripts = [scripts, random.choice(PARTNER_MERKLE_FN)]
tap = taproot_construct(pubs[0], scripts)
standard = annex is None and dummylen <= 80 and len(pubkey) == 32
add_spender(spenders, "tapscript/sigopsratio_%i" % fn_num, tap=tap, leaf="s", annex=annex, hashtype=hashtype, key=secs[1], inputs=[getter("sign"), random_bytes(dummylen)], standard=standard, failure={"inputs": [getter("sign"), random_bytes(dummylen - 1)]}, **ERR_SIGOPS_RATIO)
# Future leaf versions
for leafver in range(0, 0x100, 2):
if leafver == LEAF_VERSION_TAPSCRIPT or leafver == ANNEX_TAG:
# Skip the defined LEAF_VERSION_TAPSCRIPT, and the ANNEX_TAG which is not usable as leaf version
continue
scripts = [
("bare_c0", CScript([OP_NOP])),
("bare_unkver", CScript([OP_NOP]), leafver),
("return_c0", CScript([OP_RETURN])),
("return_unkver", CScript([OP_RETURN]), leafver),
("undecodable_c0", CScript([OP_PUSHDATA1])),
("undecodable_unkver", CScript([OP_PUSHDATA1]), leafver),
("bigpush_c0", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP])),
("bigpush_unkver", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP]), leafver),
("1001push_c0", CScript([OP_0] * 1001)),
("1001push_unkver", CScript([OP_0] * 1001), leafver),
]
random.shuffle(scripts)
tap = taproot_construct(pubs[0], scripts)
add_spender(spenders, "unkver/bare", standard=False, tap=tap, leaf="bare_unkver", failure={"leaf": "bare_c0"}, **ERR_CLEANSTACK)
add_spender(spenders, "unkver/return", standard=False, tap=tap, leaf="return_unkver", failure={"leaf": "return_c0"}, **ERR_OP_RETURN)
add_spender(spenders, "unkver/undecodable", standard=False, tap=tap, leaf="undecodable_unkver", failure={"leaf": "undecodable_c0"}, **ERR_UNDECODABLE)
add_spender(spenders, "unkver/bigpush", standard=False, tap=tap, leaf="bigpush_unkver", failure={"leaf": "bigpush_c0"}, **ERR_PUSH_LIMIT)
add_spender(spenders, "unkver/1001push", standard=False, tap=tap, leaf="1001push_unkver", failure={"leaf": "1001push_c0"}, **ERR_STACK_SIZE)
add_spender(spenders, "unkver/1001inputs", standard=False, tap=tap, leaf="bare_unkver", inputs=[b'']*1001, failure={"leaf": "bare_c0"}, **ERR_STACK_SIZE)
# OP_SUCCESSx tests.
hashtype = lambda _: random.choice(VALID_SIGHASHES_TAPROOT)
for opval in range(76, 0x100):
opcode = CScriptOp(opval)
if not is_op_success(opcode):
continue
scripts = [
("bare_success", CScript([opcode])),
("bare_nop", CScript([OP_NOP])),
("unexecif_success", CScript([OP_0, OP_IF, opcode, OP_ENDIF])),
("unexecif_nop", CScript([OP_0, OP_IF, OP_NOP, OP_ENDIF])),
("return_success", CScript([OP_RETURN, opcode])),
("return_nop", CScript([OP_RETURN, OP_NOP])),
("undecodable_success", CScript([opcode, OP_PUSHDATA1])),
("undecodable_nop", CScript([OP_NOP, OP_PUSHDATA1])),
("undecodable_bypassed_success", CScript([OP_PUSHDATA1, OP_2, opcode])),
("bigpush_success", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP, opcode])),
("bigpush_nop", CScript([random_bytes(MAX_SCRIPT_ELEMENT_SIZE+1), OP_DROP, OP_NOP])),
("1001push_success", CScript([OP_0] * 1001 + [opcode])),
("1001push_nop", CScript([OP_0] * 1001 + [OP_NOP])),
]
random.shuffle(scripts)
tap = taproot_construct(pubs[0], scripts)
add_spender(spenders, "opsuccess/bare", standard=False, tap=tap, leaf="bare_success", failure={"leaf": "bare_nop"}, **ERR_CLEANSTACK)
add_spender(spenders, "opsuccess/unexecif", standard=False, tap=tap, leaf="unexecif_success", failure={"leaf": "unexecif_nop"}, **ERR_CLEANSTACK)
add_spender(spenders, "opsuccess/return", standard=False, tap=tap, leaf="return_success", failure={"leaf": "return_nop"}, **ERR_OP_RETURN)
add_spender(spenders, "opsuccess/undecodable", standard=False, tap=tap, leaf="undecodable_success", failure={"leaf": "undecodable_nop"}, **ERR_UNDECODABLE)
add_spender(spenders, "opsuccess/undecodable_bypass", standard=False, tap=tap, leaf="undecodable_success", failure={"leaf": "undecodable_bypassed_success"}, **ERR_UNDECODABLE)
add_spender(spenders, "opsuccess/bigpush", standard=False, tap=tap, leaf="bigpush_success", failure={"leaf": "bigpush_nop"}, **ERR_PUSH_LIMIT)
add_spender(spenders, "opsuccess/1001push", standard=False, tap=tap, leaf="1001push_success", failure={"leaf": "1001push_nop"}, **ERR_STACK_SIZE)
add_spender(spenders, "opsuccess/1001inputs", standard=False, tap=tap, leaf="bare_success", inputs=[b'']*1001, failure={"leaf": "bare_nop"}, **ERR_STACK_SIZE)
# Non-OP_SUCCESSx (verify that those aren't accidentally treated as OP_SUCCESSx)
for opval in range(0, 0x100):
opcode = CScriptOp(opval)
if is_op_success(opcode):
continue
scripts = [
("normal", CScript([OP_RETURN, opcode] + [OP_NOP] * 75)),
("op_success", CScript([OP_RETURN, CScriptOp(0x50)]))
]
tap = taproot_construct(pubs[0], scripts)
add_spender(spenders, "alwaysvalid/notsuccessx", tap=tap, leaf="op_success", inputs=[], standard=False, failure={"leaf": "normal"}) # err_msg differs based on opcode
# == Legacy tests ==
# Also add a few legacy spends into the mix, so that transactions which combine taproot and pre-taproot spends get tested too.
for compressed in [False, True]:
eckey1 = ECKey()
eckey1.set(generate_privkey(), compressed)
pubkey1 = eckey1.get_pubkey().get_bytes()
eckey2 = ECKey()
eckey2.set(generate_privkey(), compressed)
for p2sh in [False, True]:
for witv0 in [False, True]:
for hashtype in VALID_SIGHASHES_ECDSA + [random.randrange(0x04, 0x80), random.randrange(0x84, 0x100)]:
standard = (hashtype in VALID_SIGHASHES_ECDSA) and (compressed or not witv0)
add_spender(spenders, "legacy/pk-wrongkey", hashtype=hashtype, p2sh=p2sh, witv0=witv0, standard=standard, script=CScript([pubkey1, OP_CHECKSIG]), **SINGLE_SIG, key=eckey1, failure={"key": eckey2}, sigops_weight=4-3*witv0, **ERR_NO_SUCCESS)
add_spender(spenders, "legacy/pkh-sighashflip", hashtype=hashtype, p2sh=p2sh, witv0=witv0, standard=standard, pkh=pubkey1, key=eckey1, **SIGHASH_BITFLIP, sigops_weight=4-3*witv0, **ERR_NO_SUCCESS)
# Verify that OP_CHECKSIGADD wasn't accidentally added to pre-taproot validation logic.
for p2sh in [False, True]:
for witv0 in [False, True]:
for hashtype in VALID_SIGHASHES_ECDSA + [random.randrange(0x04, 0x80), random.randrange(0x84, 0x100)]:
standard = hashtype in VALID_SIGHASHES_ECDSA and (p2sh or witv0)
add_spender(spenders, "compat/nocsa", hashtype=hashtype, p2sh=p2sh, witv0=witv0, standard=standard, script=CScript([OP_IF, OP_11, pubkey1, OP_CHECKSIGADD, OP_12, OP_EQUAL, OP_ELSE, pubkey1, OP_CHECKSIG, OP_ENDIF]), key=eckey1, sigops_weight=4-3*witv0, inputs=[getter("sign"), b''], failure={"inputs": [getter("sign"), b'\x01']}, **ERR_UNDECODABLE)
return spenders
def spenders_taproot_inactive():
"""Spenders for testing that pre-activation Taproot rules don't apply."""
spenders = []
sec = generate_privkey()
pub, _ = compute_xonly_pubkey(sec)
scripts = [
("pk", CScript([pub, OP_CHECKSIG])),
("future_leaf", CScript([pub, OP_CHECKSIG]), 0xc2),
("op_success", CScript([pub, OP_CHECKSIG, OP_0, OP_IF, CScriptOp(0x50), OP_ENDIF])),
]
tap = taproot_construct(pub, scripts)
# Test that keypath spending is valid & non-standard, regardless of validity.
add_spender(spenders, "inactive/keypath_valid", key=sec, tap=tap, standard=False)
add_spender(spenders, "inactive/keypath_invalidsig", key=sec, tap=tap, standard=False, sighash=bitflipper(default_sighash))
add_spender(spenders, "inactive/keypath_empty", key=sec, tap=tap, standard=False, witness=[])
# Same for scriptpath spending (and features like annex, leaf versions, or OP_SUCCESS don't change this)
add_spender(spenders, "inactive/scriptpath_valid", key=sec, tap=tap, leaf="pk", standard=False, inputs=[getter("sign")])
add_spender(spenders, "inactive/scriptpath_invalidsig", key=sec, tap=tap, leaf="pk", standard=False, inputs=[getter("sign")], sighash=bitflipper(default_sighash))
add_spender(spenders, "inactive/scriptpath_invalidcb", key=sec, tap=tap, leaf="pk", standard=False, inputs=[getter("sign")], controlblock=bitflipper(default_controlblock))
add_spender(spenders, "inactive/scriptpath_valid_unkleaf", key=sec, tap=tap, leaf="future_leaf", standard=False, inputs=[getter("sign")])
add_spender(spenders, "inactive/scriptpath_invalid_unkleaf", key=sec, tap=tap, leaf="future_leaf", standard=False, inputs=[getter("sign")], sighash=bitflipper(default_sighash))
add_spender(spenders, "inactive/scriptpath_valid_opsuccess", key=sec, tap=tap, leaf="op_success", standard=False, inputs=[getter("sign")])
add_spender(spenders, "inactive/scriptpath_valid_opsuccess", key=sec, tap=tap, leaf="op_success", standard=False, inputs=[getter("sign")], sighash=bitflipper(default_sighash))
return spenders
# Consensus validation flags to use in dumps for tests with "legacy/" or "inactive/" prefix.
LEGACY_FLAGS = "P2SH,DERSIG,CHECKLOCKTIMEVERIFY,CHECKSEQUENCEVERIFY,WITNESS,NULLDUMMY"
# Consensus validation flags to use in dumps for all other tests.
TAPROOT_FLAGS = "P2SH,DERSIG,CHECKLOCKTIMEVERIFY,CHECKSEQUENCEVERIFY,WITNESS,NULLDUMMY,TAPROOT"
def dump_json_test(tx, input_utxos, idx, success, failure):
spender = input_utxos[idx].spender
# Determine flags to dump
flags = LEGACY_FLAGS if spender.comment.startswith("legacy/") or spender.comment.startswith("inactive/") else TAPROOT_FLAGS
fields = [
("tx", tx.serialize().hex()),
("prevouts", [x.output.serialize().hex() for x in input_utxos]),
("index", idx),
("flags", flags),
("comment", spender.comment)
]
# The "final" field indicates that a spend should be always valid, even with more validation flags enabled
# than the listed ones. Use standardness as a proxy for this (which gives a conservative underestimate).
if spender.is_standard:
fields.append(("final", True))
def dump_witness(wit):
return OrderedDict([("scriptSig", wit[0].hex()), ("witness", [x.hex() for x in wit[1]])])
if success is not None:
fields.append(("success", dump_witness(success)))
if failure is not None:
fields.append(("failure", dump_witness(failure)))
# Write the dump to $TEST_DUMP_DIR/x/xyz... where x,y,z,... are the SHA1 sum of the dump (which makes the
# file naming scheme compatible with fuzzing infrastructure).
dump = json.dumps(OrderedDict(fields)) + ",\n"
sha1 = hashlib.sha1(dump.encode("utf-8")).hexdigest()
dirname = os.environ.get("TEST_DUMP_DIR", ".") + ("/%s" % sha1[0])
os.makedirs(dirname, exist_ok=True)
with open(dirname + ("/%s" % sha1), 'w', encoding="utf8") as f:
f.write(dump)
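# Illustrative sketch (not part of the original test): each line appended above is a
# JSON object of roughly the following shape; all values here are hypothetical and
# abbreviated.
#
#   {"tx": "0200000001...", "prevouts": ["5120..."], "index": 0,
#    "flags": "P2SH,DERSIG,CHECKLOCKTIMEVERIFY,CHECKSEQUENCEVERIFY,WITNESS,NULLDUMMY,TAPROOT",
#    "comment": "opsuccess/bare", "final": true,
#    "success": {"scriptSig": "", "witness": ["aa...", "c0..."]},
#    "failure": {"scriptSig": "", "witness": ["bb...", "c0..."]}}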
# Data type to keep track of UTXOs, where they were created, and how to spend them.
UTXOData = namedtuple('UTXOData', 'outpoint,output,spender')
class TaprootTest(BitcoinTestFramework):
def add_options(self, parser):
parser.add_argument("--dumptests", dest="dump_tests", default=False, action="store_true",
help="Dump generated test cases to directory set by TEST_DUMP_DIR environment variable")
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
# Node 0 has Taproot inactive, Node 1 active.
self.extra_args = [["-par=1", "-vbparams=taproot:1:1"], ["-par=1"]]
def block_submit(self, node, txs, msg, err_msg, cb_pubkey=None, fees=0, sigops_weight=0, witness=False, accept=False):
# Deplete block of any non-tapscript sigops using a single additional 0-value coinbase output.
# It is not impossible to fit enough tapscript sigops to hit the old 80k limit without
# busting txin-level limits. We simply have to account for the p2pk outputs in all
# transactions.
extra_output_script = CScript([OP_CHECKSIG]*((MAX_BLOCK_SIGOPS_WEIGHT - sigops_weight) // WITNESS_SCALE_FACTOR))
block = create_block(self.tip, create_coinbase(self.lastblockheight + 1, pubkey=cb_pubkey, extra_output_script=extra_output_script, fees=fees), self.lastblocktime + 1)
block.nVersion = 4
for tx in txs:
tx.rehash()
block.vtx.append(tx)
block.hashMerkleRoot = block.calc_merkle_root()
witness and add_witness_commitment(block)
block.rehash()
block.solve()
block_response = node.submitblock(block.serialize().hex())
if err_msg is not None:
assert block_response is not None and err_msg in block_response, "Missing error message '%s' from block response '%s': %s" % (err_msg, "(None)" if block_response is None else block_response, msg)
if (accept):
assert node.getbestblockhash() == block.hash, "Failed to accept: %s (response: %s)" % (msg, block_response)
self.tip = block.sha256
self.lastblockhash = block.hash
self.lastblocktime += 1
self.lastblockheight += 1
else:
assert node.getbestblockhash() == self.lastblockhash, "Failed to reject: " + msg
def test_spenders(self, node, spenders, input_counts):
"""Run randomized tests with a number of "spenders".
Steps:
1) Generate an appropriate UTXO for each spender to test spend conditions
2) Generate 100 random addresses of all wallet types: pkh/sh_wpkh/wpkh
3) Select random number of inputs from (1)
4) Select random number of addresses from (2) as outputs
Each spender embodies a test; in a large randomized test, it is verified
that toggling the valid argument to each lambda toggles the validity of
the transaction. This is accomplished by constructing transactions consisting
of all valid inputs, except one invalid one.
"""
# Construct a bunch of sPKs that send coins back to the host wallet
self.log.info("- Constructing addresses for returning coins")
host_spks = []
host_pubkeys = []
for i in range(16):
addr = node.getnewaddress(address_type=random.choice(["legacy", "p2sh-segwit", "bech32"]))
info = node.getaddressinfo(addr)
spk = bytes.fromhex(info['scriptPubKey'])
host_spks.append(spk)
host_pubkeys.append(bytes.fromhex(info['pubkey']))
# Initialize variables used by block_submit().
self.lastblockhash = node.getbestblockhash()
self.tip = int(self.lastblockhash, 16)
block = node.getblock(self.lastblockhash)
self.lastblockheight = block['height']
self.lastblocktime = block['time']
# Create transactions spending up to 50 of the wallet's inputs, with one output for each spender, and
# one change output at the end. The transaction is constructed on the Python side to enable
# having multiple outputs to the same address and outputs with no assigned address. The wallet
# is then asked to sign it through signrawtransactionwithwallet, and then added to a block on the
# Python side (to bypass standardness rules).
self.log.info("- Creating test UTXOs...")
random.shuffle(spenders)
normal_utxos = []
mismatching_utxos = [] # UTXOs with input that requires mismatching output position
done = 0
while done < len(spenders):
# Compute how many UTXOs to create with this transaction
count_this_tx = min(len(spenders) - done, (len(spenders) + 4) // 5, 10000)
fund_tx = CTransaction()
# Add the 50 highest-value inputs
unspents = node.listunspent()
random.shuffle(unspents)
unspents.sort(key=lambda x: int(x["amount"] * 100000000), reverse=True)
if len(unspents) > 50:
unspents = unspents[:50]
random.shuffle(unspents)
balance = 0
for unspent in unspents:
balance += int(unspent["amount"] * 100000000)
txid = int(unspent["txid"], 16)
fund_tx.vin.append(CTxIn(COutPoint(txid, int(unspent["vout"])), CScript()))
# Add outputs
cur_progress = done / len(spenders)
next_progress = (done + count_this_tx) / len(spenders)
change_goal = (1.0 - 0.6 * next_progress) / (1.0 - 0.6 * cur_progress) * balance
self.log.debug("Create %i UTXOs in a transaction spending %i inputs worth %.8f (sending ~%.8f to change)" % (count_this_tx, len(unspents), balance * 0.00000001, change_goal * 0.00000001))
for i in range(count_this_tx):
avg = (balance - change_goal) / (count_this_tx - i)
amount = int(random.randrange(int(avg*0.85 + 0.5), int(avg*1.15 + 0.5)) + 0.5)
balance -= amount
fund_tx.vout.append(CTxOut(amount, spenders[done + i].script))
# Add change
fund_tx.vout.append(CTxOut(balance - 10000, random.choice(host_spks)))
# Ask the wallet to sign
ss = BytesIO(bytes.fromhex(node.signrawtransactionwithwallet(ToHex(fund_tx))["hex"]))
fund_tx.deserialize(ss)
# Construct UTXOData entries
fund_tx.rehash()
for i in range(count_this_tx):
utxodata = UTXOData(outpoint=COutPoint(fund_tx.sha256, i), output=fund_tx.vout[i], spender=spenders[done])
if utxodata.spender.need_vin_vout_mismatch:
mismatching_utxos.append(utxodata)
else:
normal_utxos.append(utxodata)
done += 1
# Mine into a block
self.block_submit(node, [fund_tx], "Funding tx", None, random.choice(host_pubkeys), 10000, MAX_BLOCK_SIGOPS_WEIGHT, True, True)
# Consume groups of choice(input_coins) from utxos in a tx, testing the spenders.
self.log.info("- Running %i spending tests" % done)
random.shuffle(normal_utxos)
random.shuffle(mismatching_utxos)
assert done == len(normal_utxos) + len(mismatching_utxos)
left = done
while left:
# Construct CTransaction with random nVersion, nLocktime
tx = CTransaction()
tx.nVersion = random.choice([1, 2, random.randint(-0x80000000, 0x7fffffff)])
min_sequence = (tx.nVersion != 1 and tx.nVersion != 0) * 0x80000000 # The minimum sequence number to disable relative locktime
if random.choice([True, False]):
tx.nLockTime = random.randrange(LOCKTIME_THRESHOLD, self.lastblocktime - 7200) # all absolute locktimes in the past
else:
tx.nLockTime = random.randrange(self.lastblockheight + 1) # all block heights in the past
# Decide how many UTXOs to test with.
acceptable = [n for n in input_counts if n <= left and (left - n > max(input_counts) or (left - n) in [0] + input_counts)]
num_inputs = random.choice(acceptable)
# If we have UTXOs that require mismatching inputs/outputs left, include exactly one of those
# unless there is only one normal UTXO left (as tests with mismatching UTXOs require at least one
# normal UTXO to go in the first position), and we don't want to run out of normal UTXOs.
input_utxos = []
while len(mismatching_utxos) and (len(input_utxos) == 0 or len(normal_utxos) == 1):
input_utxos.append(mismatching_utxos.pop())
left -= 1
# Top up until we hit num_inputs (but include at least one normal UTXO always).
for _ in range(max(1, num_inputs - len(input_utxos))):
input_utxos.append(normal_utxos.pop())
left -= 1
# The first input cannot require a mismatching output (as there is at least one output).
while True:
random.shuffle(input_utxos)
if not input_utxos[0].spender.need_vin_vout_mismatch:
break
first_mismatch_input = None
for i in range(len(input_utxos)):
if input_utxos[i].spender.need_vin_vout_mismatch:
first_mismatch_input = i
assert first_mismatch_input is None or first_mismatch_input > 0
# Decide fee, and add CTxIns to tx.
amount = sum(utxo.output.nValue for utxo in input_utxos)
fee = min(random.randrange(MIN_FEE * 2, MIN_FEE * 4), amount - DUST_LIMIT) # 10000-20000 sat fee
in_value = amount - fee
tx.vin = [CTxIn(outpoint=utxo.outpoint, nSequence=random.randint(min_sequence, 0xffffffff)) for utxo in input_utxos]
tx.wit.vtxinwit = [CTxInWitness() for _ in range(len(input_utxos))]
sigops_weight = sum(utxo.spender.sigops_weight for utxo in input_utxos)
self.log.debug("Test: %s" % (", ".join(utxo.spender.comment for utxo in input_utxos)))
# Add 1 to 4 random outputs (but constrained by inputs that require mismatching outputs)
num_outputs = random.choice(range(1, 1 + min(4, 4 if first_mismatch_input is None else first_mismatch_input)))
assert in_value >= 0 and fee - num_outputs * DUST_LIMIT >= MIN_FEE
for i in range(num_outputs):
tx.vout.append(CTxOut())
if in_value <= DUST_LIMIT:
tx.vout[-1].nValue = DUST_LIMIT
elif i < num_outputs - 1:
tx.vout[-1].nValue = in_value
else:
tx.vout[-1].nValue = random.randint(DUST_LIMIT, in_value)
in_value -= tx.vout[-1].nValue
tx.vout[-1].scriptPubKey = random.choice(host_spks)
sigops_weight += CScript(tx.vout[-1].scriptPubKey).GetSigOpCount(False) * WITNESS_SCALE_FACTOR
fee += in_value
assert fee >= 0
# Select coinbase pubkey
cb_pubkey = random.choice(host_pubkeys)
sigops_weight += 1 * WITNESS_SCALE_FACTOR
# Precompute one satisfying and one failing scriptSig/witness for each input.
input_data = []
for i in range(len(input_utxos)):
fn = input_utxos[i].spender.sat_function
fail = None
success = fn(tx, i, [utxo.output for utxo in input_utxos], True)
if not input_utxos[i].spender.no_fail:
fail = fn(tx, i, [utxo.output for utxo in input_utxos], False)
input_data.append((fail, success))
if self.options.dump_tests:
dump_json_test(tx, input_utxos, i, success, fail)
# Sign each input incorrectly once on each complete signing pass, except the very last.
for fail_input in list(range(len(input_utxos))) + [None]:
# Skip trying to fail at spending something that can't be made to fail.
if fail_input is not None and input_utxos[fail_input].spender.no_fail:
continue
# Expected message with each input failure, may be None(which is ignored)
expected_fail_msg = None if fail_input is None else input_utxos[fail_input].spender.err_msg
# Fill inputs/witnesses
for i in range(len(input_utxos)):
tx.vin[i].scriptSig = input_data[i][i != fail_input][0]
tx.wit.vtxinwit[i].scriptWitness.stack = input_data[i][i != fail_input][1]
# Submit to mempool to check standardness
is_standard_tx = fail_input is None and all(utxo.spender.is_standard for utxo in input_utxos) and tx.nVersion >= 1 and tx.nVersion <= 2
tx.rehash()
msg = ','.join(utxo.spender.comment + ("*" if n == fail_input else "") for n, utxo in enumerate(input_utxos))
if is_standard_tx:
node.sendrawtransaction(tx.serialize().hex(), 0)
assert node.getmempoolentry(tx.hash) is not None, "Failed to accept into mempool: " + msg
else:
assert_raises_rpc_error(-26, None, node.sendrawtransaction, tx.serialize().hex(), 0)
# Submit in a block
self.block_submit(node, [tx], msg, witness=True, accept=fail_input is None, cb_pubkey=cb_pubkey, fees=fee, sigops_weight=sigops_weight, err_msg=expected_fail_msg)
if (len(spenders) - left) // 200 > (len(spenders) - left - len(input_utxos)) // 200:
self.log.info(" - %i tests done" % (len(spenders) - left))
assert left == 0
assert len(normal_utxos) == 0
assert len(mismatching_utxos) == 0
self.log.info(" - Done")
def run_test(self):
# Post-taproot activation tests go first (pre-taproot tests' blocks are invalid post-taproot).
self.log.info("Post-activation tests...")
self.nodes[1].generate(101)
self.test_spenders(self.nodes[1], spenders_taproot_active(), input_counts=[1, 2, 2, 2, 2, 3])
# Re-connect nodes in case they have been disconnected
self.disconnect_nodes(0, 1)
self.connect_nodes(0, 1)
# Transfer value of the largest 500 coins to pre-taproot node.
addr = self.nodes[0].getnewaddress()
unsp = self.nodes[1].listunspent()
unsp = sorted(unsp, key=lambda i: i['amount'], reverse=True)
unsp = unsp[:500]
rawtx = self.nodes[1].createrawtransaction(
inputs=[{
'txid': i['txid'],
'vout': i['vout']
} for i in unsp],
outputs={addr: sum(i['amount'] for i in unsp)}
)
rawtx = self.nodes[1].signrawtransactionwithwallet(rawtx)['hex']
# Mine a block with the transaction
block = create_block(tmpl=self.nodes[1].getblocktemplate(NORMAL_GBT_REQUEST_PARAMS), txlist=[rawtx])
add_witness_commitment(block)
block.rehash()
block.solve()
assert_equal(None, self.nodes[1].submitblock(block.serialize().hex()))
self.sync_blocks()
# Pre-taproot activation tests.
self.log.info("Pre-activation tests...")
# Run each test twice; once in isolation, and once combined with others. Testing in isolation
# means that the standardness is verified in every test (as combined transactions are only standard
# when all their inputs are standard).
self.test_spenders(self.nodes[0], spenders_taproot_inactive(), input_counts=[1])
self.test_spenders(self.nodes[0], spenders_taproot_inactive(), input_counts=[2, 3])
if __name__ == '__main__':
TaprootTest().main()
| mit | -4,492,080,346,018,543,600 | 57.948113 | 363 | 0.647424 | false |
sikmir/QGIS | tests/src/python/test_qgsappstartup.py | 15 | 4749 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsApplication.
From build dir: ctest -R PyQgsAppStartup -V
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Hugo Mercier ([email protected])'
__date__ = '17/07/2013'
__copyright__ = 'Copyright 2013, The QGIS Project'
import sys
import os
import glob
import re
import time
import shutil
import subprocess
import tempfile
import errno
from qgis.testing import unittest
from utilities import unitTestDataPath
print('CTEST_FULL_OUTPUT')
TEST_DATA_DIR = unitTestDataPath()
class TestPyQgsAppStartup(unittest.TestCase):
TMP_DIR = ''
@classmethod
def setUpClass(cls):
cls.TMP_DIR = tempfile.mkdtemp()
# print('TMP_DIR: ' + cls.TMP_DIR)
# subprocess.call(['open', cls.TMP_DIR])
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.TMP_DIR, ignore_errors=True)
# TODO: refactor parameters to **kwargs to handle all startup combinations
def doTestStartup(self, option='', testDir='', testFile='',
loadPlugins=False, customization=False,
timeOut=360, env=None, additionalArguments=[]):
"""Run QGIS with the given option. Wait for testFile to be created.
If time runs out, fail.
"""
myTestFile = testFile
# from unicode to local
if testDir:
if not os.path.exists(testDir):
os.mkdir(testDir)
myTestFile = os.path.join(testDir, testFile)
if os.path.exists(myTestFile):
os.remove(myTestFile)
# whether to load plugins
plugins = '' if loadPlugins else '--noplugins'
# whether to enable GUI customization
customize = '' if customization else '--nocustomization'
# environment variables = system variables + provided 'env'
myenv = os.environ.copy()
if env is not None:
myenv.update(env)
call = [QGIS_BIN, "--nologo", plugins, customize, option, testDir] + additionalArguments
p = subprocess.Popen(call, env=myenv)
s = 0
while not os.path.exists(myTestFile):
p.poll()
if p.returncode is not None:
raise Exception('Return code: {}, Call: "{}", Env: {}'.format(p.returncode, ' '.join(call), env))
time.sleep(1)
s += 1
if s > timeOut:
raise Exception('Timed out waiting for application start, Call: "{}", Env: {}'.format(' '.join(call), env))
try:
p.terminate()
except OSError as e:
if e.errno != errno.ESRCH:
raise e
def testPyQgisStartupEnvVar(self):
# verify PYQGIS_STARTUP env variable file is run by embedded interpreter
# create a temp python module that writes out test file
testfile = 'pyqgis_startup.txt'
testfilepath = os.path.join(self.TMP_DIR, testfile).replace('\\', '/')
testcode = [
"f = open('{0}', 'w')\n".format(testfilepath),
"f.write('This is a test')\n",
"f.close()\n"
]
testmod = os.path.join(self.TMP_DIR, 'pyqgis_startup.py').replace('\\', '/')
f = open(testmod, 'w')
f.writelines(testcode)
f.close()
self.doTestStartup(
testFile=testfilepath,
timeOut=360,
env={'PYQGIS_STARTUP': testmod})
if __name__ == '__main__':
# look for qgis bin path
QGIS_BIN = ''
prefixPath = os.environ['QGIS_PREFIX_PATH']
# see qgsapplication.cpp:98
for f in ['', '..', 'bin']:
d = os.path.join(prefixPath, f)
b = os.path.abspath(os.path.join(d, 'qgis'))
if os.path.exists(b):
QGIS_BIN = b
break
b = os.path.abspath(os.path.join(d, 'qgis.exe'))
if os.path.exists(b):
QGIS_BIN = b
break
if sys.platform[:3] == 'dar': # Mac
# QGIS.app may be QGIS_x.x-dev.app for nightlies
# internal binary will match, minus the '.app'
found = False
for app_path in glob.glob(d + '/QGIS*.app'):
m = re.search('/(QGIS(_\d\.\d-dev)?)\.app', app_path)
if m:
QGIS_BIN = app_path + '/Contents/MacOS/' + m.group(1)
found = True
break
if found:
break
print(('\nQGIS_BIN: {}'.format(QGIS_BIN)))
assert QGIS_BIN, 'QGIS binary not found, skipping test suite'
unittest.main()
| gpl-2.0 | -332,627,084,762,308,740 | 31.751724 | 123 | 0.57212 | false |
JioCloud/tempest | tempest/services/volume/json/admin/volume_quotas_client.py | 17 | 2858 | # Copyright (C) 2014 eNovance SAS <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
from six.moves.urllib import parse as urllib
from tempest.common import service_client
class BaseVolumeQuotasClient(service_client.ServiceClient):
"""
Client class to send CRUD Volume Quotas API requests to a Cinder endpoint
"""
TYPE = "json"
def show_default_quota_set(self, tenant_id):
"""List the default volume quota set for a tenant."""
url = 'os-quota-sets/%s/defaults' % tenant_id
resp, body = self.get(url)
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp, self._parse_resp(body))
def show_quota_set(self, tenant_id, params=None):
"""List the quota set for a tenant."""
url = 'os-quota-sets/%s' % tenant_id
if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp, self._parse_resp(body))
def show_quota_usage(self, tenant_id):
"""List the quota set for a tenant."""
body = self.show_quota_set(tenant_id, params={'usage': True})
return body
def update_quota_set(self, tenant_id, gigabytes=None, volumes=None,
snapshots=None):
post_body = {}
if gigabytes is not None:
post_body['gigabytes'] = gigabytes
if volumes is not None:
post_body['volumes'] = volumes
if snapshots is not None:
post_body['snapshots'] = snapshots
post_body = jsonutils.dumps({'quota_set': post_body})
resp, body = self.put('os-quota-sets/%s' % tenant_id, post_body)
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp, self._parse_resp(body))
def delete_quota_set(self, tenant_id):
"""Delete the tenant's quota set."""
resp, body = self.delete('os-quota-sets/%s' % tenant_id)
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp, body)
class VolumeQuotasClient(BaseVolumeQuotasClient):
"""
Client class to send CRUD Volume Type API V1 requests to a Cinder endpoint
"""
| apache-2.0 | 5,130,522,716,512,749,000 | 34.283951 | 78 | 0.651854 | false |
endlessm/chromium-browser | tools/perf/core/services/pinpoint_service.py | 10 | 1051 | # Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from core.services import luci_auth
from core.services import request
SERVICE_URL = 'https://pinpoint-dot-chromeperf.appspot.com/api'
def Request(endpoint, **kwargs):
"""Send a request to some pinpoint endpoint."""
kwargs.setdefault('use_auth', True)
kwargs.setdefault('accept', 'json')
return request.Request(SERVICE_URL + endpoint, **kwargs)
def Job(job_id, with_state=False, with_tags=False):
"""Get job information from its id."""
params = []
if with_state:
params.append(('o', 'STATE'))
if with_tags:
params.append(('o', 'TAGS'))
return Request('/job/%s' % job_id, params=params)
def Jobs():
"""List jobs for the authenticated user."""
return Request('/jobs')
def NewJob(**kwargs):
"""Create a new pinpoint job."""
if 'user' not in kwargs:
kwargs['user'] = luci_auth.GetUserEmail()
return Request('/new', method='POST', data=kwargs)
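# Illustrative usage sketch (not part of the original module): assumes luci
# authentication is already configured; the job id below is hypothetical.
#
#   job_info = Job('14abc9f2d40000', with_state=True)
#   my_jobs = Jobs()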
| bsd-3-clause | -4,228,830,729,423,917,000 | 26.657895 | 72 | 0.686965 | false |
MunGell/RedirectGuard | guard.py | 1 | 1868 | import sys
import csv
import time
import os.path
import argparse
import requests
parser = argparse.ArgumentParser()
parser.add_argument('--root', help="root URL to prepend to redirects URI")
parser.add_argument('--input', help="input file with URLs to check")
parser.add_argument('--output', help="output file with missing redirects")
args = parser.parse_args()
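# Illustrative invocation (not part of the original script; file names are hypothetical):
#   python guard.py --root http://example.com --input redirects.csv --output missing.csv
# where redirects.csv holds one "source,target" pair per row, e.g. "/old-page,/new-page".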
def read_input_file(input_file_path):
redirects = []
input_file = open(input_file_path, 'r')
try:
reader = csv.reader(input_file)
for row in reader:
redirects.append(row)
finally:
input_file.close()
return redirects
def crawler(redirects):
root = '' if not args.root else args.root
for redirect in redirects:
r = requests.get(root + redirect[0], allow_redirects=False)
if r.status_code in [301, 302] and r.headers['location'] == ( root + redirect[1] ):
print '[success] Redirect from ', redirect[0], ' to ', redirect[1], ' is successful with response code: ', str(r.status_code)
else:
location = r.headers['location'] if 'location' in r.headers else ''
output(redirect, r.status_code, location)
time.sleep(1)
def output(redirect, status, real_path):
output_file = open(args.output, "a")
writer = csv.writer(output_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL)
writer.writerow([redirect[0], redirect[1], status, real_path])
output_file.close()
if __name__ == '__main__':
# Check if input file exists
if not os.path.isfile(args.input):
print 'Input file does not exist.'
exit()
# Clear output file and add headers
output_file = open(args.output, "w+")
writer = csv.writer(output_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL)
writer.writerow(['Source', 'Target', 'Status code', 'Real target'])
output_file.close()
redirects = read_input_file(args.input)
crawler(redirects) | bsd-2-clause | -6,678,677,528,333,244,000 | 31.789474 | 131 | 0.681478 | false |
MoritzS/django | tests/utils_tests/test_text.py | 6 | 11349 | import json
from django.test import SimpleTestCase
from django.utils import text
from django.utils.functional import lazystr
from django.utils.text import format_lazy
from django.utils.translation import gettext_lazy, override
IS_WIDE_BUILD = (len('\U0001F4A9') == 1)
class TestUtilsText(SimpleTestCase):
def test_get_text_list(self):
self.assertEqual(text.get_text_list(['a', 'b', 'c', 'd']), 'a, b, c or d')
self.assertEqual(text.get_text_list(['a', 'b', 'c'], 'and'), 'a, b and c')
self.assertEqual(text.get_text_list(['a', 'b'], 'and'), 'a and b')
self.assertEqual(text.get_text_list(['a']), 'a')
self.assertEqual(text.get_text_list([]), '')
with override('ar'):
self.assertEqual(text.get_text_list(['a', 'b', 'c']), "a، b أو c")
def test_smart_split(self):
testdata = [
('This is "a person" test.',
['This', 'is', '"a person"', 'test.']),
('This is "a person\'s" test.',
['This', 'is', '"a person\'s"', 'test.']),
('This is "a person\\"s" test.',
['This', 'is', '"a person\\"s"', 'test.']),
('"a \'one',
['"a', "'one"]),
('all friends\' tests',
['all', 'friends\'', 'tests']),
('url search_page words="something else"',
['url', 'search_page', 'words="something else"']),
("url search_page words='something else'",
['url', 'search_page', "words='something else'"]),
('url search_page words "something else"',
['url', 'search_page', 'words', '"something else"']),
('url search_page words-"something else"',
['url', 'search_page', 'words-"something else"']),
('url search_page words=hello',
['url', 'search_page', 'words=hello']),
('url search_page words="something else',
['url', 'search_page', 'words="something', 'else']),
("cut:','|cut:' '",
["cut:','|cut:' '"]),
(lazystr("a b c d"), # Test for #20231
['a', 'b', 'c', 'd']),
]
for test, expected in testdata:
self.assertEqual(list(text.smart_split(test)), expected)
def test_truncate_chars(self):
truncator = text.Truncator('The quick brown fox jumped over the lazy dog.')
self.assertEqual('The quick brown fox jumped over the lazy dog.', truncator.chars(100)),
self.assertEqual('The quick brown fox ...', truncator.chars(23)),
self.assertEqual('The quick brown fo.....', truncator.chars(23, '.....')),
nfc = text.Truncator('o\xfco\xfco\xfco\xfc')
nfd = text.Truncator('ou\u0308ou\u0308ou\u0308ou\u0308')
self.assertEqual('oüoüoüoü', nfc.chars(8))
self.assertEqual('oüoüoüoü', nfd.chars(8))
self.assertEqual('oü...', nfc.chars(5))
self.assertEqual('oü...', nfd.chars(5))
# Ensure the final length is calculated correctly when there are
# combining characters with no precomposed form, and that combining
# characters are not split up.
truncator = text.Truncator('-B\u030AB\u030A----8')
self.assertEqual('-B\u030A...', truncator.chars(5))
self.assertEqual('-B\u030AB\u030A-...', truncator.chars(7))
self.assertEqual('-B\u030AB\u030A----8', truncator.chars(8))
# Ensure the length of the end text is correctly calculated when it
# contains combining characters with no precomposed form.
truncator = text.Truncator('-----')
self.assertEqual('---B\u030A', truncator.chars(4, 'B\u030A'))
self.assertEqual('-----', truncator.chars(5, 'B\u030A'))
# Make a best effort to shorten to the desired length, but requesting
# a length shorter than the ellipsis shouldn't break
self.assertEqual('...', text.Truncator('asdf').chars(1))
# lazy strings are handled correctly
self.assertEqual(text.Truncator(lazystr('The quick brown fox')).chars(12), 'The quick...')
def test_truncate_words(self):
truncator = text.Truncator('The quick brown fox jumped over the lazy dog.')
self.assertEqual('The quick brown fox jumped over the lazy dog.', truncator.words(10))
self.assertEqual('The quick brown fox...', truncator.words(4))
self.assertEqual('The quick brown fox[snip]', truncator.words(4, '[snip]'))
# lazy strings are handled correctly
truncator = text.Truncator(lazystr('The quick brown fox jumped over the lazy dog.'))
self.assertEqual('The quick brown fox...', truncator.words(4))
def test_truncate_html_words(self):
truncator = text.Truncator(
'<p id="par"><strong><em>The quick brown fox jumped over the lazy dog.</em></strong></p>'
)
self.assertEqual(
'<p id="par"><strong><em>The quick brown fox jumped over the lazy dog.</em></strong></p>',
truncator.words(10, html=True)
)
self.assertEqual(
'<p id="par"><strong><em>The quick brown fox...</em></strong></p>',
truncator.words(4, html=True)
)
self.assertEqual(
'<p id="par"><strong><em>The quick brown fox....</em></strong></p>',
truncator.words(4, '....', html=True)
)
self.assertEqual(
'<p id="par"><strong><em>The quick brown fox</em></strong></p>',
truncator.words(4, '', html=True)
)
# Test with new line inside tag
truncator = text.Truncator(
'<p>The quick <a href="xyz.html"\n id="mylink">brown fox</a> jumped over the lazy dog.</p>'
)
self.assertEqual(
'<p>The quick <a href="xyz.html"\n id="mylink">brown...</a></p>',
truncator.words(3, '...', html=True)
)
# Test self-closing tags
truncator = text.Truncator('<br/>The <hr />quick brown fox jumped over the lazy dog.')
self.assertEqual('<br/>The <hr />quick brown...', truncator.words(3, '...', html=True))
truncator = text.Truncator('<br>The <hr/>quick <em>brown fox</em> jumped over the lazy dog.')
self.assertEqual('<br>The <hr/>quick <em>brown...</em>', truncator.words(3, '...', html=True))
# Test html entities
truncator = text.Truncator('<i>Buenos días! ¿Cómo está?</i>')
self.assertEqual('<i>Buenos días! ¿Cómo...</i>', truncator.words(3, '...', html=True))
truncator = text.Truncator('<p>I <3 python, what about you?</p>')
self.assertEqual('<p>I <3 python...</p>', truncator.words(3, '...', html=True))
def test_wrap(self):
digits = '1234 67 9'
self.assertEqual(text.wrap(digits, 100), '1234 67 9')
self.assertEqual(text.wrap(digits, 9), '1234 67 9')
self.assertEqual(text.wrap(digits, 8), '1234 67\n9')
self.assertEqual(text.wrap('short\na long line', 7), 'short\na long\nline')
self.assertEqual(text.wrap('do-not-break-long-words please? ok', 8), 'do-not-break-long-words\nplease?\nok')
long_word = 'l%sng' % ('o' * 20)
self.assertEqual(text.wrap(long_word, 20), long_word)
self.assertEqual(text.wrap('a %s word' % long_word, 10), 'a\n%s\nword' % long_word)
self.assertEqual(text.wrap(lazystr(digits), 100), '1234 67 9')
def test_normalize_newlines(self):
self.assertEqual(text.normalize_newlines("abc\ndef\rghi\r\n"), "abc\ndef\nghi\n")
self.assertEqual(text.normalize_newlines("\n\r\r\n\r"), "\n\n\n\n")
self.assertEqual(text.normalize_newlines("abcdefghi"), "abcdefghi")
self.assertEqual(text.normalize_newlines(""), "")
self.assertEqual(text.normalize_newlines(lazystr("abc\ndef\rghi\r\n")), "abc\ndef\nghi\n")
def test_normalize_newlines_bytes(self):
"""normalize_newlines should be able to handle bytes too"""
normalized = text.normalize_newlines(b"abc\ndef\rghi\r\n")
self.assertEqual(normalized, "abc\ndef\nghi\n")
def test_phone2numeric(self):
numeric = text.phone2numeric('0800 flowers')
self.assertEqual(numeric, '0800 3569377')
lazy_numeric = lazystr(text.phone2numeric('0800 flowers'))
self.assertEqual(lazy_numeric, '0800 3569377')
def test_slugify(self):
items = (
# given - expected - unicode?
('Hello, World!', 'hello-world', False),
('spam & eggs', 'spam-eggs', False),
('spam & ıçüş', 'spam-ıçüş', True),
('foo ıç bar', 'foo-ıç-bar', True),
(' foo ıç bar', 'foo-ıç-bar', True),
('你好', '你好', True),
)
for value, output, is_unicode in items:
self.assertEqual(text.slugify(value, allow_unicode=is_unicode), output)
def test_unescape_entities(self):
items = [
('', ''),
('foo', 'foo'),
('&', '&'),
('&', '&'),
('&', '&'),
('foo & bar', 'foo & bar'),
('foo & bar', 'foo & bar'),
]
for value, output in items:
self.assertEqual(text.unescape_entities(value), output)
self.assertEqual(text.unescape_entities(lazystr(value)), output)
def test_unescape_string_literal(self):
items = [
('"abc"', 'abc'),
("'abc'", 'abc'),
('"a \"bc\""', 'a "bc"'),
("'\'ab\' c'", "'ab' c"),
]
for value, output in items:
self.assertEqual(text.unescape_string_literal(value), output)
self.assertEqual(text.unescape_string_literal(lazystr(value)), output)
def test_get_valid_filename(self):
filename = "^&'@{}[],$=!-#()%+~_123.txt"
self.assertEqual(text.get_valid_filename(filename), "-_123.txt")
self.assertEqual(text.get_valid_filename(lazystr(filename)), "-_123.txt")
def test_compress_sequence(self):
data = [{'key': i} for i in range(10)]
seq = list(json.JSONEncoder().iterencode(data))
seq = [s.encode() for s in seq]
actual_length = len(b''.join(seq))
out = text.compress_sequence(seq)
compressed_length = len(b''.join(out))
self.assertTrue(compressed_length < actual_length)
def test_format_lazy(self):
self.assertEqual('django/test', format_lazy('{}/{}', 'django', lazystr('test')))
self.assertEqual('django/test', format_lazy('{0}/{1}', *('django', 'test')))
self.assertEqual('django/test', format_lazy('{a}/{b}', **{'a': 'django', 'b': 'test'}))
self.assertEqual('django/test', format_lazy('{a[0]}/{a[1]}', a=('django', 'test')))
t = {}
s = format_lazy('{0[a]}-{p[a]}', t, p=t)
t['a'] = lazystr('django')
self.assertEqual('django-django', s)
t['a'] = 'update'
self.assertEqual('update-update', s)
# The format string can be lazy. (string comes from contrib.admin)
s = format_lazy(
gettext_lazy("Added {name} \"{object}\"."),
name='article', object='My first try',
)
with override('fr'):
self.assertEqual('article «\xa0My first try\xa0» ajouté.', s)
| bsd-3-clause | 4,515,946,839,409,525,000 | 45.731405 | 116 | 0.562561 | false |
haya14busa/alc-etm-searcher | nltk-3.0a3/nltk/chunk/api.py | 2 | 1735 | # Natural Language Toolkit: Chunk parsing API
#
# Copyright (C) 2001-2013 NLTK Project
# Author: Edward Loper <[email protected]>
# Steven Bird <[email protected]> (minor additions)
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
##//////////////////////////////////////////////////////
## Chunk Parser Interface
##//////////////////////////////////////////////////////
from nltk.parse import ParserI
from nltk.chunk.util import ChunkScore
class ChunkParserI(ParserI):
"""
A processing interface for identifying non-overlapping groups in
unrestricted text. Typically, chunk parsers are used to find base
syntactic constituents, such as base noun phrases. Unlike
``ParserI``, ``ChunkParserI`` guarantees that the ``parse()`` method
will always generate a parse.
"""
def parse(self, tokens):
"""
Return the best chunk structure for the given tokens
and return a tree.
:param tokens: The list of (word, tag) tokens to be chunked.
:type tokens: list(tuple)
:rtype: Tree
"""
raise NotImplementedError()
def evaluate(self, gold):
"""
Score the accuracy of the chunker against the gold standard.
        Remove the chunking from the gold standard text, rechunk it using
        the chunker, and return a ``ChunkScore`` object
        reflecting the performance of this chunk parser.
:type gold: list(Tree)
:param gold: The list of chunked sentences to score the chunker on.
:rtype: ChunkScore
"""
chunkscore = ChunkScore()
for correct in gold:
chunkscore.score(correct, self.parse(correct.leaves()))
return chunkscore
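# Illustrative sketch (not part of the NLTK API itself): a concrete chunker satisfying
# this interface can be built from a regexp grammar, e.g.:
#
#   from nltk import RegexpParser
#   chunker = RegexpParser(r"NP: {<DT>?<JJ>*<NN>}")
#   tree = chunker.parse([("the", "DT"), ("little", "JJ"), ("dog", "NN")])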
| mit | 8,300,143,107,947,121,000 | 33.019608 | 75 | 0.616715 | false |
pellagic-puffbomb/noaa-data | ocean_modules/noaa_parser.py | 1 | 10156 | #!/usr/bin/env python3 -tt
from collections import OrderedDict
from bs4 import BeautifulSoup
from urllib.request import urlopen
from urllib.parse import urljoin
class NoaaParser(object):
'''This is an attempt to return useful data from the Noaa mobile marine
    weather pages. To use this, you have to instantiate a NoaaParser object
and then run .get_locations(search_key, source), where "source" is the
page of urls listing buoys in the region you want and "search_key" is
some location marker that shows up in the link or links that you want.
    weather.get_locations accepts only one search key at the moment, but
this will be changed in future iterations to retrieve multiple weather
reports.
example usage:
weather = NoaaParser()
weather.get_locations("La Jolla", southwest_region_buoy_page)'''
def __init__(self):
self.weather_sources = []
self.latitude = ''
self.longitude = ''
self.coords = []
self.forecast_dict = {}
def parse_results(self, source):
'''Take ndbc.noaa.gov page and return a dict of locations along with their full urls.
This works on all the nav pages for ndbc.noaa.gov/mobile/?'''
self.source = source
loc_dict = {}
with urlopen(self.source) as f:
soup = BeautifulSoup(f)
for link in soup.find_all('a'):
if '@' in link.string:
pass
else:
loc_dict[link.string] = urljoin(self.source, link.get('href')) #builds dict out of locations and the urls to see those locations
return loc_dict
def _set_coords(self, soup_obj):
'''Takes final latitude and longitude listing from soup_obj instatiated by one of the get_weather functions and updates coordinates with the result.'''
self.soup_obj = soup_obj
self.coords = soup_obj.h1.next_sibling.string.split() # update latitude and longitude for use as class-level attribute
self.latitude = self.coords[0][:-1] # Drops the "W" or "N" or whatever.
self.longitude = self.coords[1][:-1]
def get_locations(self, search_key, source):
'''Given a search key and a url of result listings, the get_locations method will return urls specific to the search key.'''
self.source = source
self.search_key = search_key
result_dict = self.parse_results(self.source)
self.weather_sources = [val for key, val in result_dict.items() if self.search_key.lower() in key.lower()]
if len(self.weather_sources) > 0:
return self.weather_sources
else:
raise Exception("The location you entered was not found: try a different search key.\n Check the following source for valid search terms to find your location: \n{}".format(self.source))
def weather_get_all(self):
'''weather__get_all takes a list of urls and builds a dataset
from those urls. This is the information retrieval method that simply dumps
all data. You have to run get_locations(search_key, region_url)
before this does anything because self.weather_sources must be populated.
Usage: weather_get_all()
returns: list of data from the previously selected location, and source url.'''
datalist = []
try:
for url in self.weather_sources:
with urlopen(url) as f:
weathersoup = BeautifulSoup(f)
for node in weathersoup.find_all(['p', 'h2']):
datalist.extend(node.find_all(text=True))
except NameError:
raise Exception("weather_sources not defined")
# get rid of items containing the following:
excludes = ["Feedback:", "Main", "[email protected]"]
results = [x.strip('\n') for x in datalist if not any(y in excludes for y in x.split())]
final_results = [item for item in results if item]
if self.coords:
return final_results
else: #set class-level coordinates if unset.
self.coords = [item for item in final_results[0].split()]
self.latitude = self.coords[0][:-1] # Drops the "W" or "N" or whatever.
self.longitude = self.coords[1][:-1]
return final_results
def weather_info_dict(self, time_zone):
'''weather__info_dict takes a time-zone and builds a dictionary from
already-generated buoy urls. This method drops some data that may be duplicated
(for instance, where "Weather Summary" appears twice), but I prefer it because
it produces cleaner, better organized information and still has the most
important stuff. You have to run get_locations(search_key, region_url)
before this does anything because self.weather_sources must be populated.
usage: weather_info_dict(time-zone-for-desired-results)
Returns: nested dictionary that looks like "{'Weather Summary' {'time': '8:05'}, 'Wave Summary' : {'etc.'}}'''
self.time_zone = time_zone
weather_dict = {}
try:
for url in self.weather_sources:
with urlopen(url) as f:
weathersoup = BeautifulSoup(f)
if self.coords:
pass
else: #set class-level coordinates if unset.
self._set_coords(weathersoup)
for node in weathersoup.find_all('h2'):
if node.string not in weather_dict.keys():
if node.next_sibling == '\n':
weather_dict[node.string] = node.next_sibling.next_sibling(text=True)
# 'the .next_sibling.next_sibling' trick came directly from bs4 docs
else:
weather_dict[node.string] = node.next_sibling(text=True)
except NameError:
raise Exception("weather_sources not defined")
# The following creates a new nested dictionary out of splitting up stuff on
# either side of the colon. Thus, {"Weather : ["Air Temp: 66.0", etc.] becomes:
# {"Weather : {"Air Temp" : "66.0"}} etc.
data_final = {key : {val.rsplit(":")[0].strip() : val.rsplit(":")[1].strip() for val in value if val.strip() and "GMT" not in val and self.time_zone not in val} for key, value in weather_dict.items()}
# Last run through makes sure there's a 'Time' key in the dict. It was
# hard to get with that colon in it before!
if "Time" not in data_final.keys():
for list_of_info in weather_dict.values():
for _item in list_of_info:
if self.time_zone in _item:
data_final["Time"] = _item.strip()
return data_final
def get_forecast(self):
'''Takes the latitude and longitude and uses forecast url to retrieve
the forecast for that location. Conjecture seems to be that: marine-buoy lat
and lon correspond to marine weather forecasts, while terrestrial lat and lon
correspond to terrestrial weather reports. Test further to confirm.
usage: get_forecast().
        returns: True or False. Use prettify_forecast() to get results or get it from the class attribute directly: self.forecast_dict. '''
if not self.coords: # Class-level coordinates are needed to get forecast page
if not self.weather_sources:
raise Exception("You will have to selet a weather page before getting the forecast. Try this first: get_locations('search key', 'region-url')\n\n")
else:
url = self.weather_sources[0]
with urlopen(url) as f:
forecastsoup = BeautifulSoup(f)
self._set_coords(forecastsoup)
forecast_url = "http://forecast.weather.gov/MapClick.php?lat={self.latitude}&lon=-{self.longitude}&unit=0&lg=english&FcstType=digital"
self.forecast_page = forecast_url.format_map(vars())
##IMPORTANT: Long is set to negative on the west coast of the USA,
# check full forecast url for details elsewhere and to see if your
# lat or long are negative inside the url.
# Adjust forecast_page accordingly if so.
with urlopen(self.forecast_page) as f:
forecastsoup = BeautifulSoup(f)
# ---------------------------------GET FIELD NAMES -----------------#
field_names = []
### ---- Get "Hour" --- ###
for node in forecastsoup.find_all(attrs={'align':'left', 'class':'date'}):
if node.string not in field_names:
field_names.append(node.string)
### ---- Get Other Fields in first column --- ###
for node in forecastsoup.find_all(attrs={'align':'left', 'width':'5%'}):
if node.string not in field_names and node.string != "Date":
field_names.append(node.string)
### ---- Get all the hours listed ---### Times have no other attributes: that's the pattern
first_row_times = []
for node in forecastsoup.find_all(attrs={'class' : 'date', 'align': None, 'width': None}):
if node.string not in first_row_times:
first_row_times.append(node.string)
# Lastly, we want 24-hours worth of data multiplied by 11 rows.
# first_fields is 11 field_names plus hour, but the "hours" have
# already been pulled out as our first-row header. Thus, we need to subtract the "hour" row.
# We do this by subtracting one from the field names to get the remaining total cells to pull from.
# This is the logic for the limit below.
table_width = len(first_row_times)
cell_data = []
for node in forecastsoup.find_all(attrs={'align' : 'center', 'width' : '3%'}, limit = table_width * (len(field_names) -1)):
cell_data.append(node.string)
for x in range(len(field_names)-1):
self.forecast_dict[field_names[x + 1].strip()] = (OrderedDict(zip(first_row_times, cell_data[table_width*x:table_width*(x+1)])))
if self.forecast_dict:
return True
else:
return False
def prettify_forecast(self, hours_ahead=6):
"""prettify_forecase takes the forecast generated by get_forecast() and yields it in a pretty format.
It's actually a generator for use in message or email creation or for web display.
prettify_forecast() takes an optional argument 'hours_ahead=n' where 'n' is the number of hours ahead you would like to forecast (max 24)."""
if not self.forecast_dict:
self.get_forecast()
else:
pass
self.hours_ahead = hours_ahead
for key in self.forecast_dict.keys():
for item in list(self.forecast_dict[key].keys())[:self.hours_ahead]:
if self.forecast_dict[key][item] != None:
yield key.strip(), item + "h:", self.forecast_dict[key][item]
else:
pass
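# Illustrative end-to-end sketch (not part of the original module): the region URL and
# search key are placeholders -- valid values come from the ndbc.noaa.gov/mobile listing
# pages referenced in parse_results().
#
#   weather = NoaaParser()
#   weather.get_locations("La Jolla", southwest_region_buoy_page)
#   report = weather.weather_info_dict("PDT")
#   for field, hour, value in weather.prettify_forecast(hours_ahead=6):
#       print(field, hour, value)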
| mit | -8,560,191,924,486,775,000 | 44.747748 | 204 | 0.668373 | false |