repo_name
stringlengths
6
100
path
stringlengths
4
294
copies
stringlengths
1
5
size
stringlengths
4
6
content
stringlengths
606
896k
license
stringclasses
15 values
alekz112/statsmodels
statsmodels/datasets/tests/test_utils.py
26
1697
import os import sys from statsmodels.datasets import get_rdataset, webuse, check_internet from numpy.testing import assert_, assert_array_equal, dec cur_dir = os.path.dirname(os.path.abspath(__file__)) def test_get_rdataset(): # smoke test if sys.version_info[0] >= 3: #NOTE: there's no way to test both since the cached files were #created with Python 2.x, they're strings, but Python 3 expects #bytes and the index file path is hard-coded so both can't live #side by side pass #duncan = get_rdataset("Duncan-py3", "car", cache=cur_dir) else: duncan = get_rdataset("Duncan", "car", cache=cur_dir) assert_(duncan.from_cache) #internet_available = check_internet() #@dec.skipif(not internet_available) def t_est_webuse(): # test copied and adjusted from iolib/tests/test_foreign from statsmodels.iolib.tests.results.macrodata import macrodata_result as res2 #base_gh = "http://github.com/statsmodels/statsmodels/raw/master/statsmodels/datasets/macrodata/" base_gh = "http://statsmodels.sourceforge.net/devel/_static/" res1 = webuse('macrodata', baseurl=base_gh, as_df=False) assert_array_equal(res1 == res2, True) #@dec.skipif(not internet_available) def t_est_webuse_pandas(): # test copied and adjusted from iolib/tests/test_foreign from pandas.util.testing import assert_frame_equal from statsmodels.datasets import macrodata dta = macrodata.load_pandas().data base_gh = "http://github.com/statsmodels/statsmodels/raw/master/statsmodels/datasets/macrodata/" res1 = webuse('macrodata', baseurl=base_gh) res1 = res1.astype(float) assert_frame_equal(res1, dta)
bsd-3-clause
carlos-ferras/Sequence-ToolKit
pyqtgraph/graphicsItems/GraphicsObject.py
44
1720
from ..Qt import QtGui, QtCore, USE_PYSIDE if not USE_PYSIDE: import sip from .GraphicsItem import GraphicsItem __all__ = ['GraphicsObject'] class GraphicsObject(GraphicsItem, QtGui.QGraphicsObject): """ **Bases:** :class:`GraphicsItem <pyqtgraph.graphicsItems.GraphicsItem>`, :class:`QtGui.QGraphicsObject` Extension of QGraphicsObject with some useful methods (provided by :class:`GraphicsItem <pyqtgraph.graphicsItems.GraphicsItem>`) """ _qtBaseClass = QtGui.QGraphicsObject def __init__(self, *args): self.__inform_view_on_changes = True QtGui.QGraphicsObject.__init__(self, *args) self.setFlag(self.ItemSendsGeometryChanges) GraphicsItem.__init__(self) def itemChange(self, change, value): ret = QtGui.QGraphicsObject.itemChange(self, change, value) if change in [self.ItemParentHasChanged, self.ItemSceneHasChanged]: self.parentChanged() try: inform_view_on_change = self.__inform_view_on_changes except AttributeError: # It's possible that the attribute was already collected when the itemChange happened # (if it was triggered during the gc of the object). pass else: if inform_view_on_change and change in [self.ItemPositionHasChanged, self.ItemTransformHasChanged]: self.informViewBoundsChanged() ## workaround for pyqt bug: ## http://www.riverbankcomputing.com/pipermail/pyqt/2012-August/031818.html if not USE_PYSIDE and change == self.ItemParentChange and isinstance(ret, QtGui.QGraphicsItem): ret = sip.cast(ret, QtGui.QGraphicsItem) return ret
gpl-3.0
gtko/CouchPotatoServer
libs/guessit/transfo/guess_bonus_features.py
150
2155
#!/usr/bin/env python # -*- coding: utf-8 -*- # # GuessIt - A library for guessing information from filenames # Copyright (c) 2012 Nicolas Wack <[email protected]> # # GuessIt is free software; you can redistribute it and/or modify it under # the terms of the Lesser GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # GuessIt is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # Lesser GNU General Public License for more details. # # You should have received a copy of the Lesser GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # from __future__ import unicode_literals from guessit.transfo import found_property import logging log = logging.getLogger(__name__) def process(mtree): def previous_group(g): for leaf in mtree.unidentified_leaves()[::-1]: if leaf.node_idx < g.node_idx: return leaf def next_group(g): for leaf in mtree.unidentified_leaves(): if leaf.node_idx > g.node_idx: return leaf def same_group(g1, g2): return g1.node_idx[:2] == g2.node_idx[:2] bonus = [ node for node in mtree.leaves() if 'bonusNumber' in node.guess ] if bonus: bonusTitle = next_group(bonus[0]) if same_group(bonusTitle, bonus[0]): found_property(bonusTitle, 'bonusTitle', 0.8) filmNumber = [ node for node in mtree.leaves() if 'filmNumber' in node.guess ] if filmNumber: filmSeries = previous_group(filmNumber[0]) found_property(filmSeries, 'filmSeries', 0.9) title = next_group(filmNumber[0]) found_property(title, 'title', 0.9) season = [ node for node in mtree.leaves() if 'season' in node.guess ] if season and 'bonusNumber' in mtree.info: series = previous_group(season[0]) if same_group(series, season[0]): found_property(series, 'series', 0.9)
gpl-3.0
Mibou/django-memcache-status
memcache_status/templatetags/memcache_status_tags.py
1
3249
from django import template from django.conf import settings try: from django.core.cache import caches except: from django.core.cache import get_cache as caches if caches.__module__.startswith('debug_toolbar'): from debug_toolbar.panels.cache import original_get_cache as caches get_cache = lambda cache_name: caches(cache_name) if hasattr(caches, '__call__') else caches[cache_name] register = template.Library() class CacheStats(template.Node): """ Reads the cache stats out of the memcached cache backend. Returns `None` if no cache stats supported. """ def render(self, context): cache_stats = [] for cache_backend_nm, cache_backend_attrs in settings.CACHES.iteritems(): try: cache_backend = get_cache(cache_backend_nm) this_backend_stats = cache_backend._cache.get_stats() # returns list of (name, stats) tuples for server_name, server_stats in this_backend_stats: cache_stats.append(("%s: %s" % ( cache_backend_nm, server_name), server_stats)) except AttributeError: # this backend probably doesn't support that continue context['cache_stats'] = cache_stats return '' @register.tag def get_cache_stats(parser, token): return CacheStats() @register.filter def prettyname(name): return ' '.join([word.capitalize() for word in name.split('_')]) @register.filter def prettyvalue(value, key): return PrettyValue().format(key, value) class PrettyValue(object): """ Helper class that reformats the value. Looks for a method named ``format_<key>_value`` and returns that value. Returns the value as is, if no format method is found. """ def format(self, key, value): try: func = getattr(self, 'format_%s_value' % key.lower()) return func(value) except AttributeError: return value def format_limit_maxbytes_value(self, value): return "%s (%s)" % (value, self.human_bytes(value)) def format_bytes_read_value(self, value): return "%s (%s)" % (value, self.human_bytes(value)) def format_bytes_written_value(self, value): return "%s (%s)" % (value, self.human_bytes(value)) def format_uptime_value(self, value): return self.fract_timestamp(int(value)) def format_time_value(self, value): from datetime import datetime return datetime.fromtimestamp(int(value)).strftime('%x %X') def fract_timestamp(self, s): years, s = divmod(s, 31556952) min, s = divmod(s, 60) h, min = divmod(min, 60) d, h = divmod(h, 24) return '%sy, %sd, %sh, %sm, %ss' % (years, d, h, min, s) def human_bytes(self, bytes): bytes = float(bytes) if bytes >= 1073741824: gigabytes = bytes / 1073741824 size = '%.2fGB' % gigabytes elif bytes >= 1048576: megabytes = bytes / 1048576 size = '%.2fMB' % megabytes elif bytes >= 1024: kilobytes = bytes / 1024 size = '%.2fKB' % kilobytes else: size = '%.2fB' % bytes return size
bsd-3-clause
alexteodor/odoo
addons/sale/edi/sale_order.py
403
10861
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Business Applications # Copyright (c) 2011-2012 OpenERP S.A. <http://openerp.com> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import osv, fields from openerp.addons.edi import EDIMixin from openerp.tools.translate import _ from werkzeug import url_encode SALE_ORDER_LINE_EDI_STRUCT = { 'sequence': True, 'name': True, #custom: 'date_planned' 'product_id': True, 'product_uom': True, 'price_unit': True, #custom: 'product_qty' 'discount': True, # fields used for web preview only - discarded on import 'price_subtotal': True, } SALE_ORDER_EDI_STRUCT = { 'name': True, 'origin': True, 'company_id': True, # -> to be changed into partner #custom: 'partner_ref' 'date_order': True, 'partner_id': True, #custom: 'partner_address' #custom: 'notes' 'order_line': SALE_ORDER_LINE_EDI_STRUCT, # fields used for web preview only - discarded on import 'amount_total': True, 'amount_untaxed': True, 'amount_tax': True, 'payment_term': True, 'order_policy': True, 'user_id': True, 'state': True, } class sale_order(osv.osv, EDIMixin): _inherit = 'sale.order' def edi_export(self, cr, uid, records, edi_struct=None, context=None): """Exports a Sale order""" edi_struct = dict(edi_struct or SALE_ORDER_EDI_STRUCT) res_company = self.pool.get('res.company') res_partner_obj = self.pool.get('res.partner') edi_doc_list = [] for order in records: # generate the main report self._edi_generate_report_attachment(cr, uid, order, context=context) # Get EDI doc based on struct. The result will also contain all metadata fields and attachments. edi_doc = super(sale_order,self).edi_export(cr, uid, [order], edi_struct, context)[0] edi_doc.update({ # force trans-typing to purchase.order upon import '__import_model': 'purchase.order', '__import_module': 'purchase', 'company_address': res_company.edi_export_address(cr, uid, order.company_id, context=context), 'partner_address': res_partner_obj.edi_export(cr, uid, [order.partner_id], context=context)[0], 'currency': self.pool.get('res.currency').edi_export(cr, uid, [order.pricelist_id.currency_id], context=context)[0], 'partner_ref': order.client_order_ref or False, 'notes': order.note or False, }) edi_doc_list.append(edi_doc) return edi_doc_list def _edi_import_company(self, cr, uid, edi_document, context=None): # TODO: for multi-company setups, we currently import the document in the # user's current company, but we should perhaps foresee a way to select # the desired company among the user's allowed companies self._edi_requires_attributes(('company_id','company_address'), edi_document) res_partner = self.pool.get('res.partner') xid, company_name = edi_document.pop('company_id') # Retrofit address info into a unified partner info (changed in v7 - used to keep them separate) company_address_edi = edi_document.pop('company_address') company_address_edi['name'] = company_name company_address_edi['is_company'] = True company_address_edi['__import_model'] = 'res.partner' company_address_edi['__id'] = xid # override address ID, as of v7 they should be the same anyway if company_address_edi.get('logo'): company_address_edi['image'] = company_address_edi.pop('logo') company_address_edi['customer'] = True partner_id = res_partner.edi_import(cr, uid, company_address_edi, context=context) # modify edi_document to refer to new partner partner = res_partner.browse(cr, uid, partner_id, context=context) partner_edi_m2o = self.edi_m2o(cr, uid, partner, context=context) edi_document['partner_id'] = partner_edi_m2o edi_document['partner_invoice_id'] = partner_edi_m2o edi_document['partner_shipping_id'] = partner_edi_m2o edi_document.pop('partner_address', None) # ignored, that's supposed to be our own address! return partner_id def _edi_get_pricelist(self, cr, uid, partner_id, currency, context=None): # TODO: refactor into common place for purchase/sale, e.g. into product module partner_model = self.pool.get('res.partner') partner = partner_model.browse(cr, uid, partner_id, context=context) pricelist = partner.property_product_pricelist if not pricelist: pricelist = self.pool.get('ir.model.data').get_object(cr, uid, 'product', 'list0', context=context) if not pricelist.currency_id == currency: # look for a pricelist with the right type and currency, or make a new one pricelist_type = 'sale' product_pricelist = self.pool.get('product.pricelist') match_pricelist_ids = product_pricelist.search(cr, uid,[('type','=',pricelist_type), ('currency_id','=',currency.id)]) if match_pricelist_ids: pricelist_id = match_pricelist_ids[0] else: pricelist_name = _('EDI Pricelist (%s)') % (currency.name,) pricelist_id = product_pricelist.create(cr, uid, {'name': pricelist_name, 'type': pricelist_type, 'currency_id': currency.id, }) self.pool.get('product.pricelist.version').create(cr, uid, {'name': pricelist_name, 'pricelist_id': pricelist_id}) pricelist = product_pricelist.browse(cr, uid, pricelist_id) return self.edi_m2o(cr, uid, pricelist, context=context) def edi_import(self, cr, uid, edi_document, context=None): self._edi_requires_attributes(('company_id','company_address','order_line','date_order','currency'), edi_document) #import company as a new partner partner_id = self._edi_import_company(cr, uid, edi_document, context=context) # currency for rounding the discount calculations and for the pricelist res_currency = self.pool.get('res.currency') currency_info = edi_document.pop('currency') currency_id = res_currency.edi_import(cr, uid, currency_info, context=context) order_currency = res_currency.browse(cr, uid, currency_id) partner_ref = edi_document.pop('partner_ref', False) edi_document['client_order_ref'] = edi_document['name'] edi_document['name'] = partner_ref or edi_document['name'] edi_document['note'] = edi_document.pop('notes', False) edi_document['pricelist_id'] = self._edi_get_pricelist(cr, uid, partner_id, order_currency, context=context) # discard web preview fields, if present edi_document.pop('amount_total', None) edi_document.pop('amount_tax', None) edi_document.pop('amount_untaxed', None) order_lines = edi_document['order_line'] for order_line in order_lines: self._edi_requires_attributes(('product_id', 'product_uom', 'product_qty', 'price_unit'), order_line) order_line['product_uom_qty'] = order_line['product_qty'] del order_line['product_qty'] # discard web preview fields, if present order_line.pop('price_subtotal', None) return super(sale_order,self).edi_import(cr, uid, edi_document, context=context) def _edi_paypal_url(self, cr, uid, ids, field, arg, context=None): res = dict.fromkeys(ids, False) for order in self.browse(cr, uid, ids, context=context): if order.order_policy in ('prepaid', 'manual') and \ order.company_id.paypal_account and order.state != 'draft': params = { "cmd": "_xclick", "business": order.company_id.paypal_account, "item_name": order.company_id.name + " Order " + order.name, "invoice": order.name, "amount": order.amount_total, "currency_code": order.pricelist_id.currency_id.name, "button_subtype": "services", "no_note": "1", "bn": "OpenERP_Order_PayNow_" + order.pricelist_id.currency_id.name, } res[order.id] = "https://www.paypal.com/cgi-bin/webscr?" + url_encode(params) return res _columns = { 'paypal_url': fields.function(_edi_paypal_url, type='char', string='Paypal Url'), } class sale_order_line(osv.osv, EDIMixin): _inherit='sale.order.line' def edi_export(self, cr, uid, records, edi_struct=None, context=None): """Overridden to provide sale order line fields with the expected names (sale and purchase orders have different column names)""" edi_struct = dict(edi_struct or SALE_ORDER_LINE_EDI_STRUCT) edi_doc_list = [] for line in records: edi_doc = super(sale_order_line,self).edi_export(cr, uid, [line], edi_struct, context)[0] edi_doc['__import_model'] = 'purchase.order.line' edi_doc['product_qty'] = line.product_uom_qty if line.product_uos: edi_doc.update(product_uom=line.product_uos, product_qty=line.product_uos_qty) edi_doc_list.append(edi_doc) return edi_doc_list # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
gcd0318/django
django/contrib/auth/backends.py
468
6114
from __future__ import unicode_literals from django.contrib.auth import get_user_model from django.contrib.auth.models import Permission class ModelBackend(object): """ Authenticates against settings.AUTH_USER_MODEL. """ def authenticate(self, username=None, password=None, **kwargs): UserModel = get_user_model() if username is None: username = kwargs.get(UserModel.USERNAME_FIELD) try: user = UserModel._default_manager.get_by_natural_key(username) if user.check_password(password): return user except UserModel.DoesNotExist: # Run the default password hasher once to reduce the timing # difference between an existing and a non-existing user (#20760). UserModel().set_password(password) def _get_user_permissions(self, user_obj): return user_obj.user_permissions.all() def _get_group_permissions(self, user_obj): user_groups_field = get_user_model()._meta.get_field('groups') user_groups_query = 'group__%s' % user_groups_field.related_query_name() return Permission.objects.filter(**{user_groups_query: user_obj}) def _get_permissions(self, user_obj, obj, from_name): """ Returns the permissions of `user_obj` from `from_name`. `from_name` can be either "group" or "user" to return permissions from `_get_group_permissions` or `_get_user_permissions` respectively. """ if not user_obj.is_active or user_obj.is_anonymous() or obj is not None: return set() perm_cache_name = '_%s_perm_cache' % from_name if not hasattr(user_obj, perm_cache_name): if user_obj.is_superuser: perms = Permission.objects.all() else: perms = getattr(self, '_get_%s_permissions' % from_name)(user_obj) perms = perms.values_list('content_type__app_label', 'codename').order_by() setattr(user_obj, perm_cache_name, set("%s.%s" % (ct, name) for ct, name in perms)) return getattr(user_obj, perm_cache_name) def get_user_permissions(self, user_obj, obj=None): """ Returns a set of permission strings the user `user_obj` has from their `user_permissions`. """ return self._get_permissions(user_obj, obj, 'user') def get_group_permissions(self, user_obj, obj=None): """ Returns a set of permission strings the user `user_obj` has from the groups they belong. """ return self._get_permissions(user_obj, obj, 'group') def get_all_permissions(self, user_obj, obj=None): if not user_obj.is_active or user_obj.is_anonymous() or obj is not None: return set() if not hasattr(user_obj, '_perm_cache'): user_obj._perm_cache = self.get_user_permissions(user_obj) user_obj._perm_cache.update(self.get_group_permissions(user_obj)) return user_obj._perm_cache def has_perm(self, user_obj, perm, obj=None): if not user_obj.is_active: return False return perm in self.get_all_permissions(user_obj, obj) def has_module_perms(self, user_obj, app_label): """ Returns True if user_obj has any permissions in the given app_label. """ if not user_obj.is_active: return False for perm in self.get_all_permissions(user_obj): if perm[:perm.index('.')] == app_label: return True return False def get_user(self, user_id): UserModel = get_user_model() try: return UserModel._default_manager.get(pk=user_id) except UserModel.DoesNotExist: return None class RemoteUserBackend(ModelBackend): """ This backend is to be used in conjunction with the ``RemoteUserMiddleware`` found in the middleware module of this package, and is used when the server is handling authentication outside of Django. By default, the ``authenticate`` method creates ``User`` objects for usernames that don't already exist in the database. Subclasses can disable this behavior by setting the ``create_unknown_user`` attribute to ``False``. """ # Create a User object if not already in the database? create_unknown_user = True def authenticate(self, remote_user): """ The username passed as ``remote_user`` is considered trusted. This method simply returns the ``User`` object with the given username, creating a new ``User`` object if ``create_unknown_user`` is ``True``. Returns None if ``create_unknown_user`` is ``False`` and a ``User`` object with the given username is not found in the database. """ if not remote_user: return user = None username = self.clean_username(remote_user) UserModel = get_user_model() # Note that this could be accomplished in one try-except clause, but # instead we use get_or_create when creating unknown users since it has # built-in safeguards for multiple threads. if self.create_unknown_user: user, created = UserModel._default_manager.get_or_create(**{ UserModel.USERNAME_FIELD: username }) if created: user = self.configure_user(user) else: try: user = UserModel._default_manager.get_by_natural_key(username) except UserModel.DoesNotExist: pass return user def clean_username(self, username): """ Performs any cleaning on the "username" prior to using it to get or create the user object. Returns the cleaned username. By default, returns the username unchanged. """ return username def configure_user(self, user): """ Configures a user after creation and returns the updated user. By default, returns the user unmodified. """ return user
bsd-3-clause
twitter-forks/bazel
src/test/py/bazel/bazel_clean_test.py
6
4484
# Copyright 2017 The Bazel Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import re import time import unittest from src.test.py.bazel import test_base class BazelCleanTest(test_base.TestBase): def testBazelClean(self): self.ScratchFile('WORKSPACE') self.ScratchFile('foo/BUILD', [ 'genrule(', ' name = "x",', ' outs = ["x.out"],', ' cmd = "touch $@",', ')', ]) exit_code, stdout, stderr = self.RunBazel(['info', 'bazel-genfiles']) self.AssertExitCode(exit_code, 0, stderr) bazel_genfiles = stdout[0] exit_code, stdout, stderr = self.RunBazel(['info', 'output_base']) self.AssertExitCode(exit_code, 0, stderr) output_base = stdout[0] # Repeat 10 times to ensure flaky error like # https://github.com/bazelbuild/bazel/issues/5907 are caught. for _ in range(0, 10): exit_code, _, stderr = self.RunBazel(['build', '//foo:x']) self.AssertExitCode(exit_code, 0, stderr) self.assertTrue(os.path.exists( os.path.join(bazel_genfiles, 'foo/x.out'))) exit_code, _, stderr = self.RunBazel(['clean']) self.AssertExitCode(exit_code, 0, stderr) self.assertFalse(os.path.exists( os.path.join(bazel_genfiles, 'foo/x.out'))) self.assertTrue(os.path.exists(output_base)) exit_code, _, stderr = self.RunBazel(['build', '//foo:x']) self.AssertExitCode(exit_code, 0, stderr) self.assertTrue(os.path.exists(os.path.join(bazel_genfiles, 'foo/x.out'))) exit_code, _, stderr = self.RunBazel(['clean', '--expunge']) self.AssertExitCode(exit_code, 0, stderr) self.assertFalse(os.path.exists( os.path.join(bazel_genfiles, 'foo/x.out'))) self.assertFalse(os.path.exists(output_base)) @unittest.skipIf(not test_base.TestBase.IsLinux(), 'Async clean only supported on Linux') def testBazelAsyncClean(self): self.ScratchFile('WORKSPACE') exit_code, _, stderr = self.RunBazel(['clean', '--async']) self.AssertExitCode(exit_code, 0, stderr) matcher = self._findMatch(' moved to (.*) for deletion', stderr) self.assertTrue(matcher, stderr) first_temp = matcher.group(1) self.assertTrue(first_temp, stderr) # Now do it again (we need to build to recreate exec root). self.RunBazel(['build']) exit_code, _, stderr = self.RunBazel(['clean', '--async']) self.AssertExitCode(exit_code, 0, stderr) matcher = self._findMatch(' moved to (.*) for deletion', stderr) self.assertTrue(matcher, stderr) second_temp = matcher.group(1) self.assertTrue(second_temp, stderr) # Two directories should be different. self.assertNotEqual(second_temp, first_temp, stderr) @unittest.skipIf(not test_base.TestBase.IsLinux(), 'Async clean only supported on Linux') def testBazelAsyncCleanWithReadonlyDirectories(self): self.ScratchFile('WORKSPACE') exit_code, _, stderr = self.RunBazel(['build']) self.AssertExitCode(exit_code, 0, stderr) exit_code, stdout, stderr = self.RunBazel(['info', 'execution_root']) self.AssertExitCode(exit_code, 0, stderr) execroot = stdout[0] readonly_dir = os.path.join(execroot, 'readonly') os.mkdir(readonly_dir) open(os.path.join(readonly_dir, 'somefile'), 'wb').close() os.chmod(readonly_dir, 0o555) exit_code, _, stderr = self.RunBazel(['clean', '--async']) matcher = self._findMatch(' moved to (.*) for deletion', stderr) self.assertTrue(matcher, stderr) temp = matcher.group(1) for _ in range(50): if not os.path.isdir(temp): break time.sleep(.1) else: self.fail('temporary directory not removed: {!r}'.format(stderr)) def _findMatch(self, pattern, items): r = re.compile(pattern) for line in items: matcher = r.search(line) if matcher: return matcher return None if __name__ == '__main__': unittest.main()
apache-2.0
MCP1/android_kernel_motorola_msm8960dt-common
tools/perf/scripts/python/net_dropmonitor.py
1258
1562
# Monitor the system for dropped packets and proudce a report of drop locations and counts import os import sys sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * from Util import * drop_log = {} kallsyms = [] def get_kallsyms_table(): global kallsyms try: f = open("/proc/kallsyms", "r") linecount = 0 for line in f: linecount = linecount+1 f.seek(0) except: return j = 0 for line in f: loc = int(line.split()[0], 16) name = line.split()[2] j = j +1 if ((j % 100) == 0): print "\r" + str(j) + "/" + str(linecount), kallsyms.append({ 'loc': loc, 'name' : name}) print "\r" + str(j) + "/" + str(linecount) kallsyms.sort() return def get_sym(sloc): loc = int(sloc) for i in kallsyms[::-1]: if loc >= i['loc']: return (i['name'], loc - i['loc']) return (None, 0) def print_drop_table(): print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT") for i in drop_log.keys(): (sym, off) = get_sym(i) if sym == None: sym = i print "%25s %25s %25s" % (sym, off, drop_log[i]) def trace_begin(): print "Starting trace (Ctrl-C to dump results)" def trace_end(): print "Gathering kallsyms data" get_kallsyms_table() print_drop_table() # called from perf, when it finds a correspoinding event def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr, location, protocol): slocation = str(location) try: drop_log[slocation] = drop_log[slocation] + 1 except: drop_log[slocation] = 1
gpl-2.0
SonyCSL/KadecotPepperSample
KadecotSamples/lib/requests/packages/urllib3/response.py
64
17149
from contextlib import contextmanager import zlib import io from socket import timeout as SocketTimeout from ._collections import HTTPHeaderDict from .exceptions import ( ProtocolError, DecodeError, ReadTimeoutError, ResponseNotChunked ) from .packages.six import string_types as basestring, binary_type, PY3 from .packages.six.moves import http_client as httplib from .connection import HTTPException, BaseSSLError from .util.response import is_fp_closed, is_response_to_head class DeflateDecoder(object): def __init__(self): self._first_try = True self._data = binary_type() self._obj = zlib.decompressobj() def __getattr__(self, name): return getattr(self._obj, name) def decompress(self, data): if not data: return data if not self._first_try: return self._obj.decompress(data) self._data += data try: return self._obj.decompress(data) except zlib.error: self._first_try = False self._obj = zlib.decompressobj(-zlib.MAX_WBITS) try: return self.decompress(self._data) finally: self._data = None class GzipDecoder(object): def __init__(self): self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS) def __getattr__(self, name): return getattr(self._obj, name) def decompress(self, data): if not data: return data return self._obj.decompress(data) def _get_decoder(mode): if mode == 'gzip': return GzipDecoder() return DeflateDecoder() class HTTPResponse(io.IOBase): """ HTTP Response container. Backwards-compatible to httplib's HTTPResponse but the response ``body`` is loaded and decoded on-demand when the ``data`` property is accessed. This class is also compatible with the Python standard library's :mod:`io` module, and can hence be treated as a readable object in the context of that framework. Extra parameters for behaviour not present in httplib.HTTPResponse: :param preload_content: If True, the response's body will be preloaded during construction. :param decode_content: If True, attempts to decode specific content-encoding's based on headers (like 'gzip' and 'deflate') will be skipped and raw data will be used instead. :param original_response: When this HTTPResponse wrapper is generated from an httplib.HTTPResponse object, it's convenient to include the original for debug purposes. It's otherwise unused. """ CONTENT_DECODERS = ['gzip', 'deflate'] REDIRECT_STATUSES = [301, 302, 303, 307, 308] def __init__(self, body='', headers=None, status=0, version=0, reason=None, strict=0, preload_content=True, decode_content=True, original_response=None, pool=None, connection=None): if isinstance(headers, HTTPHeaderDict): self.headers = headers else: self.headers = HTTPHeaderDict(headers) self.status = status self.version = version self.reason = reason self.strict = strict self.decode_content = decode_content self._decoder = None self._body = None self._fp = None self._original_response = original_response self._fp_bytes_read = 0 if body and isinstance(body, (basestring, binary_type)): self._body = body self._pool = pool self._connection = connection if hasattr(body, 'read'): self._fp = body # Are we using the chunked-style of transfer encoding? self.chunked = False self.chunk_left = None tr_enc = self.headers.get('transfer-encoding', '').lower() # Don't incur the penalty of creating a list and then discarding it encodings = (enc.strip() for enc in tr_enc.split(",")) if "chunked" in encodings: self.chunked = True # We certainly don't want to preload content when the response is chunked. if not self.chunked and preload_content and not self._body: self._body = self.read(decode_content=decode_content) def get_redirect_location(self): """ Should we redirect and where to? :returns: Truthy redirect location string if we got a redirect status code and valid location. ``None`` if redirect status and no location. ``False`` if not a redirect status code. """ if self.status in self.REDIRECT_STATUSES: return self.headers.get('location') return False def release_conn(self): if not self._pool or not self._connection: return self._pool._put_conn(self._connection) self._connection = None @property def data(self): # For backwords-compat with earlier urllib3 0.4 and earlier. if self._body: return self._body if self._fp: return self.read(cache_content=True) def tell(self): """ Obtain the number of bytes pulled over the wire so far. May differ from the amount of content returned by :meth:``HTTPResponse.read`` if bytes are encoded on the wire (e.g, compressed). """ return self._fp_bytes_read def _init_decoder(self): """ Set-up the _decoder attribute if necessar. """ # Note: content-encoding value should be case-insensitive, per RFC 7230 # Section 3.2 content_encoding = self.headers.get('content-encoding', '').lower() if self._decoder is None and content_encoding in self.CONTENT_DECODERS: self._decoder = _get_decoder(content_encoding) def _decode(self, data, decode_content, flush_decoder): """ Decode the data passed in and potentially flush the decoder. """ try: if decode_content and self._decoder: data = self._decoder.decompress(data) except (IOError, zlib.error) as e: content_encoding = self.headers.get('content-encoding', '').lower() raise DecodeError( "Received response with content-encoding: %s, but " "failed to decode it." % content_encoding, e) if flush_decoder and decode_content and self._decoder: buf = self._decoder.decompress(binary_type()) data += buf + self._decoder.flush() return data @contextmanager def _error_catcher(self): """ Catch low-level python exceptions, instead re-raising urllib3 variants, so that low-level exceptions are not leaked in the high-level api. On exit, release the connection back to the pool. """ try: try: yield except SocketTimeout: # FIXME: Ideally we'd like to include the url in the ReadTimeoutError but # there is yet no clean way to get at it from this context. raise ReadTimeoutError(self._pool, None, 'Read timed out.') except BaseSSLError as e: # FIXME: Is there a better way to differentiate between SSLErrors? if 'read operation timed out' not in str(e): # Defensive: # This shouldn't happen but just in case we're missing an edge # case, let's avoid swallowing SSL errors. raise raise ReadTimeoutError(self._pool, None, 'Read timed out.') except HTTPException as e: # This includes IncompleteRead. raise ProtocolError('Connection broken: %r' % e, e) except Exception: # The response may not be closed but we're not going to use it anymore # so close it now to ensure that the connection is released back to the pool. if self._original_response and not self._original_response.isclosed(): self._original_response.close() raise finally: if self._original_response and self._original_response.isclosed(): self.release_conn() def read(self, amt=None, decode_content=None, cache_content=False): """ Similar to :meth:`httplib.HTTPResponse.read`, but with two additional parameters: ``decode_content`` and ``cache_content``. :param amt: How much of the content to read. If specified, caching is skipped because it doesn't make sense to cache partial content as the full response. :param decode_content: If True, will attempt to decode the body based on the 'content-encoding' header. :param cache_content: If True, will save the returned data such that the same result is returned despite of the state of the underlying file object. This is useful if you want the ``.data`` property to continue working after having ``.read()`` the file object. (Overridden if ``amt`` is set.) """ self._init_decoder() if decode_content is None: decode_content = self.decode_content if self._fp is None: return flush_decoder = False data = None with self._error_catcher(): if amt is None: # cStringIO doesn't like amt=None data = self._fp.read() flush_decoder = True else: cache_content = False data = self._fp.read(amt) if amt != 0 and not data: # Platform-specific: Buggy versions of Python. # Close the connection when no data is returned # # This is redundant to what httplib/http.client _should_ # already do. However, versions of python released before # December 15, 2012 (http://bugs.python.org/issue16298) do # not properly close the connection in all cases. There is # no harm in redundantly calling close. self._fp.close() flush_decoder = True if data: self._fp_bytes_read += len(data) data = self._decode(data, decode_content, flush_decoder) if cache_content: self._body = data return data def stream(self, amt=2**16, decode_content=None): """ A generator wrapper for the read() method. A call will block until ``amt`` bytes have been read from the connection or until the connection is closed. :param amt: How much of the content to read. The generator will return up to much data per iteration, but may return less. This is particularly likely when using compressed data. However, the empty string will never be returned. :param decode_content: If True, will attempt to decode the body based on the 'content-encoding' header. """ if self.chunked: for line in self.read_chunked(amt, decode_content=decode_content): yield line else: while not is_fp_closed(self._fp): data = self.read(amt=amt, decode_content=decode_content) if data: yield data @classmethod def from_httplib(ResponseCls, r, **response_kw): """ Given an :class:`httplib.HTTPResponse` instance ``r``, return a corresponding :class:`urllib3.response.HTTPResponse` object. Remaining parameters are passed to the HTTPResponse constructor, along with ``original_response=r``. """ headers = r.msg if not isinstance(headers, HTTPHeaderDict): if PY3: # Python 3 headers = HTTPHeaderDict(headers.items()) else: # Python 2 headers = HTTPHeaderDict.from_httplib(headers) # HTTPResponse objects in Python 3 don't have a .strict attribute strict = getattr(r, 'strict', 0) resp = ResponseCls(body=r, headers=headers, status=r.status, version=r.version, reason=r.reason, strict=strict, original_response=r, **response_kw) return resp # Backwards-compatibility methods for httplib.HTTPResponse def getheaders(self): return self.headers def getheader(self, name, default=None): return self.headers.get(name, default) # Overrides from io.IOBase def close(self): if not self.closed: self._fp.close() @property def closed(self): if self._fp is None: return True elif hasattr(self._fp, 'closed'): return self._fp.closed elif hasattr(self._fp, 'isclosed'): # Python 2 return self._fp.isclosed() else: return True def fileno(self): if self._fp is None: raise IOError("HTTPResponse has no file to get a fileno from") elif hasattr(self._fp, "fileno"): return self._fp.fileno() else: raise IOError("The file-like object this HTTPResponse is wrapped " "around has no file descriptor") def flush(self): if self._fp is not None and hasattr(self._fp, 'flush'): return self._fp.flush() def readable(self): # This method is required for `io` module compatibility. return True def readinto(self, b): # This method is required for `io` module compatibility. temp = self.read(len(b)) if len(temp) == 0: return 0 else: b[:len(temp)] = temp return len(temp) def _update_chunk_length(self): # First, we'll figure out length of a chunk and then # we'll try to read it from socket. if self.chunk_left is not None: return line = self._fp.fp.readline() line = line.split(b';', 1)[0] try: self.chunk_left = int(line, 16) except ValueError: # Invalid chunked protocol response, abort. self.close() raise httplib.IncompleteRead(line) def _handle_chunk(self, amt): returned_chunk = None if amt is None: chunk = self._fp._safe_read(self.chunk_left) returned_chunk = chunk self._fp._safe_read(2) # Toss the CRLF at the end of the chunk. self.chunk_left = None elif amt < self.chunk_left: value = self._fp._safe_read(amt) self.chunk_left = self.chunk_left - amt returned_chunk = value elif amt == self.chunk_left: value = self._fp._safe_read(amt) self._fp._safe_read(2) # Toss the CRLF at the end of the chunk. self.chunk_left = None returned_chunk = value else: # amt > self.chunk_left returned_chunk = self._fp._safe_read(self.chunk_left) self._fp._safe_read(2) # Toss the CRLF at the end of the chunk. self.chunk_left = None return returned_chunk def read_chunked(self, amt=None, decode_content=None): """ Similar to :meth:`HTTPResponse.read`, but with an additional parameter: ``decode_content``. :param decode_content: If True, will attempt to decode the body based on the 'content-encoding' header. """ self._init_decoder() # FIXME: Rewrite this method and make it a class with a better structured logic. if not self.chunked: raise ResponseNotChunked("Response is not chunked. " "Header 'transfer-encoding: chunked' is missing.") # Don't bother reading the body of a HEAD request. if self._original_response and is_response_to_head(self._original_response): self._original_response.close() return with self._error_catcher(): while True: self._update_chunk_length() if self.chunk_left == 0: break chunk = self._handle_chunk(amt) yield self._decode(chunk, decode_content=decode_content, flush_decoder=True) # Chunk content ends with \r\n: discard it. while True: line = self._fp.fp.readline() if not line: # Some sites may not end with '\r\n'. break if line == b'\r\n': break # We read everything; close the "file". if self._original_response: self._original_response.close()
mit
nikolas/lettuce
tests/integration/lib/Django-1.3/tests/regressiontests/forms/models.py
89
2477
# -*- coding: utf-8 -*- import datetime import tempfile from django.db import models from django.core.files.storage import FileSystemStorage temp_storage_location = tempfile.mkdtemp() temp_storage = FileSystemStorage(location=temp_storage_location) class BoundaryModel(models.Model): positive_integer = models.PositiveIntegerField(null=True, blank=True) callable_default_value = 0 def callable_default(): global callable_default_value callable_default_value = callable_default_value + 1 return callable_default_value class Defaults(models.Model): name = models.CharField(max_length=255, default='class default value') def_date = models.DateField(default = datetime.date(1980, 1, 1)) value = models.IntegerField(default=42) callable_default = models.IntegerField(default=callable_default) class ChoiceModel(models.Model): """For ModelChoiceField and ModelMultipleChoiceField tests.""" name = models.CharField(max_length=10) class ChoiceOptionModel(models.Model): """Destination for ChoiceFieldModel's ForeignKey. Can't reuse ChoiceModel because error_message tests require that it have no instances.""" name = models.CharField(max_length=10) class Meta: ordering = ('name',) def __unicode__(self): return u'ChoiceOption %d' % self.pk class ChoiceFieldModel(models.Model): """Model with ForeignKey to another model, for testing ModelForm generation with ModelChoiceField.""" choice = models.ForeignKey(ChoiceOptionModel, blank=False, default=lambda: ChoiceOptionModel.objects.get(name='default')) choice_int = models.ForeignKey(ChoiceOptionModel, blank=False, related_name='choice_int', default=lambda: 1) multi_choice = models.ManyToManyField(ChoiceOptionModel, blank=False, related_name='multi_choice', default=lambda: ChoiceOptionModel.objects.filter(name='default')) multi_choice_int = models.ManyToManyField(ChoiceOptionModel, blank=False, related_name='multi_choice_int', default=lambda: [1]) class FileModel(models.Model): file = models.FileField(storage=temp_storage, upload_to='tests') class Group(models.Model): name = models.CharField(max_length=10) def __unicode__(self): return u'%s' % self.name class Cheese(models.Model): name = models.CharField(max_length=100)
gpl-3.0
gnieboer/tensorflow
tensorflow/contrib/learn/python/learn/preprocessing/__init__.py
138
1071
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Preprocessing tools useful for building models.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # pylint: disable=wildcard-import from tensorflow.contrib.learn.python.learn.preprocessing.categorical import * from tensorflow.contrib.learn.python.learn.preprocessing.text import * # pylint: enable=wildcard-import
apache-2.0
cnbeining/ABPlayerHTML5-Py--nix
httpd.py
3
6069
#!/usr/bin/env python #coding=utf-8 """ Author: Xia Kai <[email protected]/[email protected]> Filename: httpd.py Type: httpd that support resume. Last modified: 2011-06-27 17:38 Description: """ import os import socket import sys from SocketServer import ThreadingMixIn from random import randint from BaseHTTPServer import HTTPServer from SimpleHTTPServer import SimpleHTTPRequestHandler import urllib2 import StringIO import binascii import urllib import atexit class NotracebackServer(ThreadingMixIn, HTTPServer): """ could make this a mixin, but decide to keep it simple for a simple script. """ def handle_error(self, *args): """override default function to disable traceback.""" pass class PartialContentHandler(SimpleHTTPRequestHandler): def mycopy(self, f): """ This would do the actual file tranfer. if client terminated transfer, we would log it. """ try: self.copyfile(f, self.wfile) self.log_message('"%s" %s', self.requestline, "req finished.") except socket.error: self.log_message('"%s" %s', self.requestline, "req terminated.") finally: f.close() return None def do_GET(self): """Serve a GET request.""" #print('PATH:' + self.path) if self.path.startswith('/__proxy__/'): #Reverse proxy this #print() url = self.path[11:] print(url) #request = urllib2.Request(url) #response = urllib2.urlopen(request) #print(response.code) #self.send_response(response.code) response = response_copy = urllib.urlopen(url) response_code = response.code self.send_response(response.code) response_headers = response.headers [self.send_header(i, response_headers[i]) for i in response_headers] #print(self.headers['Origin']) try: self.send_header('Access-Control-Allow-Origin', self.headers['Origin']) except: self.send_header('Access-Control-Allow-Origin', '*') self.send_header('Access-Control-Allow-Credentials', 'true') #print(dict(self.response_headers)) #print('HERE!') self.end_headers() self.copyfile(response, self.wfile) return None f = self.send_head() if f: self.mycopy(f) def send_head(self): """ added support for partial content. i'm not surprised if http HEAD method would fail. """ path = self.translate_path(self.path) f = None if os.path.isdir(path): # oh, we do not support directory listing. self.send_error(404, "File not found") return None ctype = self.guess_type(path) try: f = open(path, 'rb') except IOError: self.send_error(404, "File not found") return None if self.headers.get("Range"): # partial content all treated here. # we do not support If-Range request. # range could only be of the form: # Range: bytes=9855420- start = self.headers.get("Range") try: pos = int(start[6:-1]) except ValueError: self.send_error(400, "bad range specified.") f.close() return None self.send_response(206) self.send_header("Content-type", ctype) self.send_header("Connection", "keep-alive") fs = os.fstat(f.fileno()) full = fs.st_size self.send_header("Content-Length", str(fs[6] - pos)) self.send_header("Last-Modified", self.date_time_string(fs.st_mtime)) start = start.replace("=", " ") self.send_header("Content-Range", "%s%s/%s" % (start, full-1, full)) self.end_headers() f.seek(pos) self.mycopy(f) return None self.send_response(200) self.send_header("Content-type", ctype) fs = os.fstat(f.fileno()) self.send_header("Content-Length", str(fs[6])) self.send_header("Last-Modified", self.date_time_string(fs.st_mtime)) self.end_headers() return f def main(port, server_class=NotracebackServer, handler_class=PartialContentHandler): os.setpgrp() server_address = ('', port) httpd = server_class(server_address, handler_class) #httpd.serve_forever() try: # Handle connections at the same time, so loading will not fail while 1: httpd.handle_request() except Exception: print "Finished" os.killpg(os.getpid(), signal.SIGTERM) os._exit(0) os._exit(0) #---------------------------------------------------------------------- def main2(port, folder): """""" sys.stdout.flush() print(folder) ip = '127.0.0.1' print("serving on: http://%s:%s/" % (ip, port)) print("===== local files =====") cwd = folder for f in os.listdir(cwd): if f == sys.argv[0] or f.startswith("."): continue fullpath = os.path.join(cwd, f) if os.path.isfile(fullpath): print("link: http://%s:%s/%s" % (ip, port, f)) print("===== start logging =====\n") main(port=port) os.killpg(os.getpid(), signal.SIGTERM) if __name__ == "__main__": port = 30000 #ip = socket.gethostbyname(socket.gethostname()) ip = '127.0.0.1' print("serving on: http://%s:%s/" % (ip, port)) print("===== local files =====") cwd = os.getcwd() for f in os.listdir(cwd): if f == sys.argv[0] or f.startswith("."): continue fullpath = os.path.join(cwd, f) if os.path.isfile(fullpath): print("link: http://%s:%s/%s" % (ip, port, f)) print("===== start logging =====\n") main(port=port)
mit
Peddle/hue
desktop/core/ext-py/urllib2_kerberos-0.1.6/urllib2_kerberos.py
44
5663
#!/usr/bin/python # urllib2 with kerberos proof of concept # Copyright 2008 Lime Nest LLC # Copyright 2008 Lime Spot LLC # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re import logging import sys import urllib2 as u2 import kerberos as k LOG = logging.getLogger("http_kerberos_auth_handler") class AbstractKerberosAuthHandler: """auth handler for urllib2 that does Kerberos HTTP Negotiate Authentication """ def negotiate_value(self, headers): """checks for "Negotiate" in proper auth header """ authreq = headers.get(self.auth_header, None) if authreq: rx = re.compile('(?:.*,)*\s*Negotiate\s*([^,]*),?', re.I) mo = rx.search(authreq) if mo: return mo.group(1) else: LOG.debug("regex failed on: %s" % authreq) else: LOG.debug("%s header not found" % self.auth_header) return None def __init__(self): self.retried = 0 self.context = None def generate_request_header(self, req, headers, neg_value): self.retried += 1 LOG.debug("retry count: %d" % self.retried) host = req.get_host() LOG.debug("req.get_host() returned %s" % host) # We need Python 2.4 compatibility #tail, sep, head = host.rpartition(':') #domain = tail or head host_parts = host.rsplit(':', 1) domain = host_parts[0] result, self.context = k.authGSSClientInit("HTTP@%s" % domain) if result < 1: LOG.warning("authGSSClientInit returned result %d" % result) return None LOG.debug("authGSSClientInit() succeeded") result = k.authGSSClientStep(self.context, neg_value) if result < 0: LOG.warning("authGSSClientStep returned result %d" % result) return None LOG.debug("authGSSClientStep() succeeded") response = k.authGSSClientResponse(self.context) LOG.debug("authGSSClientResponse() succeeded") return "Negotiate %s" % response def authenticate_server(self, headers): neg_value = self.negotiate_value(headers) if neg_value is None: LOG.critical("mutual auth failed. No negotiate header") return None result = k.authGSSClientStep(self.context, neg_value) if result < 1: # this is a critical security warning # should change to a raise --Tim LOG.critical("mutual auth failed: authGSSClientStep returned result %d" % result) pass def clean_context(self): if self.context is not None: LOG.debug("cleaning context") k.authGSSClientClean(self.context) self.context = None def http_error_auth_reqed(self, host, req, headers): neg_value = self.negotiate_value(headers) #Check for auth_header if neg_value is not None: if not self.retried > 0: return self.retry_http_kerberos_auth(req, headers, neg_value) else: return None else: self.retried = 0 def retry_http_kerberos_auth(self, req, headers, neg_value): try: try: neg_hdr = self.generate_request_header(req, headers, neg_value) if neg_hdr is None: LOG.debug("neg_hdr was None") return None req.add_unredirected_header(self.authz_header, neg_hdr) resp = self.parent.open(req) self.authenticate_server(resp.info()) return resp except k.GSSError, e: LOG.critical("GSSAPI Error: %s/%s" % (e[0][0], e[1][0])) return None finally: self.clean_context() self.retried = 0 class ProxyKerberosAuthHandler(u2.BaseHandler, AbstractKerberosAuthHandler): """Kerberos Negotiation handler for HTTP proxy auth """ authz_header = 'Proxy-Authorization' auth_header = 'proxy-authenticate' handler_order = 480 # before Digest auth def http_error_407(self, req, fp, code, msg, headers): LOG.debug("inside http_error_407") host = req.get_host() retry = self.http_error_auth_reqed(host, req, headers) self.retried = 0 return retry class HTTPKerberosAuthHandler(u2.BaseHandler, AbstractKerberosAuthHandler): """Kerberos Negotiation handler for HTTP auth """ authz_header = 'Authorization' auth_header = 'www-authenticate' handler_order = 480 # before Digest auth def http_error_401(self, req, fp, code, msg, headers): LOG.debug("inside http_error_401") host = req.get_host() retry = self.http_error_auth_reqed(host, req, headers) self.retried = 0 return retry def test(): LOG.setLevel(logging.DEBUG) LOG.info("starting test") opener = u2.build_opener() opener.add_handler(HTTPKerberosAuthHandler()) resp = opener.open(sys.argv[1]) print dir(resp), resp.info(), resp.code if __name__ == '__main__': test()
apache-2.0
AndresCidoncha/Python-Bot
telegram/user.py
5
2192
#!/usr/bin/env python # pylint: disable=C0103,W0622 # # A library that provides a Python interface to the Telegram Bot API # Copyright (C) 2015 Leandro Toledo de Souza <[email protected]> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser Public License for more details. # # You should have received a copy of the GNU Lesser Public License # along with this program. If not, see [http://www.gnu.org/licenses/]. """This module contains a object that represents a Telegram User""" from telegram import TelegramObject class User(TelegramObject): """This object represents a Telegram User. Attributes: id (int): first_name (str): last_name (str): username (str): type (str): Args: id (int): first_name (str): **kwargs: Arbitrary keyword arguments. Keyword Args: type (Optional[str]): last_name (Optional[str]): username (Optional[str]): """ def __init__(self, id, first_name, **kwargs): # Required self.id = int(id) self.first_name = first_name # Optionals self.type = kwargs.get('type', '') self.last_name = kwargs.get('last_name', '') self.username = kwargs.get('username', '') @property def name(self): """str: """ if self.username: return '@%s' % self.username if self.last_name: return '%s %s' % (self.first_name, self.last_name) return self.first_name @staticmethod def de_json(data): """ Args: data (str): Returns: telegram.User: """ if not data: return None return User(**data)
gpl-3.0
googyanas/GoogyMax-G4
Documentation/target/tcm_mod_builder.py
2358
40707
#!/usr/bin/python # The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD # # Copyright (c) 2010 Rising Tide Systems # Copyright (c) 2010 Linux-iSCSI.org # # Author: [email protected] # import os, sys import subprocess as sub import string import re import optparse tcm_dir = "" fabric_ops = [] fabric_mod_dir = "" fabric_mod_port = "" fabric_mod_init_port = "" def tcm_mod_err(msg): print msg sys.exit(1) def tcm_mod_create_module_subdir(fabric_mod_dir_var): if os.path.isdir(fabric_mod_dir_var) == True: return 1 print "Creating fabric_mod_dir: " + fabric_mod_dir_var ret = os.mkdir(fabric_mod_dir_var) if ret: tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var) return def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name): global fabric_mod_port global fabric_mod_init_port buf = "" f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h" print "Writing file: " + f p = open(f, 'w'); if not p: tcm_mod_err("Unable to open file: " + f) buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n" buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n" buf += "\n" buf += "struct " + fabric_mod_name + "_nacl {\n" buf += " /* Binary World Wide unique Port Name for FC Initiator Nport */\n" buf += " u64 nport_wwpn;\n" buf += " /* ASCII formatted WWPN for FC Initiator Nport */\n" buf += " char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n" buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n" buf += " struct se_node_acl se_node_acl;\n" buf += "};\n" buf += "\n" buf += "struct " + fabric_mod_name + "_tpg {\n" buf += " /* FC lport target portal group tag for TCM */\n" buf += " u16 lport_tpgt;\n" buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n" buf += " struct " + fabric_mod_name + "_lport *lport;\n" buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n" buf += " struct se_portal_group se_tpg;\n" buf += "};\n" buf += "\n" buf += "struct " + fabric_mod_name + "_lport {\n" buf += " /* SCSI protocol the lport is providing */\n" buf += " u8 lport_proto_id;\n" buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n" buf += " u64 lport_wwpn;\n" buf += " /* ASCII formatted WWPN for FC Target Lport */\n" buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n" buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n" buf += " struct se_wwn lport_wwn;\n" buf += "};\n" ret = p.write(buf) if ret: tcm_mod_err("Unable to write f: " + f) p.close() fabric_mod_port = "lport" fabric_mod_init_port = "nport" return def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name): global fabric_mod_port global fabric_mod_init_port buf = "" f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h" print "Writing file: " + f p = open(f, 'w'); if not p: tcm_mod_err("Unable to open file: " + f) buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n" buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n" buf += "\n" buf += "struct " + fabric_mod_name + "_nacl {\n" buf += " /* Binary World Wide unique Port Name for SAS Initiator port */\n" buf += " u64 iport_wwpn;\n" buf += " /* ASCII formatted WWPN for Sas Initiator port */\n" buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n" buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n" buf += " struct se_node_acl se_node_acl;\n" buf += "};\n\n" buf += "struct " + fabric_mod_name + "_tpg {\n" buf += " /* SAS port target portal group tag for TCM */\n" buf += " u16 tport_tpgt;\n" buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n" buf += " struct " + fabric_mod_name + "_tport *tport;\n" buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n" buf += " struct se_portal_group se_tpg;\n" buf += "};\n\n" buf += "struct " + fabric_mod_name + "_tport {\n" buf += " /* SCSI protocol the tport is providing */\n" buf += " u8 tport_proto_id;\n" buf += " /* Binary World Wide unique Port Name for SAS Target port */\n" buf += " u64 tport_wwpn;\n" buf += " /* ASCII formatted WWPN for SAS Target port */\n" buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n" buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n" buf += " struct se_wwn tport_wwn;\n" buf += "};\n" ret = p.write(buf) if ret: tcm_mod_err("Unable to write f: " + f) p.close() fabric_mod_port = "tport" fabric_mod_init_port = "iport" return def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name): global fabric_mod_port global fabric_mod_init_port buf = "" f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h" print "Writing file: " + f p = open(f, 'w'); if not p: tcm_mod_err("Unable to open file: " + f) buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n" buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n" buf += "\n" buf += "struct " + fabric_mod_name + "_nacl {\n" buf += " /* ASCII formatted InitiatorName */\n" buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n" buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n" buf += " struct se_node_acl se_node_acl;\n" buf += "};\n\n" buf += "struct " + fabric_mod_name + "_tpg {\n" buf += " /* iSCSI target portal group tag for TCM */\n" buf += " u16 tport_tpgt;\n" buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n" buf += " struct " + fabric_mod_name + "_tport *tport;\n" buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n" buf += " struct se_portal_group se_tpg;\n" buf += "};\n\n" buf += "struct " + fabric_mod_name + "_tport {\n" buf += " /* SCSI protocol the tport is providing */\n" buf += " u8 tport_proto_id;\n" buf += " /* ASCII formatted TargetName for IQN */\n" buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n" buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n" buf += " struct se_wwn tport_wwn;\n" buf += "};\n" ret = p.write(buf) if ret: tcm_mod_err("Unable to write f: " + f) p.close() fabric_mod_port = "tport" fabric_mod_init_port = "iport" return def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name): if proto_ident == "FC": tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name) elif proto_ident == "SAS": tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name) elif proto_ident == "iSCSI": tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name) else: print "Unsupported proto_ident: " + proto_ident sys.exit(1) return def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name): buf = "" f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c" print "Writing file: " + f p = open(f, 'w'); if not p: tcm_mod_err("Unable to open file: " + f) buf = "#include <linux/module.h>\n" buf += "#include <linux/moduleparam.h>\n" buf += "#include <linux/version.h>\n" buf += "#include <generated/utsrelease.h>\n" buf += "#include <linux/utsname.h>\n" buf += "#include <linux/init.h>\n" buf += "#include <linux/slab.h>\n" buf += "#include <linux/kthread.h>\n" buf += "#include <linux/types.h>\n" buf += "#include <linux/string.h>\n" buf += "#include <linux/configfs.h>\n" buf += "#include <linux/ctype.h>\n" buf += "#include <asm/unaligned.h>\n\n" buf += "#include <target/target_core_base.h>\n" buf += "#include <target/target_core_fabric.h>\n" buf += "#include <target/target_core_fabric_configfs.h>\n" buf += "#include <target/target_core_configfs.h>\n" buf += "#include <target/configfs_macros.h>\n\n" buf += "#include \"" + fabric_mod_name + "_base.h\"\n" buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n" buf += "/* Local pointer to allocated TCM configfs fabric module */\n" buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n" buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n" buf += " struct se_portal_group *se_tpg,\n" buf += " struct config_group *group,\n" buf += " const char *name)\n" buf += "{\n" buf += " struct se_node_acl *se_nacl, *se_nacl_new;\n" buf += " struct " + fabric_mod_name + "_nacl *nacl;\n" if proto_ident == "FC" or proto_ident == "SAS": buf += " u64 wwpn = 0;\n" buf += " u32 nexus_depth;\n\n" buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n" buf += " return ERR_PTR(-EINVAL); */\n" buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n" buf += " if (!se_nacl_new)\n" buf += " return ERR_PTR(-ENOMEM);\n" buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n" buf += " nexus_depth = 1;\n" buf += " /*\n" buf += " * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n" buf += " * when converting a NodeACL from demo mode -> explict\n" buf += " */\n" buf += " se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n" buf += " name, nexus_depth);\n" buf += " if (IS_ERR(se_nacl)) {\n" buf += " " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n" buf += " return se_nacl;\n" buf += " }\n" buf += " /*\n" buf += " * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n" buf += " */\n" buf += " nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n" if proto_ident == "FC" or proto_ident == "SAS": buf += " nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n" buf += " /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n" buf += " return se_nacl;\n" buf += "}\n\n" buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n" buf += "{\n" buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n" buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n" buf += " core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n" buf += " kfree(nacl);\n" buf += "}\n\n" buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n" buf += " struct se_wwn *wwn,\n" buf += " struct config_group *group,\n" buf += " const char *name)\n" buf += "{\n" buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n" buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n" buf += " struct " + fabric_mod_name + "_tpg *tpg;\n" buf += " unsigned long tpgt;\n" buf += " int ret;\n\n" buf += " if (strstr(name, \"tpgt_\") != name)\n" buf += " return ERR_PTR(-EINVAL);\n" buf += " if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n" buf += " return ERR_PTR(-EINVAL);\n\n" buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n" buf += " if (!tpg) {\n" buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n" buf += " return ERR_PTR(-ENOMEM);\n" buf += " }\n" buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n" buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n" buf += " ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n" buf += " &tpg->se_tpg, (void *)tpg,\n" buf += " TRANSPORT_TPG_TYPE_NORMAL);\n" buf += " if (ret < 0) {\n" buf += " kfree(tpg);\n" buf += " return NULL;\n" buf += " }\n" buf += " return &tpg->se_tpg;\n" buf += "}\n\n" buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n" buf += "{\n" buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n" buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n" buf += " core_tpg_deregister(se_tpg);\n" buf += " kfree(tpg);\n" buf += "}\n\n" buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n" buf += " struct target_fabric_configfs *tf,\n" buf += " struct config_group *group,\n" buf += " const char *name)\n" buf += "{\n" buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n" if proto_ident == "FC" or proto_ident == "SAS": buf += " u64 wwpn = 0;\n\n" buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n" buf += " return ERR_PTR(-EINVAL); */\n\n" buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n" buf += " if (!" + fabric_mod_port + ") {\n" buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n" buf += " return ERR_PTR(-ENOMEM);\n" buf += " }\n" if proto_ident == "FC" or proto_ident == "SAS": buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n" buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n" buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n" buf += "}\n\n" buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n" buf += "{\n" buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n" buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n" buf += " kfree(" + fabric_mod_port + ");\n" buf += "}\n\n" buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n" buf += " struct target_fabric_configfs *tf,\n" buf += " char *page)\n" buf += "{\n" buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n" buf += " \"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n" buf += " utsname()->machine);\n" buf += "}\n\n" buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n" buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n" buf += " &" + fabric_mod_name + "_wwn_version.attr,\n" buf += " NULL,\n" buf += "};\n\n" buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n" buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n" buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n" buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n" buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n" buf += " .tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n" buf += " .tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n" buf += " .tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n" buf += " .tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n" buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n" buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n" buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n" buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n" buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n" buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n" buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n" buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n" buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n" buf += " .close_session = " + fabric_mod_name + "_close_session,\n" buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n" buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n" buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n" buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n" buf += " .sess_get_initiator_sid = NULL,\n" buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n" buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n" buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n" buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n" buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n" buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n" buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n" buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n" buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n" buf += " /*\n" buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n" buf += " */\n" buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n" buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n" buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n" buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n" buf += " .fabric_post_link = NULL,\n" buf += " .fabric_pre_unlink = NULL,\n" buf += " .fabric_make_np = NULL,\n" buf += " .fabric_drop_np = NULL,\n" buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n" buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n" buf += "};\n\n" buf += "static int " + fabric_mod_name + "_register_configfs(void)\n" buf += "{\n" buf += " struct target_fabric_configfs *fabric;\n" buf += " int ret;\n\n" buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n" buf += " \" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n" buf += " utsname()->machine);\n" buf += " /*\n" buf += " * Register the top level struct config_item_type with TCM core\n" buf += " */\n" buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n" buf += " if (IS_ERR(fabric)) {\n" buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n" buf += " return PTR_ERR(fabric);\n" buf += " }\n" buf += " /*\n" buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n" buf += " */\n" buf += " fabric->tf_ops = " + fabric_mod_name + "_ops;\n" buf += " /*\n" buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n" buf += " */\n" buf += " TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n" buf += " TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n" buf += " TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n" buf += " TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n" buf += " TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n" buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n" buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n" buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n" buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n" buf += " /*\n" buf += " * Register the fabric for use within TCM\n" buf += " */\n" buf += " ret = target_fabric_configfs_register(fabric);\n" buf += " if (ret < 0) {\n" buf += " printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n" buf += " \" for " + fabric_mod_name.upper() + "\\n\");\n" buf += " return ret;\n" buf += " }\n" buf += " /*\n" buf += " * Setup our local pointer to *fabric\n" buf += " */\n" buf += " " + fabric_mod_name + "_fabric_configfs = fabric;\n" buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n" buf += " return 0;\n" buf += "};\n\n" buf += "static void __exit " + fabric_mod_name + "_deregister_configfs(void)\n" buf += "{\n" buf += " if (!" + fabric_mod_name + "_fabric_configfs)\n" buf += " return;\n\n" buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n" buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n" buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n" buf += "};\n\n" buf += "static int __init " + fabric_mod_name + "_init(void)\n" buf += "{\n" buf += " int ret;\n\n" buf += " ret = " + fabric_mod_name + "_register_configfs();\n" buf += " if (ret < 0)\n" buf += " return ret;\n\n" buf += " return 0;\n" buf += "};\n\n" buf += "static void __exit " + fabric_mod_name + "_exit(void)\n" buf += "{\n" buf += " " + fabric_mod_name + "_deregister_configfs();\n" buf += "};\n\n" buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n" buf += "MODULE_LICENSE(\"GPL\");\n" buf += "module_init(" + fabric_mod_name + "_init);\n" buf += "module_exit(" + fabric_mod_name + "_exit);\n" ret = p.write(buf) if ret: tcm_mod_err("Unable to write f: " + f) p.close() return def tcm_mod_scan_fabric_ops(tcm_dir): fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h" print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api process_fo = 0; p = open(fabric_ops_api, 'r') line = p.readline() while line: if process_fo == 0 and re.search('struct target_core_fabric_ops {', line): line = p.readline() continue if process_fo == 0: process_fo = 1; line = p.readline() # Search for function pointer if not re.search('\(\*', line): continue fabric_ops.append(line.rstrip()) continue line = p.readline() # Search for function pointer if not re.search('\(\*', line): continue fabric_ops.append(line.rstrip()) p.close() return def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name): buf = "" bufi = "" f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c" print "Writing file: " + f p = open(f, 'w') if not p: tcm_mod_err("Unable to open file: " + f) fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h" print "Writing file: " + fi pi = open(fi, 'w') if not pi: tcm_mod_err("Unable to open file: " + fi) buf = "#include <linux/slab.h>\n" buf += "#include <linux/kthread.h>\n" buf += "#include <linux/types.h>\n" buf += "#include <linux/list.h>\n" buf += "#include <linux/types.h>\n" buf += "#include <linux/string.h>\n" buf += "#include <linux/ctype.h>\n" buf += "#include <asm/unaligned.h>\n" buf += "#include <scsi/scsi.h>\n" buf += "#include <scsi/scsi_host.h>\n" buf += "#include <scsi/scsi_device.h>\n" buf += "#include <scsi/scsi_cmnd.h>\n" buf += "#include <scsi/libfc.h>\n\n" buf += "#include <target/target_core_base.h>\n" buf += "#include <target/target_core_fabric.h>\n" buf += "#include <target/target_core_configfs.h>\n\n" buf += "#include \"" + fabric_mod_name + "_base.h\"\n" buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n" buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n" buf += "{\n" buf += " return 1;\n" buf += "}\n\n" bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n" buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n" buf += "{\n" buf += " return 0;\n" buf += "}\n\n" bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n" total_fabric_ops = len(fabric_ops) i = 0 while i < total_fabric_ops: fo = fabric_ops[i] i += 1 # print "fabric_ops: " + fo if re.search('get_fabric_name', fo): buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n" buf += "{\n" buf += " return \"" + fabric_mod_name[4:] + "\";\n" buf += "}\n\n" bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n" continue if re.search('get_fabric_proto_ident', fo): buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n" buf += "{\n" buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n" buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n" buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n" buf += " u8 proto_id;\n\n" buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n" if proto_ident == "FC": buf += " case SCSI_PROTOCOL_FCP:\n" buf += " default:\n" buf += " proto_id = fc_get_fabric_proto_ident(se_tpg);\n" buf += " break;\n" elif proto_ident == "SAS": buf += " case SCSI_PROTOCOL_SAS:\n" buf += " default:\n" buf += " proto_id = sas_get_fabric_proto_ident(se_tpg);\n" buf += " break;\n" elif proto_ident == "iSCSI": buf += " case SCSI_PROTOCOL_ISCSI:\n" buf += " default:\n" buf += " proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n" buf += " break;\n" buf += " }\n\n" buf += " return proto_id;\n" buf += "}\n\n" bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n" if re.search('get_wwn', fo): buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n" buf += "{\n" buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n" buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n" buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n" buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n" buf += "}\n\n" bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n" if re.search('get_tag', fo): buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n" buf += "{\n" buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n" buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n" buf += " return tpg->" + fabric_mod_port + "_tpgt;\n" buf += "}\n\n" bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n" if re.search('get_default_depth', fo): buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n" buf += "{\n" buf += " return 1;\n" buf += "}\n\n" bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n" if re.search('get_pr_transport_id\)\(', fo): buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n" buf += " struct se_portal_group *se_tpg,\n" buf += " struct se_node_acl *se_nacl,\n" buf += " struct t10_pr_registration *pr_reg,\n" buf += " int *format_code,\n" buf += " unsigned char *buf)\n" buf += "{\n" buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n" buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n" buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n" buf += " int ret = 0;\n\n" buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n" if proto_ident == "FC": buf += " case SCSI_PROTOCOL_FCP:\n" buf += " default:\n" buf += " ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n" buf += " format_code, buf);\n" buf += " break;\n" elif proto_ident == "SAS": buf += " case SCSI_PROTOCOL_SAS:\n" buf += " default:\n" buf += " ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n" buf += " format_code, buf);\n" buf += " break;\n" elif proto_ident == "iSCSI": buf += " case SCSI_PROTOCOL_ISCSI:\n" buf += " default:\n" buf += " ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n" buf += " format_code, buf);\n" buf += " break;\n" buf += " }\n\n" buf += " return ret;\n" buf += "}\n\n" bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n" bufi += " struct se_node_acl *, struct t10_pr_registration *,\n" bufi += " int *, unsigned char *);\n" if re.search('get_pr_transport_id_len\)\(', fo): buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n" buf += " struct se_portal_group *se_tpg,\n" buf += " struct se_node_acl *se_nacl,\n" buf += " struct t10_pr_registration *pr_reg,\n" buf += " int *format_code)\n" buf += "{\n" buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n" buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n" buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n" buf += " int ret = 0;\n\n" buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n" if proto_ident == "FC": buf += " case SCSI_PROTOCOL_FCP:\n" buf += " default:\n" buf += " ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n" buf += " format_code);\n" buf += " break;\n" elif proto_ident == "SAS": buf += " case SCSI_PROTOCOL_SAS:\n" buf += " default:\n" buf += " ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n" buf += " format_code);\n" buf += " break;\n" elif proto_ident == "iSCSI": buf += " case SCSI_PROTOCOL_ISCSI:\n" buf += " default:\n" buf += " ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n" buf += " format_code);\n" buf += " break;\n" buf += " }\n\n" buf += " return ret;\n" buf += "}\n\n" bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n" bufi += " struct se_node_acl *, struct t10_pr_registration *,\n" bufi += " int *);\n" if re.search('parse_pr_out_transport_id\)\(', fo): buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n" buf += " struct se_portal_group *se_tpg,\n" buf += " const char *buf,\n" buf += " u32 *out_tid_len,\n" buf += " char **port_nexus_ptr)\n" buf += "{\n" buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n" buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n" buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n" buf += " char *tid = NULL;\n\n" buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n" if proto_ident == "FC": buf += " case SCSI_PROTOCOL_FCP:\n" buf += " default:\n" buf += " tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n" buf += " port_nexus_ptr);\n" elif proto_ident == "SAS": buf += " case SCSI_PROTOCOL_SAS:\n" buf += " default:\n" buf += " tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n" buf += " port_nexus_ptr);\n" elif proto_ident == "iSCSI": buf += " case SCSI_PROTOCOL_ISCSI:\n" buf += " default:\n" buf += " tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n" buf += " port_nexus_ptr);\n" buf += " }\n\n" buf += " return tid;\n" buf += "}\n\n" bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n" bufi += " const char *, u32 *, char **);\n" if re.search('alloc_fabric_acl\)\(', fo): buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n" buf += "{\n" buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n" buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n" buf += " if (!nacl) {\n" buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_nacl\\n\");\n" buf += " return NULL;\n" buf += " }\n\n" buf += " return &nacl->se_node_acl;\n" buf += "}\n\n" bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n" if re.search('release_fabric_acl\)\(', fo): buf += "void " + fabric_mod_name + "_release_fabric_acl(\n" buf += " struct se_portal_group *se_tpg,\n" buf += " struct se_node_acl *se_nacl)\n" buf += "{\n" buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n" buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n" buf += " kfree(nacl);\n" buf += "}\n\n" bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n" bufi += " struct se_node_acl *);\n" if re.search('tpg_get_inst_index\)\(', fo): buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n" buf += "{\n" buf += " return 1;\n" buf += "}\n\n" bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n" if re.search('\*release_cmd\)\(', fo): buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n" buf += "{\n" buf += " return;\n" buf += "}\n\n" bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n" if re.search('shutdown_session\)\(', fo): buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n" buf += "{\n" buf += " return 0;\n" buf += "}\n\n" bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n" if re.search('close_session\)\(', fo): buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n" buf += "{\n" buf += " return;\n" buf += "}\n\n" bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n" if re.search('stop_session\)\(', fo): buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n" buf += "{\n" buf += " return;\n" buf += "}\n\n" bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n" if re.search('fall_back_to_erl0\)\(', fo): buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n" buf += "{\n" buf += " return;\n" buf += "}\n\n" bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n" if re.search('sess_logged_in\)\(', fo): buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n" buf += "{\n" buf += " return 0;\n" buf += "}\n\n" bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n" if re.search('sess_get_index\)\(', fo): buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n" buf += "{\n" buf += " return 0;\n" buf += "}\n\n" bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n" if re.search('write_pending\)\(', fo): buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n" buf += "{\n" buf += " return 0;\n" buf += "}\n\n" bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n" if re.search('write_pending_status\)\(', fo): buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n" buf += "{\n" buf += " return 0;\n" buf += "}\n\n" bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n" if re.search('set_default_node_attributes\)\(', fo): buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n" buf += "{\n" buf += " return;\n" buf += "}\n\n" bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n" if re.search('get_task_tag\)\(', fo): buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n" buf += "{\n" buf += " return 0;\n" buf += "}\n\n" bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n" if re.search('get_cmd_state\)\(', fo): buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n" buf += "{\n" buf += " return 0;\n" buf += "}\n\n" bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n" if re.search('queue_data_in\)\(', fo): buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n" buf += "{\n" buf += " return 0;\n" buf += "}\n\n" bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n" if re.search('queue_status\)\(', fo): buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n" buf += "{\n" buf += " return 0;\n" buf += "}\n\n" bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n" if re.search('queue_tm_rsp\)\(', fo): buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n" buf += "{\n" buf += " return 0;\n" buf += "}\n\n" bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n" if re.search('is_state_remove\)\(', fo): buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n" buf += "{\n" buf += " return 0;\n" buf += "}\n\n" bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n" ret = p.write(buf) if ret: tcm_mod_err("Unable to write f: " + f) p.close() ret = pi.write(bufi) if ret: tcm_mod_err("Unable to write fi: " + fi) pi.close() return def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name): buf = "" f = fabric_mod_dir_var + "/Makefile" print "Writing file: " + f p = open(f, 'w') if not p: tcm_mod_err("Unable to open file: " + f) buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n" buf += " " + fabric_mod_name + "_configfs.o\n" buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n" ret = p.write(buf) if ret: tcm_mod_err("Unable to write f: " + f) p.close() return def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name): buf = "" f = fabric_mod_dir_var + "/Kconfig" print "Writing file: " + f p = open(f, 'w') if not p: tcm_mod_err("Unable to open file: " + f) buf = "config " + fabric_mod_name.upper() + "\n" buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n" buf += " depends on TARGET_CORE && CONFIGFS_FS\n" buf += " default n\n" buf += " ---help---\n" buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n" ret = p.write(buf) if ret: tcm_mod_err("Unable to write f: " + f) p.close() return def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name): buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n" kbuild = tcm_dir + "/drivers/target/Makefile" f = open(kbuild, 'a') f.write(buf) f.close() return def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name): buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n" kconfig = tcm_dir + "/drivers/target/Kconfig" f = open(kconfig, 'a') f.write(buf) f.close() return def main(modname, proto_ident): # proto_ident = "FC" # proto_ident = "SAS" # proto_ident = "iSCSI" tcm_dir = os.getcwd(); tcm_dir += "/../../" print "tcm_dir: " + tcm_dir fabric_mod_name = modname fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name print "Set fabric_mod_name: " + fabric_mod_name print "Set fabric_mod_dir: " + fabric_mod_dir print "Using proto_ident: " + proto_ident if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI": print "Unsupported proto_ident: " + proto_ident sys.exit(1) ret = tcm_mod_create_module_subdir(fabric_mod_dir) if ret: print "tcm_mod_create_module_subdir() failed because module already exists!" sys.exit(1) tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name) tcm_mod_scan_fabric_ops(tcm_dir) tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name) tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name) tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name) tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name) input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ") if input == "yes" or input == "y": tcm_mod_add_kbuild(tcm_dir, fabric_mod_name) input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ") if input == "yes" or input == "y": tcm_mod_add_kconfig(tcm_dir, fabric_mod_name) return parser = optparse.OptionParser() parser.add_option('-m', '--modulename', help='Module name', dest='modname', action='store', nargs=1, type='string') parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident', action='store', nargs=1, type='string') (opts, args) = parser.parse_args() mandatories = ['modname', 'protoident'] for m in mandatories: if not opts.__dict__[m]: print "mandatory option is missing\n" parser.print_help() exit(-1) if __name__ == "__main__": main(str(opts.modname), opts.protoident)
gpl-2.0
nacl-webkit/chrome_deps
third_party/mesa/MesaLib/src/gallium/docs/source/exts/formatting.py
52
1120
# formatting.py # Sphinx extension providing formatting for Gallium-specific data # (c) Corbin Simpson 2010 # Public domain to the extent permitted; contact author for special licensing import docutils.nodes import sphinx.addnodes def parse_envvar(env, sig, signode): envvar, t, default = sig.split(" ", 2) envvar = envvar.strip().upper() t = " Type: %s" % t.strip(" <>").lower() default = " Default: %s" % default.strip(" ()") signode += sphinx.addnodes.desc_name(envvar, envvar) signode += sphinx.addnodes.desc_type(t, t) signode += sphinx.addnodes.desc_annotation(default, default) return envvar def parse_opcode(env, sig, signode): opcode, desc = sig.split("-", 1) opcode = opcode.strip().upper() desc = " (%s)" % desc.strip() signode += sphinx.addnodes.desc_name(opcode, opcode) signode += sphinx.addnodes.desc_annotation(desc, desc) return opcode def setup(app): app.add_description_unit("envvar", "envvar", "%s (environment variable)", parse_envvar) app.add_description_unit("opcode", "opcode", "%s (TGSI opcode)", parse_opcode)
bsd-3-clause
40223117cda/w16cdaa
static/Brython3.1.0-20150301-090019/Lib/ui/widget.py
706
1774
import random from browser import doc def getMousePosition(e): if e is None: e=win.event if e.pageX or e.pageY: return {'x': e.pageX, 'y': e.pageY} if e.clientX or e.clientY: _posx=e.clientX + doc.body.scrollLeft + doc.documentElement.scrollLeft; _posy=e.clientY + doc.body.scrollTop + doc.documentElement.scrollTop; return {'x': _posx, 'y': _posy} return {'x': 0, 'y': 0} class Widget: def __init__(self, element, type, id=None): self._element=element if id is None: self._element.id='%s_%s' % (type, int(100000*random.random())) else: self._element.id=id def get_id(self): return self._element.id def attach(self, element_id): """ append this DOM component to DOM element element_id""" #document[element_id] <= self._element #this doesn't work :( #doc is actually the global 'doc' not the one we imported from browser :( doc[element_id] <= self._element def show(self): self._element.display='block' def hide(self): self._element.display='none' class DraggableWidget(Widget): def __init__(self, element, type, id=None): Widget.__init__(self, element, type, id) def drag(e): self._element.style.top='%spx' % (e.clientY - self._deltaY) self._element.style.left='%spx' % (e.clientX - self._deltaX) def mouseDown(e): self._element.style.position='absolute' self._deltaX=e.clientX - self._element.offsetLeft self._deltaY=e.clientY - self._element.offsetTop doc.bind('mousemove', drag) def mouseUp(e): doc.unbind('mousemove') self._element.bind('mousedown', mouseDown) self._element.bind('mouseup', mouseUp)
gpl-3.0
knifenomad/django
django/contrib/postgres/forms/array.py
258
6743
import copy from django import forms from django.contrib.postgres.validators import ( ArrayMaxLengthValidator, ArrayMinLengthValidator, ) from django.core.exceptions import ValidationError from django.utils import six from django.utils.safestring import mark_safe from django.utils.translation import string_concat, ugettext_lazy as _ class SimpleArrayField(forms.CharField): default_error_messages = { 'item_invalid': _('Item %(nth)s in the array did not validate: '), } def __init__(self, base_field, delimiter=',', max_length=None, min_length=None, *args, **kwargs): self.base_field = base_field self.delimiter = delimiter super(SimpleArrayField, self).__init__(*args, **kwargs) if min_length is not None: self.min_length = min_length self.validators.append(ArrayMinLengthValidator(int(min_length))) if max_length is not None: self.max_length = max_length self.validators.append(ArrayMaxLengthValidator(int(max_length))) def prepare_value(self, value): if isinstance(value, list): return self.delimiter.join(six.text_type(self.base_field.prepare_value(v)) for v in value) return value def to_python(self, value): if value: items = value.split(self.delimiter) else: items = [] errors = [] values = [] for i, item in enumerate(items): try: values.append(self.base_field.to_python(item)) except ValidationError as e: for error in e.error_list: errors.append(ValidationError( string_concat(self.error_messages['item_invalid'], error.message), code='item_invalid', params={'nth': i}, )) if errors: raise ValidationError(errors) return values def validate(self, value): super(SimpleArrayField, self).validate(value) errors = [] for i, item in enumerate(value): try: self.base_field.validate(item) except ValidationError as e: for error in e.error_list: errors.append(ValidationError( string_concat(self.error_messages['item_invalid'], error.message), code='item_invalid', params={'nth': i}, )) if errors: raise ValidationError(errors) def run_validators(self, value): super(SimpleArrayField, self).run_validators(value) errors = [] for i, item in enumerate(value): try: self.base_field.run_validators(item) except ValidationError as e: for error in e.error_list: errors.append(ValidationError( string_concat(self.error_messages['item_invalid'], error.message), code='item_invalid', params={'nth': i}, )) if errors: raise ValidationError(errors) class SplitArrayWidget(forms.Widget): def __init__(self, widget, size, **kwargs): self.widget = widget() if isinstance(widget, type) else widget self.size = size super(SplitArrayWidget, self).__init__(**kwargs) @property def is_hidden(self): return self.widget.is_hidden def value_from_datadict(self, data, files, name): return [self.widget.value_from_datadict(data, files, '%s_%s' % (name, index)) for index in range(self.size)] def id_for_label(self, id_): # See the comment for RadioSelect.id_for_label() if id_: id_ += '_0' return id_ def render(self, name, value, attrs=None): if self.is_localized: self.widget.is_localized = self.is_localized value = value or [] output = [] final_attrs = self.build_attrs(attrs) id_ = final_attrs.get('id') for i in range(max(len(value), self.size)): try: widget_value = value[i] except IndexError: widget_value = None if id_: final_attrs = dict(final_attrs, id='%s_%s' % (id_, i)) output.append(self.widget.render(name + '_%s' % i, widget_value, final_attrs)) return mark_safe(self.format_output(output)) def format_output(self, rendered_widgets): return ''.join(rendered_widgets) @property def media(self): return self.widget.media def __deepcopy__(self, memo): obj = super(SplitArrayWidget, self).__deepcopy__(memo) obj.widget = copy.deepcopy(self.widget) return obj @property def needs_multipart_form(self): return self.widget.needs_multipart_form class SplitArrayField(forms.Field): default_error_messages = { 'item_invalid': _('Item %(nth)s in the array did not validate: '), } def __init__(self, base_field, size, remove_trailing_nulls=False, **kwargs): self.base_field = base_field self.size = size self.remove_trailing_nulls = remove_trailing_nulls widget = SplitArrayWidget(widget=base_field.widget, size=size) kwargs.setdefault('widget', widget) super(SplitArrayField, self).__init__(**kwargs) def clean(self, value): cleaned_data = [] errors = [] if not any(value) and self.required: raise ValidationError(self.error_messages['required']) max_size = max(self.size, len(value)) for i in range(max_size): item = value[i] try: cleaned_data.append(self.base_field.clean(item)) errors.append(None) except ValidationError as error: errors.append(ValidationError( string_concat(self.error_messages['item_invalid'], error.message), code='item_invalid', params={'nth': i}, )) cleaned_data.append(None) if self.remove_trailing_nulls: null_index = None for i, value in reversed(list(enumerate(cleaned_data))): if value in self.base_field.empty_values: null_index = i else: break if null_index: cleaned_data = cleaned_data[:null_index] errors = errors[:null_index] errors = list(filter(None, errors)) if errors: raise ValidationError(errors) return cleaned_data
bsd-3-clause
disigma/depot_tools
third_party/oauth2client/tools.py
171
8344
# Copyright (C) 2013 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Command-line tools for authenticating via OAuth 2.0 Do the OAuth 2.0 Web Server dance for a command line application. Stores the generated credentials in a common file that is used by other example apps in the same directory. """ __author__ = '[email protected] (Joe Gregorio)' __all__ = ['argparser', 'run_flow', 'run', 'message_if_missing'] import BaseHTTPServer import argparse import httplib2 import logging import os import socket import sys import webbrowser from oauth2client import client from oauth2client import file from oauth2client import util try: from urlparse import parse_qsl except ImportError: from cgi import parse_qsl _CLIENT_SECRETS_MESSAGE = """WARNING: Please configure OAuth 2.0 To make this sample run you will need to populate the client_secrets.json file found at: %s with information from the APIs Console <https://code.google.com/apis/console>. """ # run_parser is an ArgumentParser that contains command-line options expected # by tools.run(). Pass it in as part of the 'parents' argument to your own # ArgumentParser. argparser = argparse.ArgumentParser(add_help=False) argparser.add_argument('--auth_host_name', default='localhost', help='Hostname when running a local web server.') argparser.add_argument('--noauth_local_webserver', action='store_true', default=False, help='Do not run a local web server.') argparser.add_argument('--auth_host_port', default=[8080, 8090], type=int, nargs='*', help='Port web server should listen on.') argparser.add_argument('--logging_level', default='ERROR', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'], help='Set the logging level of detail.') class ClientRedirectServer(BaseHTTPServer.HTTPServer): """A server to handle OAuth 2.0 redirects back to localhost. Waits for a single request and parses the query parameters into query_params and then stops serving. """ query_params = {} class ClientRedirectHandler(BaseHTTPServer.BaseHTTPRequestHandler): """A handler for OAuth 2.0 redirects back to localhost. Waits for a single request and parses the query parameters into the servers query_params and then stops serving. """ def do_GET(s): """Handle a GET request. Parses the query parameters and prints a message if the flow has completed. Note that we can't detect if an error occurred. """ s.send_response(200) s.send_header("Content-type", "text/html") s.end_headers() query = s.path.split('?', 1)[-1] query = dict(parse_qsl(query)) s.server.query_params = query s.wfile.write("<html><head><title>Authentication Status</title></head>") s.wfile.write("<body><p>The authentication flow has completed.</p>") s.wfile.write("</body></html>") def log_message(self, format, *args): """Do not log messages to stdout while running as command line program.""" pass @util.positional(3) def run_flow(flow, storage, flags, http=None): """Core code for a command-line application. The run() function is called from your application and runs through all the steps to obtain credentials. It takes a Flow argument and attempts to open an authorization server page in the user's default web browser. The server asks the user to grant your application access to the user's data. If the user grants access, the run() function returns new credentials. The new credentials are also stored in the Storage argument, which updates the file associated with the Storage object. It presumes it is run from a command-line application and supports the following flags: --auth_host_name: Host name to use when running a local web server to handle redirects during OAuth authorization. (default: 'localhost') --auth_host_port: Port to use when running a local web server to handle redirects during OAuth authorization.; repeat this option to specify a list of values (default: '[8080, 8090]') (an integer) --[no]auth_local_webserver: Run a local web server to handle redirects during OAuth authorization. (default: 'true') The tools module defines an ArgumentParser the already contains the flag definitions that run() requires. You can pass that ArgumentParser to your ArgumentParser constructor: parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter, parents=[tools.run_parser]) flags = parser.parse_args(argv) Args: flow: Flow, an OAuth 2.0 Flow to step through. storage: Storage, a Storage to store the credential in. flags: argparse.ArgumentParser, the command-line flags. http: An instance of httplib2.Http.request or something that acts like it. Returns: Credentials, the obtained credential. """ logging.getLogger().setLevel(getattr(logging, flags.logging_level)) if not flags.noauth_local_webserver: success = False port_number = 0 for port in flags.auth_host_port: port_number = port try: httpd = ClientRedirectServer((flags.auth_host_name, port), ClientRedirectHandler) except socket.error, e: pass else: success = True break flags.noauth_local_webserver = not success if not success: print 'Failed to start a local webserver listening on either port 8080' print 'or port 9090. Please check your firewall settings and locally' print 'running programs that may be blocking or using those ports.' print print 'Falling back to --noauth_local_webserver and continuing with', print 'authorization.' print if not flags.noauth_local_webserver: oauth_callback = 'http://%s:%s/' % (flags.auth_host_name, port_number) else: oauth_callback = client.OOB_CALLBACK_URN flow.redirect_uri = oauth_callback authorize_url = flow.step1_get_authorize_url() if not flags.noauth_local_webserver: webbrowser.open(authorize_url, new=1, autoraise=True) print 'Your browser has been opened to visit:' print print ' ' + authorize_url print print 'If your browser is on a different machine then exit and re-run this' print 'application with the command-line parameter ' print print ' --noauth_local_webserver' print else: print 'Go to the following link in your browser:' print print ' ' + authorize_url print code = None if not flags.noauth_local_webserver: httpd.handle_request() if 'error' in httpd.query_params: sys.exit('Authentication request was rejected.') if 'code' in httpd.query_params: code = httpd.query_params['code'] else: print 'Failed to find "code" in the query parameters of the redirect.' sys.exit('Try running with --noauth_local_webserver.') else: code = raw_input('Enter verification code: ').strip() try: credential = flow.step2_exchange(code, http=http) except client.FlowExchangeError, e: sys.exit('Authentication has failed: %s' % e) storage.put(credential) credential.set_store(storage) print 'Authentication successful.' return credential def message_if_missing(filename): """Helpful message to display if the CLIENT_SECRETS file is missing.""" return _CLIENT_SECRETS_MESSAGE % filename try: from old_run import run from old_run import FLAGS except ImportError: def run(*args, **kwargs): raise NotImplementedError( 'The gflags library must be installed to use tools.run(). ' 'Please install gflags or preferrably switch to using ' 'tools.run_flow().')
bsd-3-clause
russelmahmud/mess-account
django/contrib/localflavor/uy/forms.py
310
2083
# -*- coding: utf-8 -*- """ UY-specific form helpers. """ import re from django.core.validators import EMPTY_VALUES from django.forms.fields import Select, RegexField from django.forms import ValidationError from django.utils.translation import ugettext_lazy as _ from django.contrib.localflavor.uy.util import get_validation_digit class UYDepartamentSelect(Select): """ A Select widget that uses a list of Uruguayan departaments as its choices. """ def __init__(self, attrs=None): from uy_departaments import DEPARTAMENT_CHOICES super(UYDepartamentSelect, self).__init__(attrs, choices=DEPARTAMENT_CHOICES) class UYCIField(RegexField): """ A field that validates Uruguayan 'Cedula de identidad' (CI) numbers. """ default_error_messages = { 'invalid': _("Enter a valid CI number in X.XXX.XXX-X," "XXXXXXX-X or XXXXXXXX format."), 'invalid_validation_digit': _("Enter a valid CI number."), } def __init__(self, *args, **kwargs): super(UYCIField, self).__init__(r'(?P<num>(\d{6,7}|(\d\.)?\d{3}\.\d{3}))-?(?P<val>\d)', *args, **kwargs) def clean(self, value): """ Validates format and validation digit. The official format is [X.]XXX.XXX-X but usually dots and/or slash are omitted so, when validating, those characters are ignored if found in the correct place. The three typically used formats are supported: [X]XXXXXXX, [X]XXXXXX-X and [X.]XXX.XXX-X. """ value = super(UYCIField, self).clean(value) if value in EMPTY_VALUES: return u'' match = self.regex.match(value) if not match: raise ValidationError(self.error_messages['invalid']) number = int(match.group('num').replace('.', '')) validation_digit = int(match.group('val')) if not validation_digit == get_validation_digit(number): raise ValidationError(self.error_messages['invalid_validation_digit']) return value
bsd-3-clause
hiteshagrawal/python
battleship3.py
1
1558
#!/usr/bin/python import random board = [] size = 0 def board_print(): global board for row in board: print " ".join(row) def new_game(): global board,size board = [] size = input("Enter board size:") ## Input will automatically convert it to integer for row in range(size): board.append(["0"] * size ) board_print() play(3) def play(turn): global board my_ship_row = random.randrange(size) my_ship_col = random.randrange(size) print "My ship position is row: %d , column: %d" %(my_ship_row,my_ship_col) while turn > 0: print "Remaining number of chances: %d"%(turn) guess_row = input("Please enter a row number:") guess_col = input("Please enter a col number:") if my_ship_row == guess_row and my_ship_col == guess_col: print "You busted my ship" board[guess_row][guess_col] = "X" board_print() print "You have WON, starting a new game" new_game() elif (guess_row + 1 ) > size or (guess_col + 1) > size: print "This is not even in the ocean, try again" continue elif board[guess_row][guess_col] != "-" : print "My ship is not at this location" board[guess_row][guess_col] = "-" board_print() turn -= 1 elif board[guess_row][guess_col] == "-" : print "You already guess this location, try again" board_print() continue else: print "Remaining number of chances: %d"%(turn) board[my_ship_row][my_ship_col] = "X" print "My Ship was at location \"X\"" board_print() print "You have lost, starting a new game" new_game() #board_print() new_game()
gpl-2.0
windyuuy/opera
chromium/src/third_party/WebKit/Tools/Scripts/webkitpy/common/system/user_unittest.py
124
7300
# Copyright (C) 2010 Research in Motion Ltd. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Research in Motion Ltd. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import unittest2 as unittest from webkitpy.common.system.outputcapture import OutputCapture from webkitpy.common.system.user import User class UserTest(unittest.TestCase): example_user_response = "example user response" def test_prompt_repeat(self): self.repeatsRemaining = 2 def mock_raw_input(message): self.repeatsRemaining -= 1 if not self.repeatsRemaining: return UserTest.example_user_response return None self.assertEqual(User.prompt("input", repeat=self.repeatsRemaining, raw_input=mock_raw_input), UserTest.example_user_response) def test_prompt_when_exceeded_repeats(self): self.repeatsRemaining = 2 def mock_raw_input(message): self.repeatsRemaining -= 1 return None self.assertEqual(User.prompt("input", repeat=self.repeatsRemaining, raw_input=mock_raw_input), None) def test_prompt_with_multiple_lists(self): def run_prompt_test(inputs, expected_result, can_choose_multiple=False): def mock_raw_input(message): return inputs.pop(0) output_capture = OutputCapture() actual_result = output_capture.assert_outputs( self, User.prompt_with_multiple_lists, args=["title", ["subtitle1", "subtitle2"], [["foo", "bar"], ["foobar", "barbaz", "foobaz"]]], kwargs={"can_choose_multiple": can_choose_multiple, "raw_input": mock_raw_input}, expected_stdout="title\n\nsubtitle1\n 1. foo\n 2. bar\n\nsubtitle2\n 3. foobar\n 4. barbaz\n 5. foobaz\n") self.assertEqual(actual_result, expected_result) self.assertEqual(len(inputs), 0) run_prompt_test(["1"], "foo") run_prompt_test(["badinput", "2"], "bar") run_prompt_test(["3"], "foobar") run_prompt_test(["4"], "barbaz") run_prompt_test(["5"], "foobaz") run_prompt_test(["1,2"], ["foo", "bar"], can_choose_multiple=True) run_prompt_test(["1-3"], ["foo", "bar", "foobar"], can_choose_multiple=True) run_prompt_test(["1-2,3"], ["foo", "bar", "foobar"], can_choose_multiple=True) run_prompt_test(["2-1,3"], ["foobar"], can_choose_multiple=True) run_prompt_test([" 1, 2 "], ["foo", "bar"], can_choose_multiple=True) run_prompt_test(["all"], ["foo", "bar", 'foobar', 'barbaz', 'foobaz'], can_choose_multiple=True) run_prompt_test([""], ["foo", "bar", 'foobar', 'barbaz', 'foobaz'], can_choose_multiple=True) run_prompt_test([" "], ["foo", "bar", 'foobar', 'barbaz', 'foobaz'], can_choose_multiple=True) run_prompt_test(["badinput", "all"], ["foo", "bar", 'foobar', 'barbaz', 'foobaz'], can_choose_multiple=True) def test_prompt_with_list(self): def run_prompt_test(inputs, expected_result, can_choose_multiple=False): def mock_raw_input(message): return inputs.pop(0) output_capture = OutputCapture() actual_result = output_capture.assert_outputs( self, User.prompt_with_list, args=["title", ["foo", "bar"]], kwargs={"can_choose_multiple": can_choose_multiple, "raw_input": mock_raw_input}, expected_stdout="title\n 1. foo\n 2. bar\n") self.assertEqual(actual_result, expected_result) self.assertEqual(len(inputs), 0) run_prompt_test(["1"], "foo") run_prompt_test(["badinput", "2"], "bar") run_prompt_test(["1,2"], ["foo", "bar"], can_choose_multiple=True) run_prompt_test([" 1, 2 "], ["foo", "bar"], can_choose_multiple=True) run_prompt_test(["all"], ["foo", "bar"], can_choose_multiple=True) run_prompt_test([""], ["foo", "bar"], can_choose_multiple=True) run_prompt_test([" "], ["foo", "bar"], can_choose_multiple=True) run_prompt_test(["badinput", "all"], ["foo", "bar"], can_choose_multiple=True) def test_confirm(self): test_cases = ( (("Continue? [Y/n]: ", True), (User.DEFAULT_YES, 'y')), (("Continue? [Y/n]: ", False), (User.DEFAULT_YES, 'n')), (("Continue? [Y/n]: ", True), (User.DEFAULT_YES, '')), (("Continue? [Y/n]: ", False), (User.DEFAULT_YES, 'q')), (("Continue? [y/N]: ", True), (User.DEFAULT_NO, 'y')), (("Continue? [y/N]: ", False), (User.DEFAULT_NO, 'n')), (("Continue? [y/N]: ", False), (User.DEFAULT_NO, '')), (("Continue? [y/N]: ", False), (User.DEFAULT_NO, 'q')), ) for test_case in test_cases: expected, inputs = test_case def mock_raw_input(message): self.assertEqual(expected[0], message) return inputs[1] result = User().confirm(default=inputs[0], raw_input=mock_raw_input) self.assertEqual(expected[1], result) def test_warn_if_application_is_xcode(self): output = OutputCapture() user = User() output.assert_outputs(self, user._warn_if_application_is_xcode, ["TextMate"]) output.assert_outputs(self, user._warn_if_application_is_xcode, ["/Applications/TextMate.app"]) output.assert_outputs(self, user._warn_if_application_is_xcode, ["XCode"]) # case sensitive matching xcode_warning = "Instead of using Xcode.app, consider using EDITOR=\"xed --wait\".\n" output.assert_outputs(self, user._warn_if_application_is_xcode, ["Xcode"], expected_stdout=xcode_warning) output.assert_outputs(self, user._warn_if_application_is_xcode, ["/Developer/Applications/Xcode.app"], expected_stdout=xcode_warning)
bsd-3-clause
commshare/testLiveSRS
trunk/objs/CherryPy-3.2.4/cherrypy/test/test_auth_basic.py
54
2853
# This file is part of CherryPy <http://www.cherrypy.org/> # -*- coding: utf-8 -*- # vim:ts=4:sw=4:expandtab:fileencoding=utf-8 import cherrypy from cherrypy._cpcompat import md5, ntob from cherrypy.lib import auth_basic from cherrypy.test import helper class BasicAuthTest(helper.CPWebCase): def setup_server(): class Root: def index(self): return "This is public." index.exposed = True class BasicProtected: def index(self): return "Hello %s, you've been authorized." % cherrypy.request.login index.exposed = True class BasicProtected2: def index(self): return "Hello %s, you've been authorized." % cherrypy.request.login index.exposed = True userpassdict = {'xuser' : 'xpassword'} userhashdict = {'xuser' : md5(ntob('xpassword')).hexdigest()} def checkpasshash(realm, user, password): p = userhashdict.get(user) return p and p == md5(ntob(password)).hexdigest() or False conf = {'/basic': {'tools.auth_basic.on': True, 'tools.auth_basic.realm': 'wonderland', 'tools.auth_basic.checkpassword': auth_basic.checkpassword_dict(userpassdict)}, '/basic2': {'tools.auth_basic.on': True, 'tools.auth_basic.realm': 'wonderland', 'tools.auth_basic.checkpassword': checkpasshash}, } root = Root() root.basic = BasicProtected() root.basic2 = BasicProtected2() cherrypy.tree.mount(root, config=conf) setup_server = staticmethod(setup_server) def testPublic(self): self.getPage("/") self.assertStatus('200 OK') self.assertHeader('Content-Type', 'text/html;charset=utf-8') self.assertBody('This is public.') def testBasic(self): self.getPage("/basic/") self.assertStatus(401) self.assertHeader('WWW-Authenticate', 'Basic realm="wonderland"') self.getPage('/basic/', [('Authorization', 'Basic eHVzZXI6eHBhc3N3b3JX')]) self.assertStatus(401) self.getPage('/basic/', [('Authorization', 'Basic eHVzZXI6eHBhc3N3b3Jk')]) self.assertStatus('200 OK') self.assertBody("Hello xuser, you've been authorized.") def testBasic2(self): self.getPage("/basic2/") self.assertStatus(401) self.assertHeader('WWW-Authenticate', 'Basic realm="wonderland"') self.getPage('/basic2/', [('Authorization', 'Basic eHVzZXI6eHBhc3N3b3JX')]) self.assertStatus(401) self.getPage('/basic2/', [('Authorization', 'Basic eHVzZXI6eHBhc3N3b3Jk')]) self.assertStatus('200 OK') self.assertBody("Hello xuser, you've been authorized.")
mit
chromium/chromium
third_party/blink/tools/blinkpy/web_tests/port/driver.py
1
30937
# Copyright (C) 2011 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the Google name nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import base64 import logging import re import shlex import six import time from blinkpy.common.system import path from blinkpy.common.system.profiler import ProfilerFactory _log = logging.getLogger(__name__) DRIVER_START_TIMEOUT_SECS = 30 def coalesce_repeated_switches(cmd): """Combines known repeated command line switches. Repetition of a switch notably happens when both per-test switches and the additional driver flags specify different --enable-features. For instance: --enable-features=X --enable-features=Y Conceptually, this indicates to enable features X and Y. However Chrome's command line parsing only applies the last seen switch, resulting in only feature Y being enabled. To solve this, transform it to: --enable-features=X,Y """ def parse_csv_switch(prefix, switch, values_set): """If |switch| starts with |prefix|, parses it as a comma-separated list of values and adds them all to |values_set|. Returns False if the switch was not a match for |prefix|.""" if not switch.startswith(prefix): return False values = switch[len(prefix):].split(',') for value in values: values_set.add(value) return True def add_csv_switch(prefix, values_set, result): if len(values_set) == 0: return sorted_values = sorted(list(values_set)) result.append('%s%s' % (prefix, ','.join(sorted_values))) result = [] ENABLE_FEATURES_FLAG = '--enable-features=' DISABLE_FEATURES_FLAG = '--disable-features=' enabled_set = set() disabled_set = set() for switch in cmd: if parse_csv_switch(ENABLE_FEATURES_FLAG, switch, enabled_set): continue if parse_csv_switch(DISABLE_FEATURES_FLAG, switch, disabled_set): continue result.append(switch) # Append any coalesced (comma separated) flags to the end. add_csv_switch(ENABLE_FEATURES_FLAG, enabled_set, result) add_csv_switch(DISABLE_FEATURES_FLAG, disabled_set, result) return result class DriverInput(object): def __init__(self, test_name, timeout, image_hash, args): self.test_name = test_name self.timeout = timeout # in ms self.image_hash = image_hash self.args = args class DriverOutput(object): """Groups information about a output from driver for easy passing and post-processing of data. """ def __init__(self, text, image, image_hash, audio, crash=False, test_time=0, measurements=None, timeout=False, error='', crashed_process_name='??', crashed_pid=None, crash_log=None, crash_site=None, leak=False, leak_log=None, pid=None, command=None): # FIXME: Args could be renamed to better clarify what they do. self.text = text self.image = image # May be empty-string if the test crashes. self.image_hash = image_hash self.image_diff = None # image_diff gets filled in after construction. self.audio = audio # Binary format is port-dependent. self.crash = crash self.crashed_process_name = crashed_process_name self.crashed_pid = crashed_pid self.crash_log = crash_log self.crash_site = crash_site self.leak = leak self.leak_log = leak_log self.test_time = test_time self.measurements = measurements self.timeout = timeout self.error = error # stderr output self.pid = pid self.command = command def has_stderr(self): return bool(self.error) class DeviceFailure(Exception): pass class Driver(object): """object for running test(s) using content_shell or other driver.""" def __init__(self, port, worker_number, no_timeout=False): """Initialize a Driver to subsequently run tests. Typically this routine will spawn content_shell in a config ready for subsequent input. port - reference back to the port object. worker_number - identifier for a particular worker/driver instance """ self.WPT_DIRS = port.WPT_DIRS self._port = port self._worker_number = worker_number self._no_timeout = no_timeout self._driver_tempdir = None # content_shell can report back subprocess crashes by printing # "#CRASHED - PROCESSNAME". Since those can happen at any time # and ServerProcess won't be aware of them (since the actual tool # didn't crash, just a subprocess) we record the crashed subprocess name here. self._crashed_process_name = None self._crashed_pid = None # content_shell can report back subprocesses that became unresponsive # This could mean they crashed. self._subprocess_was_unresponsive = False # content_shell can report back subprocess DOM-object leaks by printing # "#LEAK". This leak detection is enabled only when the flag # --enable-leak-detection is passed to content_shell. self._leaked = False self._leak_log = None # stderr reading is scoped on a per-test (not per-block) basis, so we store the accumulated # stderr output, as well as if we've seen #EOF on this driver instance. # FIXME: We should probably remove _read_first_block and _read_optional_image_block and # instead scope these locally in run_test. self.error_from_test = bytearray() self.err_seen_eof = False self._server_process = None self._current_cmd_line = None self._measurements = {} if self._port.get_option('profile'): profiler_name = self._port.get_option('profiler') self._profiler = ProfilerFactory.create_profiler( self._port.host, self._port._path_to_driver(), # pylint: disable=protected-access self._port.artifacts_directory(), profiler_name) else: self._profiler = None def __del__(self): self.stop() def run_test(self, driver_input): """Run a single test and return the results. Note that it is okay if a test times out or crashes. content_shell will be stopped when the test ends, and then restarted for the next test when this function is invoked again. As part of the restart, the state of Driver will be reset. Returns a DriverOutput object. """ start_time = time.time() stdin_deadline = start_time + int(driver_input.timeout) / 2000.0 self.start(driver_input.args, stdin_deadline) test_begin_time = time.time() self.error_from_test = bytearray() self.err_seen_eof = False test_command = self._command_from_driver_input(driver_input) server_process_command = self._server_process.cmd() deadline = test_begin_time + int(driver_input.timeout) / 1000.0 self._server_process.write(test_command.encode('utf8', 'replace')) # First block is either text or audio text, audio = self._read_first_block(deadline) # The second (optional) block is image data. image, actual_image_hash = self._read_optional_image_block(deadline) crashed = self.has_crashed() timed_out = self._server_process.timed_out pid = self._server_process.pid() leaked = self._leaked if not crashed: sanitizer = self._port.output_contains_sanitizer_messages( self.error_from_test) if sanitizer: self.error_from_test = 'OUTPUT CONTAINS "sanitizer",' + \ ' so we are treating this test as if it crashed, even though it did not.\n\n' + self.error_from_test crashed = True self._crashed_process_name = 'unknown process name' self._crashed_pid = 0 if crashed or timed_out or leaked: # We call stop() even if we crashed or timed out in order to get any remaining stdout/stderr output. # In the timeout case, we kill the hung process as well. # Add a delay to allow process to finish post-run hooks, such as dumping code coverage data. out, err = self._server_process.stop( self._port.get_option('driver_kill_timeout_secs')) if out: text += out if err: self.error_from_test += err self._server_process = None crash_log = None crash_site = None if crashed: self.error_from_test, crash_log, crash_site = self._get_crash_log( text, self.error_from_test, newer_than=start_time) # If we don't find a crash log use a placeholder error message instead. if not crash_log: pid_str = str( self._crashed_pid) if self._crashed_pid else 'unknown pid' crash_log = 'No crash log found for %s:%s.\n' % ( self._crashed_process_name, pid_str) # If we were unresponsive append a message informing there may not have been a crash. if self._subprocess_was_unresponsive: crash_log += 'Process failed to become responsive before timing out.\n' # Print stdout and stderr to the placeholder crash log; we want as much context as possible. if self.error_from_test: crash_log += '\nstdout:\n%s\nstderr:\n%s\n' % ( text, self.error_from_test) command = ("%s %s" % (" ".join(server_process_command), test_command)).encode( 'ascii', 'replace') if actual_image_hash: actual_image_hash = actual_image_hash.decode('utf8', 'replace') return DriverOutput(text, image, actual_image_hash, audio, crash=crashed, test_time=time.time() - test_begin_time, measurements=self._measurements, timeout=timed_out, error=self.error_from_test, crashed_process_name=self._crashed_process_name, crashed_pid=self._crashed_pid, crash_log=crash_log, crash_site=crash_site, leak=leaked, leak_log=self._leak_log, pid=pid, command=command) def _get_crash_log(self, stdout, stderr, newer_than): # pylint: disable=protected-access return self._port._get_crash_log(self._crashed_process_name, self._crashed_pid, stdout, stderr, newer_than) # FIXME: Seems this could just be inlined into callers. @classmethod def _command_wrapper(cls, wrapper_option): # Hook for injecting valgrind or other runtime instrumentation, # used by e.g. tools/valgrind/valgrind_tests.py. return shlex.split(wrapper_option) if wrapper_option else [] # The *_HOST_AND_PORTS tuples are (hostname, insecure_port, secure_port), # i.e. the information needed to create HTTP and HTTPS URLs. # TODO(burnik): Read from config or args. HTTP_DIR = 'http/tests/' HTTP_LOCAL_DIR = 'http/tests/local/' HTTP_HOST_AND_PORTS = ('127.0.0.1', 8000, 8443) WPT_HOST_AND_PORTS = ('web-platform.test', 8001, 8444) WPT_H2_PORT = 9000 def is_http_test(self, test_name): return (test_name.startswith(self.HTTP_DIR) and not test_name.startswith(self.HTTP_LOCAL_DIR)) def test_to_uri(self, test_name): """Convert a test name to a URI. Tests which have an 'https' directory in their paths or '.https.' or '.serviceworker.' in their name will be loaded over HTTPS; all other tests over HTTP. Example paths loaded over HTTPS: http/tests/security/mixedContent/https/test1.html http/tests/security/mixedContent/test1.https.html external/wpt/encoding/idlharness.any.serviceworker.html """ using_wptserve = self._port.should_use_wptserve(test_name) if not self.is_http_test(test_name) and not using_wptserve: return path.abspath_to_uri(self._port.host.platform, self._port.abspath_for_test(test_name)) if using_wptserve: for wpt_path, url_prefix in self.WPT_DIRS.items(): # The keys of WPT_DIRS do not have trailing slashes. wpt_path += '/' if test_name.startswith(wpt_path): test_dir_prefix = wpt_path test_url_prefix = url_prefix break else: # We really shouldn't reach here, but in case we do, fail gracefully. _log.error('Unrecognized WPT test name: %s', test_name) test_dir_prefix = 'external/wpt/' test_url_prefix = '/' hostname, insecure_port, secure_port = self.WPT_HOST_AND_PORTS if '.www.' in test_name: hostname = "www.%s" % hostname if '.h2.' in test_name: secure_port = self.WPT_H2_PORT else: test_dir_prefix = self.HTTP_DIR test_url_prefix = '/' hostname, insecure_port, secure_port = self.HTTP_HOST_AND_PORTS relative_path = test_name[len(test_dir_prefix):] if ('/https/' in test_name or '.https.' in test_name or '.h2.' in test_name or '.serviceworker.' in test_name or '.serviceworker-module.' in test_name): return 'https://%s:%d%s%s' % (hostname, secure_port, test_url_prefix, relative_path) return 'http://%s:%d%s%s' % (hostname, insecure_port, test_url_prefix, relative_path) def _get_uri_prefixes(self, hostname, insecure_port, secure_port): """Returns the HTTP and HTTPS URI prefix for a hostname.""" return [ 'http://%s:%d/' % (hostname, insecure_port), 'https://%s:%d/' % (hostname, secure_port) ] def uri_to_test(self, uri): """Return the base web test name for a given URI. This returns the test name for a given URI, e.g., if you passed in "file:///src/web_tests/fast/html/keygen.html" it would return "fast/html/keygen.html". """ if uri.startswith('file:///'): prefix = path.abspath_to_uri(self._port.host.platform, self._port.web_tests_dir()) if not prefix.endswith('/'): prefix += '/' return uri[len(prefix):] for prefix in self._get_uri_prefixes(*self.HTTP_HOST_AND_PORTS): if uri.startswith(prefix): return self.HTTP_DIR + uri[len(prefix):] for prefix in self._get_uri_prefixes(*self.WPT_HOST_AND_PORTS): if uri.startswith(prefix): url_path = '/' + uri[len(prefix):] for wpt_path, url_prefix in self.WPT_DIRS.items(): if url_path.startswith(url_prefix): return wpt_path + '/' + url_path[len(url_prefix):] raise NotImplementedError('unknown url type: %s' % uri) def has_crashed(self): if self._server_process is None: return False if self._crashed_process_name: return True if self._server_process.has_crashed(): self._crashed_process_name = self._server_process.name() self._crashed_pid = self._server_process.pid() return True return False def start(self, per_test_args, deadline): new_cmd_line = self.cmd_line(per_test_args) if not self._server_process or new_cmd_line != self._current_cmd_line: self._start(per_test_args) self._run_post_start_tasks() def _setup_environ_for_driver(self, environment): if self._profiler: environment = self._profiler.adjusted_environment(environment) return environment def _initialize_server_process(self, server_name, cmd_line, environment): self._server_process = self._port.server_process_constructor( self._port, server_name, cmd_line, environment, more_logging=self._port.get_option('driver_logging')) def _start(self, per_test_args, wait_for_ready=True): self.stop() self._driver_tempdir = self._port.host.filesystem.mkdtemp( prefix='%s-' % self._port.driver_name()) server_name = self._port.driver_name() environment = self._port.setup_environ_for_server() environment = self._setup_environ_for_driver(environment) self._crashed_process_name = None self._crashed_pid = None self._leaked = False cmd_line = self.cmd_line(per_test_args) self._initialize_server_process(server_name, cmd_line, environment) self._server_process.start() self._current_cmd_line = cmd_line if wait_for_ready: deadline = time.time() + DRIVER_START_TIMEOUT_SECS if not self._wait_for_server_process_output( self._server_process, deadline, b'#READY'): _log.error('%s took too long to startup.' % server_name) def _wait_for_server_process_output(self, server_process, deadline, text): output = b'' line = server_process.read_stdout_line(deadline) output += server_process.pop_all_buffered_stderr() while (not server_process.timed_out and not server_process.has_crashed() and not text in line.rstrip()): output += line line = server_process.read_stdout_line(deadline) output += server_process.pop_all_buffered_stderr() if server_process.timed_out: _log.error('Timed out while waiting for the %s process: \n"%s"', server_process.name(), output) return False if server_process.has_crashed(): _log.error('The %s process crashed while starting: \n"%s"', server_process.name(), output) return False return True def _run_post_start_tasks(self): # Remote drivers may override this to delay post-start tasks until the server has ack'd. if self._profiler: self._profiler.attach_to_pid(self._pid_on_target()) def _pid_on_target(self): # Remote drivers will override this method to return the pid on the device. return self._server_process.pid() def stop(self, timeout_secs=None): if timeout_secs is None: # Add a delay to allow process to finish post-run hooks, such as dumping code coverage data. timeout_secs = self._port.get_option('driver_kill_timeout_secs') if self._server_process: self._server_process.stop(timeout_secs) self._server_process = None if self._profiler: self._profiler.profile_after_exit() if self._driver_tempdir: self._port.host.filesystem.rmtree(str(self._driver_tempdir)) self._driver_tempdir = None self._current_cmd_line = None def _base_cmd_line(self): return [self._port._path_to_driver()] # pylint: disable=protected-access def cmd_line(self, per_test_args): cmd = self._command_wrapper(self._port.get_option('wrapper')) cmd += self._base_cmd_line() if self._no_timeout: cmd.append('--no-timeout') cmd.extend(self._port.additional_driver_flags()) if self._port.get_option('enable_leak_detection'): cmd.append('--enable-leak-detection') cmd.extend(per_test_args) cmd = coalesce_repeated_switches(cmd) cmd.append('-') return cmd def _check_for_driver_crash(self, error_line): if error_line == '#CRASHED\n': # This is used on Windows to report that the process has crashed # See http://trac.webkit.org/changeset/65537. self._crashed_process_name = self._server_process.name() self._crashed_pid = self._server_process.pid() elif (error_line.startswith('#CRASHED - ') or error_line.startswith('#PROCESS UNRESPONSIVE - ')): # WebKitTestRunner uses this to report that the WebProcess subprocess crashed. match = re.match(r'#(?:CRASHED|PROCESS UNRESPONSIVE) - (\S+)', error_line) self._crashed_process_name = (match.group(1) if match else 'WebProcess') match = re.search(r'pid (\d+)', error_line) pid = int(match.group(1)) if match else None self._crashed_pid = pid # FIXME: delete this after we're sure this code is working :) _log.debug('%s crash, pid = %s, error_line = %s', self._crashed_process_name, str(pid), error_line) if error_line.startswith('#PROCESS UNRESPONSIVE - '): self._subprocess_was_unresponsive = True self._port.sample_process(self._crashed_process_name, self._crashed_pid) # We want to show this since it's not a regular crash and probably we don't have a crash log. self.error_from_test += error_line return True return self.has_crashed() def _check_for_leak(self, error_line): if error_line.startswith('#LEAK - '): self._leaked = True match = re.match(r'#LEAK - (\S+) pid (\d+) (.+)\n', error_line) self._leak_log = match.group(3) return self._leaked def _command_from_driver_input(self, driver_input): # FIXME: performance tests pass in full URLs instead of test names. if (driver_input.test_name.startswith('http://') or driver_input.test_name.startswith('https://') or driver_input.test_name == ('about:blank')): command = driver_input.test_name elif (self.is_http_test(driver_input.test_name) or self._port.should_use_wptserve(driver_input.test_name)): command = self.test_to_uri(driver_input.test_name) else: command = self._port.abspath_for_test(driver_input.test_name) # ' is the separator between arguments. if self._port.supports_per_test_timeout(): command += "'--timeout'%s" % driver_input.timeout if driver_input.image_hash: command += "'" + driver_input.image_hash return command + '\n' def _read_first_block(self, deadline): # returns (text_content, audio_content) block = self._read_block(deadline) if block.malloc: self._measurements['Malloc'] = float(block.malloc) if block.js_heap: self._measurements['JSHeap'] = float(block.js_heap) if block.content_type == b'audio/wav': return (None, block.decoded_content) return (block.decoded_content, None) def _read_optional_image_block(self, deadline): # returns (image, actual_image_hash) block = self._read_block(deadline, wait_for_stderr_eof=True) if block.content and block.content_type == b'image/png': return (block.decoded_content, block.content_hash) return (None, block.content_hash) def _read_header(self, block, line, header_text, header_attr, header_filter=None): if (line.startswith(header_text) and getattr(block, header_attr) is None): value = line.split()[1] if header_filter: value = header_filter(value) setattr(block, header_attr, value) return True return False def _process_stdout_line(self, block, line): if (self._read_header(block, line, b'Content-Type: ', 'content_type') or self._read_header( block, line, b'Content-Transfer-Encoding: ', 'encoding') or self._read_header(block, line, b'Content-Length: ', '_content_length', int) or self._read_header(block, line, b'ActualHash: ', 'content_hash') or self._read_header(block, line, b'DumpMalloc: ', 'malloc') or self._read_header(block, line, b'DumpJSHeap: ', 'js_heap') or self._read_header(block, line, b'StdinPath', 'stdin_path')): return # Note, we're not reading ExpectedHash: here, but we could. # If the line wasn't a header, we just append it to the content. block.content += line def _strip_eof(self, line): if line and line.endswith(b'#EOF\n'): return line[:-5], True if line and line.endswith(b'#EOF\r\n'): _log.error('Got a CRLF-terminated #EOF - this is a driver bug.') return line[:-6], True return line, False def _read_block(self, deadline, wait_for_stderr_eof=False): block = ContentBlock() out_seen_eof = False while not self.has_crashed(): if out_seen_eof and (self.err_seen_eof or not wait_for_stderr_eof): break if self.err_seen_eof: out_line = self._server_process.read_stdout_line(deadline) err_line = None elif out_seen_eof: out_line = None err_line = self._server_process.read_stderr_line(deadline) else: out_line, err_line = self._server_process.read_either_stdout_or_stderr_line( deadline) if self._server_process.timed_out or self.has_crashed(): break if out_line: assert not out_seen_eof out_line, out_seen_eof = self._strip_eof(out_line) if err_line: assert not self.err_seen_eof err_line, self.err_seen_eof = self._strip_eof(err_line) if out_line: if not out_line.endswith(b'\n'): _log.error( 'Last character read from DRT stdout line was not a newline! This indicates either a NRWT or DRT bug.' ) # pylint: disable=protected-access content_length_before_header_check = block._content_length self._process_stdout_line(block, out_line) # FIXME: Unlike HTTP, DRT dumps the content right after printing a Content-Length header. # Don't wait until we're done with headers, just read the binary blob right now. if content_length_before_header_check != block._content_length: if block._content_length > 0: block.content = self._server_process.read_stdout( deadline, block._content_length) else: _log.error( 'Received content of type %s with Content-Length of 0! This indicates a bug in %s.', block.content_type, self._server_process.name()) if err_line: if self._check_for_driver_crash( err_line.decode('utf8', 'replace')): break if self._check_for_leak(err_line.decode('utf8', 'replace')): break self.error_from_test += err_line block.decode_content() return block class ContentBlock(object): def __init__(self): self.content_type = None self.encoding = None self.content_hash = None self._content_length = None # Content is treated as binary data even though the text output is usually UTF-8. # FIXME: Should be bytearray() once we require Python 2.6. # TODO(crbug/1197331): Keeping PY2 as str() for now, as diffing modules # need to be looked into for PY3 unified_diff.py and html_diff.py if six.PY2: self.content = str() else: self.content = bytearray() self.decoded_content = None self.malloc = None self.js_heap = None self.stdin_path = None def decode_content(self): if self.encoding == 'base64' and self.content is not None: self.decoded_content = base64.b64decode(self.content) else: self.decoded_content = self.content
bsd-3-clause
Barrog/C4-Datapack
data/jscript/quests/343_UnderTheShadowOfTheIvoryTower/__init__.py
1
7613
# Made by mtrix - v0.2 by DrLecter import sys from net.sf.l2j.gameserver.model.quest import State from net.sf.l2j.gameserver.model.quest import QuestState from net.sf.l2j.gameserver.model.quest.jython import QuestJython as JQuest ORB = 4364 ECTOPLASM = 4365 ADENA = 57 CHANCE = 65 RANDOM_REWARDS=[[951,1], #Enchant Weapon C [955,1], #Enchant Weapon D [2511,550],#SpiritShot: Grade C [736,1], #SoE ] #Roshambo OPTIONS={0:"Scissors",1:"Rock",2:"Paper"} OUTCOME={0:1,1:2,2:0} #Coin Toss TOSS={0:"Heads",1:"Tails"} ORBS=[10,30,70,150,310,0] #Messages start_msg=["One~ Two~ Three~","Go! One~ Two~ Three~","Ready? Go! One~ Two~ Three~","Here we go! One~ Two~ Three~"] tie_msg=["Ah ha! A tie! Take back the orbs that you bet. Well, shall we play again?",\ "Ha! A tie! Take back the orbs that you bet. Shall we try again?"] win_msg=["Well, you certainly got lucky that time! Take all the orbs we put up as a bet. Come on! Let's play another round!",\ "Oh no! I lose! Go ahead. Take all the orbs we put up as a bet. Come on! Let's play again!",\ "Oh no! I lose! Go ahead. Take all the orbs we put up as a bet. Humph... Come on! Let's play again!"] lose_msg=["Oh, too bad. You lose! Shall we play another round?",\ "Oh...! You lose! Oh well, the orbs are mine. Shall we play another round?",\ "Oh, too bad, you lose! I'll take those orbs now... Hey now, shall we play another round?"] again_msg=["Play the game.","Play the rock paper scissors game."] toss_msg=[["You're right!","You win!"],\ ["Hu wah! Right again!","You won twice in a row!"],\ ["Hu wah! Right again!","You won three times in a row!"],\ ["Ho ho! Right again!","You won four times in a row!"]] class Quest (JQuest) : def __init__(self,id,name,descr): JQuest.__init__(self,id,name,descr) def onEvent (self,event,st) : htmltext = event marsha = st.getRandom(3) random2 = st.getRandom(2) orbs = st.getQuestItemsCount(ORB) if event == "7834-02.htm" : st.setState(STARTED) st.set("cond","1") st.playSound("ItemSound.quest_accept") elif event == "7834-05.htm" : if orbs : st.giveItems(ADENA,orbs*125) st.takeItems(ORB,-1) else : htmltext = "7834-09.htm" elif event == "7835-02.htm": if st.getQuestItemsCount(ECTOPLASM) : st.takeItems(ECTOPLASM,1) item=RANDOM_REWARDS[st.getRandom(len(RANDOM_REWARDS))] st.giveItems(item[0],item[1]) htmltext="7835-02a.htm" elif event == "7934-02.htm" : if orbs < 10 : htmltext = "7934-03a.htm" else: st.set("rps_1sttime","1") elif event == "7934-03.htm" : if orbs>=10 : st.takeItems(ORB,10) st.set("playing","1") htmltext = st.showHtmlFile("7934-03.htm").replace("<msg>", start_msg[st.getRandom(len(start_msg))]) else : htmltext = "7934-03a.htm" elif event in [ "1","2","3" ]: if st.getInt("playing"): player=int(event)-1 if OUTCOME[player] == marsha: msg=lose_msg elif OUTCOME[marsha] == player: st.giveItems(ORB,20) msg=win_msg else: st.giveItems(ORB,10) msg=tie_msg st.unset("playing") htmltext = st.showHtmlFile("7934-04.htm").replace("%player%", OPTIONS[player]).\ replace("%marsha%", OPTIONS[marsha]).replace("%msg%", msg[st.getRandom(len(msg))]).\ replace("%again%", again_msg[st.getRandom(len(again_msg))]) else: htmltext="Player is cheating" st.takeItems(ORB,-1) elif event == "7935-02.htm" : if orbs < 10 : htmltext = "7935-02a.htm" else: st.set("ct_1sttime","1") elif event == "7935-03.htm" : if orbs>=10 : st.set("toss","1") else : st.unset("row") htmltext = "7935-02a.htm" elif event in ["4","5"] : if st.getInt("toss"): if orbs>=10: if random2==int(event)-4 : row = st.getInt("row") if row<4 : row += 1 template="7935-06d.htm" else: st.giveItems(ORB,310) row=0 template="7935-06c.htm" else : row = 0 st.takeItems(ORB,10) template="7935-06b.htm" st.set("row",str(row)) htmltext = st.showHtmlFile(template).replace("%toss%",TOSS[random2]).\ replace("%msg1%",toss_msg[row-1][0]).replace("%msg2%",toss_msg[row-1][1]).\ replace("%orbs%",str(ORBS[row-1])).replace("%next%",str(ORBS[row])) else: st.unset("row") htmltext = "7935-02a.htm" st.unset("toss") else: st.takeItems(ORB,-1) htmltext="Player is cheating" elif event == "quit": if st.getInt("row"): qty=st.getInt("row")-1 st.giveItems(ORB,ORBS[qty]) st.unset("row") htmltext = st.showHtmlFile("7935-06a.htm").replace("%nebulites%",str(ORBS[qty])) else: st.takeItems(ORB,-1) htmltext="Player is cheating" elif event in ["7834-06.htm","7834-02b.htm"] : st.playSound("ItemSound.quest_finish") st.exitQuest(1) return htmltext def onTalk (Self,npc,st): npcId = npc.getNpcId() htmltext = "<html><head><body>I have nothing to say you</body></html>" id = st.getState() level = st.getPlayer().getLevel() cond = st.getInt("cond") if npcId==7834 : if id == CREATED : if st.getPlayer().getClassId().getId() in [ 0x11,0xc,0xd,0xe,0x10,0x1a,0x1b,0x1c,0x1e,0x28,0x29,0x2b,0x5e,0x5f,0x60,0x61,0x62,0x67,0x68,0x69,0x6e,0x6f,0x70]: if level >= 40: htmltext = "7834-01.htm" else: htmltext = "7834-01a.htm" st.exitQuest(1) else: htmltext = "7834-01b.htm" st.exitQuest(1) elif cond==1 : if st.getQuestItemsCount(ORB) : htmltext = "7834-04.htm" else : htmltext = "7834-03.htm" elif npcId==7835 : htmltext = "7835-01.htm" elif npcId==7934 : if st.getInt("rps_1sttime") : htmltext = "7934-01a.htm" else : htmltext = "7934-01.htm" elif npcId==7935 : st.unset("row") if st.getInt("ct_1sttime") : htmltext = "7935-01a.htm" else : htmltext = "7935-01.htm" return htmltext def onKill (self,npc,st): npcId = npc.getNpcId() if st.getRandom(100) < CHANCE : st.giveItems(ORB,1) st.playSound("ItemSound.quest_itemget") return QUEST = Quest(343,"343_UnderTheShadowOfTheIvoryTower","Under The Shadow Of The Ivory Tower") CREATED = State('Start', QUEST) STARTED = State('Started', QUEST) QUEST.setInitialState(CREATED) QUEST.addStartNpc(7834) CREATED.addTalkId(7834) CREATED.addTalkId(7835) STARTED.addTalkId(7834) STARTED.addTalkId(7835) STARTED.addTalkId(7934) STARTED.addTalkId(7935) for i in range(563,567) : STARTED.addKillId(i) STARTED.addQuestDrop(7834,ORB,1) print "importing quests: 343: Under The Shadow Of The Ivory Tower"
gpl-2.0
jumping/Diamond
src/collectors/exim/exim.py
60
1658
# coding=utf-8 """ Shells out to get the exim queue length #### Dependencies * /usr/sbin/exim """ import diamond.collector import subprocess import os from diamond.collector import str_to_bool class EximCollector(diamond.collector.Collector): def get_default_config_help(self): config_help = super(EximCollector, self).get_default_config_help() config_help.update({ 'bin': 'The path to the exim binary', 'use_sudo': 'Use sudo?', 'sudo_cmd': 'Path to sudo', 'sudo_user': 'User to sudo as', }) return config_help def get_default_config(self): """ Returns the default collector settings """ config = super(EximCollector, self).get_default_config() config.update({ 'path': 'exim', 'bin': '/usr/sbin/exim', 'use_sudo': False, 'sudo_cmd': '/usr/bin/sudo', 'sudo_user': 'root', }) return config def collect(self): if not os.access(self.config['bin'], os.X_OK): return command = [self.config['bin'], '-bpc'] if str_to_bool(self.config['use_sudo']): command = [ self.config['sudo_cmd'], '-u', self.config['sudo_user'] ].extend(command) queuesize = subprocess.Popen( command, stdout=subprocess.PIPE).communicate()[0].split() if not len(queuesize): return queuesize = queuesize[-1] self.publish('queuesize', queuesize)
mit
xodus7/tensorflow
tensorflow/contrib/solvers/python/kernel_tests/util_test.py
25
4551
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.contrib.solvers.python.ops import util from tensorflow.python.framework import constant_op from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.platform import test class UtilTest(test.TestCase): def _testCreateOperator(self, use_static_shape_): for dtype in np.float32, np.float64: a_np = np.array([[1., 2.], [3., 4.], [5., 6.]], dtype=dtype) x_np = np.array([[2.], [-3.]], dtype=dtype) y_np = np.array([[2], [-3.], [5.]], dtype=dtype) with self.cached_session() as sess: if use_static_shape_: a = constant_op.constant(a_np, dtype=dtype) x = constant_op.constant(x_np, dtype=dtype) y = constant_op.constant(y_np, dtype=dtype) else: a = array_ops.placeholder(dtype) x = array_ops.placeholder(dtype) y = array_ops.placeholder(dtype) op = util.create_operator(a) ax = op.apply(x) aty = op.apply_adjoint(y) op_shape = ops.convert_to_tensor(op.shape) if use_static_shape_: op_shape_val, ax_val, aty_val = sess.run([op_shape, ax, aty]) else: op_shape_val, ax_val, aty_val = sess.run( [op_shape, ax, aty], feed_dict={a: a_np, x: x_np, y: y_np}) self.assertAllEqual(op_shape_val, [3, 2]) self.assertAllClose(ax_val, np.dot(a_np, x_np)) self.assertAllClose(aty_val, np.dot(a_np.T, y_np)) def testCreateOperator(self): self._testCreateOperator(True) def testCreateOperatorUnknownShape(self): self._testCreateOperator(False) def _testIdentityOperator(self, use_static_shape_): for dtype in np.float32, np.float64: a_np = np.array([[1., 2.], [3., 4.], [5., 6.]], dtype=dtype) x_np = np.array([[2.], [-3.]], dtype=dtype) y_np = np.array([[2], [-3.], [5.]], dtype=dtype) with self.cached_session() as sess: if use_static_shape_: a = constant_op.constant(a_np, dtype=dtype) x = constant_op.constant(x_np, dtype=dtype) y = constant_op.constant(y_np, dtype=dtype) else: a = array_ops.placeholder(dtype) x = array_ops.placeholder(dtype) y = array_ops.placeholder(dtype) id_op = util.identity_operator(a) ax = id_op.apply(x) aty = id_op.apply_adjoint(y) op_shape = ops.convert_to_tensor(id_op.shape) if use_static_shape_: op_shape_val, ax_val, aty_val = sess.run([op_shape, ax, aty]) else: op_shape_val, ax_val, aty_val = sess.run( [op_shape, ax, aty], feed_dict={ a: a_np, x: x_np, y: y_np }) self.assertAllEqual(op_shape_val, [3, 2]) self.assertAllClose(ax_val, x_np) self.assertAllClose(aty_val, y_np) def testIdentityOperator(self): self._testIdentityOperator(True) def testIdentityOperatorUnknownShape(self): self._testIdentityOperator(False) def testL2Norm(self): with self.cached_session(): x_np = np.array([[2], [-3.], [5.]]) x_norm_np = np.linalg.norm(x_np) x_normalized_np = x_np / x_norm_np x = constant_op.constant(x_np) l2norm = util.l2norm(x) l2norm_squared = util.l2norm_squared(x) x_normalized, x_norm = util.l2normalize(x) self.assertAllClose(l2norm.eval(), x_norm_np) self.assertAllClose(l2norm_squared.eval(), np.square(x_norm_np)) self.assertAllClose(x_norm.eval(), x_norm_np) self.assertAllClose(x_normalized.eval(), x_normalized_np) if __name__ == '__main__': test.main()
apache-2.0
npf-ati/linux-2.6-imx
scripts/tracing/draw_functrace.py
14676
3560
#!/usr/bin/python """ Copyright 2008 (c) Frederic Weisbecker <[email protected]> Licensed under the terms of the GNU GPL License version 2 This script parses a trace provided by the function tracer in kernel/trace/trace_functions.c The resulted trace is processed into a tree to produce a more human view of the call stack by drawing textual but hierarchical tree of calls. Only the functions's names and the the call time are provided. Usage: Be sure that you have CONFIG_FUNCTION_TRACER # mount -t debugfs nodev /sys/kernel/debug # echo function > /sys/kernel/debug/tracing/current_tracer $ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func Wait some times but not too much, the script is a bit slow. Break the pipe (Ctrl + Z) $ scripts/draw_functrace.py < raw_trace_func > draw_functrace Then you have your drawn trace in draw_functrace """ import sys, re class CallTree: """ This class provides a tree representation of the functions call stack. If a function has no parent in the kernel (interrupt, syscall, kernel thread...) then it is attached to a virtual parent called ROOT. """ ROOT = None def __init__(self, func, time = None, parent = None): self._func = func self._time = time if parent is None: self._parent = CallTree.ROOT else: self._parent = parent self._children = [] def calls(self, func, calltime): """ If a function calls another one, call this method to insert it into the tree at the appropriate place. @return: A reference to the newly created child node. """ child = CallTree(func, calltime, self) self._children.append(child) return child def getParent(self, func): """ Retrieve the last parent of the current node that has the name given by func. If this function is not on a parent, then create it as new child of root @return: A reference to the parent. """ tree = self while tree != CallTree.ROOT and tree._func != func: tree = tree._parent if tree == CallTree.ROOT: child = CallTree.ROOT.calls(func, None) return child return tree def __repr__(self): return self.__toString("", True) def __toString(self, branch, lastChild): if self._time is not None: s = "%s----%s (%s)\n" % (branch, self._func, self._time) else: s = "%s----%s\n" % (branch, self._func) i = 0 if lastChild: branch = branch[:-1] + " " while i < len(self._children): if i != len(self._children) - 1: s += "%s" % self._children[i].__toString(branch +\ " |", False) else: s += "%s" % self._children[i].__toString(branch +\ " |", True) i += 1 return s class BrokenLineException(Exception): """If the last line is not complete because of the pipe breakage, we want to stop the processing and ignore this line. """ pass class CommentLineException(Exception): """ If the line is a comment (as in the beginning of the trace file), just ignore it. """ pass def parseLine(line): line = line.strip() if line.startswith("#"): raise CommentLineException m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line) if m is None: raise BrokenLineException return (m.group(1), m.group(2), m.group(3)) def main(): CallTree.ROOT = CallTree("Root (Nowhere)", None, None) tree = CallTree.ROOT for line in sys.stdin: try: calltime, callee, caller = parseLine(line) except BrokenLineException: break except CommentLineException: continue tree = tree.getParent(caller) tree = tree.calls(callee, calltime) print CallTree.ROOT if __name__ == "__main__": main()
gpl-2.0
chadnickbok/npm
node_modules/node-gyp/gyp/pylib/gyp/generator/cmake.py
1355
44604
# Copyright (c) 2013 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """cmake output module This module is under development and should be considered experimental. This module produces cmake (2.8.8+) input as its output. One CMakeLists.txt is created for each configuration. This module's original purpose was to support editing in IDEs like KDevelop which use CMake for project management. It is also possible to use CMake to generate projects for other IDEs such as eclipse cdt and code::blocks. QtCreator will convert the CMakeLists.txt to a code::blocks cbp for the editor to read, but build using CMake. As a result QtCreator editor is unaware of compiler defines. The generated CMakeLists.txt can also be used to build on Linux. There is currently no support for building on platforms other than Linux. The generated CMakeLists.txt should properly compile all projects. However, there is a mismatch between gyp and cmake with regard to linking. All attempts are made to work around this, but CMake sometimes sees -Wl,--start-group as a library and incorrectly repeats it. As a result the output of this generator should not be relied on for building. When using with kdevelop, use version 4.4+. Previous versions of kdevelop will not be able to find the header file directories described in the generated CMakeLists.txt file. """ import multiprocessing import os import signal import string import subprocess import gyp.common generator_default_variables = { 'EXECUTABLE_PREFIX': '', 'EXECUTABLE_SUFFIX': '', 'STATIC_LIB_PREFIX': 'lib', 'STATIC_LIB_SUFFIX': '.a', 'SHARED_LIB_PREFIX': 'lib', 'SHARED_LIB_SUFFIX': '.so', 'SHARED_LIB_DIR': '${builddir}/lib.${TOOLSET}', 'LIB_DIR': '${obj}.${TOOLSET}', 'INTERMEDIATE_DIR': '${obj}.${TOOLSET}/${TARGET}/geni', 'SHARED_INTERMEDIATE_DIR': '${obj}/gen', 'PRODUCT_DIR': '${builddir}', 'RULE_INPUT_PATH': '${RULE_INPUT_PATH}', 'RULE_INPUT_DIRNAME': '${RULE_INPUT_DIRNAME}', 'RULE_INPUT_NAME': '${RULE_INPUT_NAME}', 'RULE_INPUT_ROOT': '${RULE_INPUT_ROOT}', 'RULE_INPUT_EXT': '${RULE_INPUT_EXT}', 'CONFIGURATION_NAME': '${configuration}', } FULL_PATH_VARS = ('${CMAKE_CURRENT_LIST_DIR}', '${builddir}', '${obj}') generator_supports_multiple_toolsets = True generator_wants_static_library_dependencies_adjusted = True COMPILABLE_EXTENSIONS = { '.c': 'cc', '.cc': 'cxx', '.cpp': 'cxx', '.cxx': 'cxx', '.s': 's', # cc '.S': 's', # cc } def RemovePrefix(a, prefix): """Returns 'a' without 'prefix' if it starts with 'prefix'.""" return a[len(prefix):] if a.startswith(prefix) else a def CalculateVariables(default_variables, params): """Calculate additional variables for use in the build (called by gyp).""" default_variables.setdefault('OS', gyp.common.GetFlavor(params)) def Compilable(filename): """Return true if the file is compilable (should be in OBJS).""" return any(filename.endswith(e) for e in COMPILABLE_EXTENSIONS) def Linkable(filename): """Return true if the file is linkable (should be on the link line).""" return filename.endswith('.o') def NormjoinPathForceCMakeSource(base_path, rel_path): """Resolves rel_path against base_path and returns the result. If rel_path is an absolute path it is returned unchanged. Otherwise it is resolved against base_path and normalized. If the result is a relative path, it is forced to be relative to the CMakeLists.txt. """ if os.path.isabs(rel_path): return rel_path if any([rel_path.startswith(var) for var in FULL_PATH_VARS]): return rel_path # TODO: do we need to check base_path for absolute variables as well? return os.path.join('${CMAKE_CURRENT_LIST_DIR}', os.path.normpath(os.path.join(base_path, rel_path))) def NormjoinPath(base_path, rel_path): """Resolves rel_path against base_path and returns the result. TODO: what is this really used for? If rel_path begins with '$' it is returned unchanged. Otherwise it is resolved against base_path if relative, then normalized. """ if rel_path.startswith('$') and not rel_path.startswith('${configuration}'): return rel_path return os.path.normpath(os.path.join(base_path, rel_path)) def CMakeStringEscape(a): """Escapes the string 'a' for use inside a CMake string. This means escaping '\' otherwise it may be seen as modifying the next character '"' otherwise it will end the string ';' otherwise the string becomes a list The following do not need to be escaped '#' when the lexer is in string state, this does not start a comment The following are yet unknown '$' generator variables (like ${obj}) must not be escaped, but text $ should be escaped what is wanted is to know which $ come from generator variables """ return a.replace('\\', '\\\\').replace(';', '\\;').replace('"', '\\"') def SetFileProperty(output, source_name, property_name, values, sep): """Given a set of source file, sets the given property on them.""" output.write('set_source_files_properties(') output.write(source_name) output.write(' PROPERTIES ') output.write(property_name) output.write(' "') for value in values: output.write(CMakeStringEscape(value)) output.write(sep) output.write('")\n') def SetFilesProperty(output, variable, property_name, values, sep): """Given a set of source files, sets the given property on them.""" output.write('set_source_files_properties(') WriteVariable(output, variable) output.write(' PROPERTIES ') output.write(property_name) output.write(' "') for value in values: output.write(CMakeStringEscape(value)) output.write(sep) output.write('")\n') def SetTargetProperty(output, target_name, property_name, values, sep=''): """Given a target, sets the given property.""" output.write('set_target_properties(') output.write(target_name) output.write(' PROPERTIES ') output.write(property_name) output.write(' "') for value in values: output.write(CMakeStringEscape(value)) output.write(sep) output.write('")\n') def SetVariable(output, variable_name, value): """Sets a CMake variable.""" output.write('set(') output.write(variable_name) output.write(' "') output.write(CMakeStringEscape(value)) output.write('")\n') def SetVariableList(output, variable_name, values): """Sets a CMake variable to a list.""" if not values: return SetVariable(output, variable_name, "") if len(values) == 1: return SetVariable(output, variable_name, values[0]) output.write('list(APPEND ') output.write(variable_name) output.write('\n "') output.write('"\n "'.join([CMakeStringEscape(value) for value in values])) output.write('")\n') def UnsetVariable(output, variable_name): """Unsets a CMake variable.""" output.write('unset(') output.write(variable_name) output.write(')\n') def WriteVariable(output, variable_name, prepend=None): if prepend: output.write(prepend) output.write('${') output.write(variable_name) output.write('}') class CMakeTargetType(object): def __init__(self, command, modifier, property_modifier): self.command = command self.modifier = modifier self.property_modifier = property_modifier cmake_target_type_from_gyp_target_type = { 'executable': CMakeTargetType('add_executable', None, 'RUNTIME'), 'static_library': CMakeTargetType('add_library', 'STATIC', 'ARCHIVE'), 'shared_library': CMakeTargetType('add_library', 'SHARED', 'LIBRARY'), 'loadable_module': CMakeTargetType('add_library', 'MODULE', 'LIBRARY'), 'none': CMakeTargetType('add_custom_target', 'SOURCES', None), } def StringToCMakeTargetName(a): """Converts the given string 'a' to a valid CMake target name. All invalid characters are replaced by '_'. Invalid for cmake: ' ', '/', '(', ')', '"' Invalid for make: ':' Invalid for unknown reasons but cause failures: '.' """ return a.translate(string.maketrans(' /():."', '_______')) def WriteActions(target_name, actions, extra_sources, extra_deps, path_to_gyp, output): """Write CMake for the 'actions' in the target. Args: target_name: the name of the CMake target being generated. actions: the Gyp 'actions' dict for this target. extra_sources: [(<cmake_src>, <src>)] to append with generated source files. extra_deps: [<cmake_taget>] to append with generated targets. path_to_gyp: relative path from CMakeLists.txt being generated to the Gyp file in which the target being generated is defined. """ for action in actions: action_name = StringToCMakeTargetName(action['action_name']) action_target_name = '%s__%s' % (target_name, action_name) inputs = action['inputs'] inputs_name = action_target_name + '__input' SetVariableList(output, inputs_name, [NormjoinPathForceCMakeSource(path_to_gyp, dep) for dep in inputs]) outputs = action['outputs'] cmake_outputs = [NormjoinPathForceCMakeSource(path_to_gyp, out) for out in outputs] outputs_name = action_target_name + '__output' SetVariableList(output, outputs_name, cmake_outputs) # Build up a list of outputs. # Collect the output dirs we'll need. dirs = set(dir for dir in (os.path.dirname(o) for o in outputs) if dir) if int(action.get('process_outputs_as_sources', False)): extra_sources.extend(zip(cmake_outputs, outputs)) # add_custom_command output.write('add_custom_command(OUTPUT ') WriteVariable(output, outputs_name) output.write('\n') if len(dirs) > 0: for directory in dirs: output.write(' COMMAND ${CMAKE_COMMAND} -E make_directory ') output.write(directory) output.write('\n') output.write(' COMMAND ') output.write(gyp.common.EncodePOSIXShellList(action['action'])) output.write('\n') output.write(' DEPENDS ') WriteVariable(output, inputs_name) output.write('\n') output.write(' WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/') output.write(path_to_gyp) output.write('\n') output.write(' COMMENT ') if 'message' in action: output.write(action['message']) else: output.write(action_target_name) output.write('\n') output.write(' VERBATIM\n') output.write(')\n') # add_custom_target output.write('add_custom_target(') output.write(action_target_name) output.write('\n DEPENDS ') WriteVariable(output, outputs_name) output.write('\n SOURCES ') WriteVariable(output, inputs_name) output.write('\n)\n') extra_deps.append(action_target_name) def NormjoinRulePathForceCMakeSource(base_path, rel_path, rule_source): if rel_path.startswith(("${RULE_INPUT_PATH}","${RULE_INPUT_DIRNAME}")): if any([rule_source.startswith(var) for var in FULL_PATH_VARS]): return rel_path return NormjoinPathForceCMakeSource(base_path, rel_path) def WriteRules(target_name, rules, extra_sources, extra_deps, path_to_gyp, output): """Write CMake for the 'rules' in the target. Args: target_name: the name of the CMake target being generated. actions: the Gyp 'actions' dict for this target. extra_sources: [(<cmake_src>, <src>)] to append with generated source files. extra_deps: [<cmake_taget>] to append with generated targets. path_to_gyp: relative path from CMakeLists.txt being generated to the Gyp file in which the target being generated is defined. """ for rule in rules: rule_name = StringToCMakeTargetName(target_name + '__' + rule['rule_name']) inputs = rule.get('inputs', []) inputs_name = rule_name + '__input' SetVariableList(output, inputs_name, [NormjoinPathForceCMakeSource(path_to_gyp, dep) for dep in inputs]) outputs = rule['outputs'] var_outputs = [] for count, rule_source in enumerate(rule.get('rule_sources', [])): action_name = rule_name + '_' + str(count) rule_source_dirname, rule_source_basename = os.path.split(rule_source) rule_source_root, rule_source_ext = os.path.splitext(rule_source_basename) SetVariable(output, 'RULE_INPUT_PATH', rule_source) SetVariable(output, 'RULE_INPUT_DIRNAME', rule_source_dirname) SetVariable(output, 'RULE_INPUT_NAME', rule_source_basename) SetVariable(output, 'RULE_INPUT_ROOT', rule_source_root) SetVariable(output, 'RULE_INPUT_EXT', rule_source_ext) # Build up a list of outputs. # Collect the output dirs we'll need. dirs = set(dir for dir in (os.path.dirname(o) for o in outputs) if dir) # Create variables for the output, as 'local' variable will be unset. these_outputs = [] for output_index, out in enumerate(outputs): output_name = action_name + '_' + str(output_index) SetVariable(output, output_name, NormjoinRulePathForceCMakeSource(path_to_gyp, out, rule_source)) if int(rule.get('process_outputs_as_sources', False)): extra_sources.append(('${' + output_name + '}', out)) these_outputs.append('${' + output_name + '}') var_outputs.append('${' + output_name + '}') # add_custom_command output.write('add_custom_command(OUTPUT\n') for out in these_outputs: output.write(' ') output.write(out) output.write('\n') for directory in dirs: output.write(' COMMAND ${CMAKE_COMMAND} -E make_directory ') output.write(directory) output.write('\n') output.write(' COMMAND ') output.write(gyp.common.EncodePOSIXShellList(rule['action'])) output.write('\n') output.write(' DEPENDS ') WriteVariable(output, inputs_name) output.write(' ') output.write(NormjoinPath(path_to_gyp, rule_source)) output.write('\n') # CMAKE_CURRENT_LIST_DIR is where the CMakeLists.txt lives. # The cwd is the current build directory. output.write(' WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/') output.write(path_to_gyp) output.write('\n') output.write(' COMMENT ') if 'message' in rule: output.write(rule['message']) else: output.write(action_name) output.write('\n') output.write(' VERBATIM\n') output.write(')\n') UnsetVariable(output, 'RULE_INPUT_PATH') UnsetVariable(output, 'RULE_INPUT_DIRNAME') UnsetVariable(output, 'RULE_INPUT_NAME') UnsetVariable(output, 'RULE_INPUT_ROOT') UnsetVariable(output, 'RULE_INPUT_EXT') # add_custom_target output.write('add_custom_target(') output.write(rule_name) output.write(' DEPENDS\n') for out in var_outputs: output.write(' ') output.write(out) output.write('\n') output.write('SOURCES ') WriteVariable(output, inputs_name) output.write('\n') for rule_source in rule.get('rule_sources', []): output.write(' ') output.write(NormjoinPath(path_to_gyp, rule_source)) output.write('\n') output.write(')\n') extra_deps.append(rule_name) def WriteCopies(target_name, copies, extra_deps, path_to_gyp, output): """Write CMake for the 'copies' in the target. Args: target_name: the name of the CMake target being generated. actions: the Gyp 'actions' dict for this target. extra_deps: [<cmake_taget>] to append with generated targets. path_to_gyp: relative path from CMakeLists.txt being generated to the Gyp file in which the target being generated is defined. """ copy_name = target_name + '__copies' # CMake gets upset with custom targets with OUTPUT which specify no output. have_copies = any(copy['files'] for copy in copies) if not have_copies: output.write('add_custom_target(') output.write(copy_name) output.write(')\n') extra_deps.append(copy_name) return class Copy(object): def __init__(self, ext, command): self.cmake_inputs = [] self.cmake_outputs = [] self.gyp_inputs = [] self.gyp_outputs = [] self.ext = ext self.inputs_name = None self.outputs_name = None self.command = command file_copy = Copy('', 'copy') dir_copy = Copy('_dirs', 'copy_directory') for copy in copies: files = copy['files'] destination = copy['destination'] for src in files: path = os.path.normpath(src) basename = os.path.split(path)[1] dst = os.path.join(destination, basename) copy = file_copy if os.path.basename(src) else dir_copy copy.cmake_inputs.append(NormjoinPathForceCMakeSource(path_to_gyp, src)) copy.cmake_outputs.append(NormjoinPathForceCMakeSource(path_to_gyp, dst)) copy.gyp_inputs.append(src) copy.gyp_outputs.append(dst) for copy in (file_copy, dir_copy): if copy.cmake_inputs: copy.inputs_name = copy_name + '__input' + copy.ext SetVariableList(output, copy.inputs_name, copy.cmake_inputs) copy.outputs_name = copy_name + '__output' + copy.ext SetVariableList(output, copy.outputs_name, copy.cmake_outputs) # add_custom_command output.write('add_custom_command(\n') output.write('OUTPUT') for copy in (file_copy, dir_copy): if copy.outputs_name: WriteVariable(output, copy.outputs_name, ' ') output.write('\n') for copy in (file_copy, dir_copy): for src, dst in zip(copy.gyp_inputs, copy.gyp_outputs): # 'cmake -E copy src dst' will create the 'dst' directory if needed. output.write('COMMAND ${CMAKE_COMMAND} -E %s ' % copy.command) output.write(src) output.write(' ') output.write(dst) output.write("\n") output.write('DEPENDS') for copy in (file_copy, dir_copy): if copy.inputs_name: WriteVariable(output, copy.inputs_name, ' ') output.write('\n') output.write('WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/') output.write(path_to_gyp) output.write('\n') output.write('COMMENT Copying for ') output.write(target_name) output.write('\n') output.write('VERBATIM\n') output.write(')\n') # add_custom_target output.write('add_custom_target(') output.write(copy_name) output.write('\n DEPENDS') for copy in (file_copy, dir_copy): if copy.outputs_name: WriteVariable(output, copy.outputs_name, ' ') output.write('\n SOURCES') if file_copy.inputs_name: WriteVariable(output, file_copy.inputs_name, ' ') output.write('\n)\n') extra_deps.append(copy_name) def CreateCMakeTargetBaseName(qualified_target): """This is the name we would like the target to have.""" _, gyp_target_name, gyp_target_toolset = ( gyp.common.ParseQualifiedTarget(qualified_target)) cmake_target_base_name = gyp_target_name if gyp_target_toolset and gyp_target_toolset != 'target': cmake_target_base_name += '_' + gyp_target_toolset return StringToCMakeTargetName(cmake_target_base_name) def CreateCMakeTargetFullName(qualified_target): """An unambiguous name for the target.""" gyp_file, gyp_target_name, gyp_target_toolset = ( gyp.common.ParseQualifiedTarget(qualified_target)) cmake_target_full_name = gyp_file + ':' + gyp_target_name if gyp_target_toolset and gyp_target_toolset != 'target': cmake_target_full_name += '_' + gyp_target_toolset return StringToCMakeTargetName(cmake_target_full_name) class CMakeNamer(object): """Converts Gyp target names into CMake target names. CMake requires that target names be globally unique. One way to ensure this is to fully qualify the names of the targets. Unfortunatly, this ends up with all targets looking like "chrome_chrome_gyp_chrome" instead of just "chrome". If this generator were only interested in building, it would be possible to fully qualify all target names, then create unqualified target names which depend on all qualified targets which should have had that name. This is more or less what the 'make' generator does with aliases. However, one goal of this generator is to create CMake files for use with IDEs, and fully qualified names are not as user friendly. Since target name collision is rare, we do the above only when required. Toolset variants are always qualified from the base, as this is required for building. However, it also makes sense for an IDE, as it is possible for defines to be different. """ def __init__(self, target_list): self.cmake_target_base_names_conficting = set() cmake_target_base_names_seen = set() for qualified_target in target_list: cmake_target_base_name = CreateCMakeTargetBaseName(qualified_target) if cmake_target_base_name not in cmake_target_base_names_seen: cmake_target_base_names_seen.add(cmake_target_base_name) else: self.cmake_target_base_names_conficting.add(cmake_target_base_name) def CreateCMakeTargetName(self, qualified_target): base_name = CreateCMakeTargetBaseName(qualified_target) if base_name in self.cmake_target_base_names_conficting: return CreateCMakeTargetFullName(qualified_target) return base_name def WriteTarget(namer, qualified_target, target_dicts, build_dir, config_to_use, options, generator_flags, all_qualified_targets, output): # The make generator does this always. # TODO: It would be nice to be able to tell CMake all dependencies. circular_libs = generator_flags.get('circular', True) if not generator_flags.get('standalone', False): output.write('\n#') output.write(qualified_target) output.write('\n') gyp_file, _, _ = gyp.common.ParseQualifiedTarget(qualified_target) rel_gyp_file = gyp.common.RelativePath(gyp_file, options.toplevel_dir) rel_gyp_dir = os.path.dirname(rel_gyp_file) # Relative path from build dir to top dir. build_to_top = gyp.common.InvertRelativePath(build_dir, options.toplevel_dir) # Relative path from build dir to gyp dir. build_to_gyp = os.path.join(build_to_top, rel_gyp_dir) path_from_cmakelists_to_gyp = build_to_gyp spec = target_dicts.get(qualified_target, {}) config = spec.get('configurations', {}).get(config_to_use, {}) target_name = spec.get('target_name', '<missing target name>') target_type = spec.get('type', '<missing target type>') target_toolset = spec.get('toolset') cmake_target_type = cmake_target_type_from_gyp_target_type.get(target_type) if cmake_target_type is None: print ('Target %s has unknown target type %s, skipping.' % ( target_name, target_type ) ) return SetVariable(output, 'TARGET', target_name) SetVariable(output, 'TOOLSET', target_toolset) cmake_target_name = namer.CreateCMakeTargetName(qualified_target) extra_sources = [] extra_deps = [] # Actions must come first, since they can generate more OBJs for use below. if 'actions' in spec: WriteActions(cmake_target_name, spec['actions'], extra_sources, extra_deps, path_from_cmakelists_to_gyp, output) # Rules must be early like actions. if 'rules' in spec: WriteRules(cmake_target_name, spec['rules'], extra_sources, extra_deps, path_from_cmakelists_to_gyp, output) # Copies if 'copies' in spec: WriteCopies(cmake_target_name, spec['copies'], extra_deps, path_from_cmakelists_to_gyp, output) # Target and sources srcs = spec.get('sources', []) # Gyp separates the sheep from the goats based on file extensions. # A full separation is done here because of flag handing (see below). s_sources = [] c_sources = [] cxx_sources = [] linkable_sources = [] other_sources = [] for src in srcs: _, ext = os.path.splitext(src) src_type = COMPILABLE_EXTENSIONS.get(ext, None) src_norm_path = NormjoinPath(path_from_cmakelists_to_gyp, src); if src_type == 's': s_sources.append(src_norm_path) elif src_type == 'cc': c_sources.append(src_norm_path) elif src_type == 'cxx': cxx_sources.append(src_norm_path) elif Linkable(ext): linkable_sources.append(src_norm_path) else: other_sources.append(src_norm_path) for extra_source in extra_sources: src, real_source = extra_source _, ext = os.path.splitext(real_source) src_type = COMPILABLE_EXTENSIONS.get(ext, None) if src_type == 's': s_sources.append(src) elif src_type == 'cc': c_sources.append(src) elif src_type == 'cxx': cxx_sources.append(src) elif Linkable(ext): linkable_sources.append(src) else: other_sources.append(src) s_sources_name = None if s_sources: s_sources_name = cmake_target_name + '__asm_srcs' SetVariableList(output, s_sources_name, s_sources) c_sources_name = None if c_sources: c_sources_name = cmake_target_name + '__c_srcs' SetVariableList(output, c_sources_name, c_sources) cxx_sources_name = None if cxx_sources: cxx_sources_name = cmake_target_name + '__cxx_srcs' SetVariableList(output, cxx_sources_name, cxx_sources) linkable_sources_name = None if linkable_sources: linkable_sources_name = cmake_target_name + '__linkable_srcs' SetVariableList(output, linkable_sources_name, linkable_sources) other_sources_name = None if other_sources: other_sources_name = cmake_target_name + '__other_srcs' SetVariableList(output, other_sources_name, other_sources) # CMake gets upset when executable targets provide no sources. # http://www.cmake.org/pipermail/cmake/2010-July/038461.html dummy_sources_name = None has_sources = (s_sources_name or c_sources_name or cxx_sources_name or linkable_sources_name or other_sources_name) if target_type == 'executable' and not has_sources: dummy_sources_name = cmake_target_name + '__dummy_srcs' SetVariable(output, dummy_sources_name, "${obj}.${TOOLSET}/${TARGET}/genc/dummy.c") output.write('if(NOT EXISTS "') WriteVariable(output, dummy_sources_name) output.write('")\n') output.write(' file(WRITE "') WriteVariable(output, dummy_sources_name) output.write('" "")\n') output.write("endif()\n") # CMake is opposed to setting linker directories and considers the practice # of setting linker directories dangerous. Instead, it favors the use of # find_library and passing absolute paths to target_link_libraries. # However, CMake does provide the command link_directories, which adds # link directories to targets defined after it is called. # As a result, link_directories must come before the target definition. # CMake unfortunately has no means of removing entries from LINK_DIRECTORIES. library_dirs = config.get('library_dirs') if library_dirs is not None: output.write('link_directories(') for library_dir in library_dirs: output.write(' ') output.write(NormjoinPath(path_from_cmakelists_to_gyp, library_dir)) output.write('\n') output.write(')\n') output.write(cmake_target_type.command) output.write('(') output.write(cmake_target_name) if cmake_target_type.modifier is not None: output.write(' ') output.write(cmake_target_type.modifier) if s_sources_name: WriteVariable(output, s_sources_name, ' ') if c_sources_name: WriteVariable(output, c_sources_name, ' ') if cxx_sources_name: WriteVariable(output, cxx_sources_name, ' ') if linkable_sources_name: WriteVariable(output, linkable_sources_name, ' ') if other_sources_name: WriteVariable(output, other_sources_name, ' ') if dummy_sources_name: WriteVariable(output, dummy_sources_name, ' ') output.write(')\n') # Let CMake know if the 'all' target should depend on this target. exclude_from_all = ('TRUE' if qualified_target not in all_qualified_targets else 'FALSE') SetTargetProperty(output, cmake_target_name, 'EXCLUDE_FROM_ALL', exclude_from_all) for extra_target_name in extra_deps: SetTargetProperty(output, extra_target_name, 'EXCLUDE_FROM_ALL', exclude_from_all) # Output name and location. if target_type != 'none': # Link as 'C' if there are no other files if not c_sources and not cxx_sources: SetTargetProperty(output, cmake_target_name, 'LINKER_LANGUAGE', ['C']) # Mark uncompiled sources as uncompiled. if other_sources_name: output.write('set_source_files_properties(') WriteVariable(output, other_sources_name, '') output.write(' PROPERTIES HEADER_FILE_ONLY "TRUE")\n') # Mark object sources as linkable. if linkable_sources_name: output.write('set_source_files_properties(') WriteVariable(output, other_sources_name, '') output.write(' PROPERTIES EXTERNAL_OBJECT "TRUE")\n') # Output directory target_output_directory = spec.get('product_dir') if target_output_directory is None: if target_type in ('executable', 'loadable_module'): target_output_directory = generator_default_variables['PRODUCT_DIR'] elif target_type == 'shared_library': target_output_directory = '${builddir}/lib.${TOOLSET}' elif spec.get('standalone_static_library', False): target_output_directory = generator_default_variables['PRODUCT_DIR'] else: base_path = gyp.common.RelativePath(os.path.dirname(gyp_file), options.toplevel_dir) target_output_directory = '${obj}.${TOOLSET}' target_output_directory = ( os.path.join(target_output_directory, base_path)) cmake_target_output_directory = NormjoinPathForceCMakeSource( path_from_cmakelists_to_gyp, target_output_directory) SetTargetProperty(output, cmake_target_name, cmake_target_type.property_modifier + '_OUTPUT_DIRECTORY', cmake_target_output_directory) # Output name default_product_prefix = '' default_product_name = target_name default_product_ext = '' if target_type == 'static_library': static_library_prefix = generator_default_variables['STATIC_LIB_PREFIX'] default_product_name = RemovePrefix(default_product_name, static_library_prefix) default_product_prefix = static_library_prefix default_product_ext = generator_default_variables['STATIC_LIB_SUFFIX'] elif target_type in ('loadable_module', 'shared_library'): shared_library_prefix = generator_default_variables['SHARED_LIB_PREFIX'] default_product_name = RemovePrefix(default_product_name, shared_library_prefix) default_product_prefix = shared_library_prefix default_product_ext = generator_default_variables['SHARED_LIB_SUFFIX'] elif target_type != 'executable': print ('ERROR: What output file should be generated?', 'type', target_type, 'target', target_name) product_prefix = spec.get('product_prefix', default_product_prefix) product_name = spec.get('product_name', default_product_name) product_ext = spec.get('product_extension') if product_ext: product_ext = '.' + product_ext else: product_ext = default_product_ext SetTargetProperty(output, cmake_target_name, 'PREFIX', product_prefix) SetTargetProperty(output, cmake_target_name, cmake_target_type.property_modifier + '_OUTPUT_NAME', product_name) SetTargetProperty(output, cmake_target_name, 'SUFFIX', product_ext) # Make the output of this target referenceable as a source. cmake_target_output_basename = product_prefix + product_name + product_ext cmake_target_output = os.path.join(cmake_target_output_directory, cmake_target_output_basename) SetFileProperty(output, cmake_target_output, 'GENERATED', ['TRUE'], '') # Includes includes = config.get('include_dirs') if includes: # This (target include directories) is what requires CMake 2.8.8 includes_name = cmake_target_name + '__include_dirs' SetVariableList(output, includes_name, [NormjoinPathForceCMakeSource(path_from_cmakelists_to_gyp, include) for include in includes]) output.write('set_property(TARGET ') output.write(cmake_target_name) output.write(' APPEND PROPERTY INCLUDE_DIRECTORIES ') WriteVariable(output, includes_name, '') output.write(')\n') # Defines defines = config.get('defines') if defines is not None: SetTargetProperty(output, cmake_target_name, 'COMPILE_DEFINITIONS', defines, ';') # Compile Flags - http://www.cmake.org/Bug/view.php?id=6493 # CMake currently does not have target C and CXX flags. # So, instead of doing... # cflags_c = config.get('cflags_c') # if cflags_c is not None: # SetTargetProperty(output, cmake_target_name, # 'C_COMPILE_FLAGS', cflags_c, ' ') # cflags_cc = config.get('cflags_cc') # if cflags_cc is not None: # SetTargetProperty(output, cmake_target_name, # 'CXX_COMPILE_FLAGS', cflags_cc, ' ') # Instead we must... cflags = config.get('cflags', []) cflags_c = config.get('cflags_c', []) cflags_cxx = config.get('cflags_cc', []) if (not cflags_c or not c_sources) and (not cflags_cxx or not cxx_sources): SetTargetProperty(output, cmake_target_name, 'COMPILE_FLAGS', cflags, ' ') elif c_sources and not (s_sources or cxx_sources): flags = [] flags.extend(cflags) flags.extend(cflags_c) SetTargetProperty(output, cmake_target_name, 'COMPILE_FLAGS', flags, ' ') elif cxx_sources and not (s_sources or c_sources): flags = [] flags.extend(cflags) flags.extend(cflags_cxx) SetTargetProperty(output, cmake_target_name, 'COMPILE_FLAGS', flags, ' ') else: # TODO: This is broken, one cannot generally set properties on files, # as other targets may require different properties on the same files. if s_sources and cflags: SetFilesProperty(output, s_sources_name, 'COMPILE_FLAGS', cflags, ' ') if c_sources and (cflags or cflags_c): flags = [] flags.extend(cflags) flags.extend(cflags_c) SetFilesProperty(output, c_sources_name, 'COMPILE_FLAGS', flags, ' ') if cxx_sources and (cflags or cflags_cxx): flags = [] flags.extend(cflags) flags.extend(cflags_cxx) SetFilesProperty(output, cxx_sources_name, 'COMPILE_FLAGS', flags, ' ') # Linker flags ldflags = config.get('ldflags') if ldflags is not None: SetTargetProperty(output, cmake_target_name, 'LINK_FLAGS', ldflags, ' ') # Note on Dependencies and Libraries: # CMake wants to handle link order, resolving the link line up front. # Gyp does not retain or enforce specifying enough information to do so. # So do as other gyp generators and use --start-group and --end-group. # Give CMake as little information as possible so that it doesn't mess it up. # Dependencies rawDeps = spec.get('dependencies', []) static_deps = [] shared_deps = [] other_deps = [] for rawDep in rawDeps: dep_cmake_name = namer.CreateCMakeTargetName(rawDep) dep_spec = target_dicts.get(rawDep, {}) dep_target_type = dep_spec.get('type', None) if dep_target_type == 'static_library': static_deps.append(dep_cmake_name) elif dep_target_type == 'shared_library': shared_deps.append(dep_cmake_name) else: other_deps.append(dep_cmake_name) # ensure all external dependencies are complete before internal dependencies # extra_deps currently only depend on their own deps, so otherwise run early if static_deps or shared_deps or other_deps: for extra_dep in extra_deps: output.write('add_dependencies(') output.write(extra_dep) output.write('\n') for deps in (static_deps, shared_deps, other_deps): for dep in gyp.common.uniquer(deps): output.write(' ') output.write(dep) output.write('\n') output.write(')\n') linkable = target_type in ('executable', 'loadable_module', 'shared_library') other_deps.extend(extra_deps) if other_deps or (not linkable and (static_deps or shared_deps)): output.write('add_dependencies(') output.write(cmake_target_name) output.write('\n') for dep in gyp.common.uniquer(other_deps): output.write(' ') output.write(dep) output.write('\n') if not linkable: for deps in (static_deps, shared_deps): for lib_dep in gyp.common.uniquer(deps): output.write(' ') output.write(lib_dep) output.write('\n') output.write(')\n') # Libraries if linkable: external_libs = [lib for lib in spec.get('libraries', []) if len(lib) > 0] if external_libs or static_deps or shared_deps: output.write('target_link_libraries(') output.write(cmake_target_name) output.write('\n') if static_deps: write_group = circular_libs and len(static_deps) > 1 if write_group: output.write('-Wl,--start-group\n') for dep in gyp.common.uniquer(static_deps): output.write(' ') output.write(dep) output.write('\n') if write_group: output.write('-Wl,--end-group\n') if shared_deps: for dep in gyp.common.uniquer(shared_deps): output.write(' ') output.write(dep) output.write('\n') if external_libs: for lib in gyp.common.uniquer(external_libs): output.write(' ') output.write(lib) output.write('\n') output.write(')\n') UnsetVariable(output, 'TOOLSET') UnsetVariable(output, 'TARGET') def GenerateOutputForConfig(target_list, target_dicts, data, params, config_to_use): options = params['options'] generator_flags = params['generator_flags'] # generator_dir: relative path from pwd to where make puts build files. # Makes migrating from make to cmake easier, cmake doesn't put anything here. # Each Gyp configuration creates a different CMakeLists.txt file # to avoid incompatibilities between Gyp and CMake configurations. generator_dir = os.path.relpath(options.generator_output or '.') # output_dir: relative path from generator_dir to the build directory. output_dir = generator_flags.get('output_dir', 'out') # build_dir: relative path from source root to our output files. # e.g. "out/Debug" build_dir = os.path.normpath(os.path.join(generator_dir, output_dir, config_to_use)) toplevel_build = os.path.join(options.toplevel_dir, build_dir) output_file = os.path.join(toplevel_build, 'CMakeLists.txt') gyp.common.EnsureDirExists(output_file) output = open(output_file, 'w') output.write('cmake_minimum_required(VERSION 2.8.8 FATAL_ERROR)\n') output.write('cmake_policy(VERSION 2.8.8)\n') gyp_file, project_target, _ = gyp.common.ParseQualifiedTarget(target_list[-1]) output.write('project(') output.write(project_target) output.write(')\n') SetVariable(output, 'configuration', config_to_use) ar = None cc = None cxx = None make_global_settings = data[gyp_file].get('make_global_settings', []) build_to_top = gyp.common.InvertRelativePath(build_dir, options.toplevel_dir) for key, value in make_global_settings: if key == 'AR': ar = os.path.join(build_to_top, value) if key == 'CC': cc = os.path.join(build_to_top, value) if key == 'CXX': cxx = os.path.join(build_to_top, value) ar = gyp.common.GetEnvironFallback(['AR_target', 'AR'], ar) cc = gyp.common.GetEnvironFallback(['CC_target', 'CC'], cc) cxx = gyp.common.GetEnvironFallback(['CXX_target', 'CXX'], cxx) if ar: SetVariable(output, 'CMAKE_AR', ar) if cc: SetVariable(output, 'CMAKE_C_COMPILER', cc) if cxx: SetVariable(output, 'CMAKE_CXX_COMPILER', cxx) # The following appears to be as-yet undocumented. # http://public.kitware.com/Bug/view.php?id=8392 output.write('enable_language(ASM)\n') # ASM-ATT does not support .S files. # output.write('enable_language(ASM-ATT)\n') if cc: SetVariable(output, 'CMAKE_ASM_COMPILER', cc) SetVariable(output, 'builddir', '${CMAKE_CURRENT_BINARY_DIR}') SetVariable(output, 'obj', '${builddir}/obj') output.write('\n') # TODO: Undocumented/unsupported (the CMake Java generator depends on it). # CMake by default names the object resulting from foo.c to be foo.c.o. # Gyp traditionally names the object resulting from foo.c foo.o. # This should be irrelevant, but some targets extract .o files from .a # and depend on the name of the extracted .o files. output.write('set(CMAKE_C_OUTPUT_EXTENSION_REPLACE 1)\n') output.write('set(CMAKE_CXX_OUTPUT_EXTENSION_REPLACE 1)\n') output.write('\n') # Force ninja to use rsp files. Otherwise link and ar lines can get too long, # resulting in 'Argument list too long' errors. output.write('set(CMAKE_NINJA_FORCE_RESPONSE_FILE 1)\n') output.write('\n') namer = CMakeNamer(target_list) # The list of targets upon which the 'all' target should depend. # CMake has it's own implicit 'all' target, one is not created explicitly. all_qualified_targets = set() for build_file in params['build_files']: for qualified_target in gyp.common.AllTargets(target_list, target_dicts, os.path.normpath(build_file)): all_qualified_targets.add(qualified_target) for qualified_target in target_list: WriteTarget(namer, qualified_target, target_dicts, build_dir, config_to_use, options, generator_flags, all_qualified_targets, output) output.close() def PerformBuild(data, configurations, params): options = params['options'] generator_flags = params['generator_flags'] # generator_dir: relative path from pwd to where make puts build files. # Makes migrating from make to cmake easier, cmake doesn't put anything here. generator_dir = os.path.relpath(options.generator_output or '.') # output_dir: relative path from generator_dir to the build directory. output_dir = generator_flags.get('output_dir', 'out') for config_name in configurations: # build_dir: relative path from source root to our output files. # e.g. "out/Debug" build_dir = os.path.normpath(os.path.join(generator_dir, output_dir, config_name)) arguments = ['cmake', '-G', 'Ninja'] print 'Generating [%s]: %s' % (config_name, arguments) subprocess.check_call(arguments, cwd=build_dir) arguments = ['ninja', '-C', build_dir] print 'Building [%s]: %s' % (config_name, arguments) subprocess.check_call(arguments) def CallGenerateOutputForConfig(arglist): # Ignore the interrupt signal so that the parent process catches it and # kills all multiprocessing children. signal.signal(signal.SIGINT, signal.SIG_IGN) target_list, target_dicts, data, params, config_name = arglist GenerateOutputForConfig(target_list, target_dicts, data, params, config_name) def GenerateOutput(target_list, target_dicts, data, params): user_config = params.get('generator_flags', {}).get('config', None) if user_config: GenerateOutputForConfig(target_list, target_dicts, data, params, user_config) else: config_names = target_dicts[target_list[0]]['configurations'].keys() if params['parallel']: try: pool = multiprocessing.Pool(len(config_names)) arglists = [] for config_name in config_names: arglists.append((target_list, target_dicts, data, params, config_name)) pool.map(CallGenerateOutputForConfig, arglists) except KeyboardInterrupt, e: pool.terminate() raise e else: for config_name in config_names: GenerateOutputForConfig(target_list, target_dicts, data, params, config_name)
artistic-2.0
ShineFan/odoo
addons/l10n_be_hr_payroll/__openerp__.py
312
1872
# -*- encoding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2011 OpenERP SA (<http://openerp.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Belgium - Payroll', 'category': 'Localization', 'author': 'OpenERP SA', 'depends': ['hr_payroll'], 'version': '1.0', 'description': """ Belgian Payroll Rules. ====================== * Employee Details * Employee Contracts * Passport based Contract * Allowances/Deductions * Allow to configure Basic/Gross/Net Salary * Employee Payslip * Monthly Payroll Register * Integrated with Holiday Management * Salary Maj, ONSS, Withholding Tax, Child Allowance, ... """, 'auto_install': False, 'demo': ['l10n_be_hr_payroll_demo.xml'], 'website': 'https://www.odoo.com/page/accounting', 'data':[ 'l10n_be_hr_payroll_view.xml', 'l10n_be_hr_payroll_data.xml', 'data/hr.salary.rule.csv', ], 'installable': True } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
ofermend/medicare-demo
socialite/jython/Lib/distutils/tests/test_install.py
155
1861
"""Tests for distutils.command.install.""" import os import unittest from distutils.command.install import install from distutils.core import Distribution from distutils.tests import support class InstallTestCase(support.TempdirManager, unittest.TestCase): def test_home_installation_scheme(self): # This ensure two things: # - that --home generates the desired set of directory names # - test --home is supported on all platforms builddir = self.mkdtemp() destination = os.path.join(builddir, "installation") dist = Distribution({"name": "foopkg"}) # script_name need not exist, it just need to be initialized dist.script_name = os.path.join(builddir, "setup.py") dist.command_obj["build"] = support.DummyCommand( build_base=builddir, build_lib=os.path.join(builddir, "lib"), ) cmd = install(dist) cmd.home = destination cmd.ensure_finalized() self.assertEqual(cmd.install_base, destination) self.assertEqual(cmd.install_platbase, destination) def check_path(got, expected): got = os.path.normpath(got) expected = os.path.normpath(expected) self.assertEqual(got, expected) libdir = os.path.join(destination, "lib", "python") check_path(cmd.install_lib, libdir) check_path(cmd.install_platlib, libdir) check_path(cmd.install_purelib, libdir) check_path(cmd.install_headers, os.path.join(destination, "include", "python", "foopkg")) check_path(cmd.install_scripts, os.path.join(destination, "bin")) check_path(cmd.install_data, destination) def test_suite(): return unittest.makeSuite(InstallTestCase) if __name__ == "__main__": unittest.main(defaultTest="test_suite")
apache-2.0
lightning-round/salud-api
app/mod_profiles/models/Permission.py
3
1397
# -*- coding: utf-8 -*- from app.mod_shared.models.db import db class Permission(db.Model): # Attributes id = db.Column(db.Integer, primary_key=True) # Foreign keys analysis_id = db.Column(db.Integer, db.ForeignKey('analysis.id')) permission_type_id = db.Column(db.Integer, db.ForeignKey('permission_type.id')) user_id = db.Column(db.Integer, db.ForeignKey('user.id')) # Relationships analysis = db.relationship('Analysis', backref=db.backref('permissions', lazy='dynamic', cascade='all, delete-orphan', ) ) permission_type = db.relationship('PermissionType', backref=db.backref('permissions', lazy='dynamic')) user = db.relationship('User', backref=db.backref('permissions', lazy='dynamic')) def __init__(self, analysis_id, permission_type_id, user_id): self.analysis_id = analysis_id self.permission_type_id = permission_type_id self.user_id = user_id def __repr__(self): return '<PermissionType: %r>' % self.id
gpl-2.0
XXMrHyde/android_external_chromium_org
tools/telemetry/telemetry/core/chrome/inspector_page_unittest.py
29
2064
# Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import os from telemetry.unittest import tab_test_case unittest_data_dir = os.path.join(os.path.dirname(__file__), '..', '..', '..', 'unittest_data') class InspectorPageTest(tab_test_case.TabTestCase): def __init__(self, *args): super(InspectorPageTest, self).__init__(*args) def setUp(self): super(InspectorPageTest, self).setUp() self._browser.SetHTTPServerDirectories(unittest_data_dir) def testPageNavigateToNormalUrl(self): self._tab.Navigate(self._browser.http_server.UrlOf('blank.html')) self._tab.WaitForDocumentReadyStateToBeComplete() def testCustomActionToNavigate(self): self._tab.Navigate( self._browser.http_server.UrlOf('page_with_link.html')) self._tab.WaitForDocumentReadyStateToBeComplete() self.assertEquals( self._tab.EvaluateJavaScript('document.location.pathname;'), '/page_with_link.html') custom_action_called = [False] def CustomAction(): custom_action_called[0] = True self._tab.ExecuteJavaScript('document.getElementById("clickme").click();') self._tab.PerformActionAndWaitForNavigate(CustomAction) self.assertTrue(custom_action_called[0]) self.assertEquals( self._tab.EvaluateJavaScript('document.location.pathname;'), '/blank.html') def testGetCookieByName(self): self._tab.Navigate( self._browser.http_server.UrlOf('blank.html')) self._tab.WaitForDocumentReadyStateToBeComplete() self._tab.ExecuteJavaScript('document.cookie="foo=bar"') self.assertEquals(self._tab.GetCookieByName('foo'), 'bar') def testScriptToEvaluateOnCommit(self): self._tab.Navigate( self._browser.http_server.UrlOf('blank.html'), script_to_evaluate_on_commit='var foo = "bar";') self._tab.WaitForDocumentReadyStateToBeComplete() self.assertEquals(self._tab.EvaluateJavaScript('foo'), 'bar')
bsd-3-clause
rven/odoo
addons/auth_signup/models/res_config_settings.py
4
1186
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. from ast import literal_eval from odoo import api, fields, models class ResConfigSettings(models.TransientModel): _inherit = 'res.config.settings' auth_signup_reset_password = fields.Boolean(string='Enable password reset from Login page', config_parameter='auth_signup.reset_password') auth_signup_uninvited = fields.Selection([ ('b2b', 'On invitation'), ('b2c', 'Free sign up'), ], string='Customer Account', default='b2b', config_parameter='auth_signup.invitation_scope') auth_signup_template_user_id = fields.Many2one('res.users', string='Template user for new users created through signup', config_parameter='base.template_portal_user_id') def open_template_user(self): action = self.env["ir.actions.actions"]._for_xml_id("base.action_res_users") action['res_id'] = literal_eval(self.env['ir.config_parameter'].sudo().get_param('base.template_portal_user_id', 'False')) action['views'] = [[self.env.ref('base.view_users_form').id, 'form']] return action
agpl-3.0
kvar/ansible
test/units/modules/network/fortios/test_fortios_switch_controller_custom_command.py
21
8565
# Copyright 2019 Fortinet, Inc. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <https://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os import json import pytest from mock import ANY from ansible.module_utils.network.fortios.fortios import FortiOSHandler try: from ansible.modules.network.fortios import fortios_switch_controller_custom_command except ImportError: pytest.skip("Could not load required modules for testing", allow_module_level=True) @pytest.fixture(autouse=True) def connection_mock(mocker): connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_switch_controller_custom_command.Connection') return connection_class_mock fos_instance = FortiOSHandler(connection_mock) def test_switch_controller_custom_command_creation(mocker): schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema') set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200} set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result) input_data = { 'username': 'admin', 'state': 'present', 'switch_controller_custom_command': { 'command': 'test_value_3', 'command_name': 'test_value_4', 'description': 'test_value_5' }, 'vdom': 'root'} is_error, changed, response = fortios_switch_controller_custom_command.fortios_switch_controller(input_data, fos_instance) expected_data = { 'command': 'test_value_3', 'command-name': 'test_value_4', 'description': 'test_value_5' } set_method_mock.assert_called_with('switch-controller', 'custom-command', data=expected_data, vdom='root') schema_method_mock.assert_not_called() assert not is_error assert changed assert response['status'] == 'success' assert response['http_status'] == 200 def test_switch_controller_custom_command_creation_fails(mocker): schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema') set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500} set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result) input_data = { 'username': 'admin', 'state': 'present', 'switch_controller_custom_command': { 'command': 'test_value_3', 'command_name': 'test_value_4', 'description': 'test_value_5' }, 'vdom': 'root'} is_error, changed, response = fortios_switch_controller_custom_command.fortios_switch_controller(input_data, fos_instance) expected_data = { 'command': 'test_value_3', 'command-name': 'test_value_4', 'description': 'test_value_5' } set_method_mock.assert_called_with('switch-controller', 'custom-command', data=expected_data, vdom='root') schema_method_mock.assert_not_called() assert is_error assert not changed assert response['status'] == 'error' assert response['http_status'] == 500 def test_switch_controller_custom_command_removal(mocker): schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema') delete_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200} delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result) input_data = { 'username': 'admin', 'state': 'absent', 'switch_controller_custom_command': { 'command': 'test_value_3', 'command_name': 'test_value_4', 'description': 'test_value_5' }, 'vdom': 'root'} is_error, changed, response = fortios_switch_controller_custom_command.fortios_switch_controller(input_data, fos_instance) delete_method_mock.assert_called_with('switch-controller', 'custom-command', mkey=ANY, vdom='root') schema_method_mock.assert_not_called() assert not is_error assert changed assert response['status'] == 'success' assert response['http_status'] == 200 def test_switch_controller_custom_command_deletion_fails(mocker): schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema') delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500} delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result) input_data = { 'username': 'admin', 'state': 'absent', 'switch_controller_custom_command': { 'command': 'test_value_3', 'command_name': 'test_value_4', 'description': 'test_value_5' }, 'vdom': 'root'} is_error, changed, response = fortios_switch_controller_custom_command.fortios_switch_controller(input_data, fos_instance) delete_method_mock.assert_called_with('switch-controller', 'custom-command', mkey=ANY, vdom='root') schema_method_mock.assert_not_called() assert is_error assert not changed assert response['status'] == 'error' assert response['http_status'] == 500 def test_switch_controller_custom_command_idempotent(mocker): schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema') set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404} set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result) input_data = { 'username': 'admin', 'state': 'present', 'switch_controller_custom_command': { 'command': 'test_value_3', 'command_name': 'test_value_4', 'description': 'test_value_5' }, 'vdom': 'root'} is_error, changed, response = fortios_switch_controller_custom_command.fortios_switch_controller(input_data, fos_instance) expected_data = { 'command': 'test_value_3', 'command-name': 'test_value_4', 'description': 'test_value_5' } set_method_mock.assert_called_with('switch-controller', 'custom-command', data=expected_data, vdom='root') schema_method_mock.assert_not_called() assert not is_error assert not changed assert response['status'] == 'error' assert response['http_status'] == 404 def test_switch_controller_custom_command_filter_foreign_attributes(mocker): schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema') set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200} set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result) input_data = { 'username': 'admin', 'state': 'present', 'switch_controller_custom_command': { 'random_attribute_not_valid': 'tag', 'command': 'test_value_3', 'command_name': 'test_value_4', 'description': 'test_value_5' }, 'vdom': 'root'} is_error, changed, response = fortios_switch_controller_custom_command.fortios_switch_controller(input_data, fos_instance) expected_data = { 'command': 'test_value_3', 'command-name': 'test_value_4', 'description': 'test_value_5' } set_method_mock.assert_called_with('switch-controller', 'custom-command', data=expected_data, vdom='root') schema_method_mock.assert_not_called() assert not is_error assert changed assert response['status'] == 'success' assert response['http_status'] == 200
gpl-3.0
flavour/helios
modules/s3/s3validators.py
3
69315
# -*- coding: utf-8 -*- """ Custom Validators @requires: U{B{I{gluon}} <http://web2py.com>} @author: Fran Boon <fran[at]aidiq.com> @author: Dominic König <dominic[at]aidiq.com> @author: Michael Howden <michael[at]aidiq.com> @author: sunneach @copyright: (c) 2010-2011 Sahana Software Foundation @license: MIT Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ __all__ = ["single_phone_number_pattern", "multi_phone_number_pattern", "IS_LAT", "IS_LON", "IS_INT_AMOUNT", "IS_FLOAT_AMOUNT", "IS_HTML_COLOUR", "THIS_NOT_IN_DB", "IS_UTC_OFFSET", "IS_UTC_DATETIME", "IS_UTC_DATETIME_IN_RANGE", "IS_ONE_OF", "IS_ONE_OF_EMPTY", "IS_ONE_OF_EMPTY_SELECT", "IS_NOT_ONE_OF", "IS_LOCATION", "IS_LOCATION_SELECTOR", "IS_SITE_SELECTOR", "IS_ADD_PERSON_WIDGET", "IS_ACL", "QUANTITY_INV_ITEM", "IS_IN_SET_LAZY"] import time import re from datetime import datetime, timedelta from gluon import current, Field, IS_MATCH, IS_NOT_IN_DB, IS_IN_SET, IS_INT_IN_RANGE, IS_FLOAT_IN_RANGE from gluon.validators import Validator from gluon.storage import Storage def options_sorter(x, y): return (str(x[1]).upper() > str(y[1]).upper() and 1) or -1 # ----------------------------------------------------------------------------- # Phone number requires # Multiple phone numbers can be separated by comma, slash, semi-colon. # (Semi-colon appears in Brazil OSM data.) # @ToDo: Need to beware of separators used inside phone numbers # (e.g. 555-1212, ext 9), so may need fancier validation if we see that. # @ToDo: Add tooltip giving list syntax, and warning against above. # (Current use is in importing OSM files, so isn't interactive.) # @ToDo: Code that should only have a single # should use # s3_single_phone_requires. Check what messaging assumes. phone_number_pattern = "\+?\s*[\s\-\.\(\)\d]+(?:(?: x| ext)\s?\d{1,5})?" single_phone_number_pattern = "%s$" % phone_number_pattern multi_phone_number_pattern = "%s(\s*(,|/|;)\s*%s)*$" % (phone_number_pattern, phone_number_pattern) # ----------------------------------------------------------------------------- class IS_LAT(object): """ example: INPUT(_type="text", _name="name", requires=IS_LAT()) latitude has to be in degrees between -90 & 90 """ def __init__(self, error_message = "Latitude/Northing should be between -90 & 90!" ): self.minimum = -90 self.maximum = 90 self.error_message = error_message def __call__(self, value): try: value = float(value) if self.minimum <= value <= self.maximum: return (value, None) except ValueError: pass return (value, self.error_message) class IS_LON(object): """ example: INPUT(_type="text", _name="name", requires=IS_LON()) longitude has to be in degrees between -180 & 180 """ def __init__(self, error_message = "Longitude/Easting should be between -180 & 180!" ): self.minimum = -180 self.maximum = 180 self.error_message = error_message def __call__(self, value): try: value = float(value) if self.minimum <= value <= self.maximum: return (value, None) except ValueError: pass return (value, self.error_message) # ----------------------------------------------------------------------------- class IS_INT_AMOUNT(IS_INT_IN_RANGE): """ Validation, widget and representation of integer-values with thousands-separators """ def __init__(self, minimum=None, maximum=None, error_message=None): IS_INT_IN_RANGE.__init__(self, minimum=minimum, maximum=maximum, error_message=error_message) def __call__(self, value): thousands_sep = "," value = str(value).replace(thousands_sep, "") return IS_INT_IN_RANGE.__call__(self, value) @staticmethod def represent(value): if value is None: return "" ts = current.deployment_settings.get_L10n_thousands_separator() if not ts: thousands_separator = "" else: thousands_separator = "," return format(int(value), "%sd" % thousands_separator) @staticmethod def widget(f, v, **attributes): from gluon.sqlhtml import StringWidget attr = Storage(attributes) classes = attr.get("_class", "").split(" ") classes = " ".join([c for c in classes if c != "integer"]) _class = "%s int_amount" % classes attr.update(_class=_class) return StringWidget.widget(f, v, **attr) # ----------------------------------------------------------------------------- class IS_FLOAT_AMOUNT(IS_FLOAT_IN_RANGE): """ Validation, widget and representation of float-values with thousands-separators """ def __init__(self, minimum=None, maximum=None, error_message=None, dot='.'): IS_FLOAT_IN_RANGE.__init__(self, minimum=minimum, maximum=maximum, error_message=error_message, dot=dot) def __call__(self, value): thousands_sep = "," value = str(value).replace(thousands_sep, "") return IS_FLOAT_IN_RANGE.__call__(self, value) @staticmethod def represent(value, precision=None): if value is None: return "" ts = current.deployment_settings.get_L10n_thousands_separator() if not ts: thousands_separator = "" else: thousands_separator = "," if precision is not None: fl = format(float(value), "%s.%df" % (thousands_separator, precision)) else: fl = format(float(value), "%sf" % thousands_separator).rstrip("0") if fl[-1] == ".": fl += "0" return fl @staticmethod def widget(f, v, **attributes): from gluon.sqlhtml import StringWidget attr = Storage(attributes) classes = attr.get("_class", "").split(" ") classes = " ".join([c for c in classes if c != "double"]) _class = "%s float_amount" % classes attr.update(_class=_class) return StringWidget.widget(f, v, **attr) # ----------------------------------------------------------------------------- class IS_HTML_COLOUR(IS_MATCH): """ example:: INPUT(_type="text", _name="name", requires=IS_HTML_COLOUR()) """ def __init__(self, error_message="must be a 6 digit hex code!" ): IS_MATCH.__init__(self, "^[0-9a-fA-F]{6}$", error_message) # ----------------------------------------------------------------------------- class THIS_NOT_IN_DB(object): """ Unused currently since doesn't quite work. See: http://groups.google.com/group/web2py/browse_thread/thread/27b14433976c0540 """ def __init__(self, dbset, field, this, error_message = "value already in database!" ): if hasattr(dbset, "define_table"): current.dbset = dbset() else: current.dbset = dbset self.field = field self.value = this self.error_message = error_message self.record_id = 0 def set_self_id(self, id): self.record_id = id def __call__(self, value): tablename, fieldname = str(self.field).split(".") field = current.dbset._db[tablename][fieldname] rows = current.dbset(field == self.value).select(limitby=(0, 1)) if len(rows)>0 and str(rows[0].id) != str(self.record_id): return (self.value, self.error_message) return (value, None) # IS_ONE_OF_EMPTY ------------------------------------------------------------------- # by sunneach 2010-02-03 # copy of nursix's IS_ONE_OF with removed 'options' method regex1 = re.compile("[\w_]+\.[\w_]+") regex2 = re.compile("%\((?P<name>[^\)]+)\)s") class IS_ONE_OF_EMPTY(Validator): """ Filtered version of IS_IN_DB(): validates a given value as key of another table, filtered by the 'filterby' field for one of the 'filter_opts' options (=a selective IS_IN_DB()) NB Filtering isn't active in GQL. For the dropdown representation: 'label' can be a string template for the record, or a set of field names of the fields to be used as option labels, or a function or lambda to create an option label from the respective record (which has to return a string, of course). The function will take the record as an argument. No 'options' method as designed to be called next to an Autocomplete field so don't download a large dropdown unnecessarily. """ def __init__(self, dbset, field, label=None, filterby=None, filter_opts=None, not_filterby=None, not_filter_opts=None, error_message="invalid value!", orderby=None, groupby=None, left=None, multiple=False, zero="", sort=False, _and=None, ): if hasattr(dbset, "define_table"): self.dbset = dbset() else: self.dbset = dbset self.field = field (ktable, kfield) = str(self.field).split(".") if not label: label = "%%(%s)s" % kfield if isinstance(label, str): if regex1.match(str(label)): label = "%%(%s)s" % str(label).split(".")[-1] ks = regex2.findall(label) if not kfield in ks: ks += [kfield] fields = ["%s.%s" % (ktable, k) for k in ks] else: ks = [kfield] fields =[str(f) for f in self.dbset._db[ktable]] self.fields = fields self.label = label self.ktable = ktable if not kfield or not len(kfield): self.kfield = "id" else: self.kfield = kfield self.ks = ks self.error_message = error_message self.theset = None self.orderby = orderby self.groupby = groupby self.left = left self.multiple = multiple self.zero = zero self.sort = sort self._and = _and self.filterby = filterby self.filter_opts = filter_opts self.not_filterby = not_filterby self.not_filter_opts = not_filter_opts def set_self_id(self, id): if self._and: self._and.record_id = id def set_filter(self, filterby = None, filter_opts = None, not_filterby = None, not_filter_opts = None): """ This can be called from prep to apply a filter base on data in the record or the primary resource id. """ if filterby: self.filterby = filterby if filter_opts: self.filter_opts = filter_opts if not_filterby: self.not_filterby = not_filterby if not_filter_opts: self.not_filter_opts = not_filter_opts def build_set(self): dbset = self.dbset db = dbset._db if self.ktable in db: table = db[self.ktable] auth = current.auth if self.fields == "all": fields = [f for f in table if isinstance(f, Field)] else: fieldnames = [f.split(".")[1] if "." in f else f for f in self.fields] fields = [table[k] for k in fieldnames if k in table.fields] if db._dbname not in ("gql", "gae"): orderby = self.orderby or reduce(lambda a, b: a|b, fields) groupby = self.groupby # Caching breaks Colorbox dropdown refreshes #dd = dict(orderby=orderby, groupby=groupby, cache=(current.cache.ram, 60)) dd = dict(orderby=orderby, groupby=groupby) query = auth.s3_accessible_query("read", table) if "deleted" in table: query = ((table["deleted"] == False) & query) if self.filterby and self.filterby in table: if self.filter_opts: query = query & (table[self.filterby].belongs(self.filter_opts)) if not self.orderby: dd.update(orderby=table[self.filterby]) if self.not_filterby and self.not_filterby in table and self.not_filter_opts: query = query & (~(table[self.not_filterby].belongs(self.not_filter_opts))) if not self.orderby: dd.update(orderby=table[self.filterby]) if self.left is not None: dd.update(left=self.left) records = dbset(query).select(*fields, **dd) else: # Note this does not support filtering. orderby = self.orderby or \ reduce(lambda a, b: a|b, (f for f in fields if not f.name == "id")) #dd = dict(orderby=orderby, cache=(current.cache.ram, 60)) dd = dict(orderby=orderby) records = dbset.select(db[self.ktable].ALL, **dd) self.theset = [str(r[self.kfield]) for r in records] #labels = [] label = self.label try: labels = map(label, records) except TypeError: if isinstance(label, str): labels = map(lambda r: label % dict(r), records) elif isinstance(label, (list, tuple)): labels = map(lambda r: \ " ".join([r[l] for l in label if l in r]), records) elif callable(label): # Is a function labels = map(label, records) elif "name" in table: labels = map(lambda r: r.name, records) else: labels = map(lambda r: r[self.kfield], records) self.labels = labels else: self.theset = None self.labels = None #Removed as we don't want any options downloaded unnecessarily #def options(self): def __call__(self, value): try: _table = self.dbset._db[self.ktable] deleted_q = ("deleted" in _table) and (_table["deleted"] == False) or False filter_opts_q = False if self.filterby and self.filterby in _table: if self.filter_opts: filter_opts_q = _table[self.filterby].belongs(self.filter_opts) # For a list field, Web2py now packs elements in "|x|y|" by itself, # so that is no longer done here. The unpacking is left in for now, # in case someone enters a list by hand that way, or in case we get # here from the JSON widget over in s3widgets... # Note that on the way in, nothing checks that the values supplied # for a list:reference actually exist in the target table -- that # is only "assured" by sending out an option list containing only # valid ids. But what if someone constructs a request by hand? # Or what if some of the ids get deleted between the time the form # goes out and when it gets submitted? I just left a form sitting # for about 10 hours (longer than session expiration, even), and it # was accepted. So it's actually not that wildly impossible that # an id in the list would be gone. if self.multiple: if isinstance(value, list): values = value elif isinstance(value, basestring) and \ value[0] == "|" and value[-1] == "|": values = value[1:-1].split("|") elif value: values = [value] else: values = [] if self.theset: if not [x for x in values if not x in self.theset]: return (values, None) else: return (value, self.error_message) else: for v in values: q = (_table[self.kfield] == v) query = query is not None and query | q or q if filter_opts_q != False: query = query is not None and \ (filter_opts_q & (query)) or filter_opts_q if deleted_q != False: query = query is not None and \ (deleted_q & (query)) or deleted_q if self.dbset(query).count() < 1: return (value, self.error_message) return (values, None) elif self.theset: if value in self.theset: if self._and: return self._and(value) else: return (value, None) else: values = [value] query = None for v in values: q = (_table[self.kfield] == v) query = query is not None and query | q or q if filter_opts_q != False: query = query is not None and \ (filter_opts_q & (query)) or filter_opts_q if deleted_q != False: query = query is not None and \ (deleted_q & (query)) or deleted_q if self.dbset(query).count(): if self._and: return self._and(value) else: return (value, None) except: pass return (value, self.error_message) # IS_ONE_OF ------------------------------------------------------------------- # added 2009-08-23 by nursix # converted to subclass 2010-02-03 by sunneach: NO CHANGES in the method bodies class IS_ONE_OF(IS_ONE_OF_EMPTY): """ Extends IS_ONE_OF_EMPTY by restoring the 'options' method. """ def options(self): self.build_set() items = [(k, self.labels[i]) for (i, k) in enumerate(self.theset)] if self.sort: items.sort(options_sorter) if self.zero != None and not self.multiple: items.insert(0, ("", self.zero)) return items # ----------------------------------------------------------------------------- class IS_ONE_OF_EMPTY_SELECT(IS_ONE_OF_EMPTY): """ Extends IS_ONE_OF_EMPTY by displaying an empty SELECT (instead of INPUT) """ def options(self): return [("", "")] # ----------------------------------------------------------------------------- class IS_NOT_ONE_OF(IS_NOT_IN_DB): """ Filtered version of IS_NOT_IN_DB() - understands the 'deleted' field. - makes the field unique (amongst non-deleted field) Example: - INPUT(_type="text", _name="name", requires=IS_NOT_ONE_OF(db, db.table)) """ def __call__(self, value): db = current.db translate = lambda m: m # workaround value = str(value) if not value.strip(): return (value, translate(self.error_message)) if value in self.allowed_override: return (value, None) (tablename, fieldname) = str(self.field).split(".") _table = db[tablename] field = _table[fieldname] field = db[tablename][fieldname] query = (field == value) if "deleted" in _table: query = (_table["deleted"] == False) & query rows = db(query).select(limitby=(0, 1)) if len(rows) > 0: if isinstance(self.record_id, dict): for f in self.record_id: if str(getattr(rows[0], f)) != str(self.record_id[f]): return (value, translate(self.error_message)) elif str(rows[0].id) != str(self.record_id): return (value, translate(self.error_message)) return (value, None) # ----------------------------------------------------------------------------- class IS_LOCATION(Validator): """ Allow all locations, or locations by level. Optimized for use within the S3LocationSelectorWidget's L0 Dropdown. """ def __init__(self, level = None, error_message = None ): T = current.T self.level = level # can be a List or a single element self.error_message = error_message or T("Invalid Location!") def __call__(self, value): db = current.db table = db.gis_location level = self.level if level and level == "L0": # Use cached countries. This returns name if id is for a country. have_location = gis.get_country(value) else: query = (table.id == value) & (table.deleted == False) if level: if isinstance(level, list): query = query & (table.level.belongs(level)) else: query = query & (table.level == level) have_location = db(query).select(table.id, limitby=(0, 1)).first() if have_location: return (value, None) else: return (value, self.error_message) # ----------------------------------------------------------------------------- class IS_LOCATION_SELECTOR(Validator): """ Designed for use within the S3LocationSelectorWidget. For Create forms, this will create a new location from the additional fields For Update forms, this will normally just check that we have a valid location_id FK - although there is the option to create a new location there too, in which case it acts as-above. @ToDo: Audit """ def __init__(self, error_message = None, ): T = current.T self.error_message = error_message or T("Invalid Location!") self.no_parent = T("Need to have all levels filled out in mode strict!") auth = current.auth self.no_permission = auth.messages.access_denied def __call__(self, value): db = current.db auth = current.auth gis = current.gis table = db.gis_location try: # Is this an ID? value = int(value) # Yes: This must be an Update form if not auth.s3_has_permission("update", table, record_id=value): return (value, self.no_permission) # Check that this is a valid location_id query = (table.id == value) & \ (table.deleted == False) & \ (table.level == None) # NB Specific Locations only location = db(query).select(table.id, limitby=(0, 1)).first() if location: # Update the record, in case changes have been made location = self._process_values() # onvalidation form = Storage() form.vars = location gis.wkt_centroid(form) db(table.id == value).update(name = location.name, lat = location.lat, lon = location.lon, addr_street = location.street, addr_postcode = location.postcode, parent = location.parent) # onaccept gis.update_location_tree(value, location.parent) return (value, None) except: # Create form if not auth.s3_has_permission("create", table): return (None, self.no_permission) location = self._process_values() strict = gis.get_strict_hierarchy() if strict and not location.parent: return (value, self.no_parent) if location.name or location.lat or location.lon or \ location.street or location.postcode or location.parent: # onvalidation form = Storage() form.vars = location gis.wkt_centroid(form) value = table.insert(name = location.name, lat = location.lat, lon = location.lon, addr_street = location.street, addr_postcode = location.postcode, parent = location.parent, wkt = form.vars.wkt, lon_min = form.vars.lon_min, lon_max = form.vars.lon_max, lat_min = form.vars.lat_min, lat_max = form.vars.lat_max ) # onaccept gis.update_location_tree(value, location.parent) return (value, None) else: return (None, None) return (value, self.error_message) def _process_values(self): """ Read the request.vars & prepare for a record insert/update Note: This is also used by IS_SITE_SELECTOR() """ db = current.db auth = current.auth gis = current.gis request = current.request response = current.response session = current.session table = db.gis_location L0 = request.vars.get("gis_location_L0", None) # Are we allowed to create Locations? if not auth.s3_has_permission("create", table): return (None, self.no_permission) # What level of hierarchy are we allowed to edit? if auth.s3_has_role(session.s3.system_roles.MAP_ADMIN): # 'MapAdmin' always has permission to edit hierarchy locations L1_allowed = True L2_allowed = True L3_allowed = True L4_allowed = True L5_allowed = True else: if L0: ctable = db.gis_config query = (ctable.region_location_id == L0) config = db(query).select(ctable.edit_L1, ctable.edit_L2, ctable.edit_L3, ctable.edit_L4, ctable.edit_L5, limitby=(0, 1)).first() if L0 and config: # Lookup each level individually L1_allowed = config.edit_L1 L2_allowed = config.edit_L2 L3_allowed = config.edit_L3 L4_allowed = config.edit_L4 L5_allowed = config.edit_L5 else: # default is deployment_setting L1_allowed = response.s3.gis.edit_Lx L2_allowed = L1_allowed L3_allowed = L1_allowed L4_allowed = L1_allowed L5_allowed = L1_allowed # We don't need to do onvalidation of the Location Hierarchy records # separately as we don't have anything extra to validate than we have # done already # We don't use the full onaccept as we don't need to onaccept = gis.update_location_tree L1 = request.vars.get("gis_location_L1", None) L2 = request.vars.get("gis_location_L2", None) L3 = request.vars.get("gis_location_L3", None) L4 = request.vars.get("gis_location_L4", None) L5 = request.vars.get("gis_location_L5", None) # Check if we have parents to create # L1 if L1: try: # Is this an ID? int(L1) # Do we need to update it's parent? if L0: parent = L0 query = (table.id == L1) location = db(query).select(table.parent, limitby=(0, 1)).first() if location and (location.parent != parent): db(query).update(parent=parent) onaccept(L1, parent) except: # Name # Test for duplicates query = (table.name == L1) & (table.level == "L1") if L0: query = query & (table.parent == L0) location = db(query).select(table.id, limitby=(0, 1)).first() if location: # Use Existing record L1 = location.id elif L0 and L1_allowed: parent = L0 L1 = table.insert(name=L1, level="L1", parent=parent) onaccept(L1, parent) elif L1_allowed: L1 = table.insert(name=L1, level="L1") onaccept(L1) else: L1 = None # L2 if L2: try: # Is this an ID? int(L2) # Do we need to update it's parent? if L1: parent = L1 query = (table.id == L2) location = db(query).select(table.parent, limitby=(0, 1)).first() if location and (location.parent != parent): db(query).update(parent=parent) onaccept(L2, parent) except: # Name # Test for duplicates # @ToDo: Also check for L2 parenting direct to L0 query = (table.name == L2) & (table.level == "L2") if L1: query = query & (table.parent == L1) location = db(query).select(table.id, limitby=(0, 1)).first() if location: # Use Existing record L2 = location.id elif L1 and L2_allowed: parent = L1 L2 = table.insert(name=L2, level="L2", parent=parent) onaccept(L2, parent) elif L0 and L2_allowed: parent = L0 L2 = table.insert(name=L2, level="L2", parent=parent) onaccept(L2, parent) elif L2_allowed: L2 = table.insert(name=L2, level="L2") onaccept(L2) else: L2 = None # L3 if L3: try: # Is this an ID? int(L3) # Do we need to update it's parent? if L2: parent = L2 query = (table.id == L3) location = db(query).select(table.parent, limitby=(0, 1)).first() if location and (location.parent != parent): db(query).update(parent=parent) onaccept(L3, parent) except: # Name # Test for duplicates # @ToDo: Also check for L3 parenting direct to L0/1 query = (table.name == L3) & (table.level == "L3") if L2: query = query & (table.parent == L2) location = db(query).select(table.id, limitby=(0, 1)).first() if location: # Use Existing record L3 = location.id elif L2 and L3_allowed: parent = L2 L3 = table.insert(name=L3, level="L3", parent=parent) onaccept(L3, parent) elif L1 and L3_allowed: parent = L1 L3 = table.insert(name=L3, level="L3", parent=parent) onaccept(L3, parent) elif L0 and L3_allowed: parent = L0 L3 = table.insert(name=L3, level="L3", parent=parent) onaccept(L3, parent) elif L3_allowed: L3 = table.insert(name=L3, level="L3") onaccept(L3) else: L3 = None # L4 if L4: try: # Is this an ID? int(L4) # Do we need to update it's parent? if L3: parent = L3 query = (table.id == L4) location = db(query).select(table.parent, limitby=(0, 1)).first() if location and (location.parent != parent): db(query).update(parent=parent) onaccept(L4, parent) except: # Name # Test for duplicates # @ToDo: Also check for L4 parenting direct to L0/1/2 query = (table.name == L4) & (table.level == "L4") if L3: query = query & (table.parent == L3) location = db(query).select(table.id, limitby=(0, 1)).first() if location: # Use Existing record L4 = location.id elif L3 and L4_allowed: parent = L3 L4 = table.insert(name=L4, level="L4", parent=parent) onaccept(L4, parent) elif L2 and L4_allowed: parent = L2 L4 = table.insert(name=L4, level="L4", parent=parent) onaccept(L4, parent) elif L1 and L4_allowed: parent = L1 L4 = table.insert(name=L4, level="L4", parent=parent) onaccept(L4, parent) elif L0 and L4_allowed: parent = L0 L4 = table.insert(name=L4, level="L4", parent=parent) onaccept(L4, parent) elif L4_allowed: L4 = table.insert(name=L4, level="L4") onaccept(L4) else: L4 = None # L5 if L5: try: # Is this an ID? int(L5) # Do we need to update it's parent? if L4: parent = L4 query = (table.id == L5) location = db(query).select(table.parent, limitby=(0, 1)).first() if location and (location.parent != parent): db(query).update(parent=parent) onaccept(L5, parent) except: # Name # Test for duplicates # @ToDo: Also check for L5 parenting direct to L0/1/2/3 query = (table.name == L5) & (table.level == "L5") if L4: query = query & (table.parent == L4) location = db(query).select(table.id, limitby=(0, 1)).first() if location: # Use Existing record L5 = location.id elif L4 and L5_allowed: parent = L4 L5 = table.insert(name=L5, level="L5", parent=parent) onaccept(L5, parent) elif L3 and L5_allowed: parent = L3 L5 = table.insert(name=L5, level="L5", parent=parent) onaccept(L5, parent) elif L2 and L5_allowed: parent = L2 L5 = table.insert(name=L5, level="L5", parent=parent) onaccept(L5, parent) elif L1 and L5_allowed: parent = L1 L5 = table.insert(name=L5, level="L5", parent=parent) onaccept(L5, parent) elif L0 and L5_allowed: parent = L0 L5 = table.insert(name=L5, level="L5", parent=parent) onaccept(L5, parent) elif L5_allowed: L5 = table.insert(name=L5, level="L5") onaccept(L5) else: L5 = None # Check if we have a specific location to create name = request.vars.get("gis_location_name", None) lat = request.vars.get("gis_location_lat", None) lon = request.vars.get("gis_location_lon", None) street = request.vars.get("gis_location_street", None) postcode = request.vars.get("gis_location_postcode", None) parent = L5 or L4 or L3 or L2 or L1 or L0 or None form = Storage() form.vars = Storage() form.vars.lat = lat form.vars.lon = lon # onvalidation gis.wkt_centroid(form) return Storage( name=name, lat=lat, lon=lon, street=street, postcode=postcode, parent=parent, wkt = form.vars.wkt, lon_min = form.vars.lon_min, lon_max = form.vars.lon_max, lat_min = form.vars.lat_min, lat_max = form.vars.lat_max ) # ----------------------------------------------------------------------------- class IS_SITE_SELECTOR(IS_LOCATION_SELECTOR): """ Extends the IS_LOCATION_SELECTOR() validator to transparently support Sites of the specified type. Note that these cannot include any other mandatory fields other than Name & location_id Designed for use within the ???S3LocationSelectorWidget. For Create forms, this will create a new site & location from the additional fields For Update forms, this will normally just check that we have a valid site_id FK - although there is the option to create a new location there too, in which case it acts as-above. @ToDo: Audit """ def __init__(self, site_type = "project_site", error_message = None, ): T = current.T self.error_message = error_message or T("Invalid Site!") self.no_parent = T("Need to have all levels filled out in mode strict!") auth = current.auth self.no_permission = auth.messages.access_denied self.site_type = site_type def __call__(self, value): db = current.db auth = current.auth gis = current.gis table = db.gis_location stable = db[self.site_type] try: # Is this an ID? value = int(value) # Yes: This must be an Update form if not auth.s3_has_permission("update", stable, record_id=value): return (value, self.no_permission) # Check that this is a valid site_id query = (stable.id == value) & \ (stable.deleted == False) site = db(query).select(stable.id, stable.name, stable.location_id, limitby=(0, 1)).first() if site and site.location_id: # Update the location, in case changes have been made location = self._process_values() # Location onvalidation form = Storage() form.vars = location gis.wkt_centroid(form) # Location update lquery = (table.id == site.location_id) db(lquery).update(name = location.name, lat = location.lat, lon = location.lon, addr_street = location.street, addr_postcode = location.postcode, parent = location.parent) # Location onaccept gis.update_location_tree(site.location_id, location.parent) if stable.name != location.name: # Site Name has changed db(query).update(name = location.name) return (value, None) except: # Create form if not auth.s3_has_permission("create", stable): return (None, self.no_permission) location = self._process_values() strict = gis.get_strict_hierarchy() if strict and not location.parent: return (value, self.no_parent) if location.name or location.lat or location.lon or \ location.street or location.postcode or location.parent: # Location onvalidation form = Storage() form.vars = location gis.wkt_centroid(form) # Location creation location_id = table.insert(name = location.name, lat = location.lat, lon = location.lon, addr_street = location.street, addr_postcode = location.postcode, parent = location.parent, wkt = form.vars.wkt, lon_min = form.vars.lon_min, lon_max = form.vars.lon_max, lat_min = form.vars.lat_min, lat_max = form.vars.lat_max ) # Location onaccept gis.update_location_tree(location_id, location.parent) # Site creation value = stable.insert(name = location.name, location_id = location_id) return (value, None) else: return (None, None) return (value, self.error_message) # ----------------------------------------------------------------------------- class IS_ADD_PERSON_WIDGET(Validator): def __init__(self, error_message=None, mark_required=True): self.error_message = error_message or \ current.T("Could not add person record") self.mark_required = mark_required def __call__(self, value): db = current.db manager = current.manager request = current.request T = current.T try: person_id = int(value) except: person_id = None ptable = db.pr_person table = db.pr_contact def email_validate(value, person_id): error_message = T("This email-address is already registered.") if not value: return (value, None) value = value.strip() query = (table.deleted != True) & \ (table.contact_method == "EMAIL") & \ (table.value == value) if person_id: query = query & \ (table.pe_id == ptable.pe_id) & \ (ptable.id != person_id) email = db(query).select(table.id, limitby=(0, 1)).first() if email: return value, error_message return value, None if request.env.request_method == "POST": _vars = request.post_vars mobile = _vars["mobile_phone"] # Validate the phone number if _vars.mobile_phone: regex = re.compile(single_phone_number_pattern) if not regex.match(_vars.mobile_phone): error = T("Invalid phone number") return (person_id, error) if person_id: # update the person record # Values are hard coded, but it looks to work ;) data = Storage() fields = ["first_name", "middle_name", "last_name", "date_of_birth", "gender", "occupation"] for f in fields: if f in _vars and _vars[f]: data[f] = _vars[f] if data: db(ptable.id == person_id).update(**data) # Now check the contact information record = ptable(person_id) pe_id = record.pe_id if pe_id: # Check to see if the contact details have been set up # First Email record = table(pe_id=pe_id, contact_method="EMAIL", ) email = _vars["email"] if record and email: # update if email != record.value: db(table.id == record.id).update(value=email) else: # insert table.insert(pe_id=pe_id, contact_method="EMAIL", value=email ) # Now mobile phone record = table(pe_id=pe_id, contact_method="SMS", ) if record: # update if mobile != record.value: db(table.id == record.id).update(value=mobile) else: # insert if mobile: # Don't insert an empty number table.insert(pe_id=pe_id, contact_method="SMS", value=mobile ) pass else: # Filter out location_id (location selector form values # being processed only after this widget has been validated) _vars = Storage([(k, _vars[k]) for k in _vars if k != "location_id"]) # Validate the email email, error = email_validate(_vars.email, None) if error: return (person_id, error) # Validate and add the person record table = db.pr_person for f in table._filter_fields(_vars): value, error = manager.validate(table, None, f, _vars[f]) if error: return (None, None) person_id = table.insert(**table._filter_fields(_vars)) # Need to update post_vars here, # for some reason this doesn't happen through validation alone request.post_vars.update(person_id=str(person_id)) if person_id: # Update the super-entity manager.model.update_super(table, dict(id=person_id)) person = table[person_id] # Add contact information as provided table = db.pr_contact table.insert(pe_id=person.pe_id, contact_method="EMAIL", value=_vars.email) if _vars.mobile_phone: table.insert(pe_id=person.pe_id, contact_method="SMS", value=_vars.mobile_phone) else: return (person_id, self.error_message) return (person_id, None) # ----------------------------------------------------------------------------- class IS_UTC_OFFSET(Validator): """ Validates a given string value as UTC offset in the format +/-HHMM @author: nursix @param error_message: the error message to be returned @note: all leading parts of the string (before the trailing offset specification) will be ignored and replaced by 'UTC ' in the return value, if the string passes through. """ def __init__(self, error_message="invalid UTC offset!" ): self.error_message = error_message @staticmethod def get_offset_value(offset_str): if offset_str and len(offset_str) >= 5 and \ (offset_str[-5] == "+" or offset_str[-5] == "-") and \ offset_str[-4:].isdigit(): offset_hrs = int(offset_str[-5] + offset_str[-4:-2]) offset_min = int(offset_str[-5] + offset_str[-2:]) offset = 3600*offset_hrs + 60*offset_min return offset else: return None def __call__(self, value): if value and isinstance(value, str): _offset_str = value.strip() offset = self.get_offset_value(_offset_str) if offset is not None and offset > -86340 and offset < 86340: # Add a leading 'UTC ', # otherwise leading '+' and '0' will be stripped away by web2py return ("UTC " + _offset_str[-5:], None) return (value, self.error_message) # ----------------------------------------------------------------------------- # class IS_UTC_DATETIME(Validator): """ Validates a given value as datetime string and returns the corresponding UTC datetime. Example: - INPUT(_type="text", _name="name", requires=IS_UTC_DATETIME()) @author: nursix @param format: strptime/strftime format template string, for directives refer to your strptime implementation @param error_message: dict of error messages to be returned @param utc_offset: offset to UTC in seconds, if not specified, the value is considered to be UTC @param allow_future: whether future date/times are allowed or not, if set to False, all date/times beyond now+max_future will fail @type allow_future: boolean @param max_future: the maximum acceptable future time interval in seconds from now for unsynchronized local clocks @note: datetime has to be in the ISO8960 format YYYY-MM-DD hh:mm:ss, with an optional trailing UTC offset specified as +/-HHMM (+ for eastern, - for western timezones) """ def __init__(self, format=None, error_message=None, utc_offset=None, allow_future=True, max_future=900 ): if format is None: self.format = current.deployment_settings.get_L10n_datetime_format() else: self.format = format self.error_message = dict( format = "Required format: %s!" % self.format, offset = "Invalid UTC offset!", future = "Future times not allowed!") if error_message and isinstance(error_message, dict): self.error_message["format"] = error_message.get("format", None) or self.error_message["format"] self.error_message["offset"] = error_message.get("offset", None) or self.error_message["offset"] self.error_message["future"] = error_message.get("future", None) or self.error_message["future"] elif error_message: self.error_message["format"] = error_message if utc_offset is None: utc_offset = current.session.s3.utc_offset validate = IS_UTC_OFFSET() offset, error = validate(utc_offset) if error: self.utc_offset = "UTC +0000" # fallback to UTC else: self.utc_offset = offset self.allow_future = allow_future self.max_future = max_future def __call__(self, value): _dtstr = value.strip() if len(_dtstr) > 6 and \ (_dtstr[-6:-4] == " +" or _dtstr[-6:-4] == " -") and \ _dtstr[-4:].isdigit(): # UTC offset specified in dtstr dtstr = _dtstr[0:-6] _offset_str = _dtstr[-5:] else: # use default UTC offset dtstr = _dtstr _offset_str = self.utc_offset offset_hrs = int(_offset_str[-5] + _offset_str[-4:-2]) offset_min = int(_offset_str[-5] + _offset_str[-2:]) offset = 3600 * offset_hrs + 60 * offset_min # Offset must be in range -1439 to +1439 minutes if offset < -86340 or offset > 86340: return (dt, self.error_message["offset"]) try: (y, m, d, hh, mm, ss, t0, t1, t2) = time.strptime(dtstr, str(self.format)) dt = datetime(y, m, d, hh, mm, ss) except: try: (y, m, d, hh, mm, ss, t0, t1, t2) = time.strptime(dtstr+":00", str(self.format)) dt = datetime(y, m, d, hh, mm, ss) except: return(value, self.error_message["format"]) if self.allow_future: return (dt, None) else: latest = datetime.utcnow() + timedelta(seconds=self.max_future) dt_utc = dt - timedelta(seconds=offset) if dt_utc > latest: return (dt_utc, self.error_message["future"]) else: return (dt_utc, None) def formatter(self, value): format = self.format offset = IS_UTC_OFFSET.get_offset_value(self.utc_offset) if not value: return "-" elif offset: dt = value + timedelta(seconds=offset) return dt.strftime(str(format)) else: dt = value return dt.strftime(str(format)) + " +0000" # ----------------------------------------------------------------------------- class IS_UTC_DATETIME_IN_RANGE(Validator): def __init__(self, format=None, error_message=None, utc_offset=None, minimum=None, maximum=None): if format is None: self.format = current.deployment_settings.get_L10n_datetime_format() else: self.format = format self.utc_offset = utc_offset self.minimum = minimum self.maximum = maximum delta = timedelta(seconds=self.delta()) min_local = minimum and minimum + delta or None max_local = maximum and maximum + delta or None if error_message is None: if minimum is None and maximum is None: error_message = "enter date and time" elif minimum is None: error_message = "enter date and time on or before %(max)s" elif maximum is None: error_message = "enter date and time on or after %(min)s" else: error_message = "enter date and time in range %(min)s %(max)s" d = dict(min = min_local, max = max_local) self.error_message = error_message % d def delta(self, utc_offset=None): if utc_offset is not None: self.utc_offset = utc_offset if self.utc_offset is None: self.utc_offset = current.session.s3.utc_offset validate = IS_UTC_OFFSET() offset, error = validate(self.utc_offset) if error: self.utc_offset = "UTC +0000" # fallback to UTC else: self.utc_offset = offset delta = IS_UTC_OFFSET.get_offset_value(self.utc_offset) return delta def __call__(self, value): val = value.strip() # Get UTC offset if len(val) > 5 and val[-5] in ("+", "-") and val[-4:].isdigit(): # UTC offset specified in dtstr dtstr = val[0:-5].strip() utc_offset = "UTC %s" % val[-5:] else: # use default UTC offset dtstr = val utc_offset = self.utc_offset # Offset must be in range -2359 to +2359 offset = self.delta(utc_offset=utc_offset) if offset < -86340 or offset > 86340: return (val, self.error_message) # Convert into datetime object try: (y, m, d, hh, mm, ss, t0, t1, t2) = \ time.strptime(dtstr, str(self.format)) dt = datetime(y, m, d, hh, mm, ss) except: try: (y, m, d, hh, mm, ss, t0, t1, t2) = \ time.strptime(dtstr+":00", str(self.format)) dt = datetime(y, m, d, hh, mm, ss) except: return(value, self.error_message) # Validate dt_utc = dt - timedelta(seconds=offset) if self.minimum and dt_utc < self.minimum or \ self.maximum and dt_utc > self.maximum: return (dt_utc, self.error_message) else: return (dt_utc, None) def formatter(self, value): format = self.format offset = self.delta() if not value: return "-" elif offset: dt = value + timedelta(seconds=offset) return dt.strftime(str(format)) else: dt = value return dt.strftime(str(format)) + "+0000" # ----------------------------------------------------------------------------- class IS_ACL(IS_IN_SET): """ Validator for ACLs @attention: Incomplete! Does not validate yet, but just convert. @author: Dominic König <[email protected]> """ def __call__(self, value): """ Validation @param value: the value to validate """ if not isinstance(value, (list, tuple)): value = [value] acl = 0x0000 for v in value: try: flag = int(v) except (ValueError, TypeError): flag = 0x0000 else: acl |= flag return (acl, None) # ----------------------------------------------------------------------------- class QUANTITY_INV_ITEM(object): """ For Inv module by Michael Howden """ def __init__(self, db, inv_item_id, item_pack_id ): self.inv_item_id = inv_item_id self.item_pack_id = item_pack_id current.db = db def __call__(self, value): db = current.db error = "Invalid Quantity" # @todo: better error catching query = (db.inv_inv_item.id == self.inv_item_id) & \ (db.inv_inv_item.item_pack_id == db.supply_item_pack.id) inv_item_record = db(query).select(db.inv_inv_item.quantity, db.supply_item_pack.quantity, db.supply_item_pack.name, limitby = (0, 1)).first() # @todo: this should be a virtual field if inv_item_record and value: query = (db.supply_item_pack.id == self.item_pack_id) send_quantity = float(value) * db(query).select(db.supply_item_pack.quantity, limitby=(0, 1)).first().quantity inv_quantity = inv_item_record.inv_inv_item.quantity * \ inv_item_record.supply_item_pack.quantity if send_quantity > inv_quantity: return (value, "Only %s %s (%s) in the Inventory." % (inv_quantity, inv_item_record.supply_item_pack.name, inv_item_record.supply_item_pack.quantity) ) else: return (value, None) else: return (value, error) def formatter(self, value): return value # ----------------------------------------------------------------------------- class IS_IN_SET_LAZY(Validator): """ Like IS_IN_SET but with options obtained from a supplied function. Options are instantiated when the validator or its options() method is called, so don't need to be generated until it's used. Useful if the field is not needed on every request, and does significant processing to construct its options, or generates a large collection. If the options are just from a database query, one can use IS_ONE_OF instead. Raises an exception if an options collection is passed rather than a callable as this is a programming error, e.g. accidentally *calling* the options function in the constructor instead of passing the function. That would not get lazy options instantiation. The options collection (theset) and labels collection parameters to IS_IN_SET are replaced by: @param theset_fn: Function of no arguments that returns a collection of options and (optionally) labels. Both options and labels can be supplied via a dict or OrderedDict (options are keys, values are labels), list (or tuple) of two-element lists (or tuples) (element 0 in each pair is an option, element 1 is it's label). Otherwise, labels are obtained either by calling the supplied represent function on each item produced by theset_fn, or (if no represent is supplied), the items themselves are used as labels. @param represent: Function of one argument that returns the label for a given option. If there is a function call that returns the collection, just put "lambda:" in front of the call. E.g.: Field("nationality", requires = IS_NULL_OR(IS_IN_SET_LAZY( lambda: gis.get_countries(key_type="code"))), label = T("Nationality"), represent = lambda code: gis.get_country(code, key_type="code") or UNKNOWN_OPT) Keyword parameters are same as for IS_IN_SET, except for labels, which is not replaced by a function that parallels theset_fn, since ordering is problematic if theset_fn returns a dict. """ def __init__( self, theset_fn, represent=None, error_message="value not allowed", multiple=False, zero="", sort=False, ): self.multiple = multiple if not callable(theset_fn): raise TypeError("Argument must be a callable.") self.theset_fn = theset_fn self.theset = None self.labels = None self.error_message = error_message self.zero = zero self.sort = sort def _make_theset(self): theset = self.theset_fn() if theset: if isinstance(theset, dict): self.theset = [str(item) for item in theset] self.labels = theset.values() elif isinstance(theset, (tuple,list)): # @ToDo: Can this be a Rows? if isinstance(theset[0], (tuple,list)) and len(theset[0])==2: self.theset = [str(item) for item,label in theset] self.labels = [str(label) for item,label in theset] else: self.theset = [str(item) for item in theset] if represent: self.labels = [represent(item) for item in theset] else: self.theset = theset def options(self): if not self.theset: self._make_theset() if not self.labels: items = [(k, k) for (i, k) in enumerate(self.theset)] else: items = [(k, self.labels[i]) for (i, k) in enumerate(self.theset)] if self.sort: items.sort(options_sorter) if self.zero != None and not self.multiple: items.insert(0, ("", self.zero)) return items def __call__(self, value): if not self.theset: self._make_theset() if self.multiple: ### if below was values = re.compile("[\w\-:]+").findall(str(value)) if isinstance(value, (str,unicode)): values = [value] elif isinstance(value, (tuple, list)): values = value elif not value: values = [] else: values = [value] failures = [x for x in values if not x in self.theset] if failures and self.theset: if self.multiple and (value == None or value == ""): return ([], None) return (value, self.error_message) if self.multiple: if isinstance(self.multiple,(tuple,list)) and \ not self.multiple[0]<=len(values)<self.multiple[1]: return (values, self.error_message) return (values, None) return (value, None) # ----------------------------------------------------------------------------- class IS_TIME_INTERVAL_WIDGET(Validator): """ Simple validator for the S3TimeIntervalWidget, returns the selected time interval in seconds """ def __init__(self, field): self.field = field def __call__(self, value): try: val = int(value) except ValueError: return (0, None) request = current.request _vars = request.post_vars try: mul = int(_vars[("%s_multiplier" % self.field).replace(".", "_")]) except ValueError: return (0, None) seconds = val * mul return (seconds, None) # END -------------------------------------------------------------------------
mit
mmiklavc/incubator-metron
metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/management_ui_master.py
4
3498
""" Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from resource_management.core.resources.system import Directory from resource_management.core.resources.system import File from resource_management.core.source import Template from resource_management.libraries.functions.format import format from resource_management.libraries.script import Script from resource_management.core.resources.system import Execute from resource_management.core.logger import Logger from resource_management.core.exceptions import Fail from management_ui_commands import ManagementUICommands class ManagementUIMaster(Script): def install(self, env): from params import params env.set_params(params) self.install_packages(env) def configure(self, env, upgrade_type=None, config_dir=None): from params import params env.set_params(params) File(format("/etc/default/metron"), content=Template("metron.j2") ) File(format("{metron_config_path}/management_ui.yml"), mode=0755, content=Template("management_ui.yml.j2"), owner=params.metron_user, group=params.metron_group ) File(format("{metron_management_ui_path}/assets/app-config.json"), content=Template("management-ui-app-config.json.j2"), owner=params.metron_user, group=params.metron_group ) Directory('/var/run/metron', create_parents=False, mode=0755, owner=params.metron_user, group=params.metron_group ) if params.metron_knox_enabled and not params.metron_ldap_enabled: raise Fail("Enabling Metron with Knox requires LDAP authentication. Please set 'LDAP Enabled' to true in the Metron Security tab.") def start(self, env, upgrade_type=None): from params import params env.set_params(params) self.configure(env) commands = ManagementUICommands(params) commands.start_management_ui() def stop(self, env, upgrade_type=None): from params import params env.set_params(params) commands = ManagementUICommands(params) commands.stop_management_ui() def status(self, env): from params import status_params env.set_params(status_params) commands = ManagementUICommands(status_params) commands.status_management_ui(env) def restart(self, env): from params import params env.set_params(params) self.configure(env) commands = ManagementUICommands(params) commands.restart_management_ui(env) if __name__ == "__main__": ManagementUIMaster().execute()
apache-2.0
meisterkleister/erpnext
erpnext/setup/page/setup_wizard/test_setup_data.py
52
159337
from __future__ import unicode_literals args = { "attach_letterhead": "erpnext.jpg,data:image/jpeg;base64,/9j/4AAQSkZJRgABAQEASABIAAD/4gxYSUNDX1BST0ZJTEUAAQEAAAxITGlubwIQAABtbnRyUkdCIFhZWiAHzgACAAkABgAxAABhY3NwTVNGVAAAAABJRUMgc1JHQgAAAAAAAAAAAAAAAAAA9tYAAQAAAADTLUhQICAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABFjcHJ0AAABUAAAADNkZXNjAAABhAAAAGx3dHB0AAAB8AAAABRia3B0AAACBAAAABRyWFlaAAACGAAAABRnWFlaAAACLAAAABRiWFlaAAACQAAAABRkbW5kAAACVAAAAHBkbWRkAAACxAAAAIh2dWVkAAADTAAAAIZ2aWV3AAAD1AAAACRsdW1pAAAD+AAAABRtZWFzAAAEDAAAACR0ZWNoAAAEMAAAAAxyVFJDAAAEPAAACAxnVFJDAAAEPAAACAxiVFJDAAAEPAAACAx0ZXh0AAAAAENvcHlyaWdodCAoYykgMTk5OCBIZXdsZXR0LVBhY2thcmQgQ29tcGFueQAAZGVzYwAAAAAAAAASc1JHQiBJRUM2MTk2Ni0yLjEAAAAAAAAAAAAAABJzUkdCIElFQzYxOTY2LTIuMQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAWFlaIAAAAAAAAPNRAAEAAAABFsxYWVogAAAAAAAAAAAAAAAAAAAAAFhZWiAAAAAAAABvogAAOPUAAAOQWFlaIAAAAAAAAGKZAAC3hQAAGNpYWVogAAAAAAAAJKAAAA+EAAC2z2Rlc2MAAAAAAAAAFklFQyBodHRwOi8vd3d3LmllYy5jaAAAAAAAAAAAAAAAFklFQyBodHRwOi8vd3d3LmllYy5jaAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABkZXNjAAAAAAAAAC5JRUMgNjE5NjYtMi4xIERlZmF1bHQgUkdCIGNvbG91ciBzcGFjZSAtIHNSR0IAAAAAAAAAAAAAAC5JRUMgNjE5NjYtMi4xIERlZmF1bHQgUkdCIGNvbG91ciBzcGFjZSAtIHNSR0IAAAAAAAAAAAAAAAAAAAAAAAAAAAAAZGVzYwAAAAAAAAAsUmVmZXJlbmNlIFZpZXdpbmcgQ29uZGl0aW9uIGluIElFQzYxOTY2LTIuMQAAAAAAAAAAAAAALFJlZmVyZW5jZSBWaWV3aW5nIENvbmRpdGlvbiBpbiBJRUM2MTk2Ni0yLjEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHZpZXcAAAAAABOk/gAUXy4AEM8UAAPtzAAEEwsAA1yeAAAAAVhZWiAAAAAAAEwJVgBQAAAAVx/nbWVhcwAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAo8AAAACc2lnIAAAAABDUlQgY3VydgAAAAAAAAQAAAAABQAKAA8AFAAZAB4AIwAoAC0AMgA3ADsAQABFAEoATwBUAFkAXgBjAGgAbQByAHcAfACBAIYAiwCQAJUAmgCfAKQAqQCuALIAtwC8AMEAxgDLANAA1QDbAOAA5QDrAPAA9gD7AQEBBwENARMBGQEfASUBKwEyATgBPgFFAUwBUgFZAWABZwFuAXUBfAGDAYsBkgGaAaEBqQGxAbkBwQHJAdEB2QHhAekB8gH6AgMCDAIUAh0CJgIvAjgCQQJLAlQCXQJnAnECegKEAo4CmAKiAqwCtgLBAssC1QLgAusC9QMAAwsDFgMhAy0DOANDA08DWgNmA3IDfgOKA5YDogOuA7oDxwPTA+AD7AP5BAYEEwQgBC0EOwRIBFUEYwRxBH4EjASaBKgEtgTEBNME4QTwBP4FDQUcBSsFOgVJBVgFZwV3BYYFlgWmBbUFxQXVBeUF9gYGBhYGJwY3BkgGWQZqBnsGjAadBq8GwAbRBuMG9QcHBxkHKwc9B08HYQd0B4YHmQesB78H0gflB/gICwgfCDIIRghaCG4IggiWCKoIvgjSCOcI+wkQCSUJOglPCWQJeQmPCaQJugnPCeUJ+woRCicKPQpUCmoKgQqYCq4KxQrcCvMLCwsiCzkLUQtpC4ALmAuwC8gL4Qv5DBIMKgxDDFwMdQyODKcMwAzZDPMNDQ0mDUANWg10DY4NqQ3DDd4N+A4TDi4OSQ5kDn8Omw62DtIO7g8JDyUPQQ9eD3oPlg+zD88P7BAJECYQQxBhEH4QmxC5ENcQ9RETETERTxFtEYwRqhHJEegSBxImEkUSZBKEEqMSwxLjEwMTIxNDE2MTgxOkE8UT5RQGFCcUSRRqFIsUrRTOFPAVEhU0FVYVeBWbFb0V4BYDFiYWSRZsFo8WshbWFvoXHRdBF2UXiReuF9IX9xgbGEAYZRiKGK8Y1Rj6GSAZRRlrGZEZtxndGgQaKhpRGncanhrFGuwbFBs7G2MbihuyG9ocAhwqHFIcexyjHMwc9R0eHUcdcB2ZHcMd7B4WHkAeah6UHr4e6R8THz4faR+UH78f6iAVIEEgbCCYIMQg8CEcIUghdSGhIc4h+yInIlUigiKvIt0jCiM4I2YjlCPCI/AkHyRNJHwkqyTaJQklOCVoJZclxyX3JicmVyaHJrcm6CcYJ0kneierJ9woDSg/KHEooijUKQYpOClrKZ0p0CoCKjUqaCqbKs8rAis2K2krnSvRLAUsOSxuLKIs1y0MLUEtdi2rLeEuFi5MLoIuty7uLyQvWi+RL8cv/jA1MGwwpDDbMRIxSjGCMbox8jIqMmMymzLUMw0zRjN/M7gz8TQrNGU0njTYNRM1TTWHNcI1/TY3NnI2rjbpNyQ3YDecN9c4FDhQOIw4yDkFOUI5fzm8Ofk6Njp0OrI67zstO2s7qjvoPCc8ZTykPOM9Ij1hPaE94D4gPmA+oD7gPyE/YT+iP+JAI0BkQKZA50EpQWpBrEHuQjBCckK1QvdDOkN9Q8BEA0RHRIpEzkUSRVVFmkXeRiJGZ0arRvBHNUd7R8BIBUhLSJFI10kdSWNJqUnwSjdKfUrESwxLU0uaS+JMKkxyTLpNAk1KTZNN3E4lTm5Ot08AT0lPk0/dUCdQcVC7UQZRUFGbUeZSMVJ8UsdTE1NfU6pT9lRCVI9U21UoVXVVwlYPVlxWqVb3V0RXklfgWC9YfVjLWRpZaVm4WgdaVlqmWvVbRVuVW+VcNVyGXNZdJ114XcleGl5sXr1fD19hX7NgBWBXYKpg/GFPYaJh9WJJYpxi8GNDY5dj62RAZJRk6WU9ZZJl52Y9ZpJm6Gc9Z5Nn6Wg/aJZo7GlDaZpp8WpIap9q92tPa6dr/2xXbK9tCG1gbbluEm5rbsRvHm94b9FwK3CGcOBxOnGVcfByS3KmcwFzXXO4dBR0cHTMdSh1hXXhdj52m3b4d1Z3s3gReG54zHkqeYl553pGeqV7BHtje8J8IXyBfOF9QX2hfgF+Yn7CfyN/hH/lgEeAqIEKgWuBzYIwgpKC9INXg7qEHYSAhOOFR4Wrhg6GcobXhzuHn4gEiGmIzokziZmJ/opkisqLMIuWi/yMY4zKjTGNmI3/jmaOzo82j56QBpBukNaRP5GokhGSepLjk02TtpQglIqU9JVflcmWNJaflwqXdZfgmEyYuJkkmZCZ/JpomtWbQpuvnByciZz3nWSd0p5Anq6fHZ+Ln/qgaaDYoUehtqImopajBqN2o+akVqTHpTilqaYapoum/adup+CoUqjEqTepqaocqo+rAqt1q+msXKzQrUStuK4trqGvFq+LsACwdbDqsWCx1rJLssKzOLOutCW0nLUTtYq2AbZ5tvC3aLfguFm40blKucK6O7q1uy67p7whvJu9Fb2Pvgq+hL7/v3q/9cBwwOzBZ8Hjwl/C28NYw9TEUcTOxUvFyMZGxsPHQce/yD3IvMk6ybnKOMq3yzbLtsw1zLXNNc21zjbOts83z7jQOdC60TzRvtI/0sHTRNPG1EnUy9VO1dHWVdbY11zX4Nhk2OjZbNnx2nba+9uA3AXcit0Q3ZbeHN6i3ynfr+A24L3hROHM4lPi2+Nj4+vkc+T85YTmDeaW5x/nqegy6LzpRunQ6lvq5etw6/vshu0R7ZzuKO6070DvzPBY8OXxcvH/8ozzGfOn9DT0wvVQ9d72bfb794r4Gfio+Tj5x/pX+uf7d/wH/Jj9Kf26/kv+3P9t////4QDKRXhpZgAATU0AKgAAAAgABwESAAMAAAABAAEAAAEaAAUAAAABAAAAYgEbAAUAAAABAAAAagEoAAMAAAABAAIAAAExAAIAAAARAAAAcgEyAAIAAAAUAAAAhIdpAAQAAAABAAAAmAAAAAAAAABIAAAAAQAAAEgAAAABUGl4ZWxtYXRvciAyLjIuMQAAMjAxMzowOToyNyAxODowOTo0OAAAA6ABAAMAAAABAAEAAKACAAQAAAABAAAEOqADAAQAAAABAAABrQAAAAD/4QJlaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wLwA8eDp4bXBtZXRhIHhtbG5zOng9ImFkb2JlOm5zOm1ldGEvIiB4OnhtcHRrPSJYTVAgQ29yZSA1LjEuMiI+CiAgIDxyZGY6UkRGIHhtbG5zOnJkZj0iaHR0cDovL3d3dy53My5vcmcvMTk5OS8wMi8yMi1yZGYtc3ludGF4LW5zIyI+CiAgICAgIDxyZGY6RGVzY3JpcHRpb24gcmRmOmFib3V0PSIiCiAgICAgICAgICAgIHhtbG5zOnhtcD0iaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wLyI+CiAgICAgICAgIDx4bXA6TW9kaWZ5RGF0ZT4yMDEzLTA5LTI3VDE4OjA5OjQ4PC94bXA6TW9kaWZ5RGF0ZT4KICAgICAgICAgPHhtcDpDcmVhdG9yVG9vbD5QaXhlbG1hdG9yIDIuMi4xPC94bXA6Q3JlYXRvclRvb2w+CiAgICAgIDwvcmRmOkRlc2NyaXB0aW9uPgogICAgICA8cmRmOkRlc2NyaXB0aW9uIHJkZjphYm91dD0iIgogICAgICAgICAgICB4bWxuczpkYz0iaHR0cDovL3B1cmwub3JnL2RjL2VsZW1lbnRzLzEuMS8iPgogICAgICAgICA8ZGM6c3ViamVjdD4KICAgICAgICAgICAgPHJkZjpCYWcvPgogICAgICAgICA8L2RjOnN1YmplY3Q+CiAgICAgIDwvcmRmOkRlc2NyaXB0aW9uPgogICA8L3JkZjpSREY+CjwveDp4bXBtZXRhPgr/2wBDAAEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQH/2wBDAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQH/wAARCAGtBDoDAREAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD+/igAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgBGZVBZmCgAsxYgAKvLMSeAAOSTwO9AHxV4x/4KU/8E6Ph5rl74Y+IH7ff7FPgbxLps8ttqPh7xj+1R8C/DOuWFzA2ya3vdJ1rx3ZX9rPC/ySxT28ckbHa6g8UAcv/wAPYf8Agln/ANJK/wBgH/xMj9nX8f8Amo1ACf8AD2H/AIJZf9JLP2AP/EyP2de//dRu+f1oAX/h7D/wSz/6SV/sA/8AiZH7Ov8A88agA/4ew/8ABLP/AKSV/sA/+Jkfs7f/ADxqAD/h7D/wSz7/APBSv9gH/wATI/Z1/wDnjUAH/D2H/gln/wBJK/2AeOv/ABmR+zrx/wCZGoAP+HsP/BLP/pJX+wD/AOJkfs6//PGoA+hvg1+1N+zH+0al7J+z3+0b8B/jummxLcai/wAGvi98PviglhA7KqTXreCPEOuC1idnVVknKIzMoDEsMgHvHXmgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAQkKCx6AEn6Dk0Af5wP/B1n/wAFovjN4n/aI8a/8E0v2evHOs+APgv8I7LRtO/aI1jwfqt7pGs/GL4ka/olh4ivPAmra1YPbXx+HHgDSdW03StS8L2s9taeI/Gs2vt4ni1Wz0LQIbUA/iMbnOV/75VRnkf3QM/U89SeaAGjrnB6senXI/yPegAHB6HkL+HH+c/jQApJOcqeAR9ckd/8+tAC55xg9euPf/Jz/wDroARuexPH/sw4/HH5c80AGfvHDc+31/yT7/mAL3A56k5xx/F/n/8AXQB6F8Kfi18TfgZ4/wDDHxU+Dvjvxb8MviN4N1ODV/C/jbwNruoeGfE+h39vIHjnsNY0qa3uowwXy7i3leW0u4Ge2vLe4tpZYmAP9ef/AIN/v+CpWs/8FUP2FdM+JXxLTS7b9oX4P+Kp/g98dl0i1i03T/EviPTdG0zW/DvxM03R7eNLXSbP4heG9St77UdPsUt9MsvGGm+LLPRrKz0a3sLaIA/cmgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgBCMgg9Dwfcd/wA6AP8AFM/4LVzzXH/BWv8A4KLvPI8rr+2J8d4VZzkiK28a6hbW8YP92GCGKJB/CiKO1AH5gc4P178Dkr1B555+vP8AeoAQZz/30Oo3Hjue+P0oAXn+R65PQc8fr/eHSgA556dPoOvcdc46n8KADnrjv3Pqf5eg659aAA5//Wf9rv6jB/DJHegBOfm6Z4789O/9emRmgBecj6nvx1bt6/5NABzx069zk/e556env2oA/wBDj/gx5mmf4Yf8FErdpGMEPjv9mqeKInKRzT+GPi/FPIo7PLHbQI57rEg/hoA/u7oAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAP8AFF/4LS/8paf+CjPr/wANkfH3/wBTvVPf/wDX60AfmMc4Of8A638PU8H1/wA4oAQZyPXLc8dcc9+oP4GgAGc47kLnt0A98nvnj/64ApyScjsR147d+OvvjjmgAyc49we3r9c/Q9c8HrQAN7+/6sMDr7df60AHPzevGenp356EfzPXFABzkdcZPp1+b3z/AJ9aADnAHvknj+96e/8AkmgD/Q0/4Md/+Sb/APBRf/sdf2ZP/Ub+M1AH939ABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/ijf8ABaX/AJS0/wDBRn/s8j4+8/8Ac+ap3/zzQB+Y3+Prx1Ht/TqT60AIM+p/i53c+nPH5dcGgBec9+MdT+PoefXnoe+RQAvPX29ePr06/gaADBz1PXPX8x06fj+XWgBDn3/P1P8AnHXjIoAOeRz25z/9bvxnPqT9QA7g5PUjrx3P+fT8KADkfn1J56/5789OtAH+hn/wY7/8k3/4KL/9jr+zJ/6jfxmoA/u/oAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAP8UX/AILS/wDKWn/gozxn/jMj4+9s/wDM+ap+fNAH5jEdeP8Ax0H+76/j+vpQAgHPT1/g/L3/AK9jQAd+hxx/APxoAO7cHv8AwD1/X/JoACOenf8AuZH59f8APFACkdePX+EH+L/D9OaAE7Hj0/gH4/8A1/060ALjnoOp/g7c9+n4/ie9ABj2/wDHPf8AT8e3PWgD/Q0/4Md/+Sb/APBRf/sdf2ZP/Ub+M3+fegD+7+gAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoA/xRf+C0v/KWn/goz/2eR8fffr481X/P55oA/Mc85P8AMH1HXuenP/6qAGjqOB1bnB75xn2Pbnp34NAC4Oc46Y7ZPAHTk/j+mTQAYPXA5z29wfm75/Dr3oAMHrhfXpz/ACz75657UABHt+nqwPPf/HnPNAAAeRgdeuODj/PPpk0AHcH/AGj2OepOeme/596ADHTgdR7H72eOvGPf1zQB/oaf8GO//JN/+Ci//Y6/syf+o38ZqAP7v6ACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAMzWda0jw7pOp69r+qadomiaLp97q+saxq99a6ZpWk6VptvJeajqep6jfTQWWn6fY2sUtzeX15PDa2sEbzTyxxqWAB+H/AMUv+DlH/giz8JfE194T139trwp4k1bTbmW0vp/hl4A+LfxT8PRzRMysLXxf4D8C654U1WLKnFxo+s6hbE8CY9wDzP8A4io/+CIf/R2mu/8AiPH7RX/zsqAD/iKj/wCCIf8A0dprv/iPH7RX/wA7KgA/4io/+CIf/R2mu/8AiPH7RX/zsqAD/iKj/wCCIf8A0dprv/iPH7RX/wA7KgA/4io/+CIf/R2mu/8AiPH7RX/zsqAP8xH/AIKa/Gj4d/tF/wDBQn9s747/AAi1yTxN8MPi7+0n8XPiF4B8QTaVquhzaz4U8U+LL/VNE1J9H12007WdNa7sp45WstTsbS+t9xjubeGUFFAPho55+uc8e3v25/TnPUATJz27+mef+Bfn/XrQADIPYZx6c8fX/H+VAC5OfpnnjPb/AGvz6dqADJz/APqzj/vr8KAA5/rzj1Hv6fTmgBMnnp79P1+b+Z/+sALzkfU+nqff8+M9evQgCZP689P73+91/rn6gA/sV/4NaP8AgrL+wp/wTU8E/tm6P+2L8YNQ+F+o/FzxR8DtR8BwWXw3+JPjwazZ+C9F+Jdn4ilkl8BeFvEcWmmyuPEOkxqmpvaPdfaS9os6wzmIA/q//wCIqP8A4Ih/9Haa7/4jx+0V/wDOyoAP+IqP/giH/wBHaa7/AOI8ftFf/OyoAP8AiKj/AOCIf/R2mu/+I8ftFf8AzsqAD/iKj/4Ih/8AR2mu/wDiPH7RX/zsqAD/AIio/wDgiH/0dprv/iPH7RX/AM7KgD9Bf2O/+Ct3/BOn9vbWn8K/srftVfDn4l+N47O4v2+Hdx/b3gb4kS2VnE09/faf4B+IWj+FvFWtWGnwr5t/qGh6ZqdnZIQ91PEvzUAfo0CCAQcg8gjkEHuD3zQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/nWf8HiP/AAU2+KV78bPDP/BNL4aeJdU8LfCrwf4I8KfE39oO20e9udPm+JXjjxqJtZ8D+CfExt5EbUPBngnwpBpPi2LRJJDpWs+KfE9rqGr2N1deE9BltQD+GIsxxyxxwBvYAAdABngY6AdOBgdgBNxPdugP3m9/f/8AX3oANx9W7j7zYz83+HXH9MgBuOerf99H0z6/565oAAxI6nPPBdux/wA8/hQAbjjOW7fxNzkD3/zz60AJnJHBzzySeuM89Tz7/hkckAU9DkZH69R/nrQA3uOOct3PXH9aAAdeh/h55x2x75/ycc0AKe+fQ/Xtn/J70AKfcdxzn34/Xr7d6AEb3GevTOcZH/66AD+9x2Gffg9P1/zmgA9OO579+efx/rQAcenf1/2hn3680AKCR0yM57kZ5/yc9s8daAAs2T9/1+82D+v8s0AAY8ct1PVjnOM+v88evegA3Hj7/J/vNn69env/AJIABj1yevdm74/x+n55AAbj6t1P8ROeM/59DQB0/gvxv4v+HXi3w3488B+J/EPgzxp4P1iw8Q+FfFvhfV7/AEPxL4a13S7qK807WtB1vTZ7fUdJ1XT7qKK5tL2yuIZ4Zo0bdtBVgD/ZL/4IW/8ABQDxJ/wUk/4Jw/Bf9oL4hC0b4v6TP4g+EfxqudPt4rSx1X4lfDe5ttOvvFEFrbpFaWb+OfD174b8b3unWNvb6fpmpeIb7TtPgjsrWBQAfr/QAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAIeh+hoA/yKv8Ag6jZm/4LdftZbmZtuhfs8KuWJ2r/AMM8fDYhVznaoJJwOMknqSaAP549vuR9D/n6fSgBMZ7noDkHnv8Az6k9zzQAY57+n3h/tc/X09PwNABjJ79v4gf4Tz9ff8aAADp1HB6EevX684z6cUAAX6jp0PsP8/me9ABjkDk8nksM9B+P+HXvQAEcH8uoHofw/wDrn1oAQDnv1P8AED+P1/UdTQAoGSeT0HcHPHf/AOv1oAUjryehPXGMenp6Z7CgAI9z16Z45PT6e34UAIw6+49QO49fyz6cUAGPvcnIxzuGeh/L8aAFwMj6njIx359z+o/CgBMe56+o556n1P8A+qgBcc/Xd3756/Xnr26UANI5br0/vD8sdh9aAFA6devqD2z+OO350AJjgcnr/eHHXke/60AOx15J5A5PXOOv+HfJ9aAEIxnr3zlhz8vf/PHXvQAMMA8k8dz7j/H+negD/UY/4My2Y/8ABKv4nKWYqn7anxYCgkkKD8LfggxCgnABYknHUkk8mgD+tugAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgBryJGu6R1Rc43OwVcnoMsQMn60AQ/a7X/n5t/+/wBH/wDFUASpLFKCY5EkAOCUdXAPuVJwaAH0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAIeh+hoA/yKP8Ag6i/5Tdftaf9gP8AZ4/9Z4+GtAH88xOPT8c+o9Pr/nmgBAfp0X9Sfy68UANz349fb+P8f896AFH3u3UdP90/pQAKcAdOh6/X1/z2oAP4Tz6d/ZfrQAmeQeD9768Dvx1/DpQArHgj/wDX1X/H+XvQAgPPbq/8gev9fSgBVBYnAzwp4GfTnGP8P8ACQo5/hPQ/wnqfw6f55oANkn909R2PT8uv+fegBGSQj7p/75P94Y7Ht1oAjz9/p2/PGD/k/j3oAdnkf7zHrz/F2/rQAdu3Xt/vf5+vWgAz9P4zz9f5etADT95/93/D/PvQAo7dP69Pp+vpjk55AEB4X6j+bD/PfPNACg8dQef6r7cnnr/kACHv/wAC7eigfh/kc9aABiSD9AenqR3/AM59BQB/qL/8GZX/ACis+KH/AGer8V//AFVfwPoA/rdoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoA/gT/AOD2b9sfyNO/ZL/YJ8OariS/n1f9qP4rafDNtcWloNa+Gnwct7gRNmSC6upfi5qF3ZXWEE+m6BfLFI6QSxAH+fnQB/ow/wDBj/8A8m9/t6/9ll+Dv/qEeLKAP7l6ACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAEPQ/Q0Af5FP8AwdQ5/wCH3X7Wn/YD/Z474/5t4+Gv+f1oA/nkP+P8WO/19z/KgBPc56L/ABEdc5PX+f8AWgAznHX6bj/te/PQc5/KgA6nqeo/iPPyk+v455z60AA6dznPfnr9R+J/x5ADPGec8dWxngH6f40AGeRyTkt0bPb1z179eP1oAUnr/wDFf7v+PP8A9egBM8jr1Yfez0H16/j+fWgD+rb/AINMP2QP2Yv2x/2v/wBpXwR+1H8Dvh38dPCnhj9miLxV4d0H4jaFHr+naN4i/wCFr+DNIOsWFvLIghv20u+u7Ezqd5t55I87HdWAP72v+HFf/BIP/pHj+y5/4bi1/wDkqgA/4cV/8Eg/+keP7Ln/AIbi1/8AkqgA/wCHFv8AwSDUq3/DvD9lw4ZeP+Fb2hz8w7Ncsp+jBge4IyCAf41/xY02w0f4ofEfStLtILDTdM8eeMtP0+xtgYrazsrLxPq1paWsCbjshtraGKCJSxKxxquTjNAHAdx16nvxxn39vSgA7Z5646n+9j1oAPU8/wAX8R7HHr/+r1oAQnluvT+9+PH6nvigBR269f72e2eeeen09qAGgjjJP/fZ9+v+e9AC5z+fqT3Xvn3P8vXIAmev/Au5OflHvz685xQAN0P0B+8T1P4/iaAP9Rf/AIMyv+UVnxQ/7PV+K/8A6qv4H0Af1u0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAf4rX/AAWx/bG/4br/AOCnn7Wfx703Vf7W8CP8R734bfCaeKbztPf4V/CaKP4feDdT0xQzLBa+LLPQZPHM8KMR/afijUJiS8rGgD8rKAP9GH/gx/8A+Te/29f+yy/B3/1CPFlAH9y9ABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFACHofoaAP8ij/g6i/5Tdftaf8AYD/Z4/8AWePhrQB/PKQT39ewPfP+H8z7ACDJ/JT0+v4Z/wA+9ACdx07g8f73Ufn0989qAD+L8s/XaevPXr04980AKBkD6Htnv7/oPxPSgBP4eo7ZyM9h1/8A1Ht70AHpznO7n6j2yOvX1P40AK3Q5/Hj6fn09fY9KAGgc9c8t24zjn+mentnrQB/aT/wZNf8n1ftZf8AZpMX/q6PANAH+lZQAUANbp/wJP8A0NaAP8Fz418/GD4rf9lI8eZ4z/zN2te9AHmOOQeOrdv949f88880AHPH1x04+99f/r+/NAC//Zds9/f+XfmgBpHzNz2547e/6dMn6UAKOq++T09u3oD+B9aAE+bjn6cDPQ//AF+p9e/UAB+HX0Hcp/jQAdOM/wB78flB5/r1yefegBCCM5Pb+o9+np+PTuAf6jH/AAZlf8orPih/2er8V/8A1VfwPoA/rdoAKACgAoAKACgAoAKACgAoAKACgAoA4j4gfEz4b/Cfw9c+L/in8QfBHw08J2eftnij4geK9B8G+HrXClz9p1rxHf6bpsGEVnPm3K4UFjwCaAPzC+Iv/Bev/gjn8Lrh7XxN/wAFC/2dtSljdo3Pw88S6h8Xody5ztufhNpPjW2deDh0mZD2Y5GQDxSL/g5q/wCCHE121kv7d2gCZSMvL8EP2m4LQ5JHy383wVjsX6clbkgdTgEEgH0p8LP+C3P/AASR+M11ZWPgb/goP+zGL/UXWKw0/wAa/EbTfhZqN3PJxHbQWHxRHg68ku5WISG0WE3MshEccTSEKQD9MfD3iTw74v0ex8ReE9f0XxR4f1OLz9N13w9qtjrWj6hASQJrHU9NnubK7iJBHmQTyISCN2RQBtUAFABQAUAFADSyjqy59yKADev95f8AvoUAKCDyCD9DmgBaACgDkvHHj7wL8MvDOpeNfiT418JfD3wdo0ay6v4t8ceI9H8J+GdKidgiSalr2vXlhpdjG7kIr3V3ErMQoJJxQB+T/wASv+Dgj/gjN8J9UvNH8U/8FA/ghqV5YErcP8OD4x+MmnMylgRa6z8IfCvjnSL4gqQRY31wen94ZAOB0T/g5S/4Ih+IJYYrD9vTwZbvO6Ih1z4WftA+GYlZyFBmn8SfCXSYbdAT88lxJFHGMs7KoJAB+nv7N37X/wCy3+2H4Z1fxh+y38fvhV8evDvh69tNN8R6h8MfGWjeKW8Nalf2z3ljp/iSy0+5l1Dw9f3trHJc2tnrNrZXFxDFLJDG6xSFQD6NJA5JA9ycUAJvX+8v/fQoAN6/3l/76FADqACgBkkkcUbyyukcUaNJJJIwSOONAWd3diFVFUFmZiAoBJOBQB+cvxr/AOCv3/BLz9ni91HSfi3+3l+zF4e1/SJHh1bwtpfxV8N+N/GOlzx/ft9S8HeA7vxN4osbkdre60iKZuqoc0AfIM//AAcy/wDBDu2vVsJP27/DjTsSBJB8Fv2lrqy4OPm1K2+DM2nJ14LXQB5IyATQB9FfCX/guB/wSP8AjbeWmn+Av+Cgn7NQ1G/nS2sdO8c+PLf4Taje3UjbIrWzsPivB4Ku7q6nchILeCGSad2VIUdmUEA/T7Rta0fxFpdjrnh/VtM13RNUt0vNM1jRr+11TS9RtJRmO6sdQspZ7S7t5Byk9vNJG45VjQBp0AFABQAUAFADd6/3l/76FABvX+8v/fQoAcCDyDn3BzQAUAFAHh/xr/ab/Zv/AGbNJh179oj4/fBf4E6Ncqz2mp/GD4n+CvhvZ320suywm8X63pC38rOpjjhszPNLL+6jR5CFIB+YvjL/AIOK/wDgir4EvrnT9b/b9+Fd9PakiWTwb4b+KvxFsnIJH+jan8Pvh/4n028HBw1pdzqRggkEZAKPhj/g48/4Im+LSg0r9vn4c2nmEBf+En8DfGjwUBnp5h8Z/DTQREPUylAOpIoA+8/gH/wUL/YU/alnhsf2d/2vf2dfi/rM8nlp4Y8E/FvwVqvjFXJwon8GLq6eK7YSnPkNc6PEs4BMJkAJAB9i0AFABQAUAFABQAUAFABQB8tfHX9uH9jP9mF2t/2iv2rP2ePgjfhd6aP8T/jD4B8G+ILrKeYFsfD2ua9Z65qMpjzIsNjp9xKyAuEKgmgD86vE/wDwcff8ETfCN1cWeq/t8/Du7mtWKyP4Y8CfGrxtasR1Nvf+DPhnr9jeL6PaXE6t2Y0AUfD3/Byf/wAERfE1xBbab+3r4JtpLlwkbeIfhh8ffCVurN0Nxd+K/hPotrap6yXU0Ma/xMKAP0A+Bn/BRD9g39pm6t9O+AP7Y37Nfxa1u5dI4vDPgv4y+A9W8X+ZLjykl8Hx64PFEDSk7YhPpMZlYMqbmVgAD7IoAKACgAoAQso6sAfcgUAJvX+8v/fQoAN6/wB5f++hQAoIPQg+uDmgBaACgAoAKACgAJA5Jx7k0AN3r/eX/voUAG9f7y/99CgA3r/eX/voUAOoA/Hb/gut+3t4T/YF/wCCbn7Snj0ePPD/AIa+Nfjr4a6/8MvgB4cn8Qadp/jPX/iF8Qhb+B4PEHg3RZ7qHUtal+Glp4jn+ImryWME8Gn6d4bllvCokijmAP8AGQoAKAP9GD/gyAIH7Pf7euSBn4y/B3qcf8yR4s9aAP7lt6/3l/76FABvX+8v/fQoAN6/3l/76FADgQeQc+4OaACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAEPQ/Q0Af5FP/B1AM/8ABbv9rTqf+JH+zx0/7N4+GvrQB/PKefX8CPX60AJj69vTt/nnPXtQAY/3vzH+1+vP8vegAxznnt3HoR9e/PvQAYwOM9/TPX8v/rZ70AGOCOe3pnjH+H86ADHs3Ge47/j+Xv19aAAjOc5P/wCsep9v5nuKADHf5s5J7dSP849+tAH9o3/Bk1/yfV+1l/2aTF/6ujwDQB/pWUAFADW6f8CT/wBDWgD/AAXPjVz8YPit1/5KR486df8Akb9a9aAPM8c9+pPbGTn8e9ABjjv1z2z1z/n296ADH179/U5/P0/WgBCOSeeeO39aAADp14+npjnk/pQAY6deMdx6H39+ffHagBMYx169yPY/0/IHvigAxnqG5z6dx9fbAznnrQAEZByDnHXjsc+p59fXtQB/qLf8GZX/ACis+KH/AGer8V//AFVfwPoA/rdoAKACgAoAKACgAoAKACgAoAKAPm/9q39rr9nP9iL4NeIvj7+1D8U/Dvwm+GHhwx282ta5JPPf6zq9xHNJYeGvCXh7TYbzX/F/irU1t520/wAOeHNN1LVrmG3u7sWq2VleXMAB/nqf8FKf+Dw39qL406n4j+HH/BPLwtD+zB8KPPuLC1+MfjDTdF8WftBeK9PBMf2+w029Gs+APhZbX8LyobGys/G3ii0222oab430a7MlpCAfyNfGX48/G79orxld/EP4+fF74lfGjx1fbxceLfij428R+OtfMTuZPssWp+JdR1K6trGNji3sLaSGyto1SG3giijRFAPJ6ACgAoA+nf2aP20/2tP2N/E0fi79lv8AaJ+LfwN1f7ZDfXsPw/8AGmsaPoGvTW5QxxeLPCIuJfCfjGx/dxiTTPFWi6xps4jRZrSRUUAA/tk/4Ja/8HjlzqGr+Hfg/wD8FS/B+mWltf3FlpNh+1f8I/Dz2VtYPK0cJ1D4x/CfTDPELQyNLcah4q+FVtbpZRiC2g+F8sf2rVYgD+8rwH4+8D/FPwZ4Z+Ivw18X+G/H3gHxno9n4g8JeM/B+taf4i8MeJdD1CITWWraJrmlXF1p2pWF1GQ0VzaXEsTcjduVgADraACgAoA/xbv+C7n/ACmG/wCCh/8A2cv44/8AQrSgD8mKAP7+f+DHn4uYk/4KC/Ae9uvvJ8Bfi54bst/TY3xI8G+Nbryyec+Z4Ai3oOMYkPMdAH9/9AH8f/8AwWm/4Oo/hL+xXrXin9mr9hay8I/tCftN6Fd6h4f8efEXWJLnUvgd8FdatWltL7R1bSL2xuPij8QtIuUeC/0XSNUsfCXhfUU+za9rur6xp2s+DYQD/O2/ax/bn/a6/bl8c3PxC/au+P8A8R/jTr0l9c32m2XirXrj/hEPCzXZbzbTwP4C077D4J8C6bhmA03wloGj2RLSSPA0sssjgHyhQAUAf6cf/BlX4C/sT/gm7+0L8QJ4fKuvHn7Y/inSreQrzc6L4J+EHwgjs5g/8SrrPiHxFbheQrQOc5cgAH0B/wAHgn/KHrVP+zl/gf8A+g+MKAP8pygAoA/3FP8AgmZ+0I37Vn/BPj9jT9oS5vv7S1r4m/s6/C7WfF92ZPOLeP7LwvYaH8RITNktK1p460vxDaNI+JHMBaREkLIoB+a3/BZv/g4S/Zl/4JQ6bcfDLRbOy/aB/bD1XTYLvR/gZoeupp+leAbLU7QXWl+KfjV4mtYNQfwrp1xbSwalo/hCztp/Gfiq0ms54bbQvD+pR+LbYA/zWf29P+Cyv/BRD/go3rGrn9oz9oPxQfh1qF7Pc6f8CPh1c3XgD4IaJavIz2tgvgXRLtY/FJ05WeGy1z4hX/jLxUsTyJLr8yyOCAfl1QAUAFAH2r+x7/wUX/ba/YI8TweJ/wBk79o74lfCNRqEWpap4Q0vW5NU+Gnie4iZCf8AhL/hhr66p4C8T7408jz9Z8P3d5bxPJ9jubaRvMAB/oZ/8EYf+DqP4N/tv6/4U/Zs/ba0nwr+zl+094hu7LQPA/jfR7i7tPgT8Z9dudkFno1rPrd7fX/wv8eavcsLbS/DniDVtV8N+JL8x2eheKbbXdV0jwhKAf14UAFABQAUAf4N/wC0h/ycR8ev+y0fFL/1ONdoA8XoA/0sv+DKT9oT/hNP2Jv2of2bb+++06n8CP2gNK8f6XBJJ+8sfBvx08Hw29jYwRk/8eqeLvhX431JmUEi61mYSNhohQB/Tp+3Z/wUD/ZX/wCCcXwT1H47ftV/Emy8E+Gla5sfCnhuzSPVviD8TPEkFv8AaIvCPw48IxzwX3iTXZw0RuH8y00TQraYat4o1nQtEiudTgAP84j/AIKPf8HZv7fn7WOq614N/ZRvp/2IvgZIbuytf+EF1C21b49+KLByY477xH8W5bGG58G3MqpHeWun/Cuz8LX+jyzT2F34v8UwpHdsAfy5+LvGPi7x/wCItU8X+O/FXiPxt4s1y5e91rxR4u1zU/EniLWLyU5ku9U1vWbq91LULmQkl57u5mlcnLOTQBzlABQAqsyMrqxVlIZWUkMrA5DKRyCDyCDkHkUAftV/wT+/4OAf+CmP/BPC70zR/h18cNQ+Lfwfs5o/tPwJ/aAm1X4l/D4WYcGS28MXt7qlr42+How08sUPgTxVoOky30xvNW0jVyDE4B/pS/8ABIT/AILh/sqf8Fa/h8kHgu+g+FX7TPhfRor/AOKH7NninWLa48S6XHF5UN74r+HuqNFYp8Rvh2buRIv7d02ytdX0GSeztPGGg6BNqGkPqgB+1FABQAUAFABQB+Ev/BW3/g4B/Yz/AOCU1jf+BdfvJvjr+1Vc6TFqHh/9nLwBqtrb6hpC39v9o0vVPi54ya31LTPhfoV5C0NzbW9zY63421SyurPUdF8Gaho88urWwB/nfft4/wDBxv8A8FSP27NT1vTdQ+O2r/s6/CPUvPtrT4N/s13+r/DPRP7KlLo1n4n8Z2Gov8SPG73tr5UWsW2v+LJfDV5Ksslh4Y0i2uJLMAH4UXFxPdzz3V1PNc3NzNJcXNzcSPNPcTzO0k0880jNJLNLIzSSSSMzu7MzMWJJAIaACgBQSpDKSGBBBBIIIOQQRyCDyD1zQB+y/wCwv/wX1/4Kg/sCajpNr8NP2jfEvxO+GWnG3hl+CH7QV7q/xb+GUumW5BTS9Fg13VU8WeAbQctj4b+KvCBkkYtdfakZ4nAP9D7/AIJBf8HG/wCyH/wVGl0n4SeIoF/Zq/a4ks0z8F/GmvWd74e+I91BC0l/c/BPxzJFpsXi2WKNGvbjwXq2naJ44srb7XLYaX4k0fSNR8RKAf0RUAFAH8e3/B1v/wAEcf8Ahrb4Ev8At9/s/wDhX7X+0h+zX4UnX4r6Bolnu1P4v/s/6R9p1LULsW8CF9S8ZfCAS33iLSmAW91bwPP4o0YvqV9pHg7S4wD/ADEKACgD9Qf+CQf/AAUq8ff8Esf21vh5+0f4c/tPWfh1eOvgX4+/D6xnCr8Qfg7r99Zt4jsLeCWWG2bxN4cntrPxh4JuZpreOHxRoWnWt7cDRb/V7a6AP9nr4WfFDwD8bfhr4D+MHwr8T6Z41+G3xN8JaD458DeLNGlM2m+IPC/ibTbfVtG1S1Z1SVFurK6id7e4jhurWXzLa7hhuYpYkAO9oAKACgAoA/zK/wDg7O/4K+/8NPfHdf8Agnl8CPFH2r4C/s1eKZbr4161o15nT/id+0LpiXOn3Xh95oHZb7wz8F0nvtBWFmS3vPiJeeKJ7m1u08LeFtUAB/G3QAUAf1Pf8G5H/BO74L+IPEniv/grJ/wUC1/wr8MP2D/2LfEGnXvh3W/iZItj4S+J/wAfbW801vDdr9nmimm8R6D8O9U1HQ9QbRNOtry68ZfEfU/BPgzS7HxCF8VaGgB+pv8AwU0/4PKr+7/4SD4V/wDBLv4eHTYD9q06T9qb41+H4ptQkHzxDU/hZ8GtQEtpaDcEutM8QfFb7bJLFJJban8LLOZUuAAfw7fHf9oT44/tP/EjW/i9+0P8V/Hfxl+JfiF86p4y+IPiPUfEesPbpJJJb6bZS380sWk6JYebJFpWg6TFY6LpFsRaaZYWlqiQqAeO0AFABQAUAFABQB/rO/8ABpR/yha+Df8A2V39oD/1ZWq0Af0t0AFABQAUAFABQAUAFABQBy3jbxv4O+G3hHxH4++IPinw/wCCfBHhDRtQ8ReK/F3ivV7DQPDXhvQdKt3u9T1rXdb1Se107S9L0+1jkuLu+vbiKCCNSXfJAIB/Lb8ev+Dw7/glf8JvF2peFPh9oP7SX7RMGl3txZy+Nfhf8PfDOheBb5raZreWTRdQ+KXjfwR4h1W38xJDDfJ4Wt9PvIfLuLK6uIJUlIB4N/xGwf8ABPr/AKNc/bO/8FHwO/8Anu0AH/EbB/wT6/6Nc/bO/wDBR8Dv/nu0AH/EbB/wT6/6Nc/bO/8ABR8Dv/nu0AH/ABGwf8E+v+jXP2zv/BR8Dv8A57tAAf8Ag9g/4J9EH/jFz9s7/wAFPwO/+e6f60AfxB/8FlP25/hx/wAFHv8AgoT8bP2vPhP4S8ceCPAvxM034W2mkeG/iNFoMPi2xk8C/C7wl4F1J9Ri8M6vrujLHeanoF1d2C22q3TfYJoPtBjuPMjUA/L0j/d79R7/AP1+ff60AJ+XRevTvzjj8KAEHUdD/L+P9P59aAFHJ6dx29VPH/1qABeg6Hg9f971waAAdO3br/ujuQf5f40AHcdOrdBgHj8fx/KgAbgHp/nb/k/hQAg6jgdXH6d/zoA/tI/4Mmv+T6v2sv8As0mL/wBXR4BoA/0rKACgBrdP+BJ/6GtAH+C58a/+SwfFbp/yUjx4ef8Asb9aoA8y7j/ebnv/ABf560AHYcDr19Pm6f5//WAJ6/R/5nv/AJ60AIRy/Tt1Hr6e/wDM0AL3HTv268Dof5njPpQAmPu/d5x29j155/x/UAUfUde31Xvjr/X6cACevT+Lt/sj8vf3/OgAPQ9Og6Y9Rzn0OeM/p3AP7DP+CCv/AAcVfsrf8Eov2M/GH7N3xp+Cv7QvxC8XeIf2gPGnxbttd+Fll8N7rw5DofiXwZ8PPDlpps7eLfHXhnU11a3u/B9/PdrHYy2Zt7mzMN0ZRcRqAftp/wARsH/BPv8A6Nc/bO/8FHwP/wDnu0AH/EbB/wAE+v8Ao1z9s7/wUfA7/wCe7QAf8RsP/BPr/o139s7/AMFHwO/+e7QAf8RsH/BPr/o1z9s7/wAFHwO/+e7QA5f+D1//AIJ8l1D/ALL37Z6qWUMw0f4HsVUnDMF/4W8NxUZIXI3EYyM5oA/YD/gnZ/wcBf8ABOP/AIKV+LLf4X/Bv4i+Jfh58bL61lu9J+Cvx08PWngPxx4lhtYZbi+/4Qu9sNa8R+CvG15ZQQXN7caJ4b8VXviWHTLe41OXQksLe5uIQD9ss55ByDyD1znvQAUAFABQB8h/t1fttfA3/gnp+zJ8Rv2p/wBoPXX0vwP4CsFTT9F09raTxR4/8ZaissXhb4eeCdPup7dNU8V+Kb+M29nC80Nlp1lFqPiDW7vTvD2javqdkAf49f8AwVA/4Kk/tK/8FVP2hNW+NHx01250vwjpd1qNj8HPglpGqXlx8P8A4N+D7iZPK0jQbSUW8Wp+JNSgt7Ofxn44u7KDWfFuqQxySx6dolhoPh/RQD816ACgC9pumalrN7BpukaffarqN0xS10/TbS4vr25cKXKQWtrHLPMwRWYrHGxCqWPAJoA9avP2b/2iNP0lNfv/AIC/Gix0KUMYtbvPhb44ttJkCgMxTUptCSzcKCCxWY4BBPWgDxuaGW3llguIpIJ4JHhmhmRo5YZY2KSRSxuA8ckbhkdHAZWBVgCCKAI6ACgD+lX/AIN7f+C7Hj3/AIJk/Grw/wDA342+KtW1/wDYN+KniZbTxxoF8brV2+BXiPXZlgT4weArdfOvLHTLe9eGb4l+F9LSWDxDoJvtcsNLvPF2m6ct8Af6yGia3o/iXRtI8R+HdW03XvD+v6ZYa3oWuaNfW2p6RrOj6raxX2matpWpWcs1nqGm6jZTwXljfWk0ttd2s0U8EskUiuQDToAKAP8AFu/4Luf8phv+Ch//AGcv44/9CtKAPyYoA/q1/wCDOj4uf8K//wCCteoeAZ7rZa/Hb9mH4ueA7ezd8R3Gt+F9V8FfFuzuETI33Vpo3w88RRxnnbbXl7xzuUA/dP8A4Okv+C82ufs42esf8E4f2N/G93oXx08S6NbP+0z8WfDF61tq/wAJfBniTS473TvhZ4Q1W3YXOmfEbxrol/a6v4l8QWUkF54M8H32nWmj3B8R+Jpb7wiAf5vBJJJJJJJJJOSSeSSTyST1NACUAFABQB/rc/8ABqB4C/4Q3/gif+zxrbQ+TN8TviD+0D49mBXa8nkfGTxd8PreZwcH97ZeArV4mP37cwupKMpIB5T/AMHgn/KHrVP+zl/gf/6D4woA/wApygAoA/tS/wCCeP8AwcSeDv8Agn5/wQHu/gX4N16y1z9ubwR8YPix8Kv2ffAOpadc6jbeEvA3xDvI/ilF8dvEjXdpJod34X8HeIPHPjbTtD8O3FzdXeveMtL0jSbzRx4Vk1XUrQA/jd+IPxB8cfFfxz4t+JnxL8V6946+IPjzxBqnivxl4x8Ualc6x4h8S+I9bu5b/VdY1fU7ySW5vL6+u5pJppZXJy21QqKqgA4+gAoAKACgAoAUEqQykhgQQQSCCDkEEcgg8g9c0Af6V/8Awauf8FxvE/7WHh4f8E7v2sPFs3iH4+fDDwhcaz8Afih4i1OS58Q/GP4ZeG4l/tjwP4nvL52uNb+I3w20vytR0/V/PudU8XeALbUL7V4f7T8Ea1r3iIA/tIoAKACgD/Bv/aQ/5OI+PX/ZaPil/wCpxrtAHi9AH9IH/BtF/wAFRPgp/wAEwf2qf2hfGv7SnifVvD3wW+JP7Mfimxe20PQ9X8Q6rr3xW8BeItA8W/DjQtN07SbW6X+1fEGmjx14V0e61aTS9CtdV8R2U2ta7o+mrdXagH5kf8FMf+Ckf7QH/BUL9p3xX+0R8ctauodOa61DSPhL8L7W/muPCPwc+G/2+WfRvBfhq3KW8Et0sHk3PinxK1pb6l4u1/7TrGoLEjWdjYgH57UAFABQAUAFABQB7R+zt+0H8W/2U/jf8NP2ifgV4uvvA/xX+Evimx8W+DvEdid3kX1mXiudP1K0YiDVtA13TZ73QvEmhXqy6dr2galqWjalDPY31xC4B/s7f8Epv+CiHgT/AIKhfsUfC79q7wdpsXhnW9b+3+D/AIseAo7sXp+Hvxe8Ji2t/GXhiO53ySXGkz/a9O8UeE7q6KX974L8SeHL7Urez1G5urK3AP0aoAKACgD+U7/g48/4L7wf8E3vBsn7KH7Lmrabqn7bPxP8L/b9S8Sr9j1PT/2afAutxNFY+LtUsJhcW178T/Ets0tx8PPDOoQyWuk2Kp478TWsulv4Y0fxgAf5bPizxb4p8e+J/EHjbxx4k13xj4y8WaxqHiHxR4r8Uatf694j8R69q11Je6prWua1qk91qOq6rqN5NLdX1/fXM91dXEsk00ryOzEA56gAoA7bwh8NPiP8QZTD4C+H/jbxvMJfIMXhDwprviWUT4VvJMejWF64l2ujeWRvw6nGGBIBe8ZfCH4s/DkOfiF8L/iJ4ECOsbnxl4K8S+GAkjsERHOt6ZY7XdiFVThmYgAEmgDzugAoA0dI1fVvD+raXr2g6pqOia7omo2Wr6LrWkXtzpuraRq2m3MV7p2qaXqNnLDeWGo2F5DDd2V7aTRXNrcxRTwSxyxq4AP9R3/g2f8A+C7d/wD8FDPh7d/sjftW+LLO5/bM+EOg/wBo+GfFuoG3sbv9ov4WaXFBBN4ikVPKt7v4peCCY4fHdtbRQz+ItDmsPG9rBeTw+NptJAP6yqAGuiSI8ciLJHIrJIjqHR0cFWR1YFWVlJDKwIIJBBBoA/yeP+DmT/gjo/8AwTi/am/4Xv8ABTww1j+x3+1Dr+r6z4LtdMtWXSPg98VpRPrPjD4PSeSn2bTdDu1N14t+F8DC1R/C7ax4X0+C4HgC/v7kA/mQoAKAP7y/+DQj/gr1/wAI3rs//BK34+eKNmgeJ7vXPGH7IGv61ebYNI8Tzm613x78DluJ38uK08Ut/aPj3wFbt5Ea+JU8aaMs95qPirw3psQB/oW0AFABQB/PF/wcbf8ABXS2/wCCYn7G954Z+F/iCC2/a7/aWs9d8DfBCC1njbVPh9oaW0dt45+N1zBkm3Hgyy1C307wY04K3vxA1fQ7lbTVNJ0HxLDbgH+RXc3Nze3NxeXlxPd3l3PLc3V1cyyT3NzczyNLPcXE8rPLNPNK7SSyyO0kkjM7szMSQCCgD70/4JqfsAfFr/gph+198Mf2VfhNFNYv4ovTrfxG8cvZSXulfC74U6HcWr+NviDrKK0UTppVncw2GhafPc2a+IfF2q+HfDEN3b3WtQSqAf6MH/Bwx+zf8Jf2Q/8Ag3G+J/7NfwL8NQ+FPhX8H3/Zj8I+FNKQxyXc0Fr+0J8OZ9R1zW7yOKE6p4m8Tavcah4j8Ua1LGtxrXiHVdT1W5Hn3clAH+VXQAUAFABQAUAFABQAUAf6z3/BpT/yha+DP/ZXP2gP/Vl6tQB/S1QAUAFABQAUAFABQAUAFAH8BX/B6j+3B4/0HVP2bf8Agn34S1u70PwN4u8EP+0p8Y7GyuJbf/hOUXxlrngr4S6Dq5hmj+16B4d1zwZ438UT6PepPp994hi8L6s8JvfDdjJGAfwAMSx3HBJ5ZiWJJJPJOcknuTznjrgUAAx/s9+57j6n88/rQAgx146N3Oe/bP8AnmgAHX+HuOp649ckd/8A9RIyAHGO3Udzjoeeuf8APqDgAT/vn8z1/wC+v8ffGeAAP/AT+J/qf89enNADjg/3e/Un1J7Hv+f9ABDjnp0Xuefrz/8Aq7+tABxkdOh5yf8Aa9+nr/8AXoAXPP8AXPP3f979e+etAAMEduh6k46/Xj19/wA6ADII/LjJ9B2zn9D0oAOOOn8Xc4zj6+/P5jqMgCnHPQ/ie+3qc/rn+tADR1/h/i7nHTHr39+1AH9pH/Bkz/yfT+1l/wBmkRf+ro8A+tAH+lbQAUANbp/wJP8A0NaAP8Fv41/8lh+KvT/kpHjzgk8/8VfrXuP6/wBaAPMuNw6dT3Oc5Pv3/XPvQAnp06joT/ePbOTz7HvQAvHPTo3Un+99e/fue1ADT1bgdfU89ffr+H1xQA4fw9Ordz+nJx7g4PtQA38ufc+h6/N+H8+oBAFH4df9r1X1/wA5x2zQAfl/F3Pp9eff8x60AIcYPQ8DuT3Hv0/Ud+vIAdyeDwe/PT/eOf8APPoAKPvHp198/eHvj+ue2OoAccnj8zn7w7k9ffuehPNACDtwPfk9j67v54/GgBR34H6+jdyf1zjrzQAh7fd6epHc+pH60AdB4V8T+IvBXiTw/wCL/CGu6r4X8VeFtb0jxJ4Z8SaFf3el634d8R6Hfw6lomv6LqljNBe6bq+jalbW+o6bqFpNDd2l1bxSwyoy0Af7Xn/BI/8Aa5179uz/AIJvfskftUeLhbf8Jt8T/hiIfH89lBFa2d/8RPAPiPXvhn8QNWtLOEmKytdY8Z+Ddc1W3sYzssobxLVeIqAP0ZoAKACgD/Jd/wCDm7/gq3qX/BQf9t7W/gz8OPEU8/7LH7Iuu+Ivhx8P7SwvWfRfiF8TLG7fSfiZ8XZ44JHtNSivNVsZPCPgS98y7t08FaLFrmlNYy+NNdgmAP5qaACgD+0L/ghN/wAGtt9+2D4L8Jftff8ABQaTxX4C/Z38W6fp/iP4PfA7w1qTeG/iF8ZdAuzHd2PjPxtriQy6l4D+GOuWWG8P2Ojmx8deM9Ou18Q2GreENCGh6n4qAP8AQe/Zw/Yz/ZP/AGQfDdt4T/Zi/Z2+EHwO0i3sINNmk+HngbQdC1zWLe3RESXxP4qgsz4o8XahJ5aPdav4o1jV9VvZVE15ezzfOQD6YoA+B/2xv+CX37BX7e3hvW9C/af/AGZfhf4+1fWbVreL4lW3hyw8M/GLQpVTFteeHviz4ch03x3pkltKsU5sRrc2i6gYIrbWdL1Ow8y0kAP8zP8A4Ltf8G/vxR/4JNeJrb4ufDTWNb+MH7FHjrxFFoHhL4h6tHZt46+GPibUILm7sPAHxct9Ks7DTpZ76CzvP+EZ8eaPp9hoPiQ2stlfaZ4b1trLTNRAP5y6ACgD/Sb/AODOf/gpXrHxs+BPxL/4J6fFrxbda346/ZttLb4gfAWTWr03Op3n7P8ArV7a6Nr3gyyllZ7mfTvhN42u9NOnC4kkay0D4jaLoGmLDo3hm1trUA/tdoAKAP8AFu/4Luf8phv+Ch//AGcv44/9CtKAPyYoA+rf2IP2xfiv+wF+1J8Kv2ufghaeFL/4n/CC78T3Xhmw8c6fq2q+Er0eL/BHiXwBrVrrmnaHrnhvVLy0m8P+K9VRYrXW7E/aDC8rywrJBKAeFfEz4k+OvjJ8RPHPxZ+J3ibU/GfxF+JXizX/ABz448WazMJ9U8ReKvE+p3Osa5q99Iqonn32o3dxO0cUccEIcRW8UUKRxqAZHhPwj4r8e+JdE8GeBvDHiHxp4w8Tajb6P4c8KeE9F1LxH4l8QateOI7TS9E0LR7a81TVdRupCI7eysLW4uZ3IWKJmOKAP6n/ANjP/g0B/wCCkv7Reh+H/G3x88RfDH9jbwdrkMN2dD+IT6r47+Ndrp9zh4Lqf4X+Elt9D0yd7f8AezaJ4t+JPhXxHYSslpqej2V0LiO2AP238G/8GQv7L1jHEPiD+3J8e/E8oQCd/Bvw3+HngWOSTHzNFHrd78RWhQtkhHlnZR8pkY/NQBl+P/8AgyB/Z51DTrlPhb+3h8Z/CerHJtLnx/8ACTwR8QtOTAOEubLw74j+GNzJuOAZYr+LYMt5Mh+UgH9XP/BOD9kFv2B/2If2d/2QZPFln47ufgf4Mu/Dd/4y0/R5vD9l4m1PUvEuu+J9U1i30S4v9Um0xL3Uddupvskuo3rxMxBuZvvkA/Eb/g8E/wCUPWqf9nL/AAP/APQfGFAH+U5QAUAFAH6U/wDBMf8A4JV/tU/8FWfjbN8I/wBnPQbDT9B8M21rq3xU+MfjP+0rH4ZfCvQrt5ks5vEOq6fYahdX3iHXZLa5tvCfg/R7W71zxBc215dCKx8P6R4g1/RgD/Q5/Yz/AODSD/gl3+zxoHh2+/aA0Lxj+2T8VLGGG51rxD8RvEWveCfhs+srgyP4e+FHgLXNMtk0UAbE0jx54k+IfmlpZbi5fdBDbAH7J+Gf+CTn/BLvwfAYPD3/AATq/Yist0TQPcTfsu/BbUtQlhdSjxT6nqvgy91GeN1JDpNdOr5O4HJyAfP/AMdf+CBP/BH39oHw7qXh/wAUfsF/AbwQ9+Gkh8QfA3wrD8BPEml3m1hFe2Go/CJvCEbtA7eaLDUrXUdGunVV1DTLyHMRAP4kv+C0H/Bqn8T/ANiXwf4z/af/AGHvE3if4/fs0+DdKufEnxE+Hfi77BdfHb4SaBZJ5ureJIJ9B0rSdH+KHgTRoVm1DWdR0rR9C8VeFNHU3ep6Fr+j6Xrvi20AP496ACgD174BfHT4m/syfGr4X/tBfBrxHceE/ij8IPGmh+O/BWuwbnW11nQrxLqO3v7YOialouqQrNpWvaPclrLWtEvdQ0m/jlsr2eJwD/bZ/YB/a98Jft6/sa/s9ftc+DLWHTNM+Nfw803xHqmgQ3Yv08KeNLCe68O/ELwb9u2xtef8Ih470bxH4bF48UD3i6Wt20EBn8pQD7BoAKAP8G/9pD/k4j49f9lo+KX/AKnGu0AeL0AFAEkUUs8scMEck000iRQxRI0ksssjBI4440Bd5HchURQWZiAASaAP7XP+CUf/AAaDfFL4/wDhrwh8d/8Agov428R/s/8Aw38TaXZ+INB/Z78CRWsHx81fTL+FbrTpfiHrfiLStR0D4SrcQPb3MvhgaJ4s8Z/ZribTtch8Ba5bSRIAf17/AAK/4N5/+COPwA0WDSfDv7C/wi8f3SiJr3xB8dbXU/jrrep3MS7TdT/8LR1DxNo+nmXrLZeH9I0bSi3zLp6HOQD6wuf+CWP/AATHvNKTQ7n/AIJ1fsLy6REGEOnH9kv4Cra25YAF7aJPAKi3l4B86Dy5QQGD7gDQB+Vv7Zv/AAav/wDBJ/8Aal8Oa9J8N/hJc/shfFS/jkm0f4ifAPUdTsfDtjfqHa2i1b4N61qd58M73QfObdf6f4a0fwXrd1APItPE+mERyxgH+bZ/wVA/4JeftIf8EqP2ibz4F/HvT7bVtD1uG/174P8Axe8P286eCPjB4ItbtLb+3tD895p9I1zS5J7Wz8Y+DdRmk1bwtqk8CtNqeh6l4f8AEOuAH5u0AFAH9p//AAZeftl6/wCAP2vPjh+xHrWpb/h9+0H8Nb34r+ENPuLhj/Z3xf8AhG9kt6NJtiRHGfFfw01bxDceIZl3Tzj4feGUCGK2kdAD/SpoAKAPgX/gp1+3l4G/4JsfsUfGr9rXxrb22sXfgXQ49M+HXg24ufsz/ED4r+J5ho3w+8HIyOt0LK+1y4iv/El1ZLNd6R4P0zxFr0dvOulPGwB/iqfG/wCNPxL/AGjfi98R/jt8ZPFOoeNfij8V/F+teN/G/ibU5C9xqeu65dyXdz5MWTFY6bZq8en6PpNosWnaNpFrY6TptvbafZW1vGAeWUAe4fs3/s4/Gb9rf43fDz9nX9n3wRqXxD+LnxR11NA8I+F9NaCA3E4gmvdQ1HUtQvJYNP0XQNC0q1vdb8Q69qlza6Xomi2F9qmo3MFpayyKAf6dH/BLL/g1a/Yf/Y38LeFPiB+1v4W8M/tj/tOPp8F54h/4T3Tk179nvwPq86+ZcaR4D+F2s2MemeL4dNLrZ/8ACWfE/T9cvNSuLVdb0Xw74Ie4OlwAH9QPhfwp4W8D6Dp3hbwX4a0Dwh4Y0eAWuk+HPC+jadoGg6XbKSVt9O0jSba00+ygUkkQ21vFGCThaAL+raRpWv6ZfaLrumafrWj6nbS2epaTq1lbajpmo2c6lJrS+sLyOa1u7aZCVlguIpIpFJDqQaAP5x/+CnP/AAbG/wDBPr9uzwr4j8UfBr4f+GP2Pf2lV0y9m8MfEH4NeHdP8L/DbxHrqpJNZ2vxV+E2h2tp4X1fTr67eT+1PE3hWw8OePBNcLqN3rWvwWS6HeAH+XN+19+yL8d/2F/2hPiD+zJ+0f4Qfwb8UvhzqENtqVtDcDUND13SdQt47/QPFvhPWo0jg13wp4m0qe31PRtTiSGUwzPZ6laadrFnqOm2YB80UAes/An44/FD9mn4x/Df4+fBbxZqPgj4p/CfxZpXjPwV4n0yRkn0/WNJnEixXMWRFqGk6lbtcaVruj3iy6drmiX2oaPqdvc6ffXVvKAf7c37Bv7XXgb9vH9j/wCAP7Wnw+MEOh/Gj4faV4j1DR4LgXTeE/GVq02i/EDwRczgky3ngrxxpniDwvczHi5l0o3Ue6GaN2APrmgD5G/bq/Yv+D3/AAUE/ZZ+LH7KXxw037T4N+JugSWllrltbwTa74D8YWDfbfB/xC8LST4W38ReENditNVs1Z1tdSgiu9D1VLnRdV1OyuQD/Fl/bT/ZB+MX7B/7TfxZ/ZW+Omj/ANl+P/hT4kn0mW9t45xovi3w/col/wCFfHfhe4nSOS88L+M/D9zp/iDRZ5ES5itb4WWowWeqWl9ZWwB8tUAdJ4O8YeKfh74u8L+PfA+v6r4V8aeCfEWi+LfCPifQ7yXT9a8OeJvDuo22r6FrukX8DLPZ6npOqWdrf2N1EyyQXMEUqEMoNAH+yZ/wRB/4Kl+Fv+CrH7EvhH4v3Nzpen/H34efYfhx+0x4KsPJtv7F+Jen6ekieLdL0xSJLXwb8TNOjHizwwVR7Ownl13wjFe31/4R1SagD9iKAPIfj98dvhf+zF8Ffid+0F8afE1r4P8AhZ8IvB2s+OPGviC7w32TR9Gtmma3sbbcsupa1qtybfSdA0a133+ua5fafpGnRT319bwuAf4tX/BT/wD4KD/FD/gpv+2R8Uf2qfiS11ptj4hvR4d+FfgSW7N3ZfDD4Q6Bc3aeCPA1iyn7O9zaWtzcaz4nv7WO3g1zxprXiTxAttbf2r9niAPz6oAu6bpuo6zqNhpGkWF7qurare2um6Xpem2s99qOpajfTpa2VhYWVqktzeXt5cyxW9ra28Uk9xPIkUSPI6qQD/Xg/wCDd7/gkFp3/BLb9kG11j4laJZH9r79omy0Txn8edUdILm88DaclvJdeD/ghpl7HvRLLwNbX89x4sls5ZYNZ8fajrkq32p6JpXhdrMA/av47/AD4LftPfDPXPgz+0F8NPCfxd+FfiW40e71/wAB+NtMj1fw5q1z4f1ey17RZ72wlISaTTNZ06x1K0Yn91dWsMo5WgD4B/4cXf8ABH7/AKR2/sv/APhu7H/45QB/JR/wdu/8E9v2JP2Nf2Zv2T/Ff7LH7Mfwk+BHiPxj8dvFXh/xRrPw68MW+g32u6JafD+71K20zUZoXYz2kF+i3UcTcLMocc0AfweUAFAH90P/AAaM/wDBP/8AYr/bN+CP7Zev/tU/s0/Cf48az4H+Knwv0jwjqXxG8NQa9deHtM1bwj4jvdSsdMkmdTb297d21vPcIv8ArJIUY/doA/r5/wCHF3/BH7/pHb+y/wD+G7sf/jlAB/w4u/4I/f8ASO39l/8A8N3Y/wDxygA/4cXf8Efv+kdv7L//AIbux/8AjlAH3v8AAP8AZ2+B37LXw3074P8A7O3wu8IfB74YaRqGrarpngfwNpcej+HrHUddvZNR1i8t7GIsiT6jfSyXV04OZJnZzyaAPZ6ACgAoAKACgAoAKACgAoA/zB/+D1T/AJSm/ALp/wAmBfCzr/2cV+1T+JPPHp+JyAfyCcZ/hx9e2T/npzk80ALxnt+fbH+c+q9aAEHTtk54z9ehz/X1oAPb5e3fngfr/h160ABx/s9RyTx0P6/n6/QAXjdnjp1zz1+v05+nNACHHbb09cdwfUe5/wD18gDuO+PxOf5/Tn396AEGOvHQZ54zyfX6YoATjPbHse3ze/5/U0AL37dR35+6ff8A/WM/WgBBjAzjv3z3+p4/r70AHY9O3U5HQe5/D8KADuOB3yc/r7+/Hr26gCnoemfcn1HU5z/kUAJxn+HGW7/T3/P2+poA/tH/AODJr/k+r9rL/s0mL/1dHgGgD/SsoAKAGt0/4En/AKGtAH+C58av+Sw/Fb/spHjzr0/5G/WutAHmWBntnJ789/fP1H14oATtjjnHQ+/bn8RjuDQAvr/wI8n3+vT+9njNACcfN07Dk9Tg9ef8KAF646fn/wCg8/nQAccdOMZ56cH3/wAe596AE/7559Pw/L69c7aAA9+nOc889Aex9fvf5NAA3I7dO5xnnjoenXqetACnv05z36/L9fz9qAD8s89+fvfX6/8AAsUAHvx1/HtnPPXr+OKADuDx/F39T256mgBO5Py/UH6nnnr6+2aAFBA7r7c+/wD9YfjmgBD347evHf3xj8O5z1NAH+vt/wAGuf8Aygp/Ya+v7TX/AK2H+0F/n19eaAP39oAKAPx0/wCC9f7b91+wJ/wS6/aU+MfhrW5NC+KfjDQIfgd8Fb21nNtqlr8Tfi2Lnw7Z67os+QI9Y8C+Fv8AhK/iRYltymTwaVMcu7y3AP8AGTJJJJJJJJJJySTySSeSSepoASgD96/+Dc//AIJp6Z/wUm/4KJ+DfD/xJ8PRa/8As5/s/wCmj43fHjT7+Nm0rxPpei39vZeCPhreYKrcRePvGk+nW+taczo994G0jxoYZEmgQ0Af7A1ra21jbW9lZW8FnZ2cENraWlrDHb21rbW8axQW9vBEqRQwQRIscMMarHHGqoihQBQBPQAUAFAHhP7Tn7OXws/a5+AHxZ/Zr+Nfh628TfDL4xeDNW8G+J9OuIo5JreO/iEmm67pMsiubDxH4Y1iHT/EnhnVodt1o/iHStN1S0kjurSKRQD/AA8v2nv2fvG/7KX7RXxt/Zr+I8ca+N/gb8TvGPwy8QzwIyWepXfhPW7zSo9b03ezs+ka/a29vrekTF28/TNQtJgzCQEgHhVAH6e/8EZP2qdW/Y1/4Kefsa/G201yfQfDsPxq8I+APiXOs2y0uPhR8VNSh+HnxETUoHP2e8trDw14jvddtobkbIdW0jTdQgkt72ytbqAA/wBrugAoA/xbv+C7n/KYb/gof/2cv44/9CtKAPyYoAKAPVfgd8Evij+0j8Xvh38CPgr4Q1Tx78Vfir4p0zwd4I8J6PGHu9V1rVZvLj82aRkttP02xgWfUta1nUJrbS9D0azv9Y1a7tNNsbu6iAP9c3/gi5/wQt/Z1/4JL/DOz14WulfFb9r7xn4ftrf4s/H3UdPR5NM+1xQz6j8PPhFb3kX2jwj8O7K7HlXN0gh8R+PLm2h1jxXOlrBoPhrwyAfutQAUAFABQB/LV/weCf8AKHrVP+zl/gf/AOg+MKAP8pygAoA63wB4G8UfE/x34K+GngjSp9d8afEPxb4c8DeENEthm51jxR4t1mz0DQNKtwes+oatqFpaRDvJMtAH+2F/wS6/4J9fDf8A4JmfsY/CX9lnwDFpt/rPh3SU134teO7OzW2uvib8YNfhguvHXjS8keNL2Wzm1FRo/hS11B5rrRPBWkeHNAaaUaYJHAP0JoAKACgBkscc0ckM0aSxSo8csUqrJHLHIpV45EcFXR1JV1YFWUkEEE0Af5MP/Bzx/wAErtH/AOCdv7cMXxH+DfhK28MfsuftZ2mr/ED4caNo1r9n8PfD34iaRNZRfFj4Y6dbxgQabpVpqWqaX418KadDHa6dp/h3xjD4Z0WA2vhO48oA/mooAKAP9FP/AIMpP2y7rxR8If2of2EPE1/LcXXwq8RaZ+0J8K453aVo/B3j9rbwn8SdFtgSFtNN0DxjpXhTXoIQGNxqfxE1qcsu3DAH90tABQB/g3/tIf8AJxHx6/7LR8Uv/U412gDxegAoA/tW/wCDQb/glNoX7QXxf8Yf8FGPjf4X0zxD8Mv2dPEh8CfAXw/rtlb6hp+vftAjTNL13VvHc+n3sc1vNB8IvDetaNc+HJpraRP+E68VaXr2lXdrrHgFqAP9KKgAoAKACgD8Bv8Ag5S/YQ0D9tv/AIJafHTU7Tw9Z6h8Yv2XNA1X9pD4Q64Yl/tbT1+H9kdX+Kfhy0nQpc3Np4y+F1j4lsRoiym21HxRY+EdQltrm90TThGAf5AFABQB+sf/AAQp+Ler/BX/AILA/wDBPLxfos/kXOtftNeAPhNdN5jRq+jfHi7l+COvRORncsmjfEK+AVvlL7dxX7wAP9pWgAoA/wA3j/g9G/bgu/Hv7SXwI/YJ8Ka3K3hL4CeEY/jP8VtMtbgi0vPi58T7SW18Fafq9sSd2oeCvhfF/bOkzqEAsvi7qCMZWIEIB/EdQAUAf6cP/BoX/wAEydD+AP7JN/8At/fEnwtZt8b/ANqz7fYfC7UdRtlk1XwT+znompfYrJNOMoL6bc/FXxVpV74p1SWHP9p+EtI+HssckaPeRSgH9i9ABQAUAFAH8gP/AAeAf8E8dC+Pv7EOkftw+DPDVkfjN+yHq2k23jHW7S3VNY8R/s8eNdZXRNc0W8eLZNqaeBfHOt6B4z0j7U00Xh/RLz4iXFrHEdYvXYA/zBaACgD/AEOv+DJT9qnVtd+HX7Y37GXiLXJ7my8Aa94H+P3wv0e5m882eneN49S8F/FSOwEhMtpplprHh74c3/2OEmz/ALU8S6pfLFBd313LeAH931ABQB/LR/wc+/8ABHIf8FBf2ZP+GnfgZ4WF/wDte/st+GtU1HT9P0mz83WfjP8ABS0e61vxX8MhHbobrVPE/hmWTUPGfwztkF1cXGpy+JvCWn2ct943gubEA/ynyCCQQQQcEHqD3B96AEoA/Yf/AIIhf8FSvFP/AASn/bb8IfGC4udV1H4CfEL7D8OP2l/BVh5tz/bfw01DUI3TxZpemKTHdeMvhnqLjxb4XZUjvL+GHXPCMd7Y2Hi7VJiAf7Jvg3xh4W+IfhHwt4+8Da/pfivwV438O6L4u8I+KNDvItQ0XxH4Z8R6bbaxoWu6RfQM0N5puraXeWt/Y3UTNHPbTxSoSrCgD/OP/wCDuP8A4K+/8Lw+K6/8EzfgL4o8/wCEvwM8RW+s/tMa5o15m08d/HDSyzaX8NXuLZzHe+Hvg8JGn8Q2kkslvc/FC5ls76xt9T+G2n3c4B/E3QAUAf2+f8Gkn/BHD/hcfxCt/wDgp9+0P4V874W/CTxBd6Z+yr4b1qz3Wnjr4vaJO9rrPxca2uk8q88OfCe7WXTPCNykc0N38UFutStruy1H4bNDfAH+j5QAUAFAH8SP/B7r/wAmjfsVf9nHeMv/AFWV7QB/m60AFAH+jF/wY/8A/JvX7en/AGWb4Pf+oR4qoA/uWoAKACgAoAKACgAoAKACgAoAKACgAoA/zB/+D1T/AJSm/AL2/YB+Fh/82K/ap9jmgD+QTAzk9c+/qTnpz6jtjPoTQAuee/Xvn0/zx2+8eKAE7Y+vfjHfJxz169uvO00AA6jHtnk56Y5GB/nnpmgAPJz+PJIz77v6fj35AF7556e/rnpj+vNACNg++M9+/Hsfp9eOpoAcD/nBH8/8/wBQBB/T3J4z/nP8XagBO+e49fx4/HPGM/jj5gA753L2/QY6Z9+n457UAA47r+fv6/8A1vzzkABxjqp/H6d/w9OevfgAOMg5XjPf2x7k5/T3oACcjG5fz/z/AJHvwAHGc5HUnr6/z/THvQB/aN/wZNMv/Ddf7WQ3KSf2SIjgMCf+S0eAM+/G5c/7y+ooA/0raACgBrdP+BJ/6GtAH+C38aiG+L/xUKuhDfEjx5yCDwfF2snrnupBHqCD3zQB5l3+8vr178/zz/Lk45AEH+8ufr75659evr+tAB6/MvII/M59f/1e/WgAPf5hz79eCPw4PvmgA9PmXjPf2x68+2eaAD/gQ7d+nGPx/TPXtggCHOR9c9Dx0/PHT1/E0AHX/wAez27YyfT8enXocUAHU89xk8YHJz159OvOelACnJOee+eMdRjnrjp36de+KADnP69Cf4sn/wCv+XXmgBP/AK/bP8Q7duAOPQD1oAXr9f8AE5bA/ix3Hboc9aAE/mevUknDdfQ/57GgBOTjqfz9T+fXr70AO7de3PB7579up+bv+ByAf6+//Brn/wAoKf2Gv+7mf/Ww/wBoL/OO1AH7+0AFAH8BP/B73+0PeJB+wz+ydpt4VsLib4mftD+M9P8ANJE15Zppvw2+Gl55IICm2hvvizB5sgYv9r2QlNk/mAH+f7QAUAf6Z/8AwZZ/s7aX4F/YJ/aC/aRuLPy/Fn7QH7Qsng+O8aIDz/h98EPCum2/h1Ypm+c48afED4kpcImIz5FsSWkQiMA/spoAKACgAoAKAP8AJb/4OzPhHpXwv/4LNfF/X9Jg+yxfG34U/BT4u3lukax266rJ4PX4a6ncW6rwf7RvPhrLql5Ifmm1O9vpWyzkkA/mroAcjvG6yRuySIyujoxV0dSGV1ZSGVlYAqwIIIBBzQB/urfsQ/FnVvj3+xf+yL8c9enW5174zfsxfAT4q65cKSwm1n4hfCvwp4t1WTJ5y1/q9wWDfMGyGAYEUAfUFAH+Ld/wXc/5TDf8FD/+zl/HH/oVpQB+TFABQB/oa/8ABmN/wTp0/RvAvxc/4KX/ABE0O0udf8ZX+rfAf9nR762hnn0bwpoVxBJ8X/HemtKsy29z4m8RJp/w+03ULZrTU7Gy8JeO9NkMml+JnWYA/vDoAKACgAoAKAP5av8Ag8E/5Q9ap/2cv8D/AP0HxhQB/lOUAFAH9C//AAa4/s96T+0B/wAFlv2cZvEWlrq/h74F6N8Qv2hL+0kQNFFq3w/8NTWHgHVJCVby20P4n+JfBGt2zjB+2afbJuG+gD/XhoAKACgAoAKAP5p/+Dr/APZZsf2h/wDgkT8TfH9vaxyeMf2U/HXgT49+G51j/wBKl0hdU/4Vx4+0z7Qqs8dgfB3jzUvEt1Af3Nze+FNLaTD28MkYB/kv0AFAH9Ln/Bpd8cP+FRf8FlfhV4Unu/senftDfCT40fBS/kd9lu8ieFl+MGi282TgveeI/hHo9haDBZ728t4lx5hNAH+s5QAUAf4N/wC0h/ycR8ev+y0fFL/1ONdoA8XoAKAP9oz/AIIU/sxaV+yX/wAEm/2JfhlZ2q2+teIvgv4b+NHjmVofKvZ/HXx2g/4W14httRbG6e58Oy+LYPCEEr5I03w7YQJ+6hjAAP1soAKACgAoA53xf4W0bxz4S8UeCfEdqL3w94x8O634W16ybGLvRvEOm3Ok6pancrLi4sbueI7lYYflSOCAf4IXiLR5/DviDXfD9ySbnQtZ1PR7gldpM+mXs9lKSuTtJkgYlcnB4yaAMagD60/YG16bwr+3X+xZ4nt932jw5+1p+zlr0Gw4fztH+MPg7UItp7Nvt12nPBoA/wB0SgAoA/xDP+Crn7Q95+1Z/wAFJf21/jzc3hv7Hxt+0P8AEW18LXBlM5/4V/4M1mXwF8NrfzSSJPsfw/8AC/hqz3JtiPkfukSLYigH59UAdP4J8Jat4+8Z+EfAugRefrvjXxPoHhLRYMM3nat4j1W00fTotq5ZvMvLyFMKCxzgc0Af7v3wN+EXhX9n/wCCvwi+BPgWD7N4L+DPwz8C/CvwnCY0iZPDvgDwxpnhXRzKkeVEz2GlQPOQWLzM7szMxYgHqdABQAUAFAHyt+3P8I9K+Pv7Fv7WfwU1qD7Rp/xT/Zx+NHgZwI1klgufEXw88Q6dp9/aq3AvtN1Ge11Gwk4aK9tbeVSGQGgD/CwoAKAP6X/+DSf4uax8OP8AgtB8IfBunXPkad8fPhF8evhT4iiZ2VLrTNG+HWp/G6zjK52NIuv/AAe0Z4y/PDqh3PggH+szQAUAFAH+XR/wdSf8Ecf+GLP2hW/bc+AfhX7H+y7+0/4rvZfGei6NZ+VpHwZ+P+qLd6xrWjJbwIIdM8HfFFINS8XeEY4yLPS9ft/GHhmCDStKtPClneAH8j1ABQB/VX/wTg/4OXfi1+wz/wAEuPj7+xld2et+KfjX4Zsv7J/YZ+ItwsOo6f8ADXSfiDeXsPjew8VyX0rM9l8J5bi78dfCi0e01iDUNa1c+EdVisvCej6bbxgH8seqapqeuanqOta1qN9q+s6xf3mqatq2qXdxf6nqmp6hcSXd/qOo311JLdXt9e3U0tzd3dzLJcXNxLJNNI8jsxAKFAH6jf8ABIP/AIJmfEX/AIKqftm+Bf2d/C/9p6F8N9MMXjj4/wDxJs7cPD8O/hHo99ax65eW080U1o3izxNNPb+FPA2nzR3C3PiPVba+vLY6DpWuXlkAf7Nfwf8AhH8OvgJ8LPh98FfhH4V0zwR8Mvhb4S0TwP4G8KaRGY7HRPDnh6xh0/TrRGdnnup/JhE19qF5LPqGp30tzqOoXNzfXVxcSAHo9ABQAUAfxI/8Huv/ACaN+xV/2cd4y/8AVZXtAH+brQAUAf6MX/Bj/wD8m9ft6f8AZZvg9/6hHiqgD+5agAoAKACgAoAKACgAoAKACgAoAKACgD/MG/4PVf8AlKb8A+uf+GAfhbjgn/m4r9qrPRhQB/ILznoc/T3/AN73P+OMigA59D1zyOc9O7/5HNACZ46HAz2P4/x+/wDP3oAXnrg/iPw/vZ79yfbAoAQ5PGD7cfX1Yj1/D8KADJ9+noffr8/1xnn0oAD7g/kfb0f1x+PvmgBcn3/EH39X/wA8e1ACZPv07Dtz/t/XHr1oA/pJ/wCDUbRNG8Q/8FjPhFpev6RpWuadP8I/2hGl0/WdNstVspHi+Gt48TvaahDc2zPG3zIzRsRyM4ZgQD/Ve/4Uv8Iv+iX/AA6/8IXwl/8AKagA/wCFL/CL/ol/w6/8IXwl/wDKagA/4Uv8Iv8Aol/w6/8ACF8Jf/KagA/4Uv8ACL/ol/w6/wDCF8Jf/KagA/4Uv8Iv+iX/AA6/8IXwl/8AKagA/wCFL/CL/ol/w6/8IXwl/wDKagDf8PeAfA/hK6nvfC/g/wALeHby6txa3N3oPhzRdGubi2EgmFvPPpljaSzQCVVkEMjtGJFV9u8BqAOuoAKACgDzWT4NfCSV3kk+GPw8eSR3kkd/A3hR3kkkdpJJHdtHZnd3Zmd2JZmJZiWJJAGf8KX+EX/RL/h1/wCEL4S/+U1AB/wpf4Rf9Ev+HX/hC+Ev/lNQAf8ACl/hF/0S/wCHX/hC+Ev/AJTUAH/Cl/hF/wBEv+HX/hC+Ev8A5TUAH/Cl/hF/0S/4df8AhC+Ev/lNQB8zftqfCL4V2H7HX7WF5Z/Db4f213bfs0fHua3uYPBPhaGeCaP4T+L3jlhmi0hJIpEcBldGVgR1wSCAf4esn3l/3IT/AOQIj07/AOec8EAj/wADng9x3+b6fj1wTmgA9c56env/AL3+A9O+QAz169DnIPp3+Y0AL379SO/rz/Fnr36dfpQAn5/kc8kHs2c9M5OfU8gUAH4Hv2Prz/F+eOfXvkAPzz16cng+h+vX5u5PBwAHT14z0B9eejc4/wD180AL2/Dnr/tf7XT+ee3GAD/X2/4Nc/8AlBT+w1/3c17f83h/tBe5/nQB+/tABQB/lTf8HhfxEn8Z/wDBX648LvdtNB8JP2ZPgr4Eht9+Y7NtXuvGnxQmQIOFkmPxESeRiN7o8QZiiRhQD+WCgAoA/dz9iH/g4s/4KMf8E+v2cPBX7LH7Ot58DtO+FngS/wDFuqaQPFfwpj8R+JLq/wDGnivWPGGs3Wr622v2Rv5f7S1qe1sz9li+zaXbWNn8/wBm81wD6z/4jAP+CxP/AEHf2bv/AAx0P/zU0AH/ABGAf8Fif+g7+zd/4Y6H/wCamgA/4jAP+CxP/Qd/Zu/8MdD/APNTQAf8RgH/AAWJ/wCg7+zd/wCGOh/+amgA/wCIwD/gsT/0Hf2bv/DHQ/8AzU0Afi9/wUN/4KLftGf8FOvjfoP7Qf7T83ga4+Inh34aaF8KbGfwD4UXwfpMvhXw74j8XeKNNN5pq3+oi41RdR8a6xHLf+ehlsksLbyl+y75AD4RoAKAP9p3/ghl4obxf/wSA/4J26szs5tP2XPht4Xy2chfBGmt4LRef4UTQFRe21RjigD9W6AP8W7/AILuf8phv+Ch/wD2cv44/wDQrSgD8mKACgD/AGx/+CNHwO0f9nX/AIJV/sD/AAt0aEQLbfszfDPxvrSCIQg+Mfi5okXxc8dybBkkS+NPHGvyK7/vJFYPIFdmUAH6Y0AFABQAUAFAH8tX/B4J/wAoetU/7OX+B/8A6D4woA/ynKACgD+0r/gyV8L2t5+3R+1x4zktY5Lzw9+yfb+Gra8ZcyW8Hi74veA9TuoI2/hF03gy2d+7fZVGcBsgH+ldQAUAFABQAUAfF/8AwUd+Gtl8Yv8Agn3+298Lr9FaLx1+yd+0D4egkZPMNpqV98K/FK6RqMaYO6fTNWFlqNvkMPPtYyVYZBAP8NegAoA/TX/gi94ml8Jf8Fav+CcWqw3D2z3X7ZPwC8MtJG7Rs0XjT4h6H4OngLKQTHdQa9JbSoTtkimeNwyuVIB/th0AFAH+Df8AtIf8nEfHr/stHxS/9TjXaAPF6AOj8H+HZ/F/i7wt4TtpfJufFHiPRPDtvNsMnlT63qdrpsUvlgqZPLkuVfYGUvjbkZzQB/vceFvDmleDvDPh3wjoNuLTQ/CuhaR4c0a1GMW2laHp9vpmnW42hVxDZ2sMYwqj5eABxQBu0AFABQAUAFAH+DN+0GiR/Hz43xxqqRx/F/4lIiKAFRF8Z60qqoHAVQAABwAKAPIKAPpP9jP/AJPA/ZS/7OT+Bn/q0PC9AH+7LQB49+0N4/b4UfAH45fFNJxbP8Nfg98TPH6XJ24t28HeC9b8RLOdwK4iOnCQ7gV+XkEZoA/wa5JJJpJJppHllld5JZZHaSSSR2LPJI7Es7uxLO7EszEkkkk0AMoA9D+EXxN8RfBX4r/DD4yeEIdIuPFvwl+Ifgv4m+F7fX7D+1dCn8ReA/Emm+KdEh1vSzNb/wBpaRLqelWyalYGeH7ZZtNb+dH5m8AH9Jn/ABGAf8Fif+g7+zd/4Y6H/wCamgA/4jAP+CxP/Qd/Zu/8MdD/APNTQAf8RgH/AAWJ/wCg7+zd/wCGOh/+amgA/wCIwD/gsT/0Hf2bv/DHQ/8AzU0AH/EYB/wWJ/6Dv7N3/hjof/mpoAguv+Dvb/gsFe21zZ3Wsfs2TW13BNbXML/A2EpLBcRtFNE4/wCEp5WSN2Vh3BNAH8u9ABQB+0X/AAbu+KG8If8ABab9gDVldkN38XNa8LkrnJXxv8NPHXgt1OP4ZE19kbttY54zQB/srUAFABQB8+ftV/sxfCH9sz9nn4r/ALMfx28Op4m+F/xf8KXvhfxFZjyk1DT5JGju9F8TaBdzQzrpvinwlrtrpvibwvqohlbTNe0rT73ypRCY3AP8Wv8A4KL/ALB3xd/4Jt/tcfFL9lH4xW73Gp+CtSGoeC/GMNnLZ6N8TfhlrUtxP4I+Ivh8SNKn2HxBpsTR6jZRXN23h/xNY6/4WvbmTU9CvgoB8PUAFABQB0ng3wf4q+Ifi7wv4B8DeH9W8WeNfG3iHRvCXhHwvoVnNqOt+I/E3iLUbfSNC0PSLC3V573U9W1O7trGxtYUaW4uZ44kBZhQB/sY/wDBC/8A4JQeFf8AglD+xloPw61O10rU/wBpH4rjSviB+0143sfJuhf+N3sXXS/h/oupqGkufBXwtsL258P6CVl+yarrFx4o8Yw21jL4suLK3AP2joAKACgAoA/iR/4Pdf8Ak0b9ir/s47xl/wCqyvaAP83WgAoA/wBGL/gx/wD+Tev29P8Ass3we/8AUI8VUAf3LUAFABQAUAFABQAUAFABQAUAFABQAUAf5g//AAeqf8pTfgF6/wDDAPwtx9f+Giv2qf8AZNAH8gfGe3frjoCf9nHPX370AL9T/wCg9Mdfu/h9PagBOMDnse4zjn/Zz6//AF6AD8vzXOMf7vTH6UAHHrxkemM8/wCzjOOv8+lABxnqOnPI/D+HHp/9fFAAceoP4j29F6/59aAHEDue567fx/h6+tACcc89hnpj2z8uPx/+tQB/S3/waXf8pmvg9g/80i/aH/8AVZ3npQB/rMUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfLv7b/wDyZj+1v/2bH8fv/VS+L6AP8LGT7y/7kH/omL/P/wBfFAEf/wBlnJ5zt78fn155PPFACeuc9O59x6A8enUdcUAL35z37j+77D0x6/iaAAnk9+vpk/MPbI5z+PQ96AEyeev6Zzkd8dfXv070AH/18cjrnnHy9Qf/AK3rQAuRjP59PRvYf1/LqAJnpxn/AL5/vH2P6dz78gDu3OenPPb5vbp6de34gH+vt/wa5/8AKCn9hv6/tNf+th/tBfSgD9/aACgD/IT/AODpjUHvf+C537aEDFiulWP7Nmnx5yPlb9lL4Iai2AecedqEvPQnLDIIJAP586ACgAoAKACgAoAKACgAoAKACgD/AGaP+DfP/lDH/wAE+/8AsiJ/9TTxZQB+yFAH+Ld/wXc/5TDf8FD/APs5fxx/6FaUAfkxQAUAf7x37NmmWmi/s6fALRrBPKsdJ+Cvws0yzj/552lh4G0K1t04AHyQxIvAHTpQB7VQAUAFABQAUAfy1f8AB4J/yh61T/s5f4H/APoPjCgD/KcoAKAP7mv+DH4W/wDw0B+3wzBPtY+DvwZEJP8ArPs7eNfGBugv+wZVtN/+0I6AP9F6gAoAKACgAoA81+M2n22rfB/4r6VeOkdpqfw18dafdSSf6tLa98L6pbTu/X5FilZm4PANAH+CjQAUAfZH/BOrVbrQv+Cgv7CmuWSSSXmjftkfsxaraRxcyyXWnfGzwReQJH/00eWFFT/aIoA/3MKACgD/AAb/ANpD/k4j49f9lo+KX/qca7QB4vQB9E/sg6QniD9rP9l7QZACmt/tE/BPSHDfdKal8SvDNmwbPGCJjnPagD/dvoAKACgAoAKACgD/AAaP2h/+S/8Axy/7LD8TP/U11ugDx6gD6T/Yz/5PA/ZS/wCzk/gZ/wCrQ8L0Af7stAHwd/wVO1B9J/4Ji/8ABRnVIywk079hH9ru9jK5Lebbfs//ABBmjxjODvReeg6sQATQB/h8UAFABQAUAFABQAUAFABQAUAFAH62/wDBBz/lMT/wTy/7OR8H/wDonUKAP9ougAoAKACgD+QH/g8Z+AH7J/iv9gXwd+0J8UvEdh4I/ac+F/xB0bwh+zldWNlDd+I/iva+Mb+CTx98KNSgSWG4m8KaX4dtL/4krrtyZIfCOr+GktLJ45fG17p2uAH+YLQAUAFAH9j3/Bmt8Av2Tvid+2v8Xfip8WPEdhqn7S3wL8A2HiD9m/4Va3ZQjT3sfEE1/oHxF+MOkXE0rpq/inwHa3Wi+G7DTDAh0G18dXPieBLy/trG/wDDgB/ptUAFABQAUAFAH8SP/B7r/wAmjfsVf9nHeMv/AFWV7QB/m60AFAH+jF/wY/8A/JvX7en/AGWb4Pf+oR4qoA/uWoAKACgAoAKACgAoAKACgAoAKACgAoA/zBv+D1U/8bTfgF15/YB+FnT/ALOK/ap9++fUfU0AfyC857//AK2+vt16YP4EAUZ756jr+fqep469e+OKAEySM898/hzz+Xp3577gABPHX6889+ee/wBT17cggASQf6fUk+/Xp0z9KADnOOehP55/2v6n8OtAAxI9e/T8B6+/+fvUAKPrnn1Pv/tHrnP9PQAOvftnrzzn3HI7/pigD+lv/g0v/wCUzfwe/wCyRftD/wDqs7ygD/WXoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoA+Xf23/+TMf2t/8As2P4/f8AqpfF9AH+FhL95P8Ach/9ERc/z/WgCPJ4OTxnn14BHfv7559RwAA+737evv06npn3/rQApOD1PGfXnjPc+/Gc89cjigAOc8k+mef72PX2yeenqeaAE/4Efrn1Iweo6j69D3HAAp+pHXnPTBx6/wBfxY0AHPqeOc5PcHnt9OpHPXrkAbyOpI659snvz36/49aAHZwOvT69eeP078dOOuQD/X3/AODXPn/ghT+w1/3cz/62H+0F/n+p60Afv7QAUAf5Gn/B1n4dudE/4LfftSanPAYovF/hD9nPxFZSEcXNtbfs9fDXwk86+oW88L3dtnn5rdh2oA/nSoAKAP7tv+CSn/Brf+wt/wAFDf8Agnl+zd+2J46/aA/ao8LeMvjHovjefxP4e8D618I4fCmla14J+KXjn4b39vosOvfCjXNXitTN4PaZkv8AVr6ZZpZf33l7FUA/Rn/iCd/4J1/9HP8A7af/AIP/AIG//OToAP8AiCd/4J1/9HP/ALaf/g/+Bv8A85OgA/4gnf8AgnX/ANHP/tp/+D/4G/8Azk6AD/iCd/4J1/8ARz/7af8A4P8A4G//ADk6AD/iCd/4J1/9HP8A7af/AIP/AIG//OToAP8AiCd/4J1/9HP/ALaf/g/+Bv8A85OgA/4gnf8AgnX/ANHP/tp/+D/4G/8Azk6AD/iCd/4J1/8ARz/7af8A4P8A4G//ADk6AP6i/wBi/wDZV8E/sQ/sufBj9lD4ca/4q8U+B/gj4T/4RDw54g8bTaTceK9UsP7U1HVvtOtzaFpWh6RJd+fqc0ebHSbGHykjHlbw7uAfT1AH+Ld/wXc/5TDf8FD/APs5fxx/6FaUAfkxQAUAf7z3wC/5IT8Ff+yS/Dj/ANQ7RqAPWqACgAoAKACgD+Wr/g8E/wCUPWqf9nL/AAP/APQfGFAH+U5QAUAf2uf8GR+trB+2j+2P4cMqh9V/Zf0LW1hLANIvh/4reGrB5QmcssJ8TIrMAQpnUEguMgH+k5QAUAFABQAUAfP37WniSDwb+yt+0x4vurmOztfCv7P3xm8SXN3K4jitYND+HPiTVJrmSQkBI4I7VpXckBVUsTxQB/hEUAFAH3//AMEoPD3/AAlX/BUL/gnRoLR+bDf/ALcX7K32xMbt2n2vxv8ABF7qXHfFhb3J544yeM0Af7fNABQB/g3/ALSH/JxHx6/7LR8Uv/U412gDxegD6l/Ya/5PY/Y9/wCzpf2ff/Vs+EqAP91OgAoAKACgAoAKAP8ABo/aH/5L/wDHL/ssPxM/9TXW6APHqAPpP9jP/k8D9lL/ALOT+Bn/AKtDwvQB/uy0AfF//BSDw7c+L/8Agnh+3p4SsoDc3nij9i/9qPw7aWyjLXFzrfwP8c6bBAo5yZZblIwO5agD/DXoAKAPrT9gv4GeAv2nv21/2Uv2bvifrniTwz4D+Pnx++Fnwb8Ra/4Pn0u28T6TbfErxhpXg+3vdEn1vTNa0mPUIr/WLUwnUNKvrYkkPA+RgA/0G/8AiCd/4J1/9HP/ALaf/g/+Bv8A85OgA/4gnf8AgnX/ANHP/tp/+D/4G/8Azk6AD/iCd/4J1/8ARz/7af8A4P8A4G//ADk6AD/iCd/4J1/9HP8A7af/AIP/AIG//OToAP8AiCd/4J1/9HP/ALaf/g/+Bv8A85OgA/4gnf8AgnX/ANHP/tp/+D/4G/8Azk6AD/iCd/4J1/8ARz/7af8A4P8A4G//ADk6AD/iCd/4J1/9HP8A7af/AIP/AIG//OToA+kf2P8A/g04/Yd/Yy/ac+Cn7U/w+/aC/at8TeNfgb450zx94b0DxlrXwhn8L6rqmlrOsFprkOifCbRtWksJPPYyrYarY3BwNlwnOQD+pigAoAKAOa8Z+MvCvw78IeKfH/jvxDpPhLwT4I8O614t8X+KdevYdO0Tw54Z8O6dc6vruu6vf3DJBZabpWmWl1fX11M6xwW0EkjkKpoA/wAc3/gud/wVe8Vf8FX/ANs3X/iRp1zq2l/s4fCo6r8P/wBmXwPf+dbNp/gZL5G1Px9rWmMRHbeNvilf2dt4h18NGbrS9Jt/C/g6a6v4fCdtezgH4vUAFABQB9A/srftOfF79jX9oT4U/tN/AnxHJ4Y+KPwg8V2Xinw3fHzZLC/SNZLTWfDev2kU0Dan4X8WaHdaj4a8UaS00SapoGq6hYtJH5/mKAf7SH/BOL9vb4Rf8FKP2Rvhf+1b8H7hLax8YaedL8deC5byK81n4YfFDRYbaLxv8O9eZFikN3oWoTpcaVfzW1mPEPhfUfD/AIptLWLT9dswQD7noAKACgAoA/iR/wCD3X/k0b9ir/s47xl/6rK9oA/zdaACgD/Ri/4Mf/8Ak3r9vT/ss3we/wDUI8VUAf3LUAFABQAUAFABQAUAFABQAUAFABQAUAf5g/8Aweqf8pTfgFxn/jAH4W98f83FftU/r/WgD+QTjPQenUY5J/Xj69c80ALx6d8c4/u59/5/pxQAnG0cdd3/ALNxnr6/4+oADGQMDnvkHqCcfTt9MevIAEgDpnn+np/Q+o+gAF4z07Z6jPU+/wD+rj0GABGx6evt3H9T1/oc0AO69RQAg+nZT/P6c0Afoz/wSv8A+Chmtf8ABL/9sDwn+1r4f+FulfGDUvCvhL4g+FYvBGs+Kr3wbY3iePPDk3h2W+fXNP0bXrmF9NWT7UkC6dILrDQma2YrOgB/Un/xHAfF/wD6R6/Dn/xInxb/APOvoAP+I4D4v/8ASPX4c/8AiRPi3/519AB/xHAfF7/pHt8Of/EifFv/AM6+gA/4jgPi/wD9I9fhz/4kT4t/+dfQAf8AEcB8X/8ApHr8Of8AxInxb/8AOvoAP+I4D4v/APSPX4c/+JE+Lf8A519AH7g/8ELP+DhDxz/wV++P/wAYPgx4p/Zj8KfA6y+GPwcT4oW2v6D8Udb8d3Wr3L+OfDvhH+x57DU/B/h2G0h8vXZL37XHdTyh7VIPJZZ2eEA/qFoAKAEJxz7gfmQP60Af58Xjf/g9f+LnhLxn4t8Kp/wT/wDh1fJ4a8T+IdAjvG/aE8WQPdR6LrN9paXLwr8MXWJ5xaea0au6qWwGxwADl/8AiOA+L/8A0j1+HP8A4kT4t/8AnX0AH/EcB8X/APpHr8Of/EifFv8A86+gA/4jgPi//wBI9fhz/wCJE+Lf/nX0AH/EcB8X/wDpHr8Of/EifFv/AM6+gA/4jgPi/wD9I9fhz/4kT4t/+dfQB5v8ZP8Ag86+Kvxg+EPxV+Et7+wR8PtFtPij8NfHnw6uNag+P/iq+n0eLxx4U1fwvJqsNlL8NII7yTTl1VrxbR7i3W6MPkfabYuLiMA/iaZixU9OEX1+7HGoP44yR69+M0AN/wDss+vTn+fvzySaAA+/90fjkjpyePw/AZoACDnrnGc5P+zn9R9emaAF5z29ef8Ae+vTIzj37nmgA5/X9QwPr15weecdT1oATGMdzzj22nJ/PHt7nvQAeo4HY+/Dc9sd+T/+sAOTjGORnjjoT7+p/H6igBecf8BBP0+b3+vr1HAxQB/r7/8ABrn/AMoKf2G/r+01/wCth/tBUAfv7QAUAf5j3/B6b8G7vwj/AMFE/gF8Z4YGTRPjL+yzo+iPOUwJ/F3wq+IPjWy1wLIMBxD4Z8XeA1KHMkbMSzFJIlUA/jooAKAP9T7/AIM7fj/p/wAUP+CUl78HmmjTXv2Zv2gPiX4MmsPM3zt4Y+Isth8X9C1pk3HyrfUNe8aeNNKgX5S03h67cr8wdwD+rqgAoAKACgAoAKACgAoAKACgD/Fu/wCC7n/KYb/gof8A9nL+OP8A0K0oA/JigAoA/wB574Bf8kJ+Cv8A2SX4cf8AqHaNQB61QAUAFABQAUAfy1f8Hgn/ACh61T/s5f4H/wDoPjCgD/KcoAKAP6e/+DRL4uRfDX/gsf4K8JzXsdpH8ePgJ8b/AITBJZViS8n07SNJ+M9taDewV5pJvhErQJy7yIEjBdgCAf6vtABQAUAFABQB+NX/AAcGfHCx+Af/AARy/bw8UXOpx6de+M/gxf8AwS0ZDJsudS1H476rpnwkm0+xRf3k0/8AY/i/Vr6dYwTDp1jfXkhSC2mkQA/xnKACgD9u/wDg3I+Dt58av+Cz/wCw3ocEcn2LwV8Q9e+MWsXaLujsbP4PeA/FXxDsZLg4bbHfeINA0XRY2x/x9apbjcgYyKAf7HtABQB/g3/tIf8AJxHx6/7LR8Uv/U412gDxegD6l/Ya/wCT2P2Pf+zpf2ff/Vs+EqAP91OgAoAKACgAoAKAP8Gj9of/AJL/APHL/ssPxM/9TXW6APHqAPpP9jP/AJPA/ZS/7OT+Bn/q0PC9AH+7LQBz/izw3pvjLwr4m8Iayhk0jxX4f1nw3qsa7S0mm67p1zpd8i7wy5a2upVG5WXJ5BGRQB/gw/FD4f658J/iX8RPhZ4nj8nxL8NPHXi34f8AiGLY0fla54N1/UPDurR+W5Lx7L/TbhdjEsuNrEkGgDhaAPYf2efizefAT4//AAN+Omn2rX1/8F/jD8M/izY2SP5b3l58OfGuieMLa1WTcmxribR0iV967S+7cMZoA/3efC3ibRPGvhjw54x8M6hDq3hvxboOkeJvD+qW5LW+paJr2n2+q6TqEBPJhvLC7t7mInkpIpoA3qACgAoAKACgAoAKACgAoAKACgD/AD/v+Dun/gsh5j3H/BKj9nXxV8kZ0bxD+2P4r0K94eQfZdc8H/AGG8t36Rn+zvGvxNjhJzJ/wiXhOS7DReNtEIB/ARQAUAfs5+x5/wAEQP2tv20P+Cf/AO1N+358M9LkXwj8AGj/AOEB8Cy6Rd3PiX9oCPwvv1H4zv8AD8pLGzj4Y+Gmgv4PKtNR/wCEz1+PVPBfh/zfEek3logB+MdABQB/Qz/wbq/8Fgr/AP4Jdftc2/hn4oa5dj9j39o3UNE8I/HGwmkmnsvh5raTPZ+D/jjplmu9op/B897LYeNo7NGm1jwDfaq5stV1vw/4VhtgD/XU0/ULDVrCy1XSr201PTNTtLbUNO1LT7mG8sNQsLyFLmzvbK8t3kt7u0u7eSOe2uYJJIZ4ZElid0dWIBboAKACgD+JH/g91/5NG/Yq/wCzjvGX/qsr2gD/ADdaACgD/Ri/4Mf/APk3r9vT/ss3we/9QjxVQB/ctQAUAFABQAUAFABQAUAFABQAUAFABQB/mD/8Hqn/AClN+AXXn9gH4W98f83FftU/56igD+QTvjn/AL69yM/XuT+FAC9+/X19R/hz1+hzxQA0dM89+/sf1P0HT8wBe4PPPvnHB75OfX/OCAB/4FyR3Oe+eOvbP4+2KADvj5umfve59+/1/LrQAHj+8evVsdx69ucf5zQAuM/3h+Pr36n/ACaAEH49B0P19x1/r2OaADvjnv0Pu3+e/Tv3ADqe/Ufxeq59f17+vagAXnHXkHq3v/n0/HqAAHI/i+uc9gfXv9O/4UAHcDnqe+T0/pn19+c0AKeM9e3fHXHf8ufc8+gA059/4uSSenGf1/8A1k8gH9o//Bk3/wAn0/tZf9mlQ/8Aq6vANAH+lbQAUANbp/wJP/Q1oA/wWvjT/wAlf+Kn/ZSPHffH/M3a1/n9e2aAPMuc9e59fV/f+v8A9cABnA/DnJ7t6Z578nH9aADnn6N/M54z+vJ56HFABz83J4wep44J9e/6Z70AHOR6jPXnPHXr+OOPxzQAvPHXnHc8/KT6+v8AnmgBuDx04Pv/ALPr36enXr1wAHr/AMC756qD1zzxx+vNACnIz9OuSe4569+Ppj3oAU9/x6k8fLnoTznmgBOc/n69d3Xr644zk/jmgA5zyP1PqO5x0455z79gBcHjqfvcZPY465460AJjGffnB/4F3+mefx57gAM/Xr3I7n+Rz1xnPfGaADn9PXn+Lv69fpnpxQB/r7/8Guf/ACgp/Yb+v7TXX/s8P9oKgD9/aACgD+Qf/g8n/ZIuvjN/wTy+Gf7Tnh3R5NR8SfsifF+C48Q3UMJll0/4RfGyHT/BXiy4/dgzFI/iHpXwgkmODDb2S393MY44WkAB/l/0AFAH9T//AAaa/wDBRSw/Y8/4KCXX7PHxF8Q2mh/Bj9tvS9H+HNzfardxWel6D8b/AAvNqd/8F9XuLq4fZAniK41nxP8ADJYIlT7brXjrw3c3kyW2kAqAf6rNABQAUAFABQAUAFABQAUAFAH+Ld/wXc/5TDf8FD/+zl/HH/oVpQB+TFABQB/vPfAL/khPwV/7JL8OP/UO0agD1qgAoAKACgAoA/lq/wCDwT/lD1qn/Zy/wP8A/QfGFAH+U5QAUAfTn7Fv7R2t/sg/tb/s4ftP6B9qkvvgZ8ZfAHxHubG0cJNrWheHfEVjd+KPDbEvHm38T+Ghq3h68TzI/MtNTnTzYy29QD/cn+G3xF8FfF/4eeBviv8ADfxDYeLfh98SvCPh3x54H8UaXIZdO8Q+E/Fmk2mu+H9ZsnZVc2+o6XfWt1GJESVFlCSokisgAO1oAKACgAoA/gD/AOD1D9vLQby1/Z4/4Jz+Cta+2a5petRftK/HO3sp18rSP+JPrfhL4OeF9ReBmLX1/aa1448X6no135TWlkPAmteTOup2E8AB/AHQAUAf2of8GUn7Omp+Lf2z/wBp/wDafvdJlm8LfBj4A2vwv0zVJowttB4++M3jPRtVtTYyuAZ7608HfDLxZa3qQFxZ2mv25vBGb+xMoB/pUUAFAH+Df+0h/wAnEfHr/stHxS/9TjXaAPF6APqX9hr/AJPY/Y9/7Ol/Z9/9Wz4SoA/3U6ACgAoAKACgAoA/waP2h/8Akv8A8cv+yw/Ez/1NdboA8eoA+k/2M/8Ak8D9lL/s5P4Gf+rQ8L0Af7stABQB/kFf8HN/7JF1+yj/AMFev2i7qx0eTTfAn7SkulftQeBbnySlvqL/ABSW4b4lyI6DyPOi+Muk/ETfAjebFZy6fNMiC6jZwD+fygAoA/1nP+DWf/gopYftq/8ABN3wd8HvF3iG0u/jv+xfDpPwO8X6XNdxHWdW+Fmm2AT4H+ODZ72uDp03g+0Pw+uL+ZpJ73xF8PNa1C6MZ1K2EgB/S5QAUAFABQAUAFABQAUAFABQB+K//BdX/grD4X/4JQ/sZ678QdJu9K1L9pT4tLq3gD9mXwTfCG6F340NjG2r/EPWtMcs9z4L+FljfWuv62rxG01bW7rwr4PnuLFvFcV9bAH+Ol4w8X+KPiD4t8T+PPHGv6t4r8aeNfEOs+LPFvijXr2fUtb8R+JfEOo3Gr67rusahcvJcX2p6tqd3dX9/dzu8txdTyyyMXcmgDnKAPvb/gmh+wF8VP8Agpf+2J8K/wBlP4WpPYDxXqB1v4j+NxZvead8MPhNoM9rN468f6qmUhb+y7CeLT9Bsbm4tItf8Yat4c8Mpd29zrUEqgH+01+zr+z98K/2Vfgb8Lv2dPgl4ag8JfCv4P8Ag/SvBXg3Q4Skksem6ZERNqOqXaxxNqniDXb+S813xLrVwn2zXPEGpalrF88l5fTyMAf5e/8Awc+/8Egf+HfH7Vp/aO+Cvhf+zv2R/wBq3xBq+t6DZaVaeVovwi+NEwn1rxt8LdlugtNK0DXQbvxv8NbQC0gTRX8R+FdKsvsfgGS6uAD+XegAoA/0g/8Ag0o/4LJf8Lo+Hdr/AMExP2iPFXnfFf4Q+HrrUf2WfEmt3m678ffB/Q7drnVvhO9xdP5l54l+Etmj6h4Ut0lmnvfhek9hbWlpY/Dae4vgD+3WgAoAKAP4pf8Ag9w0q5m/Ys/Y51xR/oenftRa3pU5weLnWfhR4ovLUZ6DMWhXhweTjI6GgD/NhoAKAP8ARH/4MeNdtLj4O/8ABQnwyjob/SfiX+z/AK7cRhwZFtPEPhf4n6fZu0edyo03hi/VHIw7I6g5RqAP7saACgAoAKACgAoAKACgAoAKACgAoAKAP8yf/g9c8Mazbf8ABST9mvxnNZzJ4f1z9h/wj4a02/MbiC51fwl8evj5qeu2cUpAjeays/G/hyaaNGLxJqEDSKFljLAH8cfp9PVuvzY5z+pPTPOKAF/+v1Jz933IP/1ueM5IAg6fge5x/F2z/TPU84yAA7n1xzy390nnJ9enfH0zQAHpz6jqTjp9fU9zjHPvQAdz+Hds/jz9cdz6c8gB/PHqx5z7E8f1+nAA5v8AHqSO/wBR78/T2yAJ6/RehP8Aj+X9c8gCenryep/2u+cfrnrzQAuOevcdzn7p69s/5PWgBOg4PY9yB97tkg/59+QAxwT9O5x0HUf596AFPb/gXUn09c8e+TkH0oAD1x7+/qv+P+eSQBPT3DZ5P+P58+/PWgD+0f8A4Mm/+T6f2sv+zSoff/mtXgGgD/StoAKAGt0/4En/AKGtAH+C38af+Sv/ABU/7KR469f+hu1r0/T396APMv4h079+erdvx649efUAAOB26dCcfe/HP/1+e1AB69+H9fX/AOvz/OgBD/Fz6dz39f6dfT2oAUfw/j3Pp2yc4/OgAA6fh3PPDf57dx7UAIO3Tr6+6+pz/wDqHrggB6/8C5yT2HX+vXn0PFAARweeoByST3/Hj0/Hk80AKR1/Hux/h7//AF/50AHf8+5zy3p/9bGepIoAMdfqRnJz1X36469ume4oAMcjnqW7nPXt/XP60AIMZP1/2s9G9ec/560AKO2fQ9CT39ic+/XtntQAoUkhR1baMdfvbgMn0yeue1AH+wp/wbL+GdY8J/8ABDr9hHS9cs5rG9u9A+N3ieGGeN43fR/Gv7TXxp8ZeHrwLIqsYtQ0DXtMv4JANk0FzHLEWjdWIB+71ABQB5B+0D8D/AX7S/wN+Lv7PfxR09tU+Hnxp+HXi74Z+MLSLyluxofjHRLzRLy702aaKZbTWNOS8/tHRtQEbS6dqtrZ38G2e3jYAH+If+25+yP8Tf2E/wBqr42/sofFu1ePxj8HPG2o+HP7VW0ms7Dxd4bk2al4N8d6LDOzyDQvHHhS90fxTpCyO00NlqsVtdbLyC4ijAPlagCSKaW3linglkhnhkSaGaJ2jliljYPHLFIhDxyRuA6OrBlYBlIIzQB/pdf8G/n/AAcwfDL9ozwX8Pv2NP2/PHWnfDn9pfwvo9h4T+H3x68ca3aaf4H/AGhrPTIls9HsvF3iTVJLa28K/GZ7GO2s7iTW7s6V8TNSge/0/VbfxfrEXhm9AP7OVZWUMrBlYBlZSCrKRkMCMggg5BBwRzQAtABQAUAFAHwv+2F/wUv/AGEf2CdFn1b9q79pv4Y/Cm/S2N3Z+CLrWW8RfFHWotm9H0H4V+E4dd+IWsQOWjja+svDkunWzzwG9vLaOVZCAfHf/BLD/gt/+z//AMFcPix+0z4J/Zy+GfxM8NeBf2c9G+Guoj4g/E1tC0bUvH1z8QtR8c6fjSvBGjXmvS6HpGnDwU11aX+r+Iv7V1KLVI0vPDuiT2skcoB+1lABQB/i3f8ABdz/AJTDf8FD/wDs5fxx/wChWlAH5MUAFAH+898Av+SE/BX/ALJL8OP/AFDtGoA9aoAKACgAoAKAP5av+DwT/lD1qn/Zy/wP/wDQfGFAH+U5QAUAFAH9z/8AwbGf8HCHgH9n3wp4c/4Jy/tyeM9K8EfCfTbvVG/Zs/aA8U6j9i8PeA5db1S61nUPhN8UNZvZTaaN4OudW1C/1HwN40v5bbTPCtxdXXhnX7m38OSaFdeHwD/RM03UtO1nTrDWNHv7LVdJ1WytdS0vVNNuoL7TtS0++gS5sr+wvbWSW2vLK8tpY7i1ureWSC4gkSWKR43ViAXaACgD8j/+Ctv/AAWF/Zm/4JP/AAN1vxf8RvEmh+K/j/4g8O383wL/AGdLDVVPjT4ja+xkstN1PV7WzFxd+E/hrp2oh5/E3jjVYbaxW0sL/SvD51rxXLpuhXoB/j1/tM/tH/Fz9rz49/FP9pX47eJX8W/Fj4weKrzxb4v1nyRa2n2maOGz07SNIsFZ49L8PeHNGs9O8O+G9Iid4dI0DS9N02FmitUJAPC6ACgD/X8/4Nov2IJf2J/+CUnwSh8TaJJovxV/aSlvP2mviZDe2rW2qWsnxHstNj+Heh3kdwiX1m+jfCjSPBAv9HuxG2l+JL3xGpt4Li4ut4B+/lABQB/g3/tIf8nEfHr/ALLR8Uv/AFONdoA8XoA+pf2Gv+T2P2Pf+zpf2ff/AFbPhKgD/dToAKACgAoAKACgD/Bo/aH/AOS//HL/ALLD8TP/AFNdboA8eoA+k/2M/wDk8D9lL/s5P4Gf+rQ8L0Af7stABQB/KX/wdm/8E2NQ/bE/YW0/9p/4Z6LLqnxp/YibxH44u9P0+1M+peK/gL4hh07/AIW1paJCglubrwUNE0X4lWLXEskdloXh7xva2FtJqGvoGAP8rugAoA+8v+Cbn/BQr43/APBMj9qvwL+1H8D7lL290LzdB8f/AA/1G+ubLwz8WPhrq89s3if4f+JntknaK21FbW11HRtWFpezeGvFGmaH4ltbO6udJjtpgD/Xa/4Jo/8ABV39kb/gqZ8HbD4k/s8+N7O08a2FhA3xN+BHijUdNtPi38K9XAhjurfX/D0Vy02qeGpbmZU0Dx5osd14X1+JhFFeWmtWuraJpYB+llABQAUAFAFW9vrLTbO61HUby10/T7G3mu72+vbiK1s7O1t0aWe5urmd44be3hjVpJppnSONFZ3YKCaAPwQ/bk/4OWf+CVH7Ef8AbHh6T42j9pb4raZ59v8A8Kx/Zjj034kyW+oRbojba/8AEYanp3wp0A2l4Bb6xYSeNLzxRpm2cjwzdzwG2cA/Wn9jz9oi0/a4/ZW/Z5/agsPCtx4Gsfj/APCDwJ8W7Pwdd6vHr934ZtfHXh+y8QW+i3OtQ6fpUOqXGnxXyW817FptlHPIjOlvGpAoA+kKAPNfjJ8X/hx+z/8ACn4hfG34v+KtN8EfDD4WeEtb8ceOfFWrSFLLRvDvh+xlv9QuSiK9xd3TxxfZ9P02zin1DVdQmtdN062ur+7t7eUA/wAZH/grv/wUx+I//BVL9szx3+0Z4s/tLQ/h5YtJ4J+Afw2u7kSQ/Dj4RaPfXUmg6dPFDLLaP4q8QzXFz4q8c6jDJMt54m1a8tbKddC03RLKyAPy/oAlggmuZoba2hluLi4ljggggjeWaeaVwkUMMSBnklkdlSONFZ3dgqgkgUAf64P/AAbdf8EiIf8AgmZ+x3beOviv4ditP2vf2nNP0Lxp8YmvrdDq3w18JrA954F+CUErrvs5vDdrfSa349jhCG78e6rqGmXFxqmm+E/Dd1EAf0ZUAfH37en7FXwi/wCChP7Kfxc/ZQ+NNlu8KfE3w/Jb6X4it7WG51vwB4205vt/gz4h+GjMyCPXfCOvw2mpww+dFb6tZpfaBqhm0bV9StbgA/xWf2vf2Vfi7+xJ+0j8W/2XPjnoh0T4k/CDxXeeG9X8pZjpeu6eUjv/AA54w8O3E8UMt74X8Z+HbvS/FHhu+khhludG1Wze4gtrnzreIA+bqAPR/g/8XPiN8A/in8P/AI1fCLxVqfgj4nfC3xbonjjwN4r0iUR3+ieI/D19DqGnXaK6vBdQGaEQ32n3kU9hqdjLc6dqNtc2N1cW8gB/s0/8Egf+Cm3w5/4Kq/sZ+B/2h/DH9maF8StKEXgj9oD4a2dwXm+Hfxb0mxtpNatbaCeWW7bwj4nhmg8VeBdQmkuDc+HdUg0+8uTr+ka7aWQB+pFABQB/KR/weOfCfVviD/wSR0jxvpcO+D4FftWfB/4j+IphGXMPhzxB4d+I3wcwWHESS+Kfil4VBduC6xx/ecUAf5X1ABQB/S5/wa3f8FKPAf7AH7f9/wCDvjZ4jsfCPwJ/a18Kab8JfFXi/VpzaaH4I+Iela1/avwm8XeI71nFvY+HRqV/4g8Gavqd4sen6FB43HiPVb7T9G0bUrgAH+szHJHLGksTpLFKiyRyRsHjkjcBkdHUlXR1IZWUkMCCCQaAH0AFABQAUAFABQAUAFABQAUAFABQB+Hv/BdX/gjr4Y/4K7/s0aL4T0XX9F+H37SHwY1HV/FPwD+I2uWc9zoIutbs7a28VfDjxw1jDcapB4G8eQ6do5vNW0mC51Xwt4g0TQPElrYaxZ2OqeH9ZAP8xD49f8EWv+CqX7OXi/UvCHxB/YT/AGktSmsLy4tIPEfwy+GHiT4xeBtXjhneOG80Xxv8L7HxT4dv7e7jCXECNeW1/HDMi3thaTh4UAPB/wDh3V/wUG/6MT/bM/8AEXvjZ/8AMV680AJ/w7p/4KD4x/wwn+2Z3z/xi/8AG3knPf8A4Qn3oAX/AId1f8FB/wDoxP8AbL/8Re+Nn/zFUAIf+CdP/BQbr/wwn+2Z1/6Ne+Nvv/1JXfNAB/w7p/4KDZJ/4YT/AGy+n/Rr3xt9/wDqSvf1oA+aviJ8NviN8I/FmpeAviv4A8bfDDx1oyWT6x4M+IfhTXfBPizSk1KzttT059S8OeJLHTtYsVv9OurbULJrqziF1ZXMF3AZIJY5GAOMLAdff17Z9v8AP8wBN3c/3V7HrzQAZ5B9c9j23f4igBAeRk+h7nPy4J6UAKp4GT2JPB9evv780AAbA59v/QR+H60AITnnPZvX+6OP6/8A16AFJ5PPGf6p/jQAm70POH7epyP8aAP7R/8Agyb/AOT6f2sv+zSof/V1eAaAP9K2gAoAa3T/AIEn/oa0Af4LXxqOPi/8VOcf8XI8d/8AqW63/wDr/wAehAPM88ge5P6vn+lACBicZPXH1zu/LpQAuTzz2Y/juPNACE8vz147+hH59P1oAUMSRzwSf0A9f1xQAm7pz09j/dI5/GgBdxOOe/p/uj+p9aAEz79Qc++FH49aAAtkHkfr6j1/z9ewB9BfC79k39qj44+Hbrxf8Ff2aP2gvjB4StdVuNCuvFHwt+DHxG+IPh221y0tbS7utGudb8KeHdV02DVbW0v7G6udOkuVvILe8tZpYliuIWcA9I/4d0/8FBv+jE/2zOpP/Jr3xt9c8/8AFFUAL/w7q/4KDf8ARif7Zn/iL3xt/XPgo0AJ/wAO6f8AgoP/ANGJ/tl9/wDm17429zn/AKEr/PvQAf8ADun/AIKDZz/wwn+2Z/4i98bff/qSvfNAD1/4Jz/8FCHdUT9hL9sxndgir/wy98bMszNhQP8AiiupJAHvQB+zX/BMb/g2D/b8/bI+J/hjVf2lfhZ47/Y//ZosdQsL/wAeeMPivoZ8KfFPxLoUdwJb7wt8L/hhroTxQPEetWq/ZLbxb4v0XSfCHhy3un1wnxJe2Ft4a1MA/wBVP4YfDbwT8G/hx4D+Enw28PWHhP4e/DPwh4d8B+CPDGloyafoHhTwppNromg6Rah2eRorDTLK2txLK7zTmMzTO8sjsQDuqACgAoA/lw/4OV/+CINx/wAFKfgtp/7R/wCzpoVo/wC2f+z74a1C30zRLeGKG6+P3wrtpLnWLv4Wy3RKf8Vp4fvptS1z4W3Nw5trrUdU17wjfeXH4msNX8PgH+VJqml6nomp6joutadfaRrGkX13peraTqlpcWGp6ZqdhcSWl/p2o2N3HFdWV9ZXUUttd2lzFFcW1xFJDNGkiMoAKNABQB+xP7FX/Bev/gqZ+wX4e0fwH8E/2m9c1v4UaEY0034R/GHR9H+LXgXTbGFVWLR/Dx8X2t54r8E6Gm0sujeAvFPhbTxLLNP9n86aSRgD92fA3/B7h+2RptpZR/En9jb9mrxfeRQxJf3XgzxN8T/h/HeSqoEs0FtrOsfEf7GJSCwjaa6EZbhmUAUAd1rf/B8F8frgN/wjf7A/wf0okfIdb+NHjTxAFPqwsPBvhkuM9gyfXvQB8t/FP/g9A/4KbeLoJbL4a/B/9kr4R28kZC6nB4K+InjnxNBKQRvhvPE/xMPhkoPvLHP4PnbcPmlZcoQD8h/2lP8Agvh/wV2/ass7rR/if+298WdG8M3aSW83hT4OyaH8BtCuLGbd5mm6pH8G9I8E3/iOwcMyyQeKNQ1szJtSZ5ERFUA/Im+vr3U7261HUry61DUL+4mu76/vria7vby7uJGlnubq6uHknuLieVmkmmmkeSSRmd2ZiSQD+8D/AIMcf+Sg/wDBR3/sTf2Yf/T38c6AP9C6gAoA/wAW7/gu5/ymG/4KH/8AZy/jj/0K0oA/JigAoA/3nvgF/wAkJ+Cv/ZJfhx/6h2jUAetUAFABQAUAFAH8tX/B4J/yh61T/s5f4H/+g+MKAP8AKcoAKAOqbwJ43TwRF8TX8G+Kl+G9x4quPAkHxBbw9q48ET+N7TSLXxBd+DYvFZs/7Bk8VWug31lrdx4eS/bV4dIvLXUpLNbO4imcA5WgD9PP2NP+Cy3/AAUt/YI07SvDP7NX7V3xC8NfDvR5nksvhN4sbSviZ8KbWG4na5vrPSvAfxD0/wASaH4Yh1GZ5Zb2fwjB4f1GSeWW7jvorx/tFAH7Y+Ev+D0P/gqbocS2/iP4Q/sU+NkCYa71D4Z/F7RtUeQLhXMvh/48afpYBb5pUXRhu6RtCKAPCfjz/wAHcv8AwWB+NHhzVPDXhfxV8CP2dItVVoZta+BXwnuYfE9vZyK6TW2na58WfF3xZn0qSZGx/amlJZa1auqzadqNjMA4AP5yfin8Wvil8cfHev8AxQ+M/wARvG/xX+JHim5S78SePPiL4o1rxl4u1yeKFLeB9T8QeIL3UNUuxbW0UNpaRzXLRWlpDDa2yRW8McagHn1ABQB/Rv8A8G1//BJef/gpN+2pp3j74qeGJdQ/ZJ/Zcv8ARPH/AMXH1Gzd9C+IvjFLk3fw9+Cyyyo1vfxeItRs28QeObILNGvgHRNU0q9k0688VaBcTAH+t+qqiqiKqIihURQFVVUYVVUYCqoAAAAAHAoAdQAUAf4N/wC0h/ycR8ev+y0fFL/1ONdoA8XoA+pf2Gv+T2P2Pf8As6X9n3/1bPhKgD/dToAKACgAoAKACgD/AAaP2h/+S/8Axy/7LD8TP/U11ugDx6gD6T/Yz/5PA/ZS/wCzk/gZ/wCrQ8L0Af7stABQBDcW9vd289pdwQ3VrdQy29zbXESTW9xbzI0c0E8MitHNDNGzRyxSKySIzK6lSQQD/KN/4OP/APght4j/AOCb/wAcNW/aT+Afhi5vv2Hfjj4svLzRE0iymkt/2ePH+tzzX918Jtf8pXSz8HX87XVz8KNamMcUmlRzeDNQZtY8P22p+JQD+X+gAoA7b4dfEr4ifCDxpoHxH+E/jvxh8M/iD4VvU1Lwz448BeJNY8I+LNAv0BVbzR/EOg3lhqunXGxmRpLW6iZ43eNyyOykA/pJ/Zq/4O5P+CuXwLt7LSPiN4o+D37U+g2kKWir8a/htb6Z4pitIkCReR4v+Eeo/DW+vr5dq+Zqfiq18U3lyDI1288zrOgB+jmg/wDB8D8f7e0dPFH7A/we1i/MeI7nQfjP418N2iy8fO9jqHg7xVM8fX90NQjbp++oA5nxB/we7/tY3KTDwt+xL+zxo0jK4gbxB46+JPiVI3IOxpk06Twm06q2C6JJblwCA6E7gAfB/wAXf+Dvb/gsX8SBdJ4M8V/s/fAOOfesJ+FXwQ0vWbi1RshTHN8bdZ+MCvMq9ZXhKl8ukcYwqgH4c/tLf8FAP22/2xrye6/ae/an+OHxptZrj7Unhzxn8QNeu/BGnziTzQ2i/D+1u7TwPoCiQCQR6J4f0+IOFYJuUEAHyBQB/tj/APBGP/lEv/wTh/7My/Z8/wDVbaBQB+mNAH+bz/wdsf8ABZD/AIXV8R7j/gmL+zz4q874T/B7xFbaj+1J4j0W93Wnj/4x6HOtxpXwpW4tZPLvPDXwju1W+8U20kk0F78UhFY3VnaX/wANba5vgD+I2gAoA/sP/wCDTr/gkD/w1b+0A37f/wAdvC/2v9nr9mHxVbx/CXRtZs92m/FP9onTUtdV0zUFhmQrf+GfgzHPp/ii+f8Ad29747vPB9jFLf2uj+K9MjAP9OmgAoAKAP5GP+DrL/gkB/w2T+zgP24fgX4X+2ftL/sr+Fb6XxxpOj2Xm6v8XP2eNPkutZ17TBDChm1HxP8ACia41Pxr4ZjQ/aL3w5deONDhh1TVbrwzZ2wB/l3UAFAH7N/8ENv+Crvi3/glB+2ZoHxLvbjVtV/Zz+KR0rwB+014FsPNuTqngR75307x1oumhjFceN/hffXlz4i8OkIt1qemTeJvB0d3YW3i28vIQD/Y28E+NfCXxJ8G+FPiH4C8RaT4u8D+OvDmi+L/AAd4q0G8i1DRPEnhjxHp1vq+ha7pF9AzQ3mm6rpl3bX1lcxsUmt543HDUAdPQB8U/wDBRz9ku0/bq/YY/af/AGTbi7t9PvvjR8KNe8P+FtTvQGsdJ+IGmNbeJ/hvrGoKUcvp2keP9C8NalqCRhZ3s7WdbeWGcxzIAf4fnivwt4i8DeKPEngrxfo994e8WeD9f1jwt4o0DU4Tb6lofiLw/qNzpOt6PqEBJMF9pmpWlzZXcJJMdxBIhJK0AYFABQB+8/7A/wDwch/8FQv+Cfvg7QvhZ4J+J3hf43/BrwxbWun+GPhZ+0Z4f1Hx9pPhPSLRfJi0fwn4r0fxB4T+I+h6Na2ojttK8PJ4yuPC+iRW8EelaDbQiaGcA/U/V/8Ag9m/4KBzafFHoP7K37HOm6qIyJ73V9P+Net6fJLzh4tMsvi34fuYY+mY31edup83nAAPjP42f8Hb3/BZD4uaZLpXhb4hfBP9nuK4WSO5u/gn8GNJbU5YJchootS+MWq/F+805gpKx3mkzafqMPDw3kcoEgAP6of+DRT9q39pf9rn9nb9sz4gftPfHf4qfHjxhYftB+E9N0nWvih4113xdLoGlTfDu0vZdH8NWurXlxY+GdFe8lluzo2gWunaX9qlluBaCaR3YA/rtoAKACgAoAKACgAoAKACgAoAaUXngjJydrMuSepO0jJPqcmgBNg9X/7+Sf8AxVABsHq//fyT/wCKoANg9X/7+Sf/ABVABsHq/wD38k/+KoAQoMHl+h/5aSf/ABVAH+RZ/wAHUTE/8Fuv2syxLY0L9ncDezPgD9nf4ahQCxJCqAAoBwAMAY4oA/nk+btj8c98/wBDz75oATnoMdB64xyP59PUUAHORwM+2ep3fp1zQAmTnoO3TPUrx+H9KAFBJHGO+OvXOT+H9eKAAZI4x+OQeg6d+/5fXkAOe+P4s9fbr+Y+goAD17Z/HuV6/l/L1oATnvj+Lu3rg559f8aAP7R/+DJv/k+n9rL/ALNKh/8AV1eAaAP9K2gAoAa3T/gSf+hrQB/gt/Gn/kr/AMVOn/JR/HfX/sbdb/z9M0AeZd+3U+uf4v8A69AB6fd7Z655bsfT+uaAD16chh3z1PJ9vfsaAE5y3A9TnPufz9fXnNACjqM4zk9M55H6n1z25oATnjhefr6Hr+HHegBfrjP4+q/mePzx68gCev3f4u5/ug8c/wCR0oACCAc46ds56jnn9T9M5oA/1F/+DMvLf8Eq/iepZ9o/bW+K5Ch3ABPwr+B+SAGABbA3Efe2rnO1cAH9bWwer/8AfyT/AOKoANg9X/7+Sf8AxVABsHq//fyT/wCKoANg9X/7+Sf/ABVABsHq/wD38k/+KoAcFAyQOT1Pc46ZPU4HqTQAtABQAUAFABQB/K3/AMFzP+Daf4T/APBSC58R/tM/swX3hz4GftotYvda+l5bfYPhV+0LcWsWIE+ISabazXXhf4gvGiW1l8TNMs7/APtKNI9O8aaPqqGw8QeHQD/M9/am/Y//AGmf2J/ijqPwa/an+DPjb4L/ABC0/wA2WLSvFmmhNP1/T4pjB/bng7xNYyXvhjxt4clmVooPEfhLWNZ0SeZJIY79popY0APm2gAoAKACgAoAKACgD/QU/wCDJT4NfF3wc37dHxW8XfDDx/4W+GXxK8Nfs9af8OviB4i8I69ong7x7eeHdV+MNxr8HgzxFqVhbaV4obRIdY0l9WbRLq+TT/7TsRdtE11CHAP75KACgD/Fu/4Luf8AKYb/AIKH/wDZy/jj/wBCtKAPyYoAKAP9574Bf8kJ+Cv/AGSX4cf+odo1AHrVABQAUAFABQB/LV/weCf8oetU/wCzl/gf/wCg+MKAP8pygAoA/vo/4NU/2dPgT/wUB/4Jf/8ABRn9hn9o3wwniz4aax8evBfjCaK3khtvEXhDxB42+Gdto+gePvBWqzW92dA8ZeHNQ+Gcd9omrLb3EDvbTadqtnqmiXuqaVegH82v/BXf/giP+1X/AMEmvibqC+NNE1L4l/sy6/rktp8KP2l/DmkTDwnrtvcmSfTfDfjy2t5Lz/hXfxGitlaO58OazcCx1uW0v77wbqviHTLS7ntAD8YaACgAoAKACgD9jP8Agkn/AMEVP2r/APgrJ8UbGx+HehX/AMPP2dPD+uQWfxb/AGlvE2kTnwV4TtIfLuNR0LwjDNLZf8LD+I8lnJGLDwhoV0UsJr3Tr3xfqfhnQ7pNUYA/1vf2Jv2LfgJ/wT+/Zx8A/swfs4+Fv+Eb+HngW0d5729a3uvFPjfxTfrE3iLx/wCOtZgtrT+3fGPie7iS51S/+z21pa28VjomiWOleHdJ0fSLAA+r6ACgAoA/wb/2kP8Ak4j49f8AZaPil/6nGu0AeL0AfUv7DX/J7H7Hv/Z0v7Pv/q2fCVAH+6nQAUAFABQAUAFAH+DR+0P/AMl/+OX/AGWH4mf+prrdAHj1AH0n+xn/AMngfspf9nJ/Az/1aHhegD/dloAKACgDg/ij8Lvh18bPh54x+E3xc8F+HfiL8NPiBoV94Z8aeCfFmmW+seHvEeh6jH5d1p+pWF0jxSoflmglXZcWl1FBeWk0F3bwzRgH+a//AMFkP+DUT49fsxav4m+PH/BOvRfFn7Rv7Ok8t/rerfBa1D698e/g7alpLmSx0TTogdR+M/gyzz5Ol3eg2918SLC1aC11vw/4hjsNR8aXYB/HZe2V5p15d6fqNpc2GoWFzPZX1jewS2t5ZXlrK8FzaXdtOqT29zbzo8M8EyJLDKjxyKrqQACtQAUAFABQAUAFAG/4W8KeKPHPiHSPCPgrw3r/AIw8WeIL2LTdB8MeFtH1HxB4h1vUZ8iGw0jRdJt7vUtSvZiD5VrZ2008mDsQ4oA/20/+CTvgTxp8MP8AgmT+wR8O/iN4U8QeBfHvgr9kz4GeG/GHgzxZpN7oPifwv4i0n4faHaaroXiDRNSht9R0jWNMu45bTUNNv7eC8srqKW3uYYpo3RQD87/+Din/AILAWH/BLr9kS48O/DDXLQftg/tG2GueD/gZYRSQ3F98PtGSBLTxj8cNSs33pFb+DIL2Ky8GR3qPDrHj/UNIP2LVdE0HxVFagH+RRqGoX+rX97quq3t3qeqand3OoalqWoXM15f6hf3kz3N5e3t5cvJcXd3d3Ekk9zczySTTzSPLK7u7MQCpQB9qf8E9v2G/i1/wUY/a2+Ev7J/wdtni1z4ha0snifxXNZy3ej/Dj4d6QUvPHHxF8QiN4lGl+GNFE09vayXNrJruuTaP4Y0+Y6vrmnwygH+1J+yt+zL8Jf2Nv2efhN+zH8DdAXw58MPg74RsPCfhuzbynv79oTJd6z4k166higTUfE/i3XbrU/E/ijVfJibVPEGrajftHGbjYoB9AUAFABQA10WRWR1V0dWR0cBldWBDKynIZWBIYEEEEg5oA/yZf+DmH/gkG3/BN/8Aa4k+Mfwd8MtYfsg/tTaxrXij4eQ6baldH+FPxLLNqnjv4Nv5KC303S4pbiTxX8NbZ1tYpPB95deHNNjvX8B6xesAfzR0AFAH99P/AAaMf8FkvsF1bf8ABKn9ovxViyv5tX179jjxZrt7hLS/la61vxd8AZ7y4fasWoyHUfGXwyimKY1E+K/CcV1NNqPgrRYgD/QPoAKAP8+//g6T/wCCCHjXVPHHin/gpd+xT8Nr7xRp/iWDUPEP7YPwo8F2H2rW9K8QWiCe8/aB8LeHLNTd6xp2vWolm+Ltjo9tNqOm6xaP8R7i11Cz1zxrq2ggH8C9ABQAUAFABQB/pF/8GRX/ACaJ+2p/2cf4O/8AVZWNAH9ttABQAUAFABQAUAFABQAUAFABQAUAFABQAUAIeh+hoA/yKf8Ag6hz/wAPuv2tMdf7D/Z4Pv8A8m8fDX/9dAH88hb0K/if/r/WgAB68j+HnPHfPOev/wBbNACZPqPzyP4vfpyM0AHIPJ4+uegOf16+9AACcDkZ5+8T6jr+H4/rQAZPrz05PHQdfxB/HPvQAE98jkN3/LoevH/oWKADPvz6Z56qfp0z+H40AGT6j+LPPHJ47/l7dKAP7Rv+DJv/AJPp/ay/7NKh/wDV1eAaAP8AStoAKAGt0/4En/oa0Af4LXxpOPi/8VOf+aj+O/8A1Ldb/wAj3xQB5nnkcjGTnnnvjv05H14oAM+/p0OR1JPJ56DmgAz156hu59cj9D9cYxQAZPzYPXp+ucc/yzzigBc9CSO+efw4HpkHrn16mgBM9OenXnnoc559SOvfr2oAM+pX3wT7e/8Ak/U0AGevPqBzyeOPr3z/ALVAAxOOvJGePqM/h6e2cmgD/UW/4Myv+UVnxQ/7PV+K/wD6qv4H0Af1u0AFABQAUAFABQAUAFABQAUAFABQB4V+0J+zD+zx+1j4Bu/hd+0r8F/hx8b/AAFeea//AAjnxH8K6V4ltdPu5Y/K/tTQri/t5L/w5rkKY+ya9oF3pus2TqktnfwSorgA/lQ/az/4Mwf2Fvipc6jr/wCyh8cPi9+ynrN5NNPD4U1+2t/j58LbFCzSRWWl6Z4i1jwn8SrJWLGBrzVPin4k8mIRSJYyPFIlyAfhT8XP+DL7/gpb4QvbuX4T/GX9lL4xaFHu+w+f4u8f/DnxbdBScfadB134fal4asy67doj8d3oDl1dkVVkcA+MNZ/4NS/+C3ulySpY/su+DvEaxuypLo37Rn7PkEc4UkCSIeIfiRoMoV/vKJ44XAPzojZFAFjSP+DUX/gtzqW37b+zR4G8P7sZ/tf9or4DT7M9d39g+P8AW8477N/tmgD6I+HP/Bm9/wAFaPGM6f8ACX+If2UPhLaBx57+L/i54p1698rI3NaWvw5+GnjW2nmwTsiudRsY2IIeePgkA/T34Af8GQRXUbbUP2pv26/N0mMxfbPCXwA+Fn2fUbtScz/ZviJ8RdaurbTygBji834X6kJS/nOYvL8mUA/os/Y5/wCDcT/gkp+xdqmmeK/CX7N9r8ZviJpPkPY/EP8AaW1X/hcerW1zbMJLfUrDwnqljp/wq0bWbecC5ttb0L4eaXq9pcBHtb2ERRLGAfuZDDFbxRQQRRwQQRpDDDCixxQxRqEjiijQBI440AREQBVUBVAAAoAkoAKAP4t/27v+DQb/AIbY/bD/AGiP2sP+Hhf/AArT/hffxM1z4i/8ID/wyb/wmf8Awin9tGE/2P8A8JT/AMNL+FP7d+zeV/yEP+Ec0fzt3/HjFjkA+S/+IGP/AKyif+aT/wD5W9AB/wAQMf8A1lE/80n/APyt6AP7z/APhb/hB/AngrwV9u/tT/hD/CXhzwt/af2b7F/aP/CP6PZ6T9u+x/aLv7J9r+yfaPs32u68jzPK+0TbPMYA62gAoAKACgAoA/lq/wCDwT/lD1qn/Zy/wP8A/QfGFAH+U5QAUAf3t/8ABjf4okg8V/8ABSHwU7s0Wp+Hv2W/FEEZOVik0LUvj1pN06DPDTr4is1lIHzC3hBPyjIB/fn448C+Cfib4S1/wD8SPB/hfx/4F8V6dNo/ijwZ410DSvFHhXxHpNzj7Rpmu+H9btb7SdWsJtqmW0v7SeByqlkJUEAH8l37c3/BnV+wj+0Bqes+Nv2S/iL42/Yy8Z6rcz38vhK200fFz4HPcTM080em+DNd1vQPGfhQXdwzqq6P8RLjw5o1u6RaR4OitraKzYA/nM+Mf/Bm9/wVX8A3l+/wy8T/ALMfx30ZJHOlv4a+Jmt+CPEl3bg4Q6jo3xI8HeGtD027bGWt7Xxhq9sgK4v3O4KAfK7f8Gqv/BcQXotR+yX4beA9dSX9o/8AZr+xDnHMbfFhdR568aeeOvPBAPp34P8A/BnJ/wAFYPH97ZH4ka1+zL8CdIeZf7Tl8W/FPVfGWvWtrn94+n6R8MfCHi/SNRuwOY7a68UaTbyc77+E4yAf0XfsN/8ABnD+w78CNT0jxr+198T/ABt+2P4t0y5t7+LwVFprfB34IpPEyzpBq/hrRNd1/wAdeLhaXKIA178QNH0DV7dZYNY8IXFtcyWiAH9bngH4feA/hV4O8P8Aw8+GPgvwp8O/APhPTotI8L+CfA/h/SvCvhTw7pcBYw6donh/Q7Sx0rS7ONmdlt7K1hiDu77dzsSAdfQAUAFABQB/Bv8AEj/gyS/4WD8RPHvj7/h5p/ZH/Cb+NPFPi/8Asr/hjL7f/Zn/AAkuuX2tf2f9u/4ausvtv2L7b9m+1/Y7T7T5fnfZoN/lKAcX/wAQMf8A1lE/80n/APyt6APU/gb/AMGV3/Cl/jZ8HvjF/wAPKv8AhJP+FT/FP4ffEv8A4R3/AIY4/sf+3/8AhBPFukeKf7F/tf8A4ao1X+yv7V/sr7D/AGl/ZmpfYfP+1fYLzyvs8gB/dTQAUAFABQAUAFAH8GnxD/4Mj/8AhPfH/jnxz/w81/sr/hNPGPibxZ/Zf/DGP27+zf8AhI9avdY+wfbv+Gr7P7Z9j+2fZ/tX2S1+0eX532aDf5SgHHf8QMf/AFlE/wDNJ/8A8regD0r4M/8ABlN/wqP4wfCn4r/8PLf+Eg/4Vj8SvAvxD/sH/hjb+yf7c/4QvxRpfiT+yP7U/wCGqtS/s3+0v7N+xf2h/Z2ofY/P+0/Yrry/IkAP7sqACgAoAKACgD8p/wBu3/gij/wTc/4KKG/1n9on9nPw2nxNvVJHxv8Ahk7fDP4wifbsjuNV8XeGo7dPGv2eMulpZfETTfGGlWm9nt9Pjm2yKAfywftIf8GQ0L3up6r+yJ+3E9vpzmQ6P4F/aO+HIu7y3HzNGNR+KvwzvLSK63ZWNzb/AActCm0yjzS4iQA/JH4hf8Gfv/BYbwZJKnhvR/2bPi2iOVSb4f8AxwXTFmXdgSIvxU8KfDSRQR82JURgOME0AeFS/wDBqz/wXGju1t1/ZF8PzQnOb+P9pH9mUWq4PVkm+LsV8c9Rts2OM5wcAgHc6N/waWf8Fp9UaNb74K/Cbw4HKhn1n9oH4YTrCGIBaT/hHtZ15yFzlvKWViAdoY4BAPsn4Y/8GVP/AAUR8RfZ7j4qftGfsk/DOym2mW10DWPip8R/ENoD98XFgPhv4P0F5F52ra+KrmN8czJQB+yH7M//AAZVfsX+BP7P1X9qb9pf43/tB6vb+VNceH/h9pfh74FeArqQ4aay1CFn+JPji+tUyYo7vSvGvhe6m2i4aK33/ZkAP6cP2P8A/gnH+w9+wR4f/sD9kr9mv4afB6Sa0FjqfizStJk1v4k+IbUFW8jxP8UfFNxrnxE8SW4kXzY7PWfE17ZW0jyG0t4A7LQB9sUAfx1/8FHf+DV74xf8FLf2tfiX+1d8Zv8AgqP/AGfqXi+7j0rwR4Ftf2OZ9U0L4V/DLRZLiPwb8OPDtzN+1fYi4stCs7ia51TVI9N0xvE3inUfEHiy70601DXruFQD4X/4gY/+son/AJpP/wDlb0AH/EDH/wBZRP8AzSf/APK3oA/og/4Im/8ABCn4Rf8ABG/wz8Wbyx+J3/DQ/wAcvi/qVlZ698ab/wCHEHwzm0n4b6Mlvc6N8OPDnhb/AITb4iy6Vp7a/wDbvEXibUo/FLN4qvx4fW+sIIvCmkbAD93KACgAoAKACgD4o/4KGfsK/CH/AIKPfsl/FX9k74zRfZdC8f6ULjwv4xttPh1HXPhn8RNH8y78FfEfw5DNPaGTU/DOrFZLrT0v9Pj8RaDc614V1C8i0nXdQDgH8bX/ABAx/wDWUT/zSf8A/K3oAP8AiBj/AOson/mk/wD+VvQBv+Ff+DI3xJ4G8UeHPG3g3/grDqvhfxf4P17SPFHhXxLoX7Gk+m634e8R6BqFvquia5o+o2v7Xcd1YappWp2ltf2F7byJPa3UEU8TrIisAD+6H4XaJ4+8NfDfwL4e+KnjrSvid8SdD8KaHpPjn4jaJ4MHw70rx14o0/T4LXWPFln4EXxH4uj8JR6/exS6m2gQeJdYtdNluZLa0u2tkiRADvKACgD+aX/go7/wa0/8E7P269S8RfEf4a6fqX7G3x68Q3t5q+p+O/g3pFnqHw78Ta1fzPcXmpeNvglfX2leGry4uZ5ri8u7zwHrHw31fU9Sne+1vVdVkLo4B/LJ8d/+DML/AIKP+A9Rlk+Bnxi/Zr+P3hsyvHbPea/4p+EfjQqu5lnvvDfiPw/rnhW2ikXaoFp8RdSmWVirQiJfPYA+KtX/AODUz/gt9pt6lrZ/st+D9fgZiralpP7Rn7PUNlGB/G6a78StF1Eq3YJYO/qg5wAej+Bf+DRT/gsv4uvY7XxB8P8A4EfC6B9u7UvHXx58MahZRbjz5ifDO1+IuonZ1fyrCXI+5vPFAH6i/s+f8GQ3xXv57O+/ar/bh+HvhS2ilR9Q8Mfs+/DrxJ8QJ9Qh3DzYLPx18Rrr4aR6PLsyUu5vh5rqBsBrMg7gAf2Of8Ezf+CWH7Lv/BKP4QeJfg/+zGvxCv7Hxz4js/F/jzxZ8TfFkfijxV4s8SWOkwaLbahdJpmleHvDGkRQWEAijsvDnhvR7ZtzSXKXM2JQAfpFQAUAFABQAUAFABQAUAFABQAUAFABQAUAFACHofoaAP8AIp/4Oof+U3X7WnJH/Ej/AGeOn/ZvHw19xQB/PIc9vfoM9/c9f580AHPqc4XqPX2z+dACZPqc/Tv83vjt+PFAC8569+/rtz6/j9fzoATJwOSeD2yev19/60AAzjjPbtnsvvQAEn17N2/+v+Xrx60AKTz17j+a/wCP60AJk8cn+Ptz19P8+negD+0b/gyb/wCT6f2sv+zSof8A1dXgGgD/AEraACgBrdP+BJ/6GKAP8Fv40Z/4XB8VMf8ARSPHfv8A8zdrX+TQB5lzkderduM5bv6+1AByR17jqMc7vr/n86AFz15/vn8j1oAQk/Pye34fTn8z+NAC5ORknn29u/PHf8c9qAEGfl+g/wDQT780AAJ7569wB6f4/kSe3IAc/N/wLPHsPf8ALrkUABzg5PYdsdT9fzoA/wBRb/gzK/5RWfFD/s9X4r/+qr+B9AH9btABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/LV/weCf8oetU/7OX+B//oPjCgD/ACnKACgD+6L/AIMfP+S7ft/f9kl+CH/qY+O6AP8ARWoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAEPQ/Q0Af5FP/B1D/ym6/a0ycf8SP8AZ47Z/wCbePhrQB/PESD1K9+xPf1/z696ADj26Dkgnpn8fr6dOaAE4PGQevb/AHj+GM9O/H4AC5Ge2evTtt/ljnHrx70AAxgf4HJy2e2e4wB9TQAcdD39iT0Hp7YJ/wDrZIAce3RucEdcZ/Q8eucccZAAkZznv9f7v1z09e59KAD5fy3dj3Pf8On9DQB+in/BOH/gqB+05/wS1+Jfjz4r/svP8O08V/EbwGnw68RH4i+D5fGOm/8ACOp4j0rxSPsFnHrGim1vv7T0i03XLTzL9n8yIwbykyAH7Df8RgX/AAV5/wCe/wCy9/4Yy5/+bugBf+IwL/grz/z3/Ze/8MZc/wDzd0AA/wCDwP8A4K8ghvP/AGXvlIb/AJIZdgHDAjJHjwEA+oIPdSOoAP5fvEevX3inxBrviXVTb/2l4g1nVNc1D7NCYbf7dq9/c6ld+RAGbyYftN1N5UQZhHFtTc2NxAMbvnvk/T+LOT+f5e/AAgxwMgnjHHPXPXt/k98UAHryOjfqT378g4HfrQAHaS3v7Z6DnB+vOTjP60ALxkcjgn1H1yT3Hr3oATK8dP8Avk+h6+vJ7f8A16ADj1HX3GOR1zk9QB+J9KADjn8ex7gE49PUZ65I96AA4wcY/Ig8H1PXnGe/egD/AFF/+DMr/lFZ8UP+z1fiv/6qv4H0Af1u0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8tX/B4J/yh61T/s5f4H/+g+MKAP8AKcoAKAP7ov8Agx8/5Lt+39/2SX4If+pj47oA/wBFagAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAQ9D9DQB/kU/8HUP/Kbr9rTk/wDID/Z46df+TePhr70Afzxt9SOv8/cj16/4jIAde56L69/x6n/9WSaAE/E9Dz/33nvz+f8AOgBe/U5z7/3OvXr+p9aAE7ZyenX/AIF9fpn+dAByR1PbPvlV65I/X+Z5AD8T/Gefpjpn6+/86AFPX3z9O6e+aAE9OT0Y9SfUdz/Ln1POaAD05P8AD79hjPPHOT+PGaADnJ5OcHsfUdOc/wBPegAx82cn73+e/wCHrjnGKAFPXqf8vzzQAmODyeo9fT6/17DHUUALn5hz3PH4t7/0/GgBB2OSeR69d3Xr1/P8yKAF9evR/wD0I579f85oAQ/xcng+/fPv/n3zQAo6jk9SR+X17dvX9SAJjpyeg9ePlPv/AJ6DvQAD6k88/mvuc/8A1z+IAc+pyN38h3ye/P19KAAjAPJ6D8ifXJoA/wBRf/gzK/5RWfFD/s9X4r/+qr+B9AH9btABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/LV/weCf8oetU/7OX+B//oPjCgD/ACnKACgD+6L/AIMfP+S7ft/f9kl+CH/qY+O6AP8ARWoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAA8gj1oA/yZP+Ds/wCHfiPwZ/wWa+MvibWbC5ttJ+LXwq+Afj7wfdTRFYdT0PT/AIbaX8Nr+4tpCNsiWvinwFr+nzAHcktuQ+AylgD+ac/7v6A/4/5P1IAEx14/u8Hn1/z/ADxzgAMdsHp+P8ftnn6UAL36d/Qf3OnX9OnvQAg6Dg/5bPPBz+X4HnAAYyOmenbP8K/U/ofw60AGPb+9wPfH19f8M8ZAA8547/h1X3Pp6+vPoAGOnB/j6d+39e4Ht6EAOfTpjtyDgfT8f129aAFI5OB1HXAwec569+vXPqPUAMc5x/ETyP6+nf8AXJPFAAfp+ffLZ/zkj8eoAExweO47e3+c59TnGTgAX+IH3b+bf56/40AIOg47jk4z9716/p3oAX14PRh78k/5/lnnAAh/i45/D3+uPc859uoAF5yDg9Sew6gdfr74Pt2oATHTjqB2Hoffn1659cY5AD8D1z9eVPoPTPPoc85wAGOvH97t04H+ep/HrQAYJzxycAYGMknj8z7c9wvcA/1VP+DPn4d+IvBX/BIz/hI9csbmzsPi1+0/8aPiB4RlnjMaal4a0/TfAnw1bUbfcAXt5PEngLxHaJJ0drKQoSuCQD+qCgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoA/lq/4PBP+UPWqf9nL/A//ANB8YUAf5TlABQB/dF/wY+f8l2/b+/7JL8EP/Ux8d0Af6K1ABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAfiV/wAFrf8Agix8IP8Agr58GPD+kaj4hj+FP7RHwmXWrz4K/GWHSBq9tax60kEus/D74g6TBJbXuv8Aw98SXdlY3haxuote8Ia9bQeIvD73MMuv+HvEgB/n1fFL/g1b/wCC03w78S3uh6B+zX4Y+L2k29xLDZ+M/hj8bvhJJ4d1eKNmUXdpaePPFPgXxZZxSbcrDrXhvTrpchWiOC1AHmn/ABDN/wDBb3/oxjxD/wCHl/Z2/wDntd+p9TzQAf8AEM3/AMFvf+jGPEP/AIeX9nXr6/8AJWu3b0wPSgA/4hm/+C3v/RjHiH/w8v7Ov5/8laznPP15oAP+IZv/AILe/wDRjHiH/wAPL+zt1z1/5K117fTigA/4hm/+C33/AEYx4h/8PL+zr+ufi1+P1JPegD8a/jH8IPiN8Afir8Q/gl8XfDkvg/4n/Crxfr3gPx94Wn1DStVm8P8AizwzfS6brWkyanoV/qWjXz2N9FJC13pmoXllMV8y3uZoislAHmvPXnP/AOr2B6Z7dz3IyAKcn179sc9Rj+fX64NACZIH8j/P16nPXr1G7OKAA5I75/HPUc//AFwO56ZxQAvt7/pnp6dOevTtQAnQ/wCT3P8ATj1xx1wKAA55P5Hn0PT6nH4kdeKADHPsc8fnj+Y4I9PQ4ADnpyc4znv8xzk8jp79OMHsAffv7Ff/AAS7/bs/4KIab8QtX/Y5+Amo/GfTvhXe+G9O8e3Fj42+G3hIeH7zxfb6xd+HYnj8e+MPC8t82o22g6rIj6Yl6lsLQrdtA0sAlAPuH/iGc/4Lff8ARjHiH/w8n7Ov/wA9mgA/4hm/+C3vf9hfxCfr8Zf2dufr/wAXa/H680AH/EM5/wAFvv8AoxjxD/4eT9nX/wCezQAf8Qzf/Bb3/oxjxCf+6y/s6/8Az2f85PqaAD/iGb/4Le/9GMeIf/Dy/s7f/Pa7dR780AfpX+wR/wAGff7dHxa+I/h/Vf2577w1+y18FNPvYLzxTo3h/wAaeE/iT8bfFWnxSLLJo3hGz8H3PiPwL4Ul1ONJbJ/FPinxBeTeH3ljv7bwd4gkiFo4B/pO/Bn4PfDj9n34U/D34JfCHwppvgj4Y/CzwjofgfwP4V0lXFlovhzw9Yx2GnWgmmaS7vrpo4zcajqmoTXOp6tqM93qmpXVzf3lxPIAem0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH8tX/B4J/wAoetU/7OX+B/8A6D4woA/ynKACgD+6L/gx8/5Lt+39/wBkl+CH/qY+O6AP9FagAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgBCoPUA/UA/wA6AE2L/dX/AL5FABsX+6v/AHyKADYv91f++RQAbF/ur/3yKADYn91f++RQB/ij/wDBaQf8bZ/+CjP/AGeT8f8A1/6H3VPT/P40AfmN7/r36r6ZHb37deaAE9BgcBv7349vz6+3NAB/9bPXsB6jt1GSP9qgAOTn3HX5s4znsMH8M98HHFAC85/HP8XX67fz5xjt3oADn/O7+9n06/nnjjFACc4/EdN3p/u5/wAjOe4AvcHjqfX1bjOMf1oABnj8P73r7jucd+oHagD/AEM/+DHgA/Db/govkA/8Vt+zJ1H/AFLnxm9f59+tAH932xf7q/8AfIoANi/3V/75FABsX+6v/fIoANi/3V/75FABsX+6v/fIoAcAB0GPpQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB+WP/AAWG/wCCasv/AAVd/Y6uv2T4vjNH8CHufiZ4H+In/CeSfD1viaqDwaNYB0j/AIRhfG3gAsdR/tbi/wD+EgX7J5H/AB53Pm/uwD+Ub/iBmvv+kndp/wCIaTf/AEVFAB/xAzX3/STu0/8AENJv/oqKAP3W/wCCHP8AwQKuP+CNPjz9oDxrN+1ZD+0WPjn4S8D+Fl02L4IP8JT4YPg3Wde1Y3xvH+LnxK/tn+0f7b8gWwtdK+yfZvN+0XPneXEAf0Y0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAf4ov8AwWk/5Sz/APBRrP8A0eT8f/8A1PdU/pn/ABoA/MY9fTnv/wAB75Pt/XvkATg9x/F+H6jr3yfpgcgAUfUdvqcYOP5YwfqTzQAHHJyffkZHPTOfU/8A1+xADj15znqM56Z69fXtj+HNAB+P5/73+POcYzjoOKADjnnuOhHp9evHHOOmckGgA79ecn69W/n3yMcn2AAAY459OMjHX6k/r168cUAf6Gn/AAY7/wDJN/8Agov/ANjr+zJ/6jfxmoA/u/oAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKAP8AFF/4LSf8pZ/+CjX/AGeT8f8A/wBT3VKAPzFJ5J9+/wBU70AGQe3Zsc+uc8Y/LP60AGf129+eAPbn8+DycA0AGeSe+PUdMj2wP1Pr2oAM8/8AAj375+np+GOPvUABP9T1/wBv/PJ/xoAM8Hp1Hf29+e3f8ehoAUfeHrk/zb/PX0oAQHgDscdSP73p1NAH+hr/AMGO/wDyTf8A4KL/APY6/syf+o38Zs0Af3f0AFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH+KL/wWj/5S0f8FGucf8Zk/H/r/wBj7qn/AOv9fqAfmOfbHXp+K+3uT+INADefUfxenb8PXk9c9aAF5yeR2z05yB7c5/CgBeeeRj6j1+n4d+aADnPUYz6j8unX8f8AGgAOfUfp/e+nbpnnmgBMnnkZyOcj0+nft/8AW5AF5yPqf/Zuen659etACc8HIPTPQ9W+n9RyPagD/Q0/4Md/+Sb/APBRf/sdf2ZP/Ub+M1AH939ABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQB/ii/8ABaT/AJSz/wDBRr/s8j4//wDqe6pQB+Yx9D0z9B1Tt+P86ADPvzh/55/+vQAA+uCTjr7qO3Xn2/HHWgBCcgjjvnkdcjnOePofpQAuQfrn27N09f8AH6mgBD156d8+z/8A16AAkHP1/mp/qf1J9aAFB5x7k/q9ACA56nk7fTsx7UAf6Gv/AAY7/wDJN/8Agov/ANjr+zJ/6jfxmoA/u/oAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAa2drbeWwSo9Tjj9aAP8bP/g4Y+B3jL4Ef8FhP23tK8XaZcWdv8R/i5qfxz8H6jIjiy8QeDPjNbW/jXS9U0q4Py3dtZ6hfax4bvpYiyW+uaBq+nE+fZyKAD8WSc56fjn/Z64P8vb/aoATjPbv/AHvQ+v69/TmgA4yDx2/vdgPw+mT9aADj5unP+96jr/8AWzz7ZoAON3bOf9rPX8s/pQAp79Oh65/vd8H1/HPtzQAnGD93qP73v+P+TntQA7+IHHr655J98eucn1x2oAb6dP8Ax7+91H/1+/4UAf6UP/BlL8DvGfgz9kD9rD4767plzpvhb43/ABu8HeFPA812jRHXbL4K+FNYtvEOu6crAC50hfEfj+58OpfRFon1fw/rViT5thKoAP7VKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgAoAKACgD8dP+Cuv/AARY/Zj/AOCu3w40LSvifd6l8MPjf8PbW/t/hP8AtA+ENMsdS8S+GrLUpRdX3hLxXod7LZ2nj74e3t8F1NvDV9qOl6hpGq+fqPhbxD4fn1PXBqoB/F34x/4Mof8AgovZ67eweAP2m/2KPEvhpJpV0/VvF/iP47eB9durcOPJlvPD2j/BD4i2FhPIg3SwQeJ9Rjhb5UuZx89AHLf8QVf/AAVMz/yXz9gHnOc/FP8AaKPU5/6NW79/8mgA/wCIKv8A4KmZ/wCS+fsA9R/zVP8AaJ/X/jFbt29OvWgA/wCIKv8A4Kmc/wDF/f2Avr/wtP8AaJz9M/8ADK3T2/WgA/4gq/8AgqZ/0Xz9gH8Pil+0T+f/ACat19T3HGKAA/8ABlX/AMFTP+i+fsA/+HT/AGifXP8A0at17n1PoKAD/iCr/wCCpnP/ABf39gLr/wBFT/aJ/wDoVu3bnjjrigA/4gq/+Cpn/RfP2AepOf8Ahaf7ROe//Vqvv+poA+y/2Q/+DKD4oRePNG1r9uf9qz4aQ/DzSr22vNW8Dfsw2/jHxD4k8X28UqPNo6/EL4meEfAtt4MguFDRy6raeBfE995RZLWKxuHS9twD+974LfBj4X/s7/CnwF8EPgr4K0T4d/Cv4Y+G9P8ACXgfwZ4egeHS9C0PTUKwwI80k15fXt1M82oavrOp3N5rGu6vd32s6zfX2qX13dTAHp9ABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFABQAUAFAH/2Q==", "attach_logo": "logo-2013-color-small.png,data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAMgAAADICAYAAACtWK6eAAAEJGlDQ1BJQ0MgUHJvZmlsZQAAOBGFVd9v21QUPolvUqQWPyBYR4eKxa9VU1u5GxqtxgZJk6XtShal6dgqJOQ6N4mpGwfb6baqT3uBNwb8AUDZAw9IPCENBmJ72fbAtElThyqqSUh76MQPISbtBVXhu3ZiJ1PEXPX6yznfOec7517bRD1fabWaGVWIlquunc8klZOnFpSeTYrSs9RLA9Sr6U4tkcvNEi7BFffO6+EdigjL7ZHu/k72I796i9zRiSJPwG4VHX0Z+AxRzNRrtksUvwf7+Gm3BtzzHPDTNgQCqwKXfZwSeNHHJz1OIT8JjtAq6xWtCLwGPLzYZi+3YV8DGMiT4VVuG7oiZpGzrZJhcs/hL49xtzH/Dy6bdfTsXYNY+5yluWO4D4neK/ZUvok/17X0HPBLsF+vuUlhfwX4j/rSfAJ4H1H0qZJ9dN7nR19frRTeBt4Fe9FwpwtN+2p1MXscGLHR9SXrmMgjONd1ZxKzpBeA71b4tNhj6JGoyFNp4GHgwUp9qplfmnFW5oTdy7NamcwCI49kv6fN5IAHgD+0rbyoBc3SOjczohbyS1drbq6pQdqumllRC/0ymTtej8gpbbuVwpQfyw66dqEZyxZKxtHpJn+tZnpnEdrYBbueF9qQn93S7HQGGHnYP7w6L+YGHNtd1FJitqPAR+hERCNOFi1i1alKO6RQnjKUxL1GNjwlMsiEhcPLYTEiT9ISbN15OY/jx4SMshe9LaJRpTvHr3C/ybFYP1PZAfwfYrPsMBtnE6SwN9ib7AhLwTrBDgUKcm06FSrTfSj187xPdVQWOk5Q8vxAfSiIUc7Z7xr6zY/+hpqwSyv0I0/QMTRb7RMgBxNodTfSPqdraz/sDjzKBrv4zu2+a2t0/HHzjd2Lbcc2sG7GtsL42K+xLfxtUgI7YHqKlqHK8HbCCXgjHT1cAdMlDetv4FnQ2lLasaOl6vmB0CMmwT/IPszSueHQqv6i/qluqF+oF9TfO2qEGTumJH0qfSv9KH0nfS/9TIp0Wboi/SRdlb6RLgU5u++9nyXYe69fYRPdil1o1WufNSdTTsp75BfllPy8/LI8G7AUuV8ek6fkvfDsCfbNDP0dvRh0CrNqTbV7LfEEGDQPJQadBtfGVMWEq3QWWdufk6ZSNsjG2PQjp3ZcnOWWing6noonSInvi0/Ex+IzAreevPhe+CawpgP1/pMTMDo64G0sTCXIM+KdOnFWRfQKdJvQzV1+Bt8OokmrdtY2yhVX2a+qrykJfMq4Ml3VR4cVzTQVz+UoNne4vcKLoyS+gyKO6EHe+75Fdt0Mbe5bRIf/wjvrVmhbqBN97RD1vxrahvBOfOYzoosH9bq94uejSOQGkVM6sN/7HelL4t10t9F4gPdVzydEOx83Gv+uNxo7XyL/FtFl8z9ZAHF4bBsrEwAAAAlwSFlzAAAZxQAAGcUB/Hz7SgAAJcZJREFUeAHtXQmsHVd5njMzd3m7n5c4jQOJTUiIbRwggCJKwG4hoJZNVNdFqKUKSEArVKVqGrWU8PwUQCgEFQmQSKUSKUiI+qGItYIINRa0AaUssbEdEnAWhSTEjp+f33qXmTn9vjNzX952Z+4699z7zrHn3XtnOef/v///zn9m5ixCSmmZFI+AsCwBlBRQr/7ZzVf6QfBeaYs3YOdu7B7SFUIBwSHfghT2EyKQDzq2fd+vbrjnSe5dqRN/m7QxAsIQZGNgqnuFJcCDkAL7H7z5fbYV3Aam7IKHlUGZIs7zq+dq+ulA1jxkzYIUzwSWfefJN9zzDcq6UjdNZe+6WHbXJdBdgMkjqh5+5U//5oO2kJ8HWbZix1nUzLPwME938UMZxSxlpuwg+Oepi5I70k17HboooIkgMeAfOnbIfeDgA951P/vga6T0vy6lyAhLLuKSLByvZ4KvampJSGxZZXwMIiZWhHDef/yGr/6yqmMMDJv6kLuptY9RXjU/DkoVIUCOm8GGLXC0c2yqqPsRdUcSk4FGh6J7JAmGZEGOReiyAy3DmyHiL1kBmKZWbWOZJlYNbApTBYXNK/73Q5fBwV4rhFhC7WszctS4RP/djHrQIdTFuv410I1CV3XVX4H0JTQEqYV5ITyQsSp0onF4FqIJKuHepQfEp04SzWroIq2tlVA3MCTU1fxdj4AhyHpMwj1T4QdixhCcCvceVgCnYju+txN0ULoIyw11gzqRrr2tWGekNwRJwFUKp/dJsZGOeOLQt7ptpG+T+wxBkoDz9H+Sm6RCzeN9rFpNnRs8YAiSCJh50JcIUR+fYAjSx8Y1qrWOgCFI6xiaHPoYAUOQPjauUa11BAxBWsfQ5NDHCBiC9LFxjWqtI2AI0jqGJoc+RsAQpI+Na1RrHQFDkNYxNDn0MQKGIH1sXKNa6wgYgrSOocmhjxEwBOlj4xrVWkfAEKR1DE0OfYyAIUgfG9eo1joChiCtY2hy6GMEDEH62LhGtdYRMARpHUOTQx8jYAjSx8Y1qrWOgCFI6xiaHPoYAUOQPjauUa11BAxBWsfQ5NDHCBiC9LFxjWqtI2AIUgNDM5daDWA22e7emdMGE8qmOe3nkR0H1YRxLhDyghVegb1cTScuJRyOu7SlY43OcCdFoC45t+McwOWEiykllIQJtLsFU0NK9kAEwQSZU8KxjmDaT2VCAtv57dTBHRsbcOO9q0Cnq6lt1d7O/6BocdtaCYRjK212QFcuEpTGpmx45IgQU4cxZeWk9v6ncQSBi01ZtlWQ/vLkyhNrTdy530eto2rlqIeFW9xvBRJBhLPaBvCi+mZ45zTqmCianFZe2AFRl/PmIlj1xFeeJTjHMFdCsKWdVStkWVVdOyDiuiyjyLEMCYmC6eUDknPdyRrs0HQBHeVXgEfK5++/dCjjVG6Eax6AWbcHgcyg2qGbdjT50rHHrPPF+zOv2/PJ7O6b8pi8mqsHwLfgXDVsCUcNLBH4ll32LHepIuwyAMZyA9SkQ+LaQKYSZOVSZSAoB1lLBlgIS9XTGxbIMIHWKpeVs+WSf78seY/j9LwSfcMr2rcTELAlVwF8LwDME0Oy+JPjf/3DBcpDSHUkiYYR5EV3euH+re/MWPJWeN0BGpEgRljW9NF2mZO1s4fytgQLtmOVM1w1BxGBItRO0UF4IIlUKVm5i4tW9gJk5y4uO9C+pHxcCu/C0rZgtjwmvQBQMTI0ksS7VURJ0quRLJPOjUCwA6u4ZOdPvPxr77wLYH2Xl5E9upFEswhCcjBJeeFHWz+G9synAJoDv5rFPp8VcXi8838DyxGDwfng55n9A/+Ye/lWrltGgvB+PdELwzAD0aVdtjKzc9bg82AII1BbSKICQIC8zy3sDObLoxaiSFgHw7/qT0L6chrOuQTuotnYVvpuIMUq1ICN5eCkUUDqQ/ZPPPpX3/6SiiTYCZka0WODstq3S68IEt1zMHLAH0EOqwK4ZoBmBm6pZAV0q5BuHxSrc1IWCtvrLFf5H32o3sJJCFbMWVEZHbKK3pw1cJZNH17fivXV9WCaP43IAXJgdQbcK5EYjB71RxCqAn1cXJKJuL8agLb/irRGYcxaCSzleXxiGW3xKUSSpxhJ1D2JRisHd7gl3wjKqMVwQ857DjSib2XkwNULAC8HEFX9FtUrBLjjGxnBQsAIsiKhbcUT16SouYOL/Zwsj2WlN4hqvq4AtCan5Z8kGO5oAtxvDPrzpbGQHDxcPzGqmYWMV8Sigox56WyUVoms6pocWL2Ab44Q9q3Xfe1tQ7JwFC0FfZ5u6UOQKQWYxRtyAIh7DjarEDmqzsmqM80tKo62bClBEVtYds6qDKt8oA/VaCbhQuVb/nxlGM/29LFdM8rgmoiYaB1Ys6DnKxdE/kaV1dSpZiFqUpLal+kBMu+8T4fG59MqMAEPjeACYf0dEqO2DnofQc3MsOFIPwcnsKMo2LzMuPeQZT8XVidhc6X5zLp/JaMiLO+D+QN4KwPbI53eh4CmWqNdF1APgrBePBISBLXkdoWKqiu7jk/bBGA7H2DjZli5RHP58tqAD9PwvKC/Umjtqu2PHGm8SdshPPQBOnpywfcc1FXdA7B26ZdEXVrUpno5b7D7BRaQHbpEtaEMba/TUyx9CBJZHAL1WeyIFIvuH9rh2OoGux0ZaZbHi0zRRzDtCKLbredKtq78ro8JjSSdREA/gnRS2wbzJiHwogBvtMImcf+0a0IgqoTvN70aNHPs6YYgG8Gz/KjJtkascjCC3x68qK8cCS/10f7HA7a+0moja7a0zxCkJnzsAzGI3pEX/d1B2V/A24zEvlg189LtAB+hSg4GUW9TqpFENyl1kMcQZCMrwH94T122cmIsOBe8yb9Q5FtL1rVNA6bTkyfIgodHRQRGdocxIWQjH4j2NW3vmDz745BqZjGKDFt/4p1ZenuwUP6tyNjoGyHZB4ZeVc9GMAiyevLEqpr5RnmHD+zUTh5I3HgnFHaXxKnNJNWsQiC0ZFkEwRJf8ZsUj4BenRXjZU33KKMImuhlkROjwWzw98XfzJZy14790B3KXGH5GIBR/5AUjttjewaOGaDjLL8255l4UajeMJNLzSVwW1ag1iw6vCg5ms+qOQF67SpDkDiLKZL4VlEMi8uD57zbS+WZA8FVQz90x3PPWA5GQ9X1cgM9scAIabuuEGMgC0bPNUcQsIqvUxmQ3IhhzDlOgxePqeaUXMT1C2hY+bgMd+kmJSFgCJKEUBRJSJJtcsb/YOl/Zt9Weal7xh3PnBd5p5TQTkFbP7CtIF8U7rPfzL70nictZ3GrsDIgV2AjniQVr45zjCD7cdlWRXpyUPrBzYhFl+FYEUdImJqJHRxRiA9iVEAML2zv1cuqmtlumgOGIPWYGh7G5laJI1PRj5LR5MrKkxXWwAgHSU7OgYnsu//kx94s76inuKRzXn7vuw6Bt9tQ+EUQEDaMiQUgiDqKxhmjRtypSeVuxuOGIPVaXZGErs5qexgV90jYwIqjR0ge1vC82n3uRy8b2/OWMxenpvZnDxcKXgHduuuaf6uwT+6bOuWeLBwtv2zq8BgGT+D9JQIDBlGwEyQfStVSgwWrY/hb86RaF5v9eFFsUv0IhK4Gd2QTngl/o33rMqE3quaNqsD5S44uzrLnOyZpOYXu3SeD5dlauDMhCeuwmmUlM1cKODUDTo+22uRgljzJpOYRMARpBjvEjnpSfWfVk5M5p1sIxN7gdUsoU65BQBcEDEF0sYSRQ0sEDEG0NIsRShcEDEF0sYSRQ0sEDEG0NIsRShcEDEF0sYSRQ0sEDEG0NIsRShcEDEF0sYSRQ0sEDEG0NIsRShcEDEF0sYSRQ0sE+oMgpsORls7VD0Jt0BcLHY24DEGa6bQljqFn38EJjHbgNAIoHV38wq5M+JvUp6nOURVparTpy1KjxJTlkmovWDfsmUybK787PGU5B08/IA7vTbev5VRBDWZbJfBqgkyiGzWcFL1MVc/RNK18sFqYK4oY2ENW8K9aGGaZLNVz1nySQNAq5BHVS2LUmuvNz3YhwFqNI/ZhD8wJYWG1NZhQ/Y75w+7OAQYTY9DMfJHnHVX+dzDmks4dOjR5zH1g4qBXLSEiCKLGJHQjOZCm79x2uWuLXRgjNAxOi+Wzq1e1+RMzumPBVQxJKgVFcUBeZeWwwqS0BtBb3F1F5zXlckAGwYUlfExF4IMlHBKEGTtwoiHJGrQ6/1Nwmj1FjEVY5BLb8nY6IhhEVIhpkCB6wGwYjYyBl/62PddNPnlD4MznpY2FpwIuLtbZhPXLMDuYWKh4mWd/M3HNsySHmITAE8q3pDs5KeyJCVABO87fuf31GIDzEWHL14MU2/BdTSTNSQc66XBqng16N4cBzYthmcHCTL61XQ0JUsFhPUgQSQUO/MGFaJhhzTssLgPLqKgTP0hifXZmT4sIsEkl7Tkp/C22WLhpwF7anRf+CEYbc+qU2rUVxhKjniNHApBp4CbfnX8N4w9JRep0OoEcKER6bqYyvf/Tv/5FINx75MS1v2S5FNxV5MCPC3du/ahti0+i9h2Hyy1hF+IjgkdVxuonr2xzUvcQYf6sT9Q3/mWEqFUUhK8e41kZUDyLWJJHJTYDyNVEiNUTauVh9rcHgZAc81i2dLfrTL99VJR2ZjF5C0zI+SlohdqWUHaM7AyycKYVNljwkVJS/OCIaPlHKLFgS/+t+z5z8s5TH9//VQqvYt/M58Y/AG+8C/V3FrKehXxYmhcTBKjmi9KSmnZuC5FUiAIZ1WiNmk9hnIPkhHjdhh0kEjYe4nWMPFuwhw1h7jOpwwgocoglTAmx03HOv2NMlC7NWk4JjWa4D5dPXG+11ftUlIjMpypHdREntFP3oDi5s59hOViaWizC986Bmhlw4I79nzn1PkLnXvzClj1Y8v52eFkZB+dRheeqNTc+000oD86uSuWnKj9OhhXHCDF+cvaOLGd7A0Uupiv85ixN1U1oaDhzbx4S5e0Zyy2iuduA50SGU9ZTtleVNkxJ46ZWxylPQmkuOLoIMVDBBrdd+9mHf2YHFZtMeQm2uZXk6DVzKzhJKtY4bGpxBVcgXNW81/TpBXkZPSx7EcFij2sXr8SjlUobpjKt3nikRg66SbSxfkYrxJJz2LHL8Z332mivvwl+VQJjMUNGepTtiAOADRGsnGc6u6yPYklHStzkmRLYsrRLV2REkANb0BoKq6QexoXtPDw24tTMlngDv+zBHj5/jnkW10P6giFR1AgfyPeQ6L0pKh6r+CPAuj/cJ6piuZYkXuTI3Ywaw/AoNRVlbxqoptQmbtSEpl0HGK/5kDaaCbXaOmpX9l3Lh2FQYr12MdTpVxxdU9EUbBBoHQH0fTHVbOswmhz6F4F+aTj2r4U2k2bouqGbuloRhM/Y0GlEO5B0M1rfyoOZ8HXTTSuCEBw8cGY3ETxpi54n6IaYkaf9COAFHDJFxeiWdaseV3d3b7/qDeXIt3xyTpTRycVHnypH9c9tkcIqHJmY1JAdUj0ZzSo8TsW9sFMRgbuER2IkizZJK4KAEgJvZDzrAnrl7pSjeO/EFZXUi426UVtxIq7lq092WekKRVT54WsZNq5XSNYO+7M6UXEWX7rUdmdH3FYDPaIHOGKLIH/Rkhm8nEO3d42SXgQhMHxFc1YsyJyVsbbIAXRdYydG1QdA4ZZgkogMPIuvddkzlIB3hSAolx7EBXQ8vJv1to227sjuSE5WiiX0AsToS/S2Vm3RbumH2h62UZ1YFcK8yVacgeZJic0qWgkdS/EWftbyBy/oRg5Kpx1BODaEkcP+vZhBQ8uXY3IQkSXsBkOiUOqYhOMqauBvDiejZ68yQ9JlMTm2dIiM8OAzY3CkkRcyUmxvKTtkNldCf1NrBJ44RkWRP23YHf1IBwwewhCcHASArgHeqMPrY6RZ5g/JwZtyOTgt/cHzYRTUq3lFU2lHEArFKMK6XzwtZsUMBkKNWXn8czHOkERJThLnZqw/oMvi4zBB1xyITsPIQXKA94+UF5zwKU0hzoVqqIdVpngklykGRSv3CCMH/rFTnYsvMS5ZI7/27EYMqWDMUP5KBIJdGF9Hq9XMGWIi4oBHqot5pmQF2Xkhc4tkSrjVvLRrB8T057Y+27XS6ygYRFHGx2hDNWw5jiA8EUjTCbfg7C+M/8P0vz75rfEtw7ab+hj7qmrb0axi5PBBjp03PU9naMmZUWWLA/feNFiq5G02t1REqRaW8mfFu9Y+8+HPXtz/2V/cbvnZv4NuGGLA7uq1SRKKyMe5dlhZaHbPsRZCPSPICilFJqwfMWKw6vwrjq7+GpmFwLOOgjNa1pXvuTCz+qz0f7XarFopMZ5akGALK/d1+7v0BhbR0uIjleoIqQSRGDECdHPiXT5bofom7QlSbTyoe5OEqimKIKq/Moa+qPH0p6dEdi8GUnXdBKpZRZq3njDDhm1hAdDWc2oth32n9zknJyawonUlw3tt5Iat/vsI3clBdPQnSCM2hPtVzYPqSTnjqSnL33tU1WyN5KT1udKaaGgB0E4pg9k/otrfJfKdKqar+fKZUV+ltdVqodBX6umlzKmpEG4114JeorVLmr4jSLuAMfkYBIiAIYjxA4NADAKGIDHgmEMGAUMQ4wMGgRgEDEFiwDGHDAKGIMYHDAIxCBiCxIBjDhkEDEGMDxgEYhAwBIkBxxwyCPQXQaBNtatJ1bRTU9Vv5rPtCOwrhP1L0FOx7XlrkmHv9MWqs5sfu8Ozv2sQhC9B91n7nKnDazugpIt+AWvaWfum0IWqTZ0VJ6FbtZtHuqqsKu36t+xBlXQ9LMNhst3FeJVgbfyhP0GqxGAllTAwKDIRgwiGTAVcIM/ae/RkGb15+yrJCXbE1KKTmeolLYSDxUBUN3xGkuRoEs5ighNNd/fmHZPEYAPQ5ZBupABLN1TQ6Z3uX2sIwYumwYhEd5CXXXjra8fOLs1XacZdqaatGZeD7sSzc7ngwIkTi/CjF6VsQhIOmLrsw98dGPHOOm4eI2+7mJawLMiZuwsXsbjkoJRYZVJIrPinhnjUDic4SXVzFw5XkwKdfFpZ26TniEK4M6DGeFUpvemBrPfcYN6/6LiyZHM2+thE90OT2A0Wnaf9i5nH+V1FlNir2n8wEpPu4GGU6Qi855GLRfm3B44fX1ADcbF0ZCOlcmFJRo5dH/neYC6z9G+49mroNY+8HDVtQiOZtetcTNYjg8CzM/mrhZO5ErZB1F57F7imMBgEHMdSgNmykx+et7ODWO4P10RRZc3ZXf+pXxOLbsN6CFMdFB/dOlp+LD8YlKK1S1jlcEsgCc7hUp6XIt6Mh1fUCjk42tEEaiCWofQxDLezLhsphbVloUAtGkvqnqNgMXKUMyMkx3XIgKtocUw6UuNZ8qpWEywTSL8yBqKgIlLDbRMF4aB0q7w46BdnR5386Kw7sm0ag/c5IjHx2lblbfR6vQhCcvBeAysALz68dUv5sYEBOx8E9kC1tk1iBtyEYYe5eFYJa7vMwG+QY2vNmkZBXXE+DK4iCAfUz0k0t3hsCv+avYNgswrV9DyyATnELGqLiCDJ2KyQq01foZ7EaE3HxrJ99hAybSAqhmTwFi6MW4HnZMYuPdsmodqajV4EoWq2L0tntg0rcgzhWRTTMuzJFQzchPGDlOCTFejHWfu6E0F4E0oPQsKNiHSncS+yg79aTZiGMNINC2dzVGh39INuLBiNJkQOZZrGI4BwHN9bmhu13Fw5M7x9Wrd7En0IQhK4iNYLebf824FBkWWtj//hrVxTLoXbRdqvi+GDZbN43pi2UQxitcwJfsH/riSWvVx7NSkBsLHtwF+6OOoOjMzjXgYrzjZOtCYLT7xMrycIaIb65/PZYNZ2hAuHaoEcKzVn5Zb2trb8lb/N95UIgAxoFlu+lwnKiwOqRbzycJe/60UQVEbBvEtqdKtV1GVzbNbiw3AYeJVMt2JhLeT1IYiSBE9xK2bRq1rG6vv9km/k29RsaBNY+hBkpUK6VSMrZTPfNxUC/dvLbFOZ0SjbKQRUe8ZU2J2C1+Tb2wjwkYHESyeJ5+rq9WZvq7NGesP7NYC0/2f4OBZA9xnWfGgg0NlJLvAFz+N4X5AHeK0+0G4//i3kiN4+y/Px9pn1WkClM5fCnTAHfxvf83RGzDpzVW882e0lj0epTyCCyB/jJVYOlQBXclJH68xJz9OUBlyYBWsd9oM+eqIcSaVqWrpOETUu1yfuff+B44APATTJQrEHbTcTfAPaPo1tBO9rSj1NEr5wYj8sXxQxXawHJUO6ROY0H+1GQDECT0JlCV1EFgA+e1s3H7DVG/RucIxlht0u8K0MrUaw4xnfydxnj90y8zjCyR04IQvVhnEaa142TxhRGDhT21AmO/cogNnpEL/rTuHJEvdSogyCzHcD5rqF7asTQ8eSgY81BrEaGHsX04aRHRtUlWZkU5+faW4skwNTyogfg+jGg1Vp7Dsf+edXPKneg2z5pwv3gga34pU/2XMJZBsCMTJwMj7lSm2DkBgswC7TkKDaHyfJ03mcnRX4wCGA/BV7Bu+a/Gr0YF4mdRIB+jFrMwxn8yvPgy5YyhljVNihsu4mF6we2pvNG3TEVEPl6Aed3+A3KAeEkIMQdztkxuhIcfvJj+9jy8qyJyfDpWnGb5v+ShCI92Df16HgM1AQUQSDjdALtePbinJQPkDiX1WFMIywp9+qTR1j8yk8kdhiZXUxC3JMs2mFDBoLPyzPpBYQIBPoR2iiB95ziCbnYZQSMgwf/PBw3EZDkhrsGKx6YKt1a9iRtrObUNEOZUgfAeEP+PymI5z3n/r4/q8SDErlTqiJBCDdpCW2TciHsP+h6Tu3Xe7aYhccb9iL3JAXdCqh+hAOWGjnikXvXO4W4PROWUYkkBjrsEFincWET1RcGLCDJiHE9MEKSqvIobgTnmb+poKAalMxasDZPAyAQoVFZ1eRhHVe1WprhaEbojKWwZgsL/6XLZwv43ERHhqlkFj7YnAeKtUFz6s8+5uJV6v1Ojl605qA90HoyAHxHTusSSg0Ib2tt53/PcTjlno69++vege6ugdYPHkJvq5WKIR0sf6uDjJURmaIPTl1jTZVgWQBAzjDgY/u2NESrPEYoEbjBBt59Ob97Yl/ecVP48/u7NFDk8dcOXEQq/WGaXUNDXIo/aZUu6x6Tuc/T1viGEo5yPKzfP4M6iKgAG3c/6B1VafHK+t0XlpTQiwCtAITbwvpR0mtXb5sxGN51YgWfB9nHZ6ynHOnj4kde88hs2bHXjKnxtJUAQ+mVpCDV68miMoP9XVBPcVqLPdWzgaWBxHpmAXu0gkWvy7/ieOHOgkn85y485ihSWkiUDVf1UK1yqaxw3PAEXXPchT+B49Qo81qXZXW/g0IklbR7SnHkKI9OJpcNkaAj7hMMggYBGogYAhSAxiz2yBABAxBjB8YBGIQMASJAcccMggYghgfMAjEIGAIEgOOOWQQMAQxPmAQiEHAECQGHHPIIGAIYnzAIBCDgCFIDDjmkEGg57uadMOE1d5FvdDNpSorceoFebthz7gyDUHi0KlxrB5H4zkcrIIP9MYLPzPFYRWx953GwqL1dlGOZDhSOMKRdv5e74Jzwh1X3fuYWTjDd0yPWQx2CGWxLKxgt5IvUc7mIw4BQ5A4dGodg8ejOk50NnbUR7d99FDlh+Xt/vnPuSKUtfdkUwuLqmmMjt794Yvilu9h/IRaO4WjPhP4GpJiFEMud/rhxMccfJFwUS3NN91+Q5A6TE4mKIciMZDg7ZyBPouf9Yy75ozMuVIQ7Pn+NVd9uiy9RQzrzGDJsmih6joEAMWk7dqOv1Ap57cNfPKlf3xtyc5uwSD8ASyuAdFUL/ENMwrsrJzJjXi/G9u19OOhS0rjGL05HnjCkGRDuNbtNARZB8nqHSvJAWI4fiCHQRMO7KnrAUc0zEVgHOpLMNzhFs5KgcGcGAeGHBJjUCQLGklY6hB0zEu3PC/2nvlODhN/sfzh6IyYDy7X6Fo35Hd4N156/fwPrnjj9Kn8aGWXV7HLkEERP+bqzX7IECTGA9aQwwU5tuB0TijGKrsu91YjIkMycRjnDNpmuCmAx0fjiOvKBEVhDUBcwouEXcmMb/VtB7POqOZbrI+rliAuy3jzzt4z/zm+Y+bMwH37//K5h4Z3lnZVik6Zq87GYLDZD9VVC25WkJTjqPpe2iQHnDkL11b3AvViAp9e6X/AmzU/1/SjZ8Lr69wgBtpSKmrweojBgZe8yeFn7Y1EZGJTa2lwj7d9+nj+3ae/tfPK0rw762SCDI7VR1KVzab7YwiSZHLW9ZJzJoEcqLHXOHzS1WuOr+TKmkP1/Fx3OV27vo0ksoOKKA1c7l3ywkODb3vuV2PP2ZifGWldtvXIsknOMQTZwNDKa7if0QMt/wD3HHCi5d0bXFLnrjZkUWdJG5/G8vFg2B6Su5//9fD+0oI7B5Kwnd1tyTaWt/t7DUGSbIClIdCewTuIqK2SdL7ux/FeJHBzcmjxD5krijOZcyAIoosJIjXsZgiyATBrvIUz4K/ZtcFFPbJL3bRjCWHXXxLDlUWnOgFU3yjYZjsYgiQAygeyaH70mf/wbsrDU4JqwyrmTXwCPv1+2BAkxsLL7mOa6DEo9fch7QgSoFeGbpC3L4Jop5pWULcP5/appQ9B1EyVFu6IOf28Srg37p/QH+oUadai/fjkucUsNLs8rDhQNapbIvUiVRMJ9SFItbtTYL+gCTbtEwP2R9eQALM5s5Nj02EErOC1DLF9RhBqReXkeQX6kSNQsWmY2mc35KQHQfCYyCoUQkREcAJgFdHSYkdA1d7qA28QJT/w0B+Lr9Bb8G7GVIkXM5g1vV8S4VCP0UURyp1Sap3aSzW1MLseBCEq+6YUILMl6ycA7AS4MYod6HTKbhVhlckT0tooEgpWMvF7M4kmtpEJXr8HMxUPC122J2UCv0QP0qWWbUErmrYClLEmoDxVLA0+qPLaV2gJ9xbkWXepPgThQj6HDzsHjh9fgJR3wTu5jBqWghNYIDKMJGGIUS0MfO3sZ7VIltlMq6ha/4EgNsixOOd5ZbxxbPmmCtWFcAO/nPX9pahH7zqj1r+DGMIFlIKhpiHpUvhOKARsa0msCYjZ3KX1xWfufseiODzlyImY/vv1K9eWM/XqzTs1hV53QuyW8rtPve5Vn8D73U8BwG0wGlYr4tolKqVVu6CSRuWPZbqq4Z7+jUTviU08AcTASA0hZrzK0rPF0hz2LUfC2IvrOAgJRN4vzwciZ5dtewA/satKyYQMACTOoGxUBJhKrsnHEScJF7br8DJ8bEJvVeSw5Kd/9+XCDwAXD6YoS7JOehGEzYYQJOuK/3v4S0+8/rqnAOCt2H0AqqjFVZbhTdat5TM4dAP/bce2o0gLJ1I2TMgaTliR0r9QKi8+Xy4vkFUkS/tcEJQAIQa90qzjZLyy7QwENvqM1EPeKskBtS/sMThpHlnZYHT7xIuFB8WwJIl7DkseBzW/SHKoSyBTdDQ2hzQP6kUQag4rV0nCSHLiuuv+ezRn3Yie3geEHWzHk6CMGnTUYZTwPgasEMVi4F+14Ht/BmaQL4wotTkK6/JGnCtZzlf88lLge1wiq73kCBUnSPS0vFdeyAq7WLHtbCBsl02wOGhCFdh1HmtSFud+YNnuo1i3Oe9Lu/awxLgMGzgGeBi2sMgqnlb5wamiP/igalZRZg3JQdXYZmhAxRRPJWiFgm0dPdrQ+It2Szj1sive6DrOfXBGD0ixV1+sA7J8IsqQQ2JUf/OzU6lBI4JbjBfCHZx54i/+9Kz8Safkqidf3nNYU4c5jEBLR9TnJn0tmgSM5JjEoCLcvKPC5OCiROdcm02zv48dOqSia9Z183h5yVV4sYXRgI4ft2Hd7OWokYbVGTXq30hwnm+J+fE9ajXZyUjXZrFq5DqIKsTkpE1icDVZebTAoJsGTI2IuXyufk2sZdGiL2qZapoURuUufK49pSO/Dx5U2aJ2his1VoK21l6jRiBBeaQ3YwOqDWq5JrO6fwKdI4x5eGrZA0l/gkQgpl3LnIv4iJoZDOkVl2/M42xMJcErzh07xjo8RSVTLKoxSNadrW8Ta52oZodBIH0EDEHSx9yU2EMIGIL0kLGMqOkjYAiSPuamxB5CwBCkh4xlRE0fAUOQ9DE3JfYQAoYgPWQsI2r6CBiCpI+5KbGHEDAE6SFjGVHTR8AQJH3MTYk9hIAhSA8Zy4iaPgKGIOljbkrsIQQMQXrIWEbU9BEwBEkfc1NiDyFgCNJDxjKipo+AIUj6mJsSewgBQ5AeMpYRNX0EDEHSx9yU2EMIGIL0kLGMqOkjYAiSPuamxB5CwBCkh4xlRE0fAUOQ9DE3JfYQAoYgPWQsI2r6CBiCpI+5KbGHEDAESTAWpjjjpGp9l6hT+L/vVGurQoYgteAshAeEDOYxKSdWM8CEz+G81LWu6In91IG6UCelG6WOdO0JBVIW0hAkAXBfus+gqj0Ph8pgmt6ejyVKB+hCnZRuCfpv9sMNzpy/ieDiJNlR4+pbV+/+D8zt/H5ofxZ7s73aNEH0gL2tMj4vwfJtX3/PY098SFl0ha6byMJ1qWoiSC2YQI5jh0Q4ubcQdyOCXIB7DYEcJTparct03a/IAdmVDkoX6ISklnnoy7us9ljCRJAEHCex1McEpur/ztW7Pwpa3AVulFELz+EytuNZwehOFs7bzqUGAkQ/riabxe9b3/XYE1+p6pYAwaY+bAiSZP4VzY9vX7PnA7gLuR2XvAS0KIEoWNvb6uoKWEnigwxY+4frEFpcLOdprOVwx7sfffxedd0K3RLz2aQnGILUYfiVNe33r7lij2c574PDvQmhYw8cbriOLLp2Cgg9j2bh4yDyj13L/8afP/rU4xRmpU5dE64HCv5/TkFf8RZsb3gAAAAASUVORK5CYII=", "attach_user": "rushabh.jpeg,data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD//gA8Q1JFQVRPUjogZ2QtanBlZyB2MS4wICh1c2luZyBJSkcgSlBFRyB2NjIpLCBxdWFsaXR5ID0gMTAwCv/bAEMAAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAf/bAEMBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAf/AABEIALIAsgMBIgACEQEDEQH/xAAfAAABBQEBAQEBAQAAAAAAAAAAAQIDBAUGBwgJCgv/xAC1EAACAQMDAgQDBQUEBAAAAX0BAgMABBEFEiExQQYTUWEHInEUMoGRoQgjQrHBFVLR8CQzYnKCCQoWFxgZGiUmJygpKjQ1Njc4OTpDREVGR0hJSlNUVVZXWFlaY2RlZmdoaWpzdHV2d3h5eoOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4eLj5OXm5+jp6vHy8/T19vf4+fr/xAAfAQADAQEBAQEBAQEBAAAAAAAAAQIDBAUGBwgJCgv/xAC1EQACAQIEBAMEBwUEBAABAncAAQIDEQQFITEGEkFRB2FxEyIygQgUQpGhscEJIzNS8BVictEKFiQ04SXxFxgZGiYnKCkqNTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqCg4SFhoeIiYqSk5SVlpeYmZqio6Slpqeoqaqys7S1tre4ubrCw8TFxsfIycrS09TV1tfY2dri4+Tl5ufo6ery8/T19vf4+fr/2gAMAwEAAhEDEQA/AP7qv+FLfB3n/i1Hw268f8UL4X+n/QK9fXnj2JJ/wpb4Oc/8Wp+GvUD/AJEXwvxn/uFH8zx26816Znr8x646dPbp/nj8Vz7nj2Pr06d+grs/tHMP+g7Gf+FNf/5M8f8A1eyD/oR5P/4bMF/8oPMv+FLfBzn/AItR8Nhzj/kRfC/fHrpWe/8A9eg/Bb4OAEn4U/DUAdSfAvhfjp/1Cuw/+ufTo/G3jnwV8NfCmveO/iN4x8L+APA/hewl1bxN4y8a69pPhXwp4d0q32i41PXfEWu3VjpGk6fDuUS3uoXlvbR7hvkGRX84f/BXH/go9/wRq+J/7OXiTQPid+3x4j+LenJbSaXY/s5/8E/f2n7Wb4ifGvXdYElvp3hPWLT4Ravdf2vol7JEIr1fHWtW/gSwiVZ721n1KfT7a+P7RzD/AKDsZ/4U1/8A5MP9Xsg/6EeT/wDhswX/AMoMH/grF/wWZ/Ym/ZO8M+J/gF+xp4W/Z/8A2mf28vEQvPC/hrwd4N8L+D/F3w++Bt/LFLbah8QfjZ4p0nS7rw1YJ4MfM6/Dsas/ifVdZSy07WLDRtJnuNQT+Qz4X6B4p8D6NqcviDxv4h8YePvGOv6r42+I/jTUL+6W88V+NvENw97rWqFVkSO2s/tEjQ6fZQxQwW9uisIhNLcO/wAwfAb9nCz8KeOfFfxStNI8UfCPwn4i1u71PwB8A4PiNrniqHwX4ek3Jo9t8QfE6ppEfjzxRb2OwXksuk2Wki8e5uBpdsZLbStI+uvEfiPQfCOjah4i8TatY6HomlwNc3+p6jOltaW0QO0bpHA3SSOyxwwoHnuJpEhgjllkRHyq4nE10lXxFesou8VVq1Kii3o2lOUkm1pdHVhcsy3ASnPA5fgcHOpFRnPC4Shh5TindRnKjTg5RT1SbaT1tc5Jvh6uueMh4+8eaxqvi/X7MtD4a0/UNQvZPC/g2w3h44dE0OSdrO41SVo4rjUfEWpw3GpXV7HHLYrpNlFaada+jav44vdEt/PvNY8SS53eXbaXHr2s6hLtALCHTtHivb6VVJUPKluYYy6ebIoYZ/JT4zf8FFNTnuLvRPgpo8FlZI0kI8aeJLQXN9c4JX7Ro+gS4tbOIkboZtaF7NNE48/SrOUFa+GNc/aF+OXiKaSbVfiz49cyks8Fl4l1PSbLLEk7NP0m4sbGMcnCx26qBwABxWB3H7reJv2ifjPa+Yvgn4EfFzxQEyFutb8S6F4OtZz2khhn1TV9VEZGCBc6ZaTdmiHBPzH4y/bc/ay8G/aLzXP2f/EGhaVCCzXl9e+L9SsbdFGWafW9Pgi0voCd37pSoJAIBI/J3/haPxMHT4i+Ov8AwrvEH/ywroLD49/G/TCv2P4u/EiNVxiJ/GfiC4g4xjdb3N/NAw4Aw0ZGOOlAH6A6X/wVR8cRvjW/h8LyM4GdK8eavprqe7BbvStVVx3CFkyeC+Dx6VpH/BUnwvOyjXfCnxJ0xSQHfS9d07WtnPJC3V7oRbH1B9s1+Qfijxp4i8Z3K3viS7tNRvwSZNQXR9FsdRumIwXv9Q07T7S81F/R7+e5cHkEHmuWoA/oe8Gft5fBTxnPBZp8TtX8M39wyLHaeLxquiRhnIXbJqzSXGgxHccHzNWQHOV3AE17V450Cb4k6RHaT+PviVocMsO+21XwJ8RfE/hqZ4Z0DK//ABKdSXS9SjkVleM6hZX0RQgx/Ix3fy717j8OP2kPjT8KoIrDwd451O20eEjy9B1NLbXNFjTOWitbDVobuPT0c8v/AGabN2OWL7uaAPtv49/s7/HT4P6TqnxQ+H3x5+I/iHRvDxXU9Rg1LxZ4isPFmk2yzIrX8V/aaiLPWEtS4mvZFh0qaOBXkS1nVHxl+Kv2orj9ob9l7xH4b8XeJ73Tvi/8PW0XxHZs2qXFkni62029h0+/1fSwk0SjUhoeoak+s6VB0kT+0bOEWLSw6f5X4g/b3+K3i7wV4o8EeJvDXgS8svFPhzV/Dt1qNjY6zp2owRavYT2El3GDrd3ZNPAs3nRILOOMyIMjbgD4boA+v/gB8f8A+yvK+GPxX8UeMo/hrqvifTPFGneIdE8S6zpniP4d+NdP2Q2HivR9Usp2voLYxqkGpxQB5bZFTUrFBPDcQX39Ff7D/wDwVi/aQ/Y/8QxXHhHxx4K/4KRfs86VJDceOfgJ8d4PC/iX49aB4faRftGs/C74zX2i3XjZ9Rgg2ppui+L7fXvCs9tCdP0zRH1O7iv7X+Y74J/DPwB8VLnUNC8V/FbQ/hTq9r/pGk3fiDTHubHxAkyKjWf9pXWvaNpWmy2EkW9Ypj9rvhe/6N5/2Z44/wBjfgp8Kb248M+ENR8ea3q+v+IvAGsFfCHimLWvDur/AGzSbGL7Ms3h/wAV+H9M03VdT8DeJ9PuDDeeHfFf2u9geJ7WaWRbKxvpNqWJxFDm9hXrUea3N7KrOnzW25uSUb2u7X2uceLy7L8e4PHYDB4x07qm8XhaGIdNStzKHtoT5b2V+W17K+x/pxfsMfH/APYW/wCCh/7Pvhj9o79m3wn8Ode8H63LPpGv6Bq3w/8ACOm+N/hx4106K3fXvh/8Q/DyWVzJ4f8AFuhPdQG4tfPubDUbC5sNd0HUNW0DVNM1S7+wv+FLfB3IH/CqPhtzn/mRfC/b/uE1/nT/APBLX9sS/wD+Ca//AAUV+G3jebVpNJ/Zb/bM8TeH/gP+03oDSmHw54e8d65dTWnwe+OJgJW00690LxLenR/FmsuscCeE9Z8RTXCXGo3ttLF/pSA5A5PPt/8AW/Xpx1652/tHMP8AoOxn/hTX/wDkzj/1eyD/AKEeT/8AhswX/wAoPM/+FLfBzOP+FU/Dbv8A8yL4W/8AlV/n8DQfgt8HM/8AJKfhr0z/AMiL4X7f9wr2OR/KvTcj39AMEf06e/Sk9OT6dMfoRwP0FH9o5h/0HYz/AMKa/wD8mH+r2Qf9CPJ//DZgv/lB5n/wpT4Of9En+Gv/AIQnhb/5VUV6Z+Lf98//AGNFH9o5h/0HYz/wpr//ACYf6vZB/wBCPJ//AA2YL/5R5L7j8Qv+HiHx6I/5B3w65/6l7V8nGO3/AAkmTngfz4o/4eI/Hrn/AIl3w74/6l7V/wD5pM/55r4QyfUdSeccE49c+3PXOc9CaT8Rx2xn/EZ//VxgV/U/+pnC3/Qiy/pb9wtdvP09V66/5Yf8Rk8U/wDouuItdn9da7d4776W16bH3Tcf8FCfjndwT2l3o/w2uba5ikt7m2uPDWqSwTwSo0c0E8UniNo5YpY2aOSJ1ZJEYoykEg/xC/8ABXD4KeGvh7+3n8DvHf7PPwS+F/7LH/C7dH8T3/iXxl4Esb0fCjxz4x0y4Nx4g8M2nwemiuNF8C+Kk0lrK/E/hXX/AAxpHiePWpJLaystdtNRvb7+pHPGOP19/fHv6846cV/OL/wUl8b/APC0/wDgoD8M/hrbym58P/sw/BnVPGuqxqxMMPxH+M13Dp1raXEQyDNB4H0nTNUspJBuiF7K0QUuzH4zj7h/h3KuGsXicLluEweLlWwtHDVaFGCnKpKvCU6fNJS5U8PCtJuDjP3Lc3K5KX7P4BeIHiLxT4lZXluacS5rnWUwwWa4vMsNjsXWlQp0KOCqQo4hQpypxnOOPq4SlCNZVKS9vzOHtI05x+cfGnxC8G/DfQ59c8ceJdI0G0tbSWdmvbuG2nvnghZ3g0rT5J2u7+7lZStvY2a3VzI7JEgkYg1+Cn7R/wC0p4q+PviNjK9xo/gTSrmU+GfCqS/u0Ubo11bWfLby73WriInLkvBp0MjWdj8rXNzefrP8ffhZ+yxpWj6r8QvjD4Z023Ylg2owaprtjrusX7I7w6fpkGmaraS6lqExBMcGx4o4w9xcNBawzTx/hZ4zv/C2qeJtTuvBHh+78NeGJLjZo2j32pz6zqENsuFR7y+m5kurg5leKIGK33rbpJceWbmb+fz/AEAOVr6L+Bf7I/7S37S139m+B3wX8dfEG3Wf7Nca5pmktZ+E7G4zgwal4y1h9O8KabMOT5V/rFvIQGKoQpx/Ub/wSw/4II+B9P8ACnhP9oP9uPw+3inxVr1lZeIvCP7PupCW38N+FNNuo47vTb34p26mK41/xJcQtFcS+CZ3h0LRI2ax8TW2t6hLc6do39DFvpmh6PDHpPhrRdJ8PeHtNVbHRND0PTrPSNI0rS7VRDZWWnaZYQ29lZWkECIkVtawRQxgYRFFfm+eeIWGwVWphcqoQx1anJwniasnHCRnHRqnGDVTEJO6clOlB2vCdSLufrnDnhVjMwoUcbneJnltCtGNSng6MIyx0qckmpVpVE6WFcotNQlCtUV7VKdKS5T+Nb4S/wDBu5+1P4sitb34s/E74WfCO0nCNLpuntq3xH8T2YOPMS4stLj0Lwy0idB9j8Y3cbH/AJaAc17l8S/+DbnxRZeHoLn4QftNaJ4j8Uw27fatJ+IfgO98JaJqFwMsr2mt+Hdb8YXmlqw2xrbXOh6mC5EjX0akov8AV/0/z/n/AAFFfE1OPeJZ1VUji6NKKd/YU8Jh/ZNdm6kKlVr/ALi3W6aep+iUvDLhCnQlSlgcRWnKNvrNXHYr26fSUVSqUqClfX+ByvZpx0P84f8Aao/YQ/ae/Y0utL/4Xt8O5NC0HxBfXWneHPGei6rpniXwfrl5aRC4ktbfV9JuZ20++ktt91baZr1rpGq3NtDc3EFi8VpcvF738J/+CPH/AAUA+MPhbw9428P/AAXt9D8K+KtHsdf0HVfGfjjwV4bmvtJ1OCO70+7bQbnXJfE9kt5aSpdQpqOiWkjQMshRVki3/wB3PxT+Evw2+Nvg6/8Ah98V/Buh+O/BmpXOnXt54f8AEFmt5Yy3mk30Go6bdqCVlhurK8t4poZ4JI5AA8TM0Mssb+goiRIscapHGiqiIqhVVVAVVVVAUAAAKBgAYAGBXr1PErMnhKMKeDwkcapVFiK041ZUJU0o+ydGkq0ZwqNufteec4Lli4L33Gn4NPwhyhY7ETq4/HSy506TwtCnOjHFQq3l7dV60sNKnOkkoOj7OnTqPnmptezUqv8ADO//AAQV/wCCg6qWHhr4XOQOEX4m6UGY+gL2iJnr95lHB5rybxj/AMEZv+Cjfg2Oa4l/Z8ufEdpChdrjwf45+HfiKRwByIdLtPFQ1yZwQRsj0tnJHCnIz/fpSYB6gGuWHiPn0ZXnQy2pHrF0K8dPJwxSs/NprXY7avhJwzKNoYnN6Uuko4nDT+9Twck16NO19eq/zNfGH7NP7RXw/wBVOieN/gR8YPCmqhyiWWvfDjxfpss53bQ1r9p0iNLyJjxHNatNFJ1R2HNe4aH/AME9P2q7z4DfGT9pDxR8IfiD8Pvhd8HvDWh6/PqvjLwP4m0W88ZTa94t0PwzFZ+EdO1Cws7vUrDR7PVb3xR4n8SpE2geHtB0W7kv71bq6sLa4/0W1LIyvGdjoyujLwVZSGVgQOCCMj8DS/tJeLvivov7OHxG8UfAz4SaH8dPiang++Hh34WeI9WtNK0TxRcXcZs9TstQN6nkavb2tpLeXMvhk3GnyeJ47ZtBt9TsLjUIrlPSh4k46vLD0YZbhaVSdehGpUninGm4OrBVEnVhGFBShzRdWpUnGknztPl18p+EWW0IYuvVzfHVqdLDYmdKlSwKnVjNUZ+zk40ak6mJdOpyz9hRp0512lTi1za/5Y1fp5/wTe+I2tp4p8XfC26up7nw/c+H5vF2lW0rvJFpep2GpaZp2oLaKTiGPVbfVYpbpB+7M2nxSqFlklMv5yeLLy+1DxR4jvdS0Wx8N391r2s3N74c0vS/7E0zw/d3Op3VxdaHpuinnSLDSrmWWys9LJP9nwQpa/8ALKv0U/4JU2/wx1v9o+bwZ488WT+Atb8ZeD9b0bwR4ruFt7jwx/a0L2Wt3OieJ7WZreW2h1C10Uy6brNtf20en3NvNDf217FewSWH68ndJ7XSdk7rXz6+vU/BpK0pJNtJtJtWbs7Xaeqfk9tj9N/i/wCAG+KXwz8ZeA4LeS71LxDotzbaFFCpef8A4SSDbeeG5bdVJkM8WuW9hJGIyJGZQoIyK/sI/wCCff8AwVw/aE+O37Fn7N3xM1IeBtV13VvhdoGjeKtT1DQtVl1DUfGPgpJPBPjDUb6RPEESm71DxL4d1W9nxHGN85wiggV+ZnwY+AXwOg0nSrPS77SvGvjD4eeLdK1rxF4q0uSOYSeKLew+22mmR3flzI/h61+1wzJYWc/lPfWEbX7tf293APNv+CQEn9l/sweO/h1jYfg7+1L+0h8NGtzjNqdO+Id74g+zlf4Ao8S7tmf493cV+h+G2Cy7Ms8xGCzPB4fGUamXVatKOIpqfJXo18PaUb6q9KdVSS3sux/PP0k874i4b4Hy/OuGs6zDJcXh+IsHhsVVwFZ0XWwWLwWPUoVbJ83LiaOGcG17rcrfFc/pB/4eI/Hv/oHfDvj/AKl7V/8A5pPY/kfSk/4eJfHr/oHfDv8A8J7V/wD5pK+Ec+uO2cc9OnQ4Pfvj17CjJ9uOh/H29OcenbtX7j/qbwt/0I8u/wDBHp/X399f4d/4jL4paf8AGd8Qvv8A7bLyvf3dNLuz8+yPuz/h4l8fO2nfDn/wntY/+aSivg/P+0v+f+BUUf6m8Lf9CLLv/BC/z8vz7sz/AOIzeKn/AEXfEP8A4Wv/AORP6Uz8Avgdz/xaL4ajnH/Ik+HuP/Kf+Z9cdO6/8KC+Bpzj4Q/DUcj/AJknw9x0GDnTvXNet5xnJPPOcehA9OowOeOvSlyB3PJHVeeMHHQc+n6dK/lv+1Mz/wChjj//AArxH/yzyX3H+p3+q/DP/RO5F/4aMv8A/mc8j/4UF8Def+LRfDXtz/whPh44H1GnY59f0r/MK8c/EDQvjv8Atkft6/tJeGrLTLHwp8U/2pvHnh74cx6Pa21lpj/Cb4Q3TfDr4c3Flb2qw20Ud1omk+fKluvltcNJIdzsWP8Apo/tY+IfHHhL9lj9pbxV8MbWW/8AiV4Z+AHxj8QfD2wiDLLe+ONG+HXiLUfCdpGURpPNuNettPiTYpfc42KTiv8AK7/ZUg0q3/Z4+FI0ebz7efwxHe3U24lm1m9vbu78QCVi+Xkj1ybUInZjuLRnPpWNbG4zERUMRi8TXgpKShWr1asVJJxUlGc5JSUW0mldJtXs2deDybJ8uqyrZflWW4GtKDpSrYPA4XC1ZU5SjJ05VKFKE3ByhCTg24uUItq8U15V+2RL8APDvh2x8V/F3wlJ428Tyw3OleCPDyeI/EWlT3c0eJrl4xpmrWttp2m2zSW0msamtpLId1rb7Lq5ktYD+dn7CHwqtv2gv25f2avhqmj20GieL/jh4Pu9a0O0N5dWtv4M0LWo/FXiyxt31C5vb6WGDwpo2rRxy3t1dTbEElzLKQ7N61/wUe8I+L0+IfhjxrLbXt14Kn8LWmh2V9FHLJp+laza6lqd1e2N0yborW4v4ru1u7aSYxtfossUXmDTnEf1f/wbs+D9O8Tf8FINA1m+WNrj4ffB74p+MNILgFl1G7s9J8Bu0WQcSf2X421IZGD5ZkGea8LO8TLB5PmeKjpOjgcTOm10qeykqb+U3FvyPrOG8HHMM/ybBTt7PEZlg6dW/Wj7eDrKz3bpqdl1dkf3reJL0WGi30y/LJJF9mhxwTJc4iBXpyiM0g6H5DXhijAHH5+pHP0//X616P4/vCTYacrf37yYZOeP3MHr/wBNzz7HryfOfwP54/r75/8Ar4r+Yz+yKjblpsru17tvT09Nfz1S+v8An+X1P40f59KP8/5+tH8/8/5NBCX36dNel7676flvYKTjn1+vPf3+uO2eetLR+H+f8/yoD7vLTpp/X3dtSiiigLenlpstP8vlppoJ/n9R/hz/AFzXp3gK+D293prtlreQXUAJ58qUBZQo9ElCk+82cg15iPpj8vb0/L8PpUSeOvC3gO+s9V8U+JtB8M6ZI7QXF74g1jT9GsxDMVjZ2udRuLaLZDI0crHzPl2gZGQKaTk0optvRJJtt9klqxqah70pRjFfE5WUUtN23ZfN/k7/AMC3/BZv4C2/7P3/AAUV/aA0LSrIWPhn4g6zYfGXwzGkflQm1+J1kniDxAlrEAI4rSy8cyeLNMtIoh5UVvYxxosYXyk/OLwLqXivRPF3h/W/BEOoT+KdF1O01TRo9Ms59Qumu7OZJY1+xWySSXUMpHk3NvsaO4gkkgkVkkZT/R7/AMHM/hmw1H9oH4BfFjw/JZaxpN78Mta+Euva5o1za6lY6b4s8DeJ7rxj/wAItrF1Yyzx6b4gg0T4l2WrrpV+YL99Mv4LyOF7Y71/m18Ma1rWg6zZX2heJr7whfedHENfsL/VtOk0+N5F33Etxocc2qeRFgSypZ29zOypiK3lfah/pPhrFSxmQ5VXm+ap9Up0qjbvJzw96EpSvrzSdPmlfq2fyJxjgqeX8UZ3haUVGksdVrUor4Y08VbFQhG32YKsoR8oo/sR/wCCbH7UENnYeILz4seGfEPwmXxB4ftrnUNO8U6ZeWsSeINA86aI6SkkRvZrPVLK81AWYubW3vWuYrWxlgeV4JJv3a/4Nm3+Fnxcf/gqd4W8Q/D3wrrU+jfty3Hxf0qPxLoGkaxqmmaF8e/Aum6jY2nn3VrO0Nu9z4Hv7oW0T/Zkuri7MW7czt/Gn+zd8avB0uk6f4c8TftL6d8VPFuqS21tptpf6E3haW2nkG0adYvquk6Xr2vXMsjbRd6nKbifC7LOE8H+nL/g2U+LFl8OP+Ckn7a3wC1Nkt3/AGl/2dvhF8b/AAtK7bI59U+APiDWPh9ruj24Pyz6jcaX8SRrTxYMiWOkzyjCq+foaNeth5+0oVqtCpZr2lGpOnOz3XNBxlZ9Vez6nyOLwWDx9F4fHYTDY2g5Rm6GLoUsTRc4/DJ0q0Jwco3fLLluujR/aUPgF8Dv+iQ/DUnr/wAiT4e9j0/s/uD69enpR/woL4G8f8Wi+GvT/oSfDo79f+Qdx6c/z6+uZx3PTsPp0+U9frjnijOMDJ/L9RgH147dPx6v7UzP/oY4/wD8K8R/8s8l9x5f+q/DP/RO5F/4aMv/APmc8k/4UF8De/wh+Gn/AIRfh0fp/Z/H0or1v8W/75/+xoo/tXNP+hlj/wDwsxH/AMsD/Vfhn/onci/8NGX/APzOfiL/AMPEvjz/ANAn4c88/wDIva51/wDCn+mP06cH/DxL48/9An4cc/8AUva3z0/6mf8AU46e1fB+P9noTwT9M9hn0BycE9ehpcH09e47jHpxntj8eK/qT/UvhX/oR5f/AOCltpv7/wCHr5s/y4/4jL4p/wDRdcQf+Ffp/c8vz7n3bJ/wUP8AjvKjxy6N8NpI5VZJI38O606OjAqyOh8TkMrKSGVgQwJGCK/gqj8FXHwM/aB/a2/Zss7Ox0iz+HXxY1bxn8MdNSO4i0bT/hp8X4pPHPgrS7GIzSXEmm6B9vl026aKaSSOXfA0hnRs/wBdG3/Z9uvTknOcfl1Pr7/zwf8ABU3wYvw9/bY/Zs+MNnD5OnfHT4Y+Ofgr4peMbYf7d+HV3B458KXt0RhZL/ULTW73RraU5k+y6d5IOxBXwviFwjlGD4enmGVZfh8HWwWKoTrSoQ5XUw1aX1aUZLmbaVWrRqXa91Rl0bZ+5/R78WeLc38QKXD/ABTxDmGc4POssx1HBU8fWVRYfMsHTjmFOrTlyx5XPCYXG0ZR+3OpT6xSf85n7YPxz1Hxzc3Xww+IPwy/4Rrxt4G1hm0/WfD/AMQJtV0RzdRwmVp9FfQIrfU7XUbExTWcz3Nlq2mysqO0O7UdOuP2j/YY/tr9kz9lnSrfwZ8EL74B/tOfGX4S+INF8bfFXxnoV5L8XNVvvjP+0f8ACP4Ofs1eIfhvFr87yeCPBg8O658UvG+o6PPo1ra+IdX+EcPiSW1u7GDTr9v5w9H+Huv/ABY/aA0z4VeF5rCPxR8SfjBZ/D/w7cazejTtMTXvF/jOPw7pM2qag6y/YrFdQ1C3e8uykht4BJLscptP+jL8RvgX4I8Lfsx/CrSv2lviv8PdQ1P4NeGvh9D4y+PfxXj0bwsviLxH4D8P6npGm6/qHiK+1CyOmOL7XfEN/p0d3e3939r1a6uA8uqXd5c3P8l8b5nSwkMtwVSHtlisRKpUw8XUc6kKKUYRnSivZ1qMqlTWnUbvVhSlCEnCTh/qD4a5LVx9XN8whV+q/U8LCjSxk40lTozrycqsoV5v22HrwpUlarRS/czrU6lSCqRU/wCNb4tf8Frf26bf9oX4ueJ/hv8AG+Zfhte/ErxdJ4K8F694U8EeKdCsfBUGuXlv4Z0yC41fw7cazDAujQ2TSy2GrWc01w884lVpnz9zfs5/8HGet29xZ6N+1T8F7HULEmOKXx18F5JLHUIEBCebfeBPFerXNpqDsD5lxcad4v0lYwrfZtJlLrGvf+I/An/BMPU7690X4Ufss63+0hawsYG8Qfs/fBX4m6p4YlkbC7bLx5rEHgfw3OVJwLqw8QTWhOWhu5FXfXzX8Q/+CcHgX4n201z8J/8Agnj+2z8N55AXtLzRvGnwpe0d3/1T3nhvx98TfFN20LD5ntLLVNJkjO1PNTOD57hwtj6UMPi8gq4HlpwhGvKGEwNdKEVFTm44mhXnK2r56dVSfxJ7HsOlxtltapjMv4sw2ac9WpVlhaU8yzPDNzm5ypUlLA4nCwgruNoVqDglaMovb+iT4Jf8FJ/2Iv2gILMeAf2hvAFvrF4I0Twp431QfD3xYLqTaDZQ6H41XRLnVLiNjsd9EOqWrkFoLmaPDn6O+NPxJHwu+CPxY+Lmm2tprrfDr4XeOfiJY2Ml35Vhq7+EvCup+I7azkvoBKY7W/awSB7qFZSkUpljDkKD/B94+/4JT/tveE9WngsP2ePiW2lNF9psj4oPw40PxBJbs8ihpNE0j4j+Jg6Bo3jSeC7cXEiSBYYmUxjwPxX4S/bK/Z00C98PeKtN/aD+EXg/xJbXnh+/0+a48deGPBniK01W3lsr/RLhrSe38Oa1b6laTzWt3pzSXUd5bzSQzQyRSMrec+BMnxGIpvLs+pVIOcJSwld0K1WULqUoc9CrSnFuN1Z0Lq+tmrnf/wARH4iweFqrN+FsTRl7KcYY+hTxWHoQqcrjCqoYmhXpTiptSusTyvZXTP8AQ1/Z8+JV/wDGX4EfBj4vappNtoOo/FL4WeAPiJeaJZ3E11aaRP408K6V4jk062uriKGe5gs21IwQzyxRvMiLIygtT/j/APEO++EXwI+NXxX0uCwutT+GXwm+IvxB0211VJ5dLur/AMG+ENY8RWdtqMVpc2V1LYz3OnRw3cdteWlxJA8iQXMEpSVeJ/Y6tJNM/ZG/Zc065tbzT59N/Z2+ClhcWWoWdzpt9ZT2Hw18M2txaXthdx291ZXVtJE8NxbXEMUsEqNG6Kylazf20fCnjH4lfsiftH+A/hnpD+J/Gvjn4NeP/CfhjRrW+0+zfVdS8Q+H73SI7OG91K8s9NheVbqRVa6vLeHd8hkBbB/O1So/2oqMuSGH/tBUpc8rQhR+s8j5pyekI0/ilKWiTbfU/VHWxP8AYrrx9pUxf9lutH2cOarUxH1RTThCKu6kqnwwjHWTSUXon4D8Fv2/PhNafBP4b+Pf2kPi58JPht4h8b/Dnwl491W0vfFmnaPbWOs+KPDmneIdX0DR9G1XWdQ8Qy21jdahNDpFmz6nqIto0tpZp5ViL/I3x8/4OAf2PvhvBe2Hwe0nxv8AH/xHEJEtJtK0648BeBzOmVKXniTxdYxa+I9+Nk2leC9XtpkDNHcBSjv/ACwv+xD8dNP8ay/DnXNFtk8f2XlSav4C8GPcfF3xpotvIAwk1vSPhJb+NbLw3MylDDb+LdX8OGYSRyK627Gdf0I+C3/BKXxVO1pq3jP9lf8Aba+LUWUmGmaPo/wa+DWiTfdLQXcniD4p+IvEtxbH5l8yGTw1dvw22A5Wv018J8LYarLE4rHVcZGcnUp4XD1KWHw84Sd4qE+e8oWaSksXTTV3ft+Sf668bY+EMLgMro5bKlGNDEYzF0q+LxVOtTioTlVpKk3Co5JynTlgKsot2a2v49+0t/wWf/be/aHubvTtE8dn4EeC52eO38LfBqW+8O6rLAx2xf2r48a4l8Z3t0YiYrldM1TQ9Iutxb+xYshV/NzVND+LHjW8l8Q61o/xD8W6hfMZJ9c1TT/EmvXl4zHcZJdTu4bqe4YlixZ53JJyTzX9cnw1+F2t/s0aVHd+Cv8Agkt8VvAFrbmC3n8USa58AtX8UzMw2wxy+Jte+Kl54kvndlYrBJq7IrNJIiL8wPrP/DanhrQGjb4q/Av9qD4P6ZGyLqPiHxF8G9T8W+GdMi3ASz3Gt/CvUvHtmlvEuW86c28bAZ3KnzjvpZ7Ryxewyjh2hCmkrKljMI8TUXedLCrE1Zydrc0qlWT01Zx1OD6mcf7VxBxliZVpNuUq2WZjHB0pJK8aeIx7wOHpxSe0aNGMb3slc+NP2ZvC7/tf+B/gfc/Fr4JeKfi14Wk134Aa78aPBl14V8U3kN5rWlafqn7Bnxr8S38ejwQ3+m+Jovh7qP7L/wAfbfXtJu9P8SWVv4B8d+IrV1sbLWJov51v2vPhEvwD/af+O/waiHhlLb4c/E3xT4ZsYvB+papq3h6DTbHUZf7Mt7K81zVdc1sS22nyW0Go2Ws6vqOraZqcd5pmpXUt7aTtX+jV+zjqv7Pfxm+GPiLxB+zn8dNO8by+IdAv/Dt/8RPh5r2i3fiTwTfavZSJBKvh7U7TVoPCviTSpxHqen6Z408O3dzHeWkR1DT7y0EltJ/np/t9fDL4HfBz9qT4l/Db4CfFrxn8bvC/hXVJ9P8AEvxG8bjSp9R1n4hi9vZPGMFlrOk+XB4nstN1B1sLjxNJaWLaxrcGsXFrDcaaLHUr6OD80qYzNcyoSpV8JClSXJgZQruFFKqrc7lGnToTpqTpxUoKtiFJt8qo2eXiFklDLsmyjE0a+HzCVau/aZmp4aFXEydBKfs4wnVq4inWlH2snCpKhhXCMU3LENrE/Yp12TQv2g/CTLrmgaDBqsd7o15ca/b+ct9b3gidNG0iUhVs9c1a7gtrOwunuLZQJJoA1y066fe/1Qf8EhNQ8WP/AMFJvGH7TvhS3sZdE/ZD+GGp/C/RJ9Qiv30vWvif8ZrGeDxDb3A0/UNObVrTw14Miv7W70y6nlt9O1y70nURELgW7r/Mf+yN4X8J6d4a+Nfxp8e6Xa6v4c+H/hEabp2mXwP2XUtf1WT7ZbwxurI8d6k9hplhaSI6tFPrUUqMskaOn9sv/BL79nrUP2ef2Pvh5pviq2eL4mfFB7742fFe4uI/KvpfHHxKNvrMllfoy7kvNA0D+wvDVzGWZBdaRcMp/ekn+g/D3IKOe55/tlFVsvwFCeIxVOavTqznelhqMtVfmqSdbl2lChNPRn8VfSD4/wAXwLwPbKMZPBZ/n2NpZflmIoySxGFo0XHFZjjaXMmv3dCEMJzWvTq46jNaq6/fj/h4n8ev+gT8Of8Awntb/wDmoo/4eJ/Hr/oE/Dn/AMJ7W/8A5qK+EMf7I5PqOMdO3uT/ADJ6UuAe3XjPpj9fYg9MYxiv3f8A1L4V/wChHgP/AAUt1a/2vJ27+d2fwj/xGXxT/wCi64g/8K15f3PL8X3Pu7/h4r8eB/zB/hv/AOCHXB+n/CTcUV8E7R/kL/hRS/1N4W/6EWX/APgn/gmX/EafFX/ous//APCqP/ys/pKP7O/wF/6I/wDDjrz/AMUjo3qcj/j0POB/nPB/wzt8Bhn/AIs/8OTyP+ZR0bAzgcn7Hjrnv7V7Keh6ZyP4W59M9/59vWgkeo7Z4P1Hcenrnt1AB/l7+182/wChnmP/AIW4n/5b5L7j/Un/AFT4V/6Jnh//AMM2Xf8AzMf503/BTf8AaPuP24/27v2qPgp4P/aK0P8A4J9f8E6v+CfviTTvhT8Svip8MrCDwv4w+Lvx1vtR1Dw1qGjz6zoE+l61rofxfoni3RNB8OWmoahoVlofg5fEc+gahrGvwzad+S/7Zv7NX7Rfww+APgz9ob9nj9tef/goF+yN8K/iRpnj+e+125h8WeLvhN4ntbWfQftV5qcuqeI9bbwRNbay2meKtO0fxB4et9Lv72yvNf8AB1u9jBr1p6h/wTwmT4g/EL4M+OvGsIvtN+K3/BUj9sLxt4pl1BRJbXvxK8J/szWniv4VjUBJvjm1HTtX8Z/EHWdFaQtJFqZeW3xMN9fa3jXxX49+AX7S/iL9pqT9k6/+EP7FPxB1mf4Fftg2Pi7xV4Om034teHvFHiKL4e6H8d7/AOCWiW96nhmDSdU1Z4ta8T32q3l7418Aam0+p6NZySRahN+bZtxbxBHPKtCOaYqvRVJWwOJxdH6tjKdOvOg8CvbSWKnjcRWw9aphp0qlWCnLD0p4VU/aVz9g4f8ADrgxcPYfErhzKcHifrHN/auAyycMbl2Ir4WliI5mnhabwVLAYTDYuhRxkK1KhVlRji6tLG+19lh3/GfN4u1aHxvL490C6u/DmuxeKpPF2i3mmXcsN9oOrJq51nTrrT7+IQzRXel3ghltLuMRSpNBHMgRwMf2H/8ABHmTSv2nP2q/FHhL/gob8d9O/a+8bfCrQfh1P+zL4c8T6J408WeGdPPxEtNb13xf4vHhjxb8PPDl9p3izTBo/gvSLjxL458PWV94bhub9NP1b7Pe2l8PxZ8A/wDBIX4oftK/tAftu/CD4CeMvh5oXiL9lv4tahomleBviDqWu6VL4t+Hus+JPGdr4U1nw74hstL19JZYtM8P6X56axb2ltImt6bcTawjTbW/pR+GPw8+LX7BfxY/ZX/aQ+POmfGfVPhrpf7JHwU+CP7SkH7N2p+JfF+m+H/i7+zuNb0nwR8QPi74W8F2ieMPH/wsvfA/i7X4rmbRdP1bS9E8RaXHJ4p0S+0vVLWe3jiHNsrxKo4WnWpzxdSmnGMOSGNpqtDC4ynGlKVKVTkxWF9pSlChUjHEKp9XvKdSPL28J5HneDeIxtShWhgaVZqc5+1nl1aVCeMwFWdaMK8KPtMFjfY1ozxNKUsK6P1u0KdGfP8AvR4+8GWeg6tr1rpGnaR4Ynms510XVLHRdPCWFtJbPHp12kBijiul09sNJBM/lzTQSpM2HZj+fUvwZ/ab1S6+Bg12Xw18Nh+0Tb+LNQ+GWo/Hj9on4teFtR1vSfCGkwazd65rmn+D9Z8O6B4Ni160vtMn0DR30y3u7uXV7C1srIQCSaP9A/AX7Yv7Fv7U/hpNT+F3x9+GXjxUhMyQ+HfFWkXPiLSHlUA2uq6IJ21PSJ2O1brS9ZsrWePA8+GF0DrwPx70j4zfHSx0K1179oL4XeItG+H1/wCItS8E6j4g8BR3/jbTNC8QadYx6r4P1PWdA8Z6Jpuv6K0+kaTd291e+Hz4labSNNF3rl2Rfm/4ckzDKcJKtSzCcMPKEYOnFqcHHkupxlTpR9pFtOLUZxUVaWie/scT4DPcwp4OrlUJ4lOVRVWpUpqpGfs3SqQqV5OjOKkpJypycm5x1cb2/jA+LP7af7UHjj/gof4h/Zf+CT+JfGnxFuPi9Z/s6fDG38AfHODV/AvjfxzpWoxeFWstPk/aDg+IHgrU9K1vxZNeR6VqNvrPgZr60u9OnbWrGW4Wev3z+DPw18Z/EvwRaeGf2mvg/wCIvBfiay02bw18ZfhX8WvBKaFeN4k0yWbSdYsL/wAL30uq6bc+HvENzaSa/ot3YX+s6JqnhjUNM1DS9UvbS+tLuT5h+B//AAT7/aD/AGdfjv8AD79orwr8UP2QtS8Q/C/4k638Y9C0DxZ+zR8QV8Hf8LD1S31JYfFOrx6F+0JoOsXd7ol9fxa/pksGuaZGus6Ro81/Fe2Onx2LfYH7Zf8AwUYvvGel694P8NfFHwj+0L+3J428HzeCfAXgb4NabY3KaV4tvbabT7DxJrmg+Hr/AMR2vwy+HXgy/wBRm8RX974+8SyT2+i2EkF54h1vVHe8uPK4jxeQ5rRoR4fm62YyxMPaV6VDE04UcO4y9pWxNevTpqnTptwmp83LTSlN8sU5L2uD6XFWR1cUuKIxoZOsFOnQw9fE4OrOtiuen7KhgsNhatR1ataPtKcqfI5VZShCLlNqL+Mv2Gf2PPg18Z/gDaeKvitP8Svil4Oi+Jfxu8NfCfwF4y+LnxIvvhj4P+FXgn4yeOfBngTR/D/gK08SWPh26sIvD2g2iRXfiKz169e3EMdveRadHZ2sH0x4/wD+CeP7L+jeDPGGs/CD4WT/AAr+IeneEtfuPB2u/Bvxx8QvhXq1r4ktdKu59CmU+AfFWgW9+66mlsWg1K2vbe5GYriGWNmU/UP7Pfwl0f8AZ7+Bfwp+DWm3cdxZfDXwN4e8LTao+Y/7X1PT9PhXWdcm8zG2fXdZa/1e4BwfPvpOB0r2cMkihkYMrAEMvII6ggjgjvnOPzFfDYvOMbLHYmtQxuLWHliqs6NNV6sKbpOq5QjKlGSh70OX2i5febbldt3/AEDA5Bl8MuwdHFZdgpYuOCoU69Z4ejUqxrqhGNSUa04yneFTmVJqXuJRUbJI8K/4JW/Bf4Y6f+wl+zz8TfAXhnTP+Eo+Ivw08HeN/iLr0kR1LxL4l+IGv+G9Lv8Axr4h1rWbw3Go6l4i1DxJcapcaxc3E8k8s0nkoy21rb28fK/tS/FP9ob4R6nol3418E/EPwL4Z+Ii65qfww0vU9Y0nwLe6r4e0O6trKXVtU0LRLO4+JVily1zb3Fq2s+IPC73cFwZLbSgkTSJ57+xH+1P8N/2AvFHj/8AY5/aR8VQ/B7wpYfFXxd44/ZT+InjMPo3w58X/C/x7rU/jWw8I6d46vgnh+Lxb8Otd1rVfCF74Z1G9sdUGm6ZoF5pVlfWF6JIP02/aD1nxT+2VqHgnxP/AMLx/Z61TRfB1v4ht/B3iKDwHc6h4lvdB8Ww6a2q6TrupWXxMsPDWsWbXOmWVxZ3WkeG9CniSFoy7RXupx6h+lZLjcppVK9bM6ijCvL2uCq1YTlGtSqTlNTU6cZSlU5HTTt/DkqlKclOEoL8k4hwuf1qGEw2UU254WKoY+jQnThPD1aVKnT9jKFSUYRoxnGry8y/ewdGvCLpzjN/gz+1B8O/2q/jRa/sQeAPhN8QP2eNc8W/thfEHxvZ+AvAN5+2D+094S+IcWr+A/A/ibxAtn4ng8J/EXz/AAtHcNpuo+H4b7WTeeH18Xap4U02/SEakl3adL+yN4E+POl6J4l+GPxs0D4i+GvjH4X+K3i7wJ4g+FvxJ1w+M/Evw51HQLqCyfRoviJcWtvqXj3wnq9ssPj7wt4u1WTUml8LeKNOSz1/XdEtdO1e7+gL79kz9oj4UftUeBfj18F/j7+z7oupfBnS9Vi+Hel61+zT4h8WeHdH8V+IvDOvaHdeMzZ6d8ffDNpq+s6Hb+LtWm8Pxaml7olpqyWeoajpOrXGmaY1l9en4w+FPhdP/wALI/au/aG8LXnjvUdDiXxv8aPixrfgz4ev4s1qzsLOG7votPWTQ/Dvh3TnitRBpHhjw7awabpNhFp+k2cUot4pJODivOsixeGhh8olLF494lQlGnSxXLGnCM+ZJVoKLcqjgougnNqMlzKEmp+3wBk/E+WY2ri89jDBZUsHKVN1q2BTqV6tSi4Sk8NUc1CFJVW44mXsoylCSpurGM6f5Ef8FlPhN4E+EXgn4AeJfgn8SW/ZS/ae+IXxH8MfCPVvjt4NuvHPwzsvFHwv1m3+xeNk+L3jv4b6WI7vStD1qTwv4rtE8QXN34jFlpmsf8IzY3tpBrK2v8kf7ZXwP0P4DfFfT/DmjfG6T9oSbxT4PsvHviD4lHwd4v8ACFnqniXXfEPiaz1ZNKk8byNrvjDTHk0iPUrbx4yQ2XihtSkvLJJIUFxP/bh421TSv+CkX7TX7LPiX9nzVfjRa/AT9mn4gal8XPH3x9tovEPhL4PePPEHhuwmtvAPgb4X+G/HVhJ4Y+KfiCbxLfXM/iL4iWvgq8s/Cvg221TTfDvjFNQ8SWwT8hP+C4//AATr/aN+I3x4/aR/bevL/wCHPhP9nn4b/DD4ctpuseJfFc48ReJ5dJ8O6NoMvh3w/wCHNG0nVrmLV73xxqD6PZf8JBLoOn3EmoWk8N9LG8nl78JZtHA1cDlmOxHsKs8NiKleOIlSVVV6uLhRweBcJUHiFNwl7WlQWIjKEZSc6PJKHLw8d5FPNKGZ53luFWJoU8ZhqWGqYSFZ0HhqWAliMxzNThiVhHSVSH1evinhXGpOnCMMQ5xnzfjt+w/8Bfjx+2mLX9lL4F6fp/h7SYfE7/Fj4wfFfxC91H4b8H6Hp66dYaRNrE1rDMfstvPp0M+kaLDHPqniLxA0SW6WOm6Xqeop+x2tfDnxT+zlD4w+Nn7Fv/BXjX/2rP2mv2bNB1D4u/FL4H+O9Xm8deBfiF4D8ESQTfEmM+GtZ8VeL/DPiW28O2Est7q3hy9l8RXsNjHMLfUNC1uHT7xeP/Zo+G3xg8G/sP8AwT/Y7+Angi18RfGL9sDTLn9r39pyTQfG9t8N/ED/ALJbatpHhjwF4An+I+o6TeN4Z1D4oWJktdMEFlqMen6ZeeLjbW0y6xeXi/pL8TPDHh7WvD//AATj0XR/2f7v9m/xFP8AtGeK/g3cfBq9tvDgvvD/AMMNf+CHxp8L/GjSLa88K3F5pPiDwfrfhzSLfW012OXy/ENnHo+t30UNzcBR6mN4tzbCZnF4HMa2CwsMRXhTw+Dr4aM60cFTrTeJx0ZOWKdHEzoYijQ9hGhGlQviPrEpzhTl85g+AOHMzydxzjJMJm2MqYXDuriczweJrU8NLM6uFgsFlkuWOCVfCU6+ExGL+sSxMsRirYT6rGFKdSH9kH/BPDxz+zH+33+xV+zr+114a+B3wz0WH40/D2y1zXfD1v4Z0i7g8MeOdIvb3wt8Q/C9tdSWnm3Vp4c8daF4i0azu51invLKyt7yWKJpyi/Z/wDwzt8Bf+iP/DnH/Yo6NyeOn+h8/gfXrjj+df8A4NAvFGoa7/wR60bRr2d5bbwJ+0n8c/CukB2zHFp11N4W8aSR2+SdsTat4v1ScgbQZZpW2ksWP9RuenIPXjnPHOAOxA9e+Bxmv1H+181/6GmYf+FuJ8v+nvkvuR+I/wCqfCv/AETPD/8A4Zsu/wDmY8Y/4Z2+Af8A0R74cf8AhIaL/wDIdFe0joP6dPwoo/tbNP8AoZZh/wCFmJ/+WeS+4P8AVLhX/omeH/8Awy5b/wDM3kvuPxI/4eL/AB2/6AXww5z/AMy/4j7ck/8AI4c4/HocUv8Aw8X+O5/5gfww64/5F/xIeR9PF5/yMjjmvgnOMcgY3Hp39ME4zg9OxBGetL0H44+73Gc4AP4n8Rz0H9Rf6l8K6f8ACFgOv/Lt/wCfnru/Q/y8/wCIz+Kf/Rc5/wD+FUf/AJX5fn3Z+Cn/AASd8GeC4PiL/wAFC/2YfiF4X0PUfEv7OX7a0/7Q/wAMrW/tSZdFm8UReJPBeneMvDCTSPcW5tPDGkaeizeZL5Nj4otUmMjXKNXrP/BQ34xax8Qr/wAe/sfeGPEnhv4d/D6y+C938Q/2tfjN4l0W08Rx+Bfhd4kGr6fo3gnwlo99usD438YW2ia7qk2t6hHIfC3h/T01LRYbnxDd6cbX5z/4KDWOu/sMfty+Bv27NA1DxP4a+CX7S/gnUP2Zf2ofEXgiONvEXgu51vRI9F0X4jaQk9pfWf8Ab2jaVpXh3xX4ZSbT7uK4134X3GnXY3eI44pviLx7qHxy8a/FX9sX4CePdK/4WV8XvE37PPwD+I2neNfD1hJL4M/aN8Ifs7+M7fWPC3jHwxNp6XWnjRP2gPhbrWhq0cEn9mx+OpvEfh5fIla208fwxx9wXWyLj/N5VuWOEjUp43Lo1PejTo1MRhYfWoxqr2NSOFoVXipxcnGGIlSjUhOKqJf7LeCPifhOOvB7hPE0Oatj6+Enlue+xfLUq5jhMFjKlTL6s6EliaU8djaH1KjJRhUqYKNWpSlSc6Epe9/8EqvGnjnwH/wU4+Elx8TNK1nw9r/7Yf7BHh4XNp4gsrnS73xDrvwusbPQ9I8aS2V4kVyl54x8KfAHUfGii6iiu5bfxhJd3MEU10QP6+HdUVpJGVERS7s5wqqoyzMSAFCjcSTjjr7fyn/tkfGzwBe/tGf8EiP+Ch3wu1e3u/h7qXxVT4YeK9XjMcM+jeHfGN9o1nqPh7WIUP8AxLNb8NaTqvxR07W9HudktpqNpdWkgTy5Gb+qm9txd2d1bNytxbTQn/trGyA9BggnPt/L8s4mcsQ8szCVH6vLE4OphquHad6FfLsZiMLKg+ZJ/uqKw8VdKVrXSvY/f+CowwtPOcrjiFi44TMaWLoYpWaxOGzbL8JjYYhcratWxEsXO8W05OSTdmfAvxz/AGU/2VPj9rtxrPxH/Z5+EfirUy7keKb3wNodp4yuZCSzXb+L9OsrLxOsjOPMiK6qrQ4WRdsxZj89j/gmd+x/Cpj0/wAH/EzRrds/6Dof7Sf7Suj6cuQeItPsPi5DZ26bflEcEMcYHAT1+98FcqeCpKkYOQVyCMHnP9R36Uc9iO3b/wCv0P8Ak14lPMswpQVOlj8ZTpx2hTxVeEF6RjNRWy6HvVspyrEVJVa+WZfWqzbcqlXBYapUk7rVznTcnv1bbt9/wVZ/8Exv2H4pUm1L4Jjxg6MHKfEX4ifFj4lW0jg5JmsfiB468SWMu4jLRvbGM9CmMCvR/iBqn7Ov7CnwM8d/ETQ/Anw6+FnhTwzos91beHPA/hfw94Obxh4lEEqeGvCel2GhafZNrHiXxNqzQaTpNsIrm7nu7wyN+7WeVPq78R7/AOc1+X3hPw5oH7QH7ev7Q/jD4xz2us+H/wBi65+E3g/4HeBtdmT/AIRfwn4n8efD3TviL4n+M15pFyRp934vv59Ws/DnhLXrtHOh2GgXb6cItSEV3bdeHr4nHOrLMMZjMRhMHSjia9KWIq1Z1Y+3oUIUaSqTlGEqtavTi6jT9lBzqqFSUI058GJw2Ey1UIZVl2X4XHY+s8Hh60MLQowoy+r4jE1K9Z0qanUjRoYarONFNe3qqnRc6UJyrU+p/ZT+DXx6sf2fvhxpvxm1rVr34h3unal4z8aN4t1m9v7ux8VfEHXdT8ca1oUSNJqUttbeGrvxA/h60tR5dtaWmmQWtqBDCij618GaJ458Lagmn3EFve6DOx86SO8jaKzYgkT2qTGK6XcwCywrblJc7hscF69hR1dVaN1dCAVZfmDKRlSCDggjBBHB7deFZggJdgFAJJIwABySSSQAB1JrjxGNqYmpWnOFGKrVJ1HCEOWEJTk5PkSfuqLb5UnZbWtoexhqEMLhqGGg5yjh6FKhCU5c1ScaNOMIynJ6ym0ryk95Nt7nlj638IfjBffET4W3sngv4g3fgLUNI0b4leBdYsdO1+PQr3XtEtPEeh23iHQdWtri3aPVNEv4L7T55LeW1uE+0RRStPa3cMHzfrX/AATW/Yb1m9n1GH9njwh4TvLhzJNP8NNR8VfCYlz1ZU+F/iHwhHGTxzGiYAHYDHBfHCC1+Ef7cP7Kfxv8J3MMFt+0HfeI/wBlX4x6dYyRvB4nitvB/ib4mfCLxFcWsTCKTWvBuveGNc0d9WlWS8Tw54kl04Sx2sMULfpDz3I/Ij9c1vOpicBDDVMFjMVRp4vDxrWp1qlGUasJzoV4T9lKCdq1Gc6b1fsZ0uZ8/Ml50KWEzOpjKOY4DBYitgMXPDp1sPSrwlRqU6WKw1SHtozcW6FenCqr2+sU63KlDlPgiP8A4Jn/ALIioIX8L/Fe4tMH/QLj9p39p6axdSQ2yS0f4wGGSPccmORWRv41YbgfQfAX7B/7HPwz1aHxD4T/AGcvhUniO2miubbxL4k8NW3jjxNBcRMjxzxeIvG58Q63FOrDJmjv1kOfmYhmz9a8/wB4H8Pp6Ee35+hxTufb8v8A6/1/yOcp5nmU4uE8wx04S0lGWLryjL1i6jT87rU2hk+UUpRnTyrLqc4tOM4YLDRnF6JOMlTUk+1me+aPeQX+mWdzbJHFEYEQQxgJHA8QEbwIigBUiZNiKFACBSowQK/Df/g4f8ZXuk/sD6b8ONJ3S6z8cvj38LPhzZ2EbkS3iWcms+PFG1cs8Q1Twdo8DgBsTXMHynIr9pPAcMqaTPM7MI7i8cwp1VVjRI2dRjgvIGVucfuwcDJr8Ef+Cu923xb/AG+f+CVX7MNs32i2i+KOufHTxhpYy63WjeFdU0C/06SaHkCEaP4H+IVvJIynMU8+xk8t8+hw1Ff21g60knDBe3zCbeiUcBh6uLTb6LmoxXq1qtzh4xnL/VvH0IO1XMFhcqpJWvKWZ4vD4FqK2vyV5y6aJ+aPjTwL8Vfip+yr8ff2p/jdayW2oeF/gn4p+GvwR+Mf7Pev+Fl07xno/wCyN8JfBuh+DvhH8Yfhjr8gS7i/4lkXifx8uhwyXXhPxdo8+pi4msvEFna3EP7m/tGeIfhr4K+BPxJ/an1vTdD1bVfgR8EPit47+Fviy5jSW50fXfFvw/vtAsD4euWIWC78XR6laaAkigvcRagIVKiVgfwm/wCCi/xq8P8AiPxl+2l8ZfAdrLrfw/8AAv7Mej/sU654v0mymvtE8UfFzx/8Rb2618W1/ZpJbXth8INH8Rrp+p6jJIkUXiXWbjw/bzTXE1vFJ538ZfE/7Sf7RPh/4Lf8E538Ta/p/j/9qzW/hf8AEL4kfC1re0+w/sn/ALJ/w00e2f4YeFvFvk24vW+JHjDRrOD41fFO21i9hmg1Wy8DeGrHS9PfWFtrr7LCcPYziTH5CsPRVPGYqvhMFVpQTjXq0fq2A5qDjCK5pQo1qi9rV5fcr0cHiKkqvs1L86zXi7K+Ccm4mxOYYn/hLy3AY/Nliqz5sJhqlLGZpKGN560pckKlfD0pujh3Je0wuIx+EowourKH9An/AAb1fHb4t/spf8EvPg14P0HQfBawePvEvxG+Lk//AAkGj61c6m6+LfFV3YaRcyTWfiHTYGgvPDGgaDe2hW1X/RbiIl3JzX7bf8PF/jvj/kBfDDGB/wAy/wCJOh/7nD6/XHHUZ/ODwL4M8P8Aw48E+EPh94Rsk0vwr4I8M6H4R8OadGAVsdD8OaZa6RpVqCMb/IsbOFGcgF2UswyxFdX83rk4OOwJ4z9e5HGO444H+hOC4F4boYPCUMRlGCxFelhqFKviJU5OVetTpQjWrSbau6k1Kb0VrvTZP/DXOvHLxIx+c5rjsv4tzvL8BjMyxuJwOApYiMaeCwdfE1KuFwkI8jtHD0JU6KV3pDVu7b+9P+HjPx1/6AXwy/8ABB4j/wDmvor4EyP72PxX/Ciuj/UvhX/oRYD/AMFP/wCSPG/4jV4q/wDRdZ9/4Uw/+VH9Iv8Awzh8A+f+LQfD3Ax/zLGm5wcj/nh64/XjNB/Zw+AYz/xaD4e/j4Y03vjr/o/B9uvQ17VuXB5B5z6dT+uOvH8+aCRjt2ByPfv9OTj/ACf5f/tjNv8AoaZj/wCFuJ/+Wn+o/wDqjwp/0THD3/hly3/5mPjT4/8A/BP39kD9pb4OfET4FfFX4H+B9S8CfErw5e+G9bTTtFsdM1nT1ukDWWueHtXt7Y3GjeJNA1CO11rw9rFtmfTNYsbO8jDmHY3+e7+05+x94r/4JMfF3wb+y1/wUTsfid4w/Y1tfEWtWv7C3/BST4NX2v8Ahb4lfBXTdflmvL74c+I9b8Mtd3VvpQt3uJ/FPwr1mDWYtOlTUvEvgXSvEnhyeOPSf9O3cBnp1HUEcdfTr1I9+a80+MHwa+E37QPw68T/AAj+N/w68HfFX4Y+MbI6f4m8D+O9A0/xH4d1e3LLLEbjTdSgnhW6tJ0ju9O1CBYr/TL6GC/0+5try3gnTyc1pSzmm4ZhiMTWmo2pYiVepLE4dpSSlRrTcpQspzThrTnCdSnUhOnUnCX0nD7w/C9XnyTAZfgaMp89fB4fBUKGCxbbptrE4ehClCo26NFqouWtTnSo1KVSnVpUpw/zDP8Agof+wP8AsVfDn9gLx18d/gX8SrL4heN4/GHhf4h+Gfi/rfx7XxnqPxBvvEPivTbPxLZadaWOuad4SvNbu9K1rUNbki0rwlB4gkvNMZp5DKs7j+pH9lP4uw/H39mj4CfGiOdLib4mfCTwH4v1MoVIg13V/DmnT+IrF9p2ibTdeOo6fcKuVW4tZFUkAE9/4t/4NKv+CPWpXvjnXvC3wr+Jnh/VPEHh/wAUWnhTw5L8afH1/wCA/BfiPWdHvbPQ9esNPn1B/E9+nhvVbm21iy0vWfFeq6ZPLaR2l/Z3unl7R/x//wCCBfxA1u9/Yw8Qfs/eNo5dO+Iv7Jfxu+JvwY8UaFduTqGmRHX7jxZbLcg4cQ22t694m8OWwkAaMeGpoAiQwxg/lHGGQ4jA5FQrVsfXzOphszqT9viI8tSjhsbSp03SXv1E4RrYeja3JHmqvlhFWR+7eH3E+FzLibFUMPlmHyenjcmpQeGw1Tnp4jGZdiKlVV3elSftJ4fF4jm5vaT5aMXKpN3Z+ouu232PWdTgwAq3ckqAjok489AOnASRQMdgaysj1Hr+H5/Xmuz8dWxh1eG5Awl3aR5bBIM0LNGwzxyI/J68464rjMD2568dfw9e/OelflZ+0SVm12fn5P5/8Ou4cd8dfy6n8Djr/wDqr82P2yf+CfVv+0brmreO/h/440jwB4v8XaB4b8JfFXwx4v8ACtx42+Evxr8M+DNaXxB4LtfiD4X07XvC+rw6/wCDtVDv4f8AF2h67a6pDpktz4fvob7SJxBB9pfFT42fDD4J6Pb698UvFun+DtGu76DTLTUdVju0tLvUrqOea3sLeaK3kilvJo7W5dLZHaUrE7bcDnx6T9tj4GuEOly/ELxF5wbyG8N/Cb4la/HOF27vJfSfC155wG9c+UH+8v8AeXPr5XQzqFSOLyrCY2rJNxVShhKmIpy5XGTjJeyqU58s4wmk03CpGFSPLOMJLw84q5FUpSwWc4zAUYSSm4YnHUsJUjdSipwk61KpDnhOpTk4ySqUpVKU+elOcH8afDzwx/wUK/Ys0C2+FPgf4e+Dv2w/g34fjNh8Ob+5+IQ+HPxR8I6Cjs1l4Z1xfEttquh+I9M0WF00zRtRj1fTL/8As61jW9tJJVRpMr4pfBz/AIKC/t3eGtR8BfFNfBf7GfwavoC2reF9E8QH4s/ELx7e2xE+n6X4pvvD114W0nS/AT3yWk+u6DpOuHWfEFpbzaO+taVbXstwn3DH+1ha6i23w78Bf2qvE7MoaL+xv2X/AI+XKyIchXEx+GwgVGIIV2mCk9DWrB8dPjDqBxo/7EX7ZF4pG5JLr4GeMNCSQcYKnxJpmigZyOGZTjkgKN1fRU8v4olVWKpcLzjj+f2n155diFV9rpL2yoVan1GNW/ve0WFTU/fjy1PePmquZ8HwovBVuMcPLLuT2Ty1ZzgHRVFLlVB4mjTWZyope77OWPcZQ/dS5qXuHzd+yb/wTi039nvV/Anif4i/Gnxd8dNZ+E9r4jtvg9oWo6VF4S+HHwrfxcLuLxBqnhjwkNY8TarfeIbrT9QvdDstb8UeLNem0Xw/dNo+iQadaR2ywfpj36g8f4d89OnFeH+G9Z/bJ8b3U1n4L/4J5ftQavcwwrPKL+1+FXhiGOJiFUyXPiv4o6FEjFjtCtgkghQdpx4uPj7+0l/w0Uv7O2pfsw23hPxB4Y/szUfjHqet/GL4deKIvhLo97IXTSvEi/CfVfiJotv8QtTs1afRvAF54msfEYhlttU1ew0zQ5v7THBmeRcUVPa47NcHVoxpQcqlbEywuEp04OTqNRhzUoJ1KlSUlCnDmq1pyajOrU97uyniHg+j7HLcmzHD4iVapGFKhg5YrMK1WahClFyqRjiKko0qVOFN1KtTkoUKUYuUKNJKP2x/3zyOvXPT6cduvpSEntg9sDrnIGO/v+lA6D8O3Tp/j+H4GtjQbP7drOn25GY/PE0oxkGO3HnMDx0YJs78sB7V8qfZLVpd/wDgHtGkWf2DTLG0AAaG3jDgd5mUvMfbMruc+/pX8rvxK8J/Br9u3/guZ8cfBnxiu9I1f4f/ALOvwE0r4d+FNCu/Hl94I1HW/GOn/wBh6hq1vol1o3iHw5rssvh3XPiB8QXv10y+Y282mW815F5Uq7f6kPGPizQ/AfhDxV458TXi6d4b8GeG9c8V+INQkICWOh+HdLutY1a8csVAS1sLO4mYsygCM5IHNfih/wAERf8Aghl+yX/wVk/Y/wDjD+3N+3Z4J8e3fxH/AGk/2tPjF41+GvinwR8QvEXg/UtP+HthqUNhrVtFDC934c1a21H4nv480+abUvD97d2kfhyyi0y9s0ku4H++4FymrmM84qU8RUwco4FYOliqcW50auLqKTqU7TpvnjSoTi+WpGUVVTUk7M/NPE3PKGU0uH6NXC0sfCeZvH18FVko08RQwFJxjSquVOtFQnWxVOa5qU1J0LcskpJfnN8W/B37Jf7NXxD+Hv7N37FWk/FP9vn9sm+8VPF+zv8As0TfEe/+MfwJ+Afju+u7q+h8b674W0WOy8M614o8Nz3F/rNpoviW+15tBMF34n8Y6r4csrVNUn/sC/4I0/8ABEDwt+xL8N/GHxk/bA/4R/8AaF/bz/aTu08WfHn4jeIoYPFFj4NF/ONUT4YeCdQu4ik1hYX8v2rxZr9lFaW3ifW7ayis4E8OeHPDMMH35+wX/wAEm/2Cf+Ca2lajb/sn/AjRPB3irXrIWHib4peIrzUfHHxX8SWQdJZNPv8Ax34lnv8AVrDRJZ4obmXw14cOh+GHuoYbz+xhcokw/Rvcvt0x0J/DoOOlfr+TZc8mSq08XisRj955hVqSjiW7ttUnBr2FNybk4U3ecm51Z1J+8fgHEmaU+JVPC4rLsBRylpRhk8aEKuC5YqMYvEQrRksXUjGEIRnWi404QjToU6NNKC8V/wCGcPgH1/4VB8Psf9ixpuf/AER9O3GfcGj/AIZw+Af/AESD4e/+Evpv4ceR9OPcHPIz7Xx3Azg9vrkeueuR165xmgFfUHHPA+pPr26+/XrivpP7Yzb/AKGmY/8Ahbif/lp8R/qjwp/0THD3/hly3/5mPFP+GbvgGef+FPfDw57/APCM6Zz7/wDHsaK9qynfH5f/AFqKP7Yzb/oaZj/4W4n/AOWh/qjwn/0THD3/AIZct/8AmbyX3H4tH/go98Zz/wAyl8Mzn/qG+J//AJqOnAz6c54Bpf8Ah498Zuf+KT+GfvnTfE+OPY+KMZ6ZA5/Kvz67cEYwc9O/0GR06flnFJj0K9T24zyT1HQdO/Y8V/T3+pHCn/QjwX3Vf/lnl/V2f5hf8Rt8V/8AouM66fawvlf/AJh/61t0t+gx/wCCj3xm7+E/hnyMn/iW+JyOP+5owenXrwKD/wAFHvjPjJ8JfDToD/yDfE59eCf+Eox1/XNfnz+IOBknGeMDH8PGP880YGOo6YJA+pzjaegznkE4GcUf6kcKf9CPB/dU8v8Ap55f1di/4jb4r/8ARcZzuvtYXbS9/wDZ99/6sfoN/wAPHvjNz/xSfwz9/wDiW+Jz/wC7R7cevHqK/km/aj1P9oz/AIJt/tv/AB+/4KJ/Bv4bWnxP/ZS/at8UL44/ay+Dng6K6tNU+H3i+5vL3VNb+IWhLcSajc21rLr+teJfFNprcz3WjW0/ifxF4b8UQ6RYv4d8SW/7p9uSM8HOPqeTt7+h5P8AOOaKG4ilgnihmt545IZoZo1limhkVkkiljdWjkjkQsrRupV1JVgQSD4+f+GPCOe5VisrnllHCLEwSVehz89OcbShJxdS04qSjJq8JJpSp1KdWMakfq+CvpKeK3B3EuW5/LiPF51TwVVurl2YSoqjiKM0oVYwqU8OpUKzg5KnVtUgm3CtRxGHnVoTwf2a/wBqT9n39tr4U2fxL+CPjOw8YeHLnyYNZ0qQpY+LfBOtvFvk0LxboEkj3/h/WrciTyxKJbHU7dRf6PfappNxb3k/Xa74eu9Ek3EtPYucQ3Srjbn7sU6gHy5AOh+5JjK4O5F/C39of/gl/wDEL4RfEm7/AGsv+CXfjg/AL4327vfeJ/g3bXUNh8JPilaif7Zd6NBpV2p0DRm1OUMG8Ma1ay+Brm6a1uLA+C7u1OrSfTX7Fv8AwV/+Hvxr8Tf8M2fte+EW/ZS/a30q4h0DVvAXjuK50TwR461V1SKKTwTrOuMP7PvdXcpNpnhbXbuSfUI7uyXwnr3jBJWuIv4U8Q/CHiLgbE1KvsKmPyiUpOhjaEZVFGCu7VOWKu4xTcvdhUglKVSlGny1J/7F+CX0luA/GbLqMMLjaOVcSUqVP69k2MqU6FeFR8sXKEJTadOpUaVKpCpVw9SUo06Vd1/aYel94eMfBXhP4h+GtV8H+N/D+l+KPDGt25tdU0XWbOK7sbuLcHjZo5FJjnt5UjuLS7gaK6s7mKG6tJoLiKORdv4f/tIftsfs3+GJvhp4P8X+Hf2rfg5Fb2y+Hvht+0D4r1Tw/wDGTwBp1ncRNbab4L+P1povi278V+H7ZFe0s9L+KfhPXNbs7OOGytviBDbQpbV6B4i8Jy6bvvdODz2GS0kQy81oO5PVpYB/f5dB/rMgGQ+WeIfDtp4htVimeW2uoGMtjqFsxju7KbAG+GRSrFHAAliLBZFA5V1SRPz3Jc/zTIq6q5fipUYucZ1KUoqth5yja0p0Z+65KyXPBwqW0U+W8X+1Z/wtkvElD6vm+DjWahKFPEQk6WKpRle6p14WlyO7bpz5qd3dw5kmu0uf29v2xrxQuk/sYfCTSZHQgTeJ/wBrPUJoonzhWeHw/wDs737zIpw5RJoiy5USIxBrm7r9rz/goNeIzJ4J/Yv8Eo3Ky3/iT40/EFoFI4EsMWk/DGKVoz1KXcauCcFMAn4h+KngT9r25k1CL4f694NfSBeWlnpZm1XV9R8TX9pOkf2jU7+bX5NJ8N+EI7OXzlljt9K+JdxLAkUtvp9xNPJb2uv4D+DPx6LRyfErxD8O2hTS7W1Wy0BNf1PWH1KERJcarqPimey8OaXqJu1WR5LHTvAPhyCO4lMlu8NvGlpX3NXxD4olRVV5zlVG/wDy7oYSnKunaLS5Zxna6eju0mnGTUk4nwGH8J+B4Yh0Hk+cVlHX22IxVaOHkk2m1ONSi2rx1jyqbTjKEZQlzH03L+0J+3z4tttW8H3v7ZngL4caR4stZdM13Tv2a/gmfDfi3+zJ1CXw0Lx947+I3xSvfCerG0823g8U6Xo1lrWlGT7Xpl3Y3kcEsT/h58N/Bnws8N2/hbwRosWjaVFPc390xluL3U9Z1e/kNxqmv+INZvpbnVfEHiLV7pnu9W13WLy91XUbp3nu7qV2Bqx4R8Gaf4UtWETG61CdR9qvpECu4BDCGFPmMMAb5igdmdsNIzFYwnf6fp15qlytrZRGSQ4LuciKFMjMkr4IVfzZj8qKzECviM44hzfO5R/tHMK2LhB80IyhSoU+a1ub2VCnTg2lpGU1KaTaTSbR99kfCuQcOqaybLaODlVSjOopVq1aUb35Pa4ipWqRi5ayhCUYSkk3FtJlSKOWeRIYY3mmlYLHEilndjjhVAJ9z6YOTgV614X8MPpO69vHDX00Jj8pCpjt42KsyFgCHlOxQ7LlFGVXcDuPnPxN+KnwV/Za+HurfFH42/EDw38PvCekxFdQ8T+JrwWwuLgo8kWk6Fp8azalrOq3gjYWGh6JaahrOpSKUtrS4ddq/wA//jz9ub9t7/gqnresfCf/AIJ9+Gtf/Zw/ZaW+uND8d/tbeNre40jxXr9hGzQ6laeCDaSedo808TMIdG8J3V74wJfT5de8T+AbO9u7UdfDHB+fcXY6ngclwNbESnNRlWUJexpq65m52s3BNSkk/cj79R06d5ryuOvEXg/w1yfEZ3xbnGFy7D4en7RUqtaEa1Rtfu4wpt35qjTjSVnKtO9PDwrVnGnL2z/gr7+3novi7wl4o/4JvfspLe/GT9qj49GD4c+K9M8Cypf6Z8L/AApfXtvJ4xsvFerwb9Pg1rV9Bgv9E1bSZLiKHwp4dvtY8ReLr3RYrLT7TV/3w/Yg+Pnjv9in9kb9nr9lTwX4V+Gk+ifBD4X+G/BUmonTfEZk17X7a1N74w8TXHl+IbWM3Xinxde654iu/LtbdDdapLsgiXCL+Xv7FP8AwT9+Av7DvhS50/4b6ZP4h+IPiK3iHjz4veKkhvfHHi+4Mi3E8BugjR6F4e+2gXFt4d0kpa744bjVZ9X1VJNUl+5MZ544Hpx+Hy8gHr6Z5I5z/enhx4NZPwlk8sPnFOlmuYYucK+IdS7pUKiik403CS5pNKKlZypwUVCm6jdStW/xr8evpb8VeI3FFLEcFYvGcMZDlkKuGwlSioRxmZU5TjKNWvCrTqOjRg/aOjGSjiKjqzrYiNFexweE/QX/AIePfGbv4S+GnIyf+Jb4mPrkf8jR25z+PvS/8PHfjOOvhL4Zjjp/ZviboAf+po6Dmvz5xjuM9OnB7novI4/DHJ5xR+RHuPx4G0kDHJ+vWv0T/UjhT/oR4L7qn/yzy/Puz8K/4jb4rf8ARcZ1/wCBYby/6htu783t0/QX/h498ZxjPhL4aDj/AKBvifj8P+Ep7Y544z70D/go98Z8D/ik/hoOP+gb4n/ED/iqO2OeK/PrHrgkkdsc9Sfu/wD1uec90x2yO2OB+J+7yPQ9Pej/AFI4U/6EeC+6p/8ALPL8+7D/AIjb4r/9FxnT2+1hvLX/AHfbe/r6H6C/8PIPjN/0KXw1/wDBb4m/+amivz3oo/1I4U/6EeC+6p/8s8vz7sy/4jf4sf8ARc5z/wCBYb/5n8v6uz+j/wD4Zs+APP8AxaLwD25/4R6w6dz9w9Pf9c0f8M1/ALoPhF4A7Y/4p6x9v9j68fTrzXt/GCcKefXj69P89RnPJkeg5I78nPrgfpyDzziv5i/tjN/+hrmX/hdiv/lvkvuP9QP9TuEf+iV4c/8ADHln/wAyniH/AAzX8Au3wi8Adv8AmXrH/wCI/H8hzR/wzZ8Af+iReAO3/MvWP4/wdR/nrx7fnOeF6jOT1HY9Ppj/ABqteXlnp9nd6hf3NtZWNjbzXl9e3dxFbWlnaW0bTXN1dXMzJDb28EUbyzzyukccSNJI6qpIP7Yzf/oa5l/4XYr/AOW+S+4P9TuEf+iV4c/8MeWf/Mp4x/wzZ8Av+iReAP8AwnrH/wCI/H9AT1pf+Ga/gF/0SHwD25Ph6x9Mn+Dr6fhyeo/lC/a6/wCDib9qr9q39onxJ+w1/wAED/2f4v2jPH+gzXeneNP2qtc0i11z4d6Ettctp994h8CWes3ukeAtN8H6dfqLWz+LHxZ1k+EfEd75ln4b8Ha5Z3eh67q3yT+zT/wS1/4L3f8ABRT4Yw/tOfET/guJ8W/g3rfifXdb0o+B/APjv41aPoenDRrvyJ2XQ/hR4o+EfgHR3e4eRVs/D3hl7NI0XZeyIFVT+2M3/wChrmXT/mOxPy/5e+X4B/qfwj/0S3Dn/hkyz/5lP7c/+Ga/gFz/AMWi8AdeD/wj1jz3/ufh696/kK/4L2fsz/An9vL/AIKKf8E6/wDgkp8CvhZ4B8LfFTxTrF/8eP2m/jl4N8IaAvxD+EP7OWl2esQf8I7a+Jnsrk6emu6VZeNPEy+HdZkXTbrxXb/CiSS0nTX4BP8AL37LX/BG7/gsl+038HG+Mlv/AMF9v2nfAmkx+JfEHhyfT9Y+L37UuotbtoF1b2sl/NfwfHKzgjt7h7hWG+JRAqs0sm0Fh8m/8EpfF37YP7HH/BUb4pfCjxf4g1X9pz9qv/goJ+zsfAnwR/ap+JOu+IfE3jW1vfA/i9IPEd1P4r8b6p4j1q50/QfB3w41+bVLLUdXuhp0fgbwBqMsE2i2EWl3WdbMMyxFN0sRjsbWoyaUqdbFV6lKTi01zRnOUHyvla0dvdfY6MHw7w5l+IhisvyHJcFi6cZezxGDyvA4bEwjUi4S5KtGhCrBTi5QlaSUk3F3V0eF2/8AwUC/a3/4I3ftn/HH9gD483uvftm/Ab4AeKL/AEzRfFgKv8VNH+E0UNnqHh3xfoGvLeata3VnYeGtTsF8SeBPFeqahZ+FddstQ8J2Xi3wzb6NdGv6JP2bf2k/2Zf21/CP/Cwv2cvilpHiK1jigfxJ4X8oWXjHwfeXIyLPxV4PvpbXWNBlMqyww3TQTaLqjwyTaHqOoWgFzJxv/BZr/gkV8Nf+FT/ssWv7MPiDWLj/AIKveFvGU2r/AA78VaT5FxrXx0g8RXLXPj3TPihLqVw0WkeCYdRS5HgnUtce702xaXxJpWuw3mieIPH3iHTv5VdR+FX7NerfGPUvh7+1FofxR/4JW/t1eErltN8a6Xpd1L8KPh/4j1d3KnxNod7qttN4f0DTdd8s6lbTW2teGNC1aOWC90bxL4wjuRqUn5XxVwnl+Ik8bRw9bCSnrVxOAofWIc9/elisvjKE3GSu/rOFlzRk5OvRkkpv954H43zTD045dicZhscqclChgs2xX1Ko6TjFRhgc3qRqUo1IStH6njYqE4ciwteMuamv7Z2+H0o+7q8bY5G6yK/Q8XL4479ue1RN8P70fd1K2OOPmgkXr9C3Pt9K/mJ0f9gH9sV9LtNQ+Gv/AAVa/bEbwvexCbTrvSvGHxD8T6Jd27BAkun6jofxitdJuEIUBJ7VWQoQFO3Ob83/AATw/bQurS5vPGX/AAVO/bTutPtY3nurq48YfEfQdJs4UC75bi41f4vXlhaxqozJJI0aA/O55JP528kytOz4iwqd7cv1DMOe/ZxdJRv5c716n65/bWeNXXCOKa5ebn/trJ/ZWsndT+s8zi07p8i0Vz+l+XwTPZxyXeo6vp1np1rHJcX15K5hjtbSBGlnnkknWOCKOKJWeSWaWOKJA0jttU5/EX9tb/gvH+zz+zq958Iv2SdNsP2mPjbPdDRLbUtHuprj4SaFr9zKLO2W/wDEumOt58Q9TF3LAsOi+B5v7Kuy0lnL4w0m8gNmfww+P/wk/Z6s9c074Vf8NZ/tc/t9/HrxTqlvoHhL4HfCP4gj4it4j8R3MnlWGjanrdpo/wARrDzJ7wGCbSPDtxr3iyOXMSaLCS1zF++3/BEj/giN8Nf2f/2pYdf/AOCrPwTt/AHxh+Jfw/huf2UfgpqN9bX/AMMfC7+I7G4sNUvNd8SwavrE+ofHLSrO6itPCFld6/dzeD9de9v7i5bx6/gsaP8AZ8PcC5fXksXjK2LxmHjaVOnLCTwFCq007v2tR4mtTs7pwjRpyt/EmrxX5zxX4k5pg4SwOXUMvwGLneNWrSx9LN8RQTW0XQorBUKqaakp1cTVi3/CptRmfKPxf/YJ+O37O/7bX/BMT9p//gvR4n8K/tHfszftXfEHWfBfjfwh4d8W69a/CD9mjxPr2kWt18MvDXiS+8OXGheFB4djvtW0bxb4ttfDKzeGvEHh7wX47i1LXfGltp82rap/ogeHf2Sv2YfCWhaT4a8JfAf4U+HPDWiWNvp+h6DoHg3RNI0TStNt4wtrZaXpunW1vY2VlEmFgt7WCKGNPuKBjP8AFd/wV1/Zn/aJ/aB/a6/YK/4N/bT4v/a/g38Q/iTrf7UPh74iyafH4k8bfDr4N+GfCnxB0axTxHpCano8V9D4P0zS/iyND028u9L0/UtYTTU06+srK5s9K0Xm/wBmP/g2j+LX7QOrftB6Vcf8Fc/2n/CqfA745eMfg5ayW3hnxLqv/CQ23hS6a2j12aOT49aeuly3irubTo2vY7fIUXko5r9bwVSrlkPZ5bVqYGnGEaajgqksLFQTvGHLRcFyp6qOyeu+p+AZtgsFxBNVM+weFzqr7SdbnzbDUcxmqskozqKWLhWanJJRc01JxSV7Kx/cj/wzX8Af+iReAcdv+KesT2zz8g9+n19aT/hmv4Bdf+FReAf/AAnrHGcD0T1OD+HHc/w4fs2f8G0fxb+Pnin9o3w7c/8ABXT9qDwwnwI+NniX4SWt3B4Z8Taq/iWDw/dXVuuuTwyfHuwGly3Itg7WEcl+kRfaLtwu4+ceBPh//wAF+P8Agk54h/aG+I37HX7Rfi7/AIKF/ssfsqfFfxD4H+LfwQ+KkniPxhrNx4V8P3WoS3vifSPhdrviLxN4l8O6SLLT5LzUdR+CHj5fEWnXQfUtb8Naj4btdTlPd/bGcf8AQ0zLRJ/79idFpb/l76fgeMuEOEHb/jFuHNb2/wCEPLOm/wDzCn963/DNfwB6/wDCovAOPfw9Y/lnZ357HoaP+Ga/gF/0SHwD07+HrHrn/c6dfpg9elfDf/BJn/grX+zx/wAFbPgBL8WPhEk/gv4i+C5tO0T42/A7X9Rtb/xX8LvEuoQTS2LreQQ2ieJfBPiJbS/uPB3jO1sLG31mGyv7G+07RvEGk61omnfqn14wvQd/6YPGfc0f2xm//Q1zL/wuxX/y3yX3D/1O4R/6Jbhz/wAMeWf/ADKeID9mv4A9T8IvAP8A4T1j/wDEEf8A6xSj9mv4A/8ARIfAOD/1L1jnrx/AQe9e3Z6fd59+vbAHQnp39s96AfoMZ4B6euQOB3+h4zzR/bGb/wDQ1zL/AMLsV/8ALfJfcH+p3CP/AES3Dn/hjyz/AOZTxD/hmn4BHn/hUXgLn/qXrP8AouPyor3Dn0A/H/61FH9sZv8A9DXMv/C7Ff8Ay3yX3B/qdwj/ANErw5/4Y8s/+ZfJfcfjB/w8h+L54/4Qv4anB/58/E/tx/yM4yORx7euKP8Ah5D8X/8AoS/hryeR9j8T8H0IPifgnH59+lfnnwDyF/Mdc85HOMZ6Z/PAIMKDjI9xnpnH4noR7Z5GM1/Tv+o3Cf8A0I8J0/5+vXT/AKe6/hq9d2f5jf8AEb/Ff/ot84/8tfL/AKhvLX/h7/oYf+CkPxfwc+DPhrgnP/Hn4o5xjp/xU3PTnrX883/Bff8A4KrftWfGD4dfCH/gmZ8AtM0DRvix+3P4t0vwVrw8CnW9P8S6h8PtT17TvDFl4J/tC71u9/svR/ib4p1CLStevo40Sfwp4d8U6NqDjStVvkf9DuPYexJHrj/Aj65zkY/GH9n/AETT/id/wdR/s+ab4si/tDTvhT+z94y13wxZTfvbeLUtI/Z9+LHiXSZ2R+I307xF4pudbtmjBZdQs7WUHI3L8Tx/w/w9kvDtbE4HKMLQxVbE4bC0q8VUcqPPKVWpOPNUlHmlToTpptO3tHJWkkz9r8AeP/EHjTxEwuXZ5xdmuNyvBZXmOaYnA1Xh1SxfsYUsLQpVHChCfJDEYyjiGoyXM6PLK8JST/pP/wCCCH7P3wd/Yp/4JfeF4fCnhvR7XXdU8ReMNT+JfjOy060tvE/xP8Y2WuXWjWd/reoeWLy8S3iRNL8PWNzPLbeH9GVba3VFW7mnj/4Jp/F3xn4S/ZU8O6FodzY21jB4u8aXA8ywiuZnkudXaWTzJJy4wGOFEaRkADJLZNdF+w/fyQ/8E9vhBpiMVjvviF8ULqUA/fGn+I9TjRW9V3X+8g8b1Q4yox4f/wAE+f8Ak23RP+xo8W9P+wmf8+3tX5PleDoydKdWEairRxTUZpSjy0alCEPdd1dOVS710a7H9lZxjq8I16dGpKk6E8HHmhJxm3Xp16k7yTTs1Gnpto31Z9Bf8EsPi3omg/srzeCfFOl3D6Pqfjn4gCbU7FlnkSPVp7eK4juLFwjGJVZiJYJZJQDhbaRgCf5e/wBvW28Z/Af9nL9jn/gpl8KbWXUfHf8AwT5/bevfE2oWwkmgt9W+FfxB8R+GrDxJo2qSwgyLpWueItE8L+FryLAEeneNNcKsjTSCT9//APgn1/yb1B/2O/i//wBK7f8AnXgPwz+Auh/tR/8ABPT4+/s9eIBAlh8XYPjD4KhvLhPMj0nWdUtIl8Oa+q4b9/4e8RR6VrtqdrFbnTomCnbitY4ClOjSjBNSxODqykruUfbQVBwmk2+VuTXNZpNW23Mp5lWhWryqSThhMfhoJxilN0Kn1hVIScbKajFPkum027vofp/+zPB4S+Enwj17/gpx+1f4y0rxJ8Sfjb4L0TxzoWqWUlvqOn+Efhx4w0q01fwF8O/hnaC5lhn1XxFo93pkQFlcsi28kOnm8+yW+va1qv42/wDBVHxp+y7Y/s5+I/29v+CrPwF8N/Hbxn8W9Ouvhd/wT4/YY1G+1TRvFeoDWHjm0/Uode8NGz8deHiIb6DxB4s8V6PJa3Gn2d9btFbzeJPEvgLw1ZfD3/BMT9vb4ZeE/wDgmp4d+N3/AAUy+JthL8L/APglp4m1X9lj4b/sz6fcW9346+M/xp8Nx3Ot/DvR5PC93dhPEdzpHhC90rwbpElw0Xh+30zwVql/rk2k+GtC8XTa167/AME+/hN8XP8AgqB+01rn/BYX/goFZxv4h0jXLjwn+xp+zHcC6ufBv7OngvSVtdV0bxDJpuoxRLdeKY4tWhvtIvLuyt72bxFPqfj3U7W21ibwtZeFfEw2Hniq0aMGlKV3OT+GnTVuZpaXaWlurajpds9/GYungsPPEVFeMbKEVvUnL4dVdJPdt7K73SR+Uf7CPi7Sf2M/22rj4GR/DTxZ8CP2eP22vBnhPxp8Jfhz428R3fi6H4SftEeGPCWiN8Tfg3Z+NdUnlvdfjsNc1PWPDtjqetGLxTqFh/wqu01u2k1fUJbif6A/bt0+8/br/bi/Ze/4Jq+EbDW/GHgrQLy6/aN/aw8P+Ftbh8OXV14G8J6W2o+HvAt14rk3QeE77xNpz3ekW2qX9vd2ula18Rfh5r32S5ltIImxP+Cvfif4B+HP+CfE+oeNdd8QeGv2ldA/bU8T+NP2StZ8K6HeXurWvjDw4vgJ/Gy32vL9lsNA8Njw7dW9/dm41OO+l8SaZ4T1DTdL1h9Hlhi9T/4IvfEL4Vfs06T8eP2jv2ufFut+Mv29f2svGMGu3/w3+GXw/wDGnxo+Kth8JYNP07VfCVjpfgb4Q+GPF1/4c0vxHr2oao0u9NO8MxWHhvwvoxubdvC8iQfOrgvB1eMcLnVStSVBYRVq9JtKX1yMVSpVHpypujstJqpGjWSk5St9ZLxAx9DgPG8O06FWWIeO9hh66TlFYCc1XrUopPnaWI3fwOlOvQlypQb/AEj/AOCN3hX/AIJZftt/s9/EX4F/s4fsq+Dv+Ccn/BQr9nLxPfTeNvB0Guar4v8Ajv4N8ReFdZl07QPHWnfGDxb5fxM+Jnw4m1FbbS/FemXV9EPBfieRoRYac914K8S65+y/h690n9trwX4r/Yw/a/01Ph5+118IV/tHRfE2npDa3mq3OnwqNE+MXw0u0NpHe2t/CLafxRoli9vZ39nOL2zSwhkjj8Pfy1/8MVfFf9pTR/E//BRn9hb/AIWF+yx/wUM+AP7RPxAvPhzJ42tdM8HXvxk8HabpXhy9Pw7+IWgjWNU02xbVrfU9U0XTF8YLb2+p2l5qngn4haWnhq/0/UvDv2n4t/4LM/AD9tX/AIJ5/tBftGePrmX9jn/gq3/wTk+H+t614h+HQZ9H8UD4hafqdr4L0lPBVtq88Wp+J/hd40+JGt6PoOpaFfy3+ufC/wARa0um6+uqaNqNrqnjz6Gth6mH9nKSfs6qc6VRK90nZp7WktFKL6NNXVr/ACtDE0sS6kYte1ouMa1PZpuKkmlfWLT9yV904t3Tt5f/AMEpfE/xi/aK/wCCtv8AwUX/AG1/if4x0/4hax+zT4f0r/gnt8KviLawteaZqEfgrVf7M8beJ/Cd3cRRBr6+h8BLrM+tS26XN9B8V9TvRDFPqLyJ+237A/xh1nwPJ+19Y2FhbahqWs/tS/EDU5tU1O4nnCySyrHI0lvGY5bieSRHmeaS7AZny8bnJP47/wDBuD8Nrv4dfsDSz6yJ38UfEb4iah8VPFF1dl3vL3UfG2g6HfadcXskuZnuz4bi0KO6aYmQ3KTM+GYiv0o/Y8/5Dv7Uvb/jJHx5/wClbf5/zmvdwmDw86OAU6cZe29vUqXVnOUU3FSta8YbKO2+l2z5vG43FU8Rmbp1pQ9jHD0qVmmoRlKmpuKldKU7tylZO9rSVlb1P9iP4w+MPDPxD/bKv7B9NlOv/tPeNda1S2urIPbz3l1qF+8xQxyxTwRkuQqRzgKMdSM19G/8EztbGv8AxL/b81WWOGC51T9pvVdWntI5N4txfy69IAu4BzEZPNjjZgN3lsMkg18Pfsj/API6ftZc4/4yH8Xfj/pl6P5kV9Lf8Etblk+OX7a9nuwlx8V9auWXsWtNauIlJ7fKt5IP+Bcda58bhaKwVKpTpxhUbq80oqzlGEpOzs0nZRSTabVkkdWAxlZ4+vSqVJzpqOGUIyd1CVSFOLavqryldrZu7eup/IX+018QvGn/AARX/wCC5PiT9uD4BaNpnhf9m342ftE/E/4JfG/wPa2sln8PVj8Qa7DqHinT73Q9Hn02OytrdL3TPiv4PtbWa12eJ/B2uQWrxaOLmwf+wdf+CkXxeZVZfBvwzZXUMCtp4nKspAIIx4oIIKnIIyMdDzz/ADvf8F7vgzYfEz/gn1/wVY8czW0cupfs+/8ABQT4KfFPS7gKPOhXXfE/i34L6jEkn3hBNZfFd5p492x3tLd2DPFHj6D/AGNvH1z8Uv2S/wBm34hX8zT6l4s+CHwx1jV5nZmeTWp/CGkJrTM5OSTqsd3liTzgknqft/DTK8mzepmuDzTLsPjKlGnhcVhqlTn5owm5U68PdnG8b+xlFa2bn0kfz39JfibjLg/DcKZvwvxDj8ow+Mq5hl2Y0MM6LpVa0IYfFYGrarSqNVPZrGwqNP3oxpq3uu/7Qf8ADyD4wD/mSvhr05/0LxPwPf8A4qf6/rSH/gpD8X/+hL+GnT/nz8UdPp/wk/OOT3xz68/nnx7cDjBJ9ufYcYznIzgc4pTjrxwT69+uecAYPPXk9M8H9Y/1H4T/AOhJhP8Aytv/AODfJa+um9/5M/4jf4r/APRb5v8A+Wnl/wBQ3l/V2foT/wAPI/jB28F/DX/wB8Uf/NRRX54ZHqPzFFH+pHCn/Qjwf/lX/wCWGP8AxHHxZ/6LjOPuwn/zL5fn3Z/Rt/wzB+z5j/kkngvqP+Ycv8y/+ecUH9mD9nz/AKJJ4L7f8w0fX++T9fbn1x7wT146kfxdT7ehHH+RQccjC9j1A/MY7fyPviv5k/tvOf8Aob5n/wCF+K/+Wn+nf+pPBn/RI8Mf+GDKv/mQ8H/4Zg/Z7Gc/CTwWOR/zDR7Z/j6fX9BX8eFx4P8ADHgX/g8i0Hw94Q0PT/D2iQfsla9cQ6ZpsIgtI57r9lDxZLcSrHk4eWQl3Ofmav7hzjJwB1GefoM+oI59OeRkmv4nPGf/ACud6Nx/zaJq/HHH/GJniv8ADj+dY4jMcwxdNUsVj8biaSkpqniMVXrU1NJpSUKlSUVJJtKVrpNq+rO7L+HOHsprvE5XkOTZbiXTlSeIy/K8Dg67pTcZTpurh6FOo6cpQhKUHLlk4RbTcVb9e/2JkP8AwwV8E37L42+MS+2X8UFh+OE/n6CvHv8Agnz/AMm26Jxj/ip/FnA5/wCYma9s/Yijz/wT7+DkuPufEH4qR/8Af3xFqTenfyc9fWvE/wDgnz/ybbon/Y0eLP8A05mvfyt3p4NdqePXX/oIoP8AX+tTxM4Xv5h51stf/lrXXl2/4Iz/AIJ9f8m9Qf8AY7+L/wD0rt/51L+wJ/yQq7/7KN42/wDSqz/z+FRf8E+v+TeoP+x38X/+ldvUv7Av/JCrv/so3jXv/wBPVn9evOfy4rpw/wDzL93/ALLW/wDdfb/g/kcmL2zX/sNwvbtivP8Aq3fQ/Ef9hL/gjB8JfHn7Y3xh/bl+M0+meN/Adp8WdQ8QfCD4IzwzXOgW/wAUY47C/wDFXxA8eWdzEum6qtlr0rXnhXQo0ubSe9mbUdcZ002wsZv06/Z8/Zn+Dn7R/wAGtVs/izoHiDVG8JfG74l3fhnUvC3xF+JHwy17RLnVrPwrFfzWHiL4Y+LvB+uxtcJYWe5X1B41MCFEBL7vfv2Cv+SO+Jf+yteO/wD0LSvfr/LnpXqf/BNb4OeK/G/wc8f3+mz6NaWdp8dfiHpss19fyENdWtr4dacQiwtr5ZkUSoVmVvKkDZikZPmrlpRwuGhg51VCFOrTrzquaTU5zdBq973tpyrWyWi3OuvLGYuePp0XVqVKNTCQpRp3Tp04qsny8rSje/vS3d3d20P5bP2vvDfwF/ZG/wCCfHx68dfELQf2j/2k/D/jj40ePPgt4K+CPxE+Knjr4hfAPw7431nw/plx4Y+K/jfV/E8useKfBWv+DDBcXPhvxP4f8aaL4r8Qautn4atr6O31DUdU079C/wDghH+yh+034e/4Jx+LPht4V/aA1DSPin4o+IX9u+ENas/CXhX4s6B8HdCj0X4fahe6R4V0vWJbOLxcuq6TdXFlcSX2uXvhnSby8hvvC2leRaXlz4g9X+Nnw38E+JP+CSP/AAUn074jXGk3GgxR/tY3wt7pd7ad4j+G3w+8O+MfBviGNZQFa4s/GthoV7okYCz3GqadHDCrSEIPev8Ag1Ti13xJ+wB8N/EeuSzmbQLzxHp4eZnLy2MOneHvD3hqJ2bkwf8ACO2EEsTHIC28AGVORx+0jQlRrxcXFYGs4pc0HzqfJKLlGUZ2lJpQacVF3cFe7ffKm8RHEYaUZKTzLDRm7xn+7lTjUjJQmpU1yRUpTTi3JJKV1aK8U/Yb/ZC+MWoN8QvGXjz9tf8Aat1KHwR+014m/tPwf4Rtfgz8Mvh14r1rw8fC2pX0PjDTvDnwhfxJdWmtEx2Ot6DbeM7K0k0zFrHHCZZJpfiz47f8EZfgn/wUmsfir47/ALTHwt+O/g/9prxDpknxD0+1kntPGXw6g1PRLjXPBnjDTYGT7ZcQ6dLqE3hHxFEDqWiao0NvctfaLJJYxf0z/wDBNHx74O8I+AP2qY/EerWtvLP+2X8ZZ4rAI93eXMD6R4JjR0tII5ZGhmkjkiWWRUt2ZHVpAEfHyr+yFqOmX8P7S0ljpwhe5/as+K2ow3hkeNjpl4mimx082KZtofsrJNKZEZmY3HlcJCu7TCNYp0KFbCTdK1X95KUpRqNxTvFzaacHpenKTV1e2pnj08EsVicPjYe2f1e9KEYRnSXOlaag2pKom9KkIppPV6I4z9hXRtL8OWX7QPh/Q7GDTNF0L48eK9G0jTbVBFa2Gl6ZBaWWn2VvGOI4LS0gighReFjjVR0rZ/Y8/wCQ7+1L/wBnI+Pf/Stv8/5zUP7F/wDx9/tLcf8ANxfjn/0OHn9am/Y8/wCQ7+1L/wBnI+PP/Stq9Ghp/Z9lb/ee3Z/h+PY8rFavMm76xwV79daPmtdXu1vvuH7I3/I6ftZf9nD+Lf8A0sva+gf+CX8mz9of9sIdBJ8U/FSHgHIGqmX0PeMdO469q+fv2Rv+R0/ay/7OH8W/+ll7Xu3/AATHfb+0b+1iOnmfF/xanUc/v75/f+5254znFcuKV8DTX93GP7oVn+h24N2zGv5yy5ffPDr9fubPyw/4KzxpJ/wTC/4ODVkRXC/HP4MyANyBJF+0r4DkjYZ5yjqrqeoZQa/U/wD4Ibfs8/BXxH/wSN/YC13xD8OPDGta1q37PPhW+1DVNQsBLeXc89xqLl5pA67zGpWJDgYiRASSCT+Wf/BWP/lGH/wcHdP+S4fBzv8A9XJeBenc/wCAzX7Y/wDBB7/lDx/wTx6f8m2+Du+D/rL/APLjrjnHPYA/P0cXisJNzwmJxGFnKnCMp4etUoylHli+WUqcotxuk7NtXSdro+gx+U5Vm9GlRzbLMvzSjSmqtKlmOCw2NpU6vK4e0hTxNOrCFTklKPPFKXLJxvZtH3p/wy/+z5x/xaPwZ0yf+JYP/jnP+eRxk/4Zf/Z8GD/wqPwXj200fp8/PHP0B617vxxwp7DkflkjPcfr7ZXjjgfUH8OvX0yev44z0/21nP8A0Nsz/wDC/Ff/AC08n/Ungz/okeGP/DBlX/zIeD/8Mu/s9nr8IvBee/8AxKx/8cor3kZwMAY7cn/Cij+2s5/6G+Z/+HDF/wDy7y/q7D/Ungz/AKJHhj/wwZV/8yCdv+Bfybj8qCBzx/Ev/stFFeYfTh6/7y/+y1/E1414/wCDzrR8cf8AGImr9OP+bTPFdFFAH7J/sQAf8O7fhIcc/wDCzPiMM98f254g4rwf/gnx/wAm26J/2NHi3/05miivqsp+HDf4cf8A+ncKfG518WN/6+Zb/wCmMSN/4J9/8m9w/wDY8eMP/Su3qT9gT/khV5/2Ubxr/wClVpRRXZhv+Zf/ANgtb/3XOPGf8zf/ALDsN/7th+wV/wAkc8S/9la8d/8AoWl19Vf8Ebry7Pwy+Kdgbq5NjH8YfG9zHZmeU2iXElv4eSSdLct5KzSIiK8qoHdURWYhQAUV5WY/7lg/+vVX/wBKonr5X/yMsw/6+0f/AEmsfz4f8FEru6s/+CNf/BS+W0ubi1lb4yX9q0lvNJBI1rffGD4L2d7bM8TKxgvLO4ntbqEkx3FtPNBMrxSOjfYv/BC9msf+CeHwnt7JjZ28+j+CzNBak28Mxk+EHwwmkMsUWxJC8skkrl1JaSR3bLMxJRWuG/3jA/8AYvf/AKkMxxn+6Zj/ANjOX/qPRPpr9hn/AJFj44f9nGfET/03+F6d+xL/AMeH7Qf/AGcZ4/8A/RWlUUV6FD/mB/w4n9DzcXvmnrg/ziN/Yu/4+/2l/wDs4zx1/wCjIam/Y7/5Dv7U3/ZyXj3/ANLHoop0t8D64n8pBit8z/wYL86R8MeOfFvivwhp37QN94S8TeIfC97dfteeLbS5vPDutalol1cWv2DxJL9mnuNNubaWW382KOXyZHaPzI0fbuRSPnzwz8Xviz4K1LVdY8G/FD4ieEtX128m1HXNV8M+NfEug6lrOoXG/wC0X2q32lanaXWo3k/mP511eSzTy733u245KK8LFfwv/B//ALcfSYT436YT/wBwnLeLPE3iPx74Y+IXgjx14g1vxp4L+Ld9Z6n8VvCPizVb/wAR+GPibqWnalBrOn6h8QtA1i4vNK8aX1jq9ra6rZ3fiS01K4tdStoL6CRLqGOVe28B/HD41fCzwd4d+Hfwx+L/AMUfhx8P/CGmw6N4T8DeA/iB4s8IeDvC+j2xY2+k+HfDPh/VtO0XRdNgLsYbHTbG2tYizbIl3HJRXjPf5R/JHtnW/wDDV/7Uv/Rynx+/8PH8RP8A5o6P+Gr/ANqX/o5T4/f+Hj+In/zR0UUgD/hq/wDal/6OU+P3/h4/iJ/80dFFFAH/2Q==", "company_abbr": "FRP", "company_name": "Frappe", "company_tagline": "Open Source ERP", "country": "India", "currency": "INR", "customer_1": "RIGPL", "customer_2": "Mahesh Engg", "customer_contact_1": "Aditya Duggal", "customer_contact_2": "Mahesh Malani", "first_name": "Rushabh", "fy_start": "1st Apr", "item_1": "Enterprise Plan", "item_2": "Small Business", "item_3": "Solo", "item_4": "Manual", "item_buy_1": "Server Hosting", "item_buy_2": "Adwords", "item_buy_group_1": "Services", "item_buy_group_2": "Services", "item_buy_group_3": "Raw Material", "item_buy_group_4": "Raw Material", "item_buy_group_5": "Raw Material", "item_buy_uom_1": "Unit", "item_buy_uom_2": "Unit", "item_buy_uom_3": "Unit", "item_buy_uom_4": "Unit", "item_buy_uom_5": "Unit", "item_group_1": "Services", "item_group_2": "Services", "item_group_3": "Services", "item_group_4": "Products", "item_group_5": "Products", "item_img_1": "logo-2013-color-small.png,data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAMgAAADICAYAAACtWK6eAAAEJGlDQ1BJQ0MgUHJvZmlsZQAAOBGFVd9v21QUPolvUqQWPyBYR4eKxa9VU1u5GxqtxgZJk6XtShal6dgqJOQ6N4mpGwfb6baqT3uBNwb8AUDZAw9IPCENBmJ72fbAtElThyqqSUh76MQPISbtBVXhu3ZiJ1PEXPX6yznfOec7517bRD1fabWaGVWIlquunc8klZOnFpSeTYrSs9RLA9Sr6U4tkcvNEi7BFffO6+EdigjL7ZHu/k72I796i9zRiSJPwG4VHX0Z+AxRzNRrtksUvwf7+Gm3BtzzHPDTNgQCqwKXfZwSeNHHJz1OIT8JjtAq6xWtCLwGPLzYZi+3YV8DGMiT4VVuG7oiZpGzrZJhcs/hL49xtzH/Dy6bdfTsXYNY+5yluWO4D4neK/ZUvok/17X0HPBLsF+vuUlhfwX4j/rSfAJ4H1H0qZJ9dN7nR19frRTeBt4Fe9FwpwtN+2p1MXscGLHR9SXrmMgjONd1ZxKzpBeA71b4tNhj6JGoyFNp4GHgwUp9qplfmnFW5oTdy7NamcwCI49kv6fN5IAHgD+0rbyoBc3SOjczohbyS1drbq6pQdqumllRC/0ymTtej8gpbbuVwpQfyw66dqEZyxZKxtHpJn+tZnpnEdrYBbueF9qQn93S7HQGGHnYP7w6L+YGHNtd1FJitqPAR+hERCNOFi1i1alKO6RQnjKUxL1GNjwlMsiEhcPLYTEiT9ISbN15OY/jx4SMshe9LaJRpTvHr3C/ybFYP1PZAfwfYrPsMBtnE6SwN9ib7AhLwTrBDgUKcm06FSrTfSj187xPdVQWOk5Q8vxAfSiIUc7Z7xr6zY/+hpqwSyv0I0/QMTRb7RMgBxNodTfSPqdraz/sDjzKBrv4zu2+a2t0/HHzjd2Lbcc2sG7GtsL42K+xLfxtUgI7YHqKlqHK8HbCCXgjHT1cAdMlDetv4FnQ2lLasaOl6vmB0CMmwT/IPszSueHQqv6i/qluqF+oF9TfO2qEGTumJH0qfSv9KH0nfS/9TIp0Wboi/SRdlb6RLgU5u++9nyXYe69fYRPdil1o1WufNSdTTsp75BfllPy8/LI8G7AUuV8ek6fkvfDsCfbNDP0dvRh0CrNqTbV7LfEEGDQPJQadBtfGVMWEq3QWWdufk6ZSNsjG2PQjp3ZcnOWWing6noonSInvi0/Ex+IzAreevPhe+CawpgP1/pMTMDo64G0sTCXIM+KdOnFWRfQKdJvQzV1+Bt8OokmrdtY2yhVX2a+qrykJfMq4Ml3VR4cVzTQVz+UoNne4vcKLoyS+gyKO6EHe+75Fdt0Mbe5bRIf/wjvrVmhbqBN97RD1vxrahvBOfOYzoosH9bq94uejSOQGkVM6sN/7HelL4t10t9F4gPdVzydEOx83Gv+uNxo7XyL/FtFl8z9ZAHF4bBsrEwAAAAlwSFlzAAAZxQAAGcUB/Hz7SgAAJcZJREFUeAHtXQmsHVd5njMzd3m7n5c4jQOJTUiIbRwggCJKwG4hoJZNVNdFqKUKSEArVKVqGrWU8PwUQCgEFQmQSKUSKUiI+qGItYIINRa0AaUssbEdEnAWhSTEjp+f33qXmTn9vjNzX952Z+4699z7zrHn3XtnOef/v///zn9m5ixCSmmZFI+AsCwBlBRQr/7ZzVf6QfBeaYs3YOdu7B7SFUIBwSHfghT2EyKQDzq2fd+vbrjnSe5dqRN/m7QxAsIQZGNgqnuFJcCDkAL7H7z5fbYV3Aam7IKHlUGZIs7zq+dq+ulA1jxkzYIUzwSWfefJN9zzDcq6UjdNZe+6WHbXJdBdgMkjqh5+5U//5oO2kJ8HWbZix1nUzLPwME938UMZxSxlpuwg+Oepi5I70k17HboooIkgMeAfOnbIfeDgA951P/vga6T0vy6lyAhLLuKSLByvZ4KvampJSGxZZXwMIiZWhHDef/yGr/6yqmMMDJv6kLuptY9RXjU/DkoVIUCOm8GGLXC0c2yqqPsRdUcSk4FGh6J7JAmGZEGOReiyAy3DmyHiL1kBmKZWbWOZJlYNbApTBYXNK/73Q5fBwV4rhFhC7WszctS4RP/djHrQIdTFuv410I1CV3XVX4H0JTQEqYV5ITyQsSp0onF4FqIJKuHepQfEp04SzWroIq2tlVA3MCTU1fxdj4AhyHpMwj1T4QdixhCcCvceVgCnYju+txN0ULoIyw11gzqRrr2tWGekNwRJwFUKp/dJsZGOeOLQt7ptpG+T+wxBkoDz9H+Sm6RCzeN9rFpNnRs8YAiSCJh50JcIUR+fYAjSx8Y1qrWOgCFI6xiaHPoYAUOQPjauUa11BAxBWsfQ5NDHCBiC9LFxjWqtI2AI0jqGJoc+RsAQpI+Na1RrHQFDkNYxNDn0MQKGIH1sXKNa6wgYgrSOocmhjxEwBOlj4xrVWkfAEKR1DE0OfYyAIUgfG9eo1joChiCtY2hy6GMEDEH62LhGtdYRMARpHUOTQx8jYAjSx8Y1qrWOgCFI6xiaHPoYAUOQPjauUa11BAxBWsfQ5NDHCBiC9LFxjWqtI2AIUgNDM5daDWA22e7emdMGE8qmOe3nkR0H1YRxLhDyghVegb1cTScuJRyOu7SlY43OcCdFoC45t+McwOWEiykllIQJtLsFU0NK9kAEwQSZU8KxjmDaT2VCAtv57dTBHRsbcOO9q0Cnq6lt1d7O/6BocdtaCYRjK212QFcuEpTGpmx45IgQU4cxZeWk9v6ncQSBi01ZtlWQ/vLkyhNrTdy530eto2rlqIeFW9xvBRJBhLPaBvCi+mZ45zTqmCianFZe2AFRl/PmIlj1xFeeJTjHMFdCsKWdVStkWVVdOyDiuiyjyLEMCYmC6eUDknPdyRrs0HQBHeVXgEfK5++/dCjjVG6Eax6AWbcHgcyg2qGbdjT50rHHrPPF+zOv2/PJ7O6b8pi8mqsHwLfgXDVsCUcNLBH4ll32LHepIuwyAMZyA9SkQ+LaQKYSZOVSZSAoB1lLBlgIS9XTGxbIMIHWKpeVs+WSf78seY/j9LwSfcMr2rcTELAlVwF8LwDME0Oy+JPjf/3DBcpDSHUkiYYR5EV3euH+re/MWPJWeN0BGpEgRljW9NF2mZO1s4fytgQLtmOVM1w1BxGBItRO0UF4IIlUKVm5i4tW9gJk5y4uO9C+pHxcCu/C0rZgtjwmvQBQMTI0ksS7VURJ0quRLJPOjUCwA6u4ZOdPvPxr77wLYH2Xl5E9upFEswhCcjBJeeFHWz+G9synAJoDv5rFPp8VcXi8838DyxGDwfng55n9A/+Ye/lWrltGgvB+PdELwzAD0aVdtjKzc9bg82AII1BbSKICQIC8zy3sDObLoxaiSFgHw7/qT0L6chrOuQTuotnYVvpuIMUq1ICN5eCkUUDqQ/ZPPPpX3/6SiiTYCZka0WODstq3S68IEt1zMHLAH0EOqwK4ZoBmBm6pZAV0q5BuHxSrc1IWCtvrLFf5H32o3sJJCFbMWVEZHbKK3pw1cJZNH17fivXV9WCaP43IAXJgdQbcK5EYjB71RxCqAn1cXJKJuL8agLb/irRGYcxaCSzleXxiGW3xKUSSpxhJ1D2JRisHd7gl3wjKqMVwQ857DjSib2XkwNULAC8HEFX9FtUrBLjjGxnBQsAIsiKhbcUT16SouYOL/Zwsj2WlN4hqvq4AtCan5Z8kGO5oAtxvDPrzpbGQHDxcPzGqmYWMV8Sigox56WyUVoms6pocWL2Ab44Q9q3Xfe1tQ7JwFC0FfZ5u6UOQKQWYxRtyAIh7DjarEDmqzsmqM80tKo62bClBEVtYds6qDKt8oA/VaCbhQuVb/nxlGM/29LFdM8rgmoiYaB1Ys6DnKxdE/kaV1dSpZiFqUpLal+kBMu+8T4fG59MqMAEPjeACYf0dEqO2DnofQc3MsOFIPwcnsKMo2LzMuPeQZT8XVidhc6X5zLp/JaMiLO+D+QN4KwPbI53eh4CmWqNdF1APgrBePBISBLXkdoWKqiu7jk/bBGA7H2DjZli5RHP58tqAD9PwvKC/Umjtqu2PHGm8SdshPPQBOnpywfcc1FXdA7B26ZdEXVrUpno5b7D7BRaQHbpEtaEMba/TUyx9CBJZHAL1WeyIFIvuH9rh2OoGux0ZaZbHi0zRRzDtCKLbredKtq78ro8JjSSdREA/gnRS2wbzJiHwogBvtMImcf+0a0IgqoTvN70aNHPs6YYgG8Gz/KjJtkascjCC3x68qK8cCS/10f7HA7a+0moja7a0zxCkJnzsAzGI3pEX/d1B2V/A24zEvlg189LtAB+hSg4GUW9TqpFENyl1kMcQZCMrwH94T122cmIsOBe8yb9Q5FtL1rVNA6bTkyfIgodHRQRGdocxIWQjH4j2NW3vmDz745BqZjGKDFt/4p1ZenuwUP6tyNjoGyHZB4ZeVc9GMAiyevLEqpr5RnmHD+zUTh5I3HgnFHaXxKnNJNWsQiC0ZFkEwRJf8ZsUj4BenRXjZU33KKMImuhlkROjwWzw98XfzJZy14790B3KXGH5GIBR/5AUjttjewaOGaDjLL8255l4UajeMJNLzSVwW1ag1iw6vCg5ms+qOQF67SpDkDiLKZL4VlEMi8uD57zbS+WZA8FVQz90x3PPWA5GQ9X1cgM9scAIabuuEGMgC0bPNUcQsIqvUxmQ3IhhzDlOgxePqeaUXMT1C2hY+bgMd+kmJSFgCJKEUBRJSJJtcsb/YOl/Zt9Weal7xh3PnBd5p5TQTkFbP7CtIF8U7rPfzL70nictZ3GrsDIgV2AjniQVr45zjCD7cdlWRXpyUPrBzYhFl+FYEUdImJqJHRxRiA9iVEAML2zv1cuqmtlumgOGIPWYGh7G5laJI1PRj5LR5MrKkxXWwAgHSU7OgYnsu//kx94s76inuKRzXn7vuw6Bt9tQ+EUQEDaMiQUgiDqKxhmjRtypSeVuxuOGIPVaXZGErs5qexgV90jYwIqjR0ge1vC82n3uRy8b2/OWMxenpvZnDxcKXgHduuuaf6uwT+6bOuWeLBwtv2zq8BgGT+D9JQIDBlGwEyQfStVSgwWrY/hb86RaF5v9eFFsUv0IhK4Gd2QTngl/o33rMqE3quaNqsD5S44uzrLnOyZpOYXu3SeD5dlauDMhCeuwmmUlM1cKODUDTo+22uRgljzJpOYRMARpBjvEjnpSfWfVk5M5p1sIxN7gdUsoU65BQBcEDEF0sYSRQ0sEDEG0NIsRShcEDEF0sYSRQ0sEDEG0NIsRShcEDEF0sYSRQ0sEDEG0NIsRShcEDEF0sYSRQ0sEDEG0NIsRShcEDEF0sYSRQ0sE+oMgpsORls7VD0Jt0BcLHY24DEGa6bQljqFn38EJjHbgNAIoHV38wq5M+JvUp6nOURVparTpy1KjxJTlkmovWDfsmUybK787PGU5B08/IA7vTbev5VRBDWZbJfBqgkyiGzWcFL1MVc/RNK18sFqYK4oY2ENW8K9aGGaZLNVz1nySQNAq5BHVS2LUmuvNz3YhwFqNI/ZhD8wJYWG1NZhQ/Y75w+7OAQYTY9DMfJHnHVX+dzDmks4dOjR5zH1g4qBXLSEiCKLGJHQjOZCm79x2uWuLXRgjNAxOi+Wzq1e1+RMzumPBVQxJKgVFcUBeZeWwwqS0BtBb3F1F5zXlckAGwYUlfExF4IMlHBKEGTtwoiHJGrQ6/1Nwmj1FjEVY5BLb8nY6IhhEVIhpkCB6wGwYjYyBl/62PddNPnlD4MznpY2FpwIuLtbZhPXLMDuYWKh4mWd/M3HNsySHmITAE8q3pDs5KeyJCVABO87fuf31GIDzEWHL14MU2/BdTSTNSQc66XBqng16N4cBzYthmcHCTL61XQ0JUsFhPUgQSQUO/MGFaJhhzTssLgPLqKgTP0hifXZmT4sIsEkl7Tkp/C22WLhpwF7anRf+CEYbc+qU2rUVxhKjniNHApBp4CbfnX8N4w9JRep0OoEcKER6bqYyvf/Tv/5FINx75MS1v2S5FNxV5MCPC3du/ahti0+i9h2Hyy1hF+IjgkdVxuonr2xzUvcQYf6sT9Q3/mWEqFUUhK8e41kZUDyLWJJHJTYDyNVEiNUTauVh9rcHgZAc81i2dLfrTL99VJR2ZjF5C0zI+SlohdqWUHaM7AyycKYVNljwkVJS/OCIaPlHKLFgS/+t+z5z8s5TH9//VQqvYt/M58Y/AG+8C/V3FrKehXxYmhcTBKjmi9KSmnZuC5FUiAIZ1WiNmk9hnIPkhHjdhh0kEjYe4nWMPFuwhw1h7jOpwwgocoglTAmx03HOv2NMlC7NWk4JjWa4D5dPXG+11ftUlIjMpypHdREntFP3oDi5s59hOViaWizC986Bmhlw4I79nzn1PkLnXvzClj1Y8v52eFkZB+dRheeqNTc+000oD86uSuWnKj9OhhXHCDF+cvaOLGd7A0Uupiv85ixN1U1oaDhzbx4S5e0Zyy2iuduA50SGU9ZTtleVNkxJ46ZWxylPQmkuOLoIMVDBBrdd+9mHf2YHFZtMeQm2uZXk6DVzKzhJKtY4bGpxBVcgXNW81/TpBXkZPSx7EcFij2sXr8SjlUobpjKt3nikRg66SbSxfkYrxJJz2LHL8Z332mivvwl+VQJjMUNGepTtiAOADRGsnGc6u6yPYklHStzkmRLYsrRLV2REkANb0BoKq6QexoXtPDw24tTMlngDv+zBHj5/jnkW10P6giFR1AgfyPeQ6L0pKh6r+CPAuj/cJ6piuZYkXuTI3Ywaw/AoNRVlbxqoptQmbtSEpl0HGK/5kDaaCbXaOmpX9l3Lh2FQYr12MdTpVxxdU9EUbBBoHQH0fTHVbOswmhz6F4F+aTj2r4U2k2bouqGbuloRhM/Y0GlEO5B0M1rfyoOZ8HXTTSuCEBw8cGY3ETxpi54n6IaYkaf9COAFHDJFxeiWdaseV3d3b7/qDeXIt3xyTpTRycVHnypH9c9tkcIqHJmY1JAdUj0ZzSo8TsW9sFMRgbuER2IkizZJK4KAEgJvZDzrAnrl7pSjeO/EFZXUi426UVtxIq7lq092WekKRVT54WsZNq5XSNYO+7M6UXEWX7rUdmdH3FYDPaIHOGKLIH/Rkhm8nEO3d42SXgQhMHxFc1YsyJyVsbbIAXRdYydG1QdA4ZZgkogMPIuvddkzlIB3hSAolx7EBXQ8vJv1to227sjuSE5WiiX0AsToS/S2Vm3RbumH2h62UZ1YFcK8yVacgeZJic0qWgkdS/EWftbyBy/oRg5Kpx1BODaEkcP+vZhBQ8uXY3IQkSXsBkOiUOqYhOMqauBvDiejZ68yQ9JlMTm2dIiM8OAzY3CkkRcyUmxvKTtkNldCf1NrBJ44RkWRP23YHf1IBwwewhCcHASArgHeqMPrY6RZ5g/JwZtyOTgt/cHzYRTUq3lFU2lHEArFKMK6XzwtZsUMBkKNWXn8czHOkERJThLnZqw/oMvi4zBB1xyITsPIQXKA94+UF5zwKU0hzoVqqIdVpngklykGRSv3CCMH/rFTnYsvMS5ZI7/27EYMqWDMUP5KBIJdGF9Hq9XMGWIi4oBHqot5pmQF2Xkhc4tkSrjVvLRrB8T057Y+27XS6ygYRFHGx2hDNWw5jiA8EUjTCbfg7C+M/8P0vz75rfEtw7ab+hj7qmrb0axi5PBBjp03PU9naMmZUWWLA/feNFiq5G02t1REqRaW8mfFu9Y+8+HPXtz/2V/cbvnZv4NuGGLA7uq1SRKKyMe5dlhZaHbPsRZCPSPICilFJqwfMWKw6vwrjq7+GpmFwLOOgjNa1pXvuTCz+qz0f7XarFopMZ5akGALK/d1+7v0BhbR0uIjleoIqQSRGDECdHPiXT5bofom7QlSbTyoe5OEqimKIKq/Moa+qPH0p6dEdi8GUnXdBKpZRZq3njDDhm1hAdDWc2oth32n9zknJyawonUlw3tt5Iat/vsI3clBdPQnSCM2hPtVzYPqSTnjqSnL33tU1WyN5KT1udKaaGgB0E4pg9k/otrfJfKdKqar+fKZUV+ltdVqodBX6umlzKmpEG4114JeorVLmr4jSLuAMfkYBIiAIYjxA4NADAKGIDHgmEMGAUMQ4wMGgRgEDEFiwDGHDAKGIMYHDAIxCBiCxIBjDhkEDEGMDxgEYhAwBIkBxxwyCPQXQaBNtatJ1bRTU9Vv5rPtCOwrhP1L0FOx7XlrkmHv9MWqs5sfu8Ozv2sQhC9B91n7nKnDazugpIt+AWvaWfum0IWqTZ0VJ6FbtZtHuqqsKu36t+xBlXQ9LMNhst3FeJVgbfyhP0GqxGAllTAwKDIRgwiGTAVcIM/ae/RkGb15+yrJCXbE1KKTmeolLYSDxUBUN3xGkuRoEs5ighNNd/fmHZPEYAPQ5ZBupABLN1TQ6Z3uX2sIwYumwYhEd5CXXXjra8fOLs1XacZdqaatGZeD7sSzc7ngwIkTi/CjF6VsQhIOmLrsw98dGPHOOm4eI2+7mJawLMiZuwsXsbjkoJRYZVJIrPinhnjUDic4SXVzFw5XkwKdfFpZ26TniEK4M6DGeFUpvemBrPfcYN6/6LiyZHM2+thE90OT2A0Wnaf9i5nH+V1FlNir2n8wEpPu4GGU6Qi855GLRfm3B44fX1ADcbF0ZCOlcmFJRo5dH/neYC6z9G+49mroNY+8HDVtQiOZtetcTNYjg8CzM/mrhZO5ErZB1F57F7imMBgEHMdSgNmykx+et7ODWO4P10RRZc3ZXf+pXxOLbsN6CFMdFB/dOlp+LD8YlKK1S1jlcEsgCc7hUp6XIt6Mh1fUCjk42tEEaiCWofQxDLezLhsphbVloUAtGkvqnqNgMXKUMyMkx3XIgKtocUw6UuNZ8qpWEywTSL8yBqKgIlLDbRMF4aB0q7w46BdnR5386Kw7sm0ag/c5IjHx2lblbfR6vQhCcvBeAysALz68dUv5sYEBOx8E9kC1tk1iBtyEYYe5eFYJa7vMwG+QY2vNmkZBXXE+DK4iCAfUz0k0t3hsCv+avYNgswrV9DyyATnELGqLiCDJ2KyQq01foZ7EaE3HxrJ99hAybSAqhmTwFi6MW4HnZMYuPdsmodqajV4EoWq2L0tntg0rcgzhWRTTMuzJFQzchPGDlOCTFejHWfu6E0F4E0oPQsKNiHSncS+yg79aTZiGMNINC2dzVGh39INuLBiNJkQOZZrGI4BwHN9bmhu13Fw5M7x9Wrd7En0IQhK4iNYLebf824FBkWWtj//hrVxTLoXbRdqvi+GDZbN43pi2UQxitcwJfsH/riSWvVx7NSkBsLHtwF+6OOoOjMzjXgYrzjZOtCYLT7xMrycIaIb65/PZYNZ2hAuHaoEcKzVn5Zb2trb8lb/N95UIgAxoFlu+lwnKiwOqRbzycJe/60UQVEbBvEtqdKtV1GVzbNbiw3AYeJVMt2JhLeT1IYiSBE9xK2bRq1rG6vv9km/k29RsaBNY+hBkpUK6VSMrZTPfNxUC/dvLbFOZ0SjbKQRUe8ZU2J2C1+Tb2wjwkYHESyeJ5+rq9WZvq7NGesP7NYC0/2f4OBZA9xnWfGgg0NlJLvAFz+N4X5AHeK0+0G4//i3kiN4+y/Px9pn1WkClM5fCnTAHfxvf83RGzDpzVW882e0lj0epTyCCyB/jJVYOlQBXclJH68xJz9OUBlyYBWsd9oM+eqIcSaVqWrpOETUu1yfuff+B44APATTJQrEHbTcTfAPaPo1tBO9rSj1NEr5wYj8sXxQxXawHJUO6ROY0H+1GQDECT0JlCV1EFgA+e1s3H7DVG/RucIxlht0u8K0MrUaw4xnfydxnj90y8zjCyR04IQvVhnEaa142TxhRGDhT21AmO/cogNnpEL/rTuHJEvdSogyCzHcD5rqF7asTQ8eSgY81BrEaGHsX04aRHRtUlWZkU5+faW4skwNTyogfg+jGg1Vp7Dsf+edXPKneg2z5pwv3gga34pU/2XMJZBsCMTJwMj7lSm2DkBgswC7TkKDaHyfJ03mcnRX4wCGA/BV7Bu+a/Gr0YF4mdRIB+jFrMwxn8yvPgy5YyhljVNihsu4mF6we2pvNG3TEVEPl6Aed3+A3KAeEkIMQdztkxuhIcfvJj+9jy8qyJyfDpWnGb5v+ShCI92Df16HgM1AQUQSDjdALtePbinJQPkDiX1WFMIywp9+qTR1j8yk8kdhiZXUxC3JMs2mFDBoLPyzPpBYQIBPoR2iiB95ziCbnYZQSMgwf/PBw3EZDkhrsGKx6YKt1a9iRtrObUNEOZUgfAeEP+PymI5z3n/r4/q8SDErlTqiJBCDdpCW2TciHsP+h6Tu3Xe7aYhccb9iL3JAXdCqh+hAOWGjnikXvXO4W4PROWUYkkBjrsEFincWET1RcGLCDJiHE9MEKSqvIobgTnmb+poKAalMxasDZPAyAQoVFZ1eRhHVe1WprhaEbojKWwZgsL/6XLZwv43ERHhqlkFj7YnAeKtUFz6s8+5uJV6v1Ojl605qA90HoyAHxHTusSSg0Ib2tt53/PcTjlno69++vege6ugdYPHkJvq5WKIR0sf6uDjJURmaIPTl1jTZVgWQBAzjDgY/u2NESrPEYoEbjBBt59Ob97Yl/ecVP48/u7NFDk8dcOXEQq/WGaXUNDXIo/aZUu6x6Tuc/T1viGEo5yPKzfP4M6iKgAG3c/6B1VafHK+t0XlpTQiwCtAITbwvpR0mtXb5sxGN51YgWfB9nHZ6ynHOnj4kde88hs2bHXjKnxtJUAQ+mVpCDV68miMoP9XVBPcVqLPdWzgaWBxHpmAXu0gkWvy7/ieOHOgkn85y485ihSWkiUDVf1UK1yqaxw3PAEXXPchT+B49Qo81qXZXW/g0IklbR7SnHkKI9OJpcNkaAj7hMMggYBGogYAhSAxiz2yBABAxBjB8YBGIQMASJAcccMggYghgfMAjEIGAIEgOOOWQQMAQxPmAQiEHAECQGHHPIIGAIYnzAIBCDgCFIDDjmkEGg57uadMOE1d5FvdDNpSorceoFebthz7gyDUHi0KlxrB5H4zkcrIIP9MYLPzPFYRWx953GwqL1dlGOZDhSOMKRdv5e74Jzwh1X3fuYWTjDd0yPWQx2CGWxLKxgt5IvUc7mIw4BQ5A4dGodg8ejOk50NnbUR7d99FDlh+Xt/vnPuSKUtfdkUwuLqmmMjt794Yvilu9h/IRaO4WjPhP4GpJiFEMud/rhxMccfJFwUS3NN91+Q5A6TE4mKIciMZDg7ZyBPouf9Yy75ozMuVIQ7Pn+NVd9uiy9RQzrzGDJsmih6joEAMWk7dqOv1Ap57cNfPKlf3xtyc5uwSD8ASyuAdFUL/ENMwrsrJzJjXi/G9u19OOhS0rjGL05HnjCkGRDuNbtNARZB8nqHSvJAWI4fiCHQRMO7KnrAUc0zEVgHOpLMNzhFs5KgcGcGAeGHBJjUCQLGklY6hB0zEu3PC/2nvlODhN/sfzh6IyYDy7X6Fo35Hd4N156/fwPrnjj9Kn8aGWXV7HLkEERP+bqzX7IECTGA9aQwwU5tuB0TijGKrsu91YjIkMycRjnDNpmuCmAx0fjiOvKBEVhDUBcwouEXcmMb/VtB7POqOZbrI+rliAuy3jzzt4z/zm+Y+bMwH37//K5h4Z3lnZVik6Zq87GYLDZD9VVC25WkJTjqPpe2iQHnDkL11b3AvViAp9e6X/AmzU/1/SjZ8Lr69wgBtpSKmrweojBgZe8yeFn7Y1EZGJTa2lwj7d9+nj+3ae/tfPK0rw762SCDI7VR1KVzab7YwiSZHLW9ZJzJoEcqLHXOHzS1WuOr+TKmkP1/Fx3OV27vo0ksoOKKA1c7l3ywkODb3vuV2PP2ZifGWldtvXIsknOMQTZwNDKa7if0QMt/wD3HHCi5d0bXFLnrjZkUWdJG5/G8vFg2B6Su5//9fD+0oI7B5Kwnd1tyTaWt/t7DUGSbIClIdCewTuIqK2SdL7ux/FeJHBzcmjxD5krijOZcyAIoosJIjXsZgiyATBrvIUz4K/ZtcFFPbJL3bRjCWHXXxLDlUWnOgFU3yjYZjsYgiQAygeyaH70mf/wbsrDU4JqwyrmTXwCPv1+2BAkxsLL7mOa6DEo9fch7QgSoFeGbpC3L4Jop5pWULcP5/appQ9B1EyVFu6IOf28Srg37p/QH+oUadai/fjkucUsNLs8rDhQNapbIvUiVRMJ9SFItbtTYL+gCTbtEwP2R9eQALM5s5Nj02EErOC1DLF9RhBqReXkeQX6kSNQsWmY2mc35KQHQfCYyCoUQkREcAJgFdHSYkdA1d7qA28QJT/w0B+Lr9Bb8G7GVIkXM5g1vV8S4VCP0UURyp1Sap3aSzW1MLseBCEq+6YUILMl6ycA7AS4MYod6HTKbhVhlckT0tooEgpWMvF7M4kmtpEJXr8HMxUPC122J2UCv0QP0qWWbUErmrYClLEmoDxVLA0+qPLaV2gJ9xbkWXepPgThQj6HDzsHjh9fgJR3wTu5jBqWghNYIDKMJGGIUS0MfO3sZ7VIltlMq6ha/4EgNsixOOd5ZbxxbPmmCtWFcAO/nPX9pahH7zqj1r+DGMIFlIKhpiHpUvhOKARsa0msCYjZ3KX1xWfufseiODzlyImY/vv1K9eWM/XqzTs1hV53QuyW8rtPve5Vn8D73U8BwG0wGlYr4tolKqVVu6CSRuWPZbqq4Z7+jUTviU08AcTASA0hZrzK0rPF0hz2LUfC2IvrOAgJRN4vzwciZ5dtewA/satKyYQMACTOoGxUBJhKrsnHEScJF7br8DJ8bEJvVeSw5Kd/9+XCDwAXD6YoS7JOehGEzYYQJOuK/3v4S0+8/rqnAOCt2H0AqqjFVZbhTdat5TM4dAP/bce2o0gLJ1I2TMgaTliR0r9QKi8+Xy4vkFUkS/tcEJQAIQa90qzjZLyy7QwENvqM1EPeKskBtS/sMThpHlnZYHT7xIuFB8WwJIl7DkseBzW/SHKoSyBTdDQ2hzQP6kUQag4rV0nCSHLiuuv+ezRn3Yie3geEHWzHk6CMGnTUYZTwPgasEMVi4F+14Ht/BmaQL4wotTkK6/JGnCtZzlf88lLge1wiq73kCBUnSPS0vFdeyAq7WLHtbCBsl02wOGhCFdh1HmtSFud+YNnuo1i3Oe9Lu/awxLgMGzgGeBi2sMgqnlb5wamiP/igalZRZg3JQdXYZmhAxRRPJWiFgm0dPdrQ+It2Szj1sive6DrOfXBGD0ixV1+sA7J8IsqQQ2JUf/OzU6lBI4JbjBfCHZx54i/+9Kz8Safkqidf3nNYU4c5jEBLR9TnJn0tmgSM5JjEoCLcvKPC5OCiROdcm02zv48dOqSia9Z183h5yVV4sYXRgI4ft2Hd7OWokYbVGTXq30hwnm+J+fE9ajXZyUjXZrFq5DqIKsTkpE1icDVZebTAoJsGTI2IuXyufk2sZdGiL2qZapoURuUufK49pSO/Dx5U2aJ2his1VoK21l6jRiBBeaQ3YwOqDWq5JrO6fwKdI4x5eGrZA0l/gkQgpl3LnIv4iJoZDOkVl2/M42xMJcErzh07xjo8RSVTLKoxSNadrW8Ta52oZodBIH0EDEHSx9yU2EMIGIL0kLGMqOkjYAiSPuamxB5CwBCkh4xlRE0fAUOQ9DE3JfYQAoYgPWQsI2r6CBiCpI+5KbGHEDAE6SFjGVHTR8AQJH3MTYk9hIAhSA8Zy4iaPgKGIOljbkrsIQQMQXrIWEbU9BEwBEkfc1NiDyFgCNJDxjKipo+AIUj6mJsSewgBQ5AeMpYRNX0EDEHSx9yU2EMIGIL0kLGMqOkjYAiSPuamxB5CwBCkh4xlRE0fAUOQ9DE3JfYQAoYgPWQsI2r6CBiCpI+5KbGHEDAESTAWpjjjpGp9l6hT+L/vVGurQoYgteAshAeEDOYxKSdWM8CEz+G81LWu6In91IG6UCelG6WOdO0JBVIW0hAkAXBfus+gqj0Ph8pgmt6ejyVKB+hCnZRuCfpv9sMNzpy/ieDiJNlR4+pbV+/+D8zt/H5ofxZ7s73aNEH0gL2tMj4vwfJtX3/PY098SFl0ha6byMJ1qWoiSC2YQI5jh0Q4ubcQdyOCXIB7DYEcJTparct03a/IAdmVDkoX6ISklnnoy7us9ljCRJAEHCex1McEpur/ztW7Pwpa3AVulFELz+EytuNZwehOFs7bzqUGAkQ/riabxe9b3/XYE1+p6pYAwaY+bAiSZP4VzY9vX7PnA7gLuR2XvAS0KIEoWNvb6uoKWEnigwxY+4frEFpcLOdprOVwx7sfffxedd0K3RLz2aQnGILUYfiVNe33r7lij2c574PDvQmhYw8cbriOLLp2Cgg9j2bh4yDyj13L/8afP/rU4xRmpU5dE64HCv5/TkFf8RZsb3gAAAAASUVORK5CYII=", "item_uom_1": "Unit", "item_uom_2": "Unit", "item_uom_3": "Unit", "item_uom_4": "Unit", "item_uom_5": "Unit", "last_name": "Mehta", "supplier_1": "Google", "supplier_2": "Hetzner", "supplier_3": "Digital Ocean", "tax_1": "Service Tax", "tax_rate_1": "12.5", "timezone": "America/New_York", "password": "password", "email": "[email protected]", "user_email_1": "[email protected]", "user_fullname_1": "test setup user", "user_sales_1": 1, "user_purchaser_1": 1, "user_accountant_1": 1, "user_email_1": "[email protected]", "user_fullname_1": "test setup user", "user_sales_2": 1, "user_purchaser_2": 0, "user_accountant_2": 0 }
agpl-3.0
40223102/2015cd_midterm
static/Brython3.1.1-20150328-091302/Lib/xml/dom/minidom.py
727
66854
"""Simple implementation of the Level 1 DOM. Namespaces and other minor Level 2 features are also supported. parse("foo.xml") parseString("<foo><bar/></foo>") Todo: ===== * convenience methods for getting elements and text. * more testing * bring some of the writer and linearizer code into conformance with this interface * SAX 2 namespaces """ import io import xml.dom from xml.dom import EMPTY_NAMESPACE, EMPTY_PREFIX, XMLNS_NAMESPACE, domreg from xml.dom.minicompat import * from xml.dom.xmlbuilder import DOMImplementationLS, DocumentLS # This is used by the ID-cache invalidation checks; the list isn't # actually complete, since the nodes being checked will never be the # DOCUMENT_NODE or DOCUMENT_FRAGMENT_NODE. (The node being checked is # the node being added or removed, not the node being modified.) # _nodeTypes_with_children = (xml.dom.Node.ELEMENT_NODE, xml.dom.Node.ENTITY_REFERENCE_NODE) class Node(xml.dom.Node): namespaceURI = None # this is non-null only for elements and attributes parentNode = None ownerDocument = None nextSibling = None previousSibling = None prefix = EMPTY_PREFIX # non-null only for NS elements and attributes def __bool__(self): return True def toxml(self, encoding=None): return self.toprettyxml("", "", encoding) def toprettyxml(self, indent="\t", newl="\n", encoding=None): if encoding is None: writer = io.StringIO() else: writer = io.TextIOWrapper(io.BytesIO(), encoding=encoding, errors="xmlcharrefreplace", newline='\n') if self.nodeType == Node.DOCUMENT_NODE: # Can pass encoding only to document, to put it into XML header self.writexml(writer, "", indent, newl, encoding) else: self.writexml(writer, "", indent, newl) if encoding is None: return writer.getvalue() else: return writer.detach().getvalue() def hasChildNodes(self): return bool(self.childNodes) def _get_childNodes(self): return self.childNodes def _get_firstChild(self): if self.childNodes: return self.childNodes[0] def _get_lastChild(self): if self.childNodes: return self.childNodes[-1] def insertBefore(self, newChild, refChild): if newChild.nodeType == self.DOCUMENT_FRAGMENT_NODE: for c in tuple(newChild.childNodes): self.insertBefore(c, refChild) ### The DOM does not clearly specify what to return in this case return newChild if newChild.nodeType not in self._child_node_types: raise xml.dom.HierarchyRequestErr( "%s cannot be child of %s" % (repr(newChild), repr(self))) if newChild.parentNode is not None: newChild.parentNode.removeChild(newChild) if refChild is None: self.appendChild(newChild) else: try: index = self.childNodes.index(refChild) except ValueError: raise xml.dom.NotFoundErr() if newChild.nodeType in _nodeTypes_with_children: _clear_id_cache(self) self.childNodes.insert(index, newChild) newChild.nextSibling = refChild refChild.previousSibling = newChild if index: node = self.childNodes[index-1] node.nextSibling = newChild newChild.previousSibling = node else: newChild.previousSibling = None newChild.parentNode = self return newChild def appendChild(self, node): if node.nodeType == self.DOCUMENT_FRAGMENT_NODE: for c in tuple(node.childNodes): self.appendChild(c) ### The DOM does not clearly specify what to return in this case return node if node.nodeType not in self._child_node_types: raise xml.dom.HierarchyRequestErr( "%s cannot be child of %s" % (repr(node), repr(self))) elif node.nodeType in _nodeTypes_with_children: _clear_id_cache(self) if node.parentNode is not None: node.parentNode.removeChild(node) _append_child(self, node) node.nextSibling = None return node def replaceChild(self, newChild, oldChild): if newChild.nodeType == self.DOCUMENT_FRAGMENT_NODE: refChild = oldChild.nextSibling self.removeChild(oldChild) return self.insertBefore(newChild, refChild) if newChild.nodeType not in self._child_node_types: raise xml.dom.HierarchyRequestErr( "%s cannot be child of %s" % (repr(newChild), repr(self))) if newChild is oldChild: return if newChild.parentNode is not None: newChild.parentNode.removeChild(newChild) try: index = self.childNodes.index(oldChild) except ValueError: raise xml.dom.NotFoundErr() self.childNodes[index] = newChild newChild.parentNode = self oldChild.parentNode = None if (newChild.nodeType in _nodeTypes_with_children or oldChild.nodeType in _nodeTypes_with_children): _clear_id_cache(self) newChild.nextSibling = oldChild.nextSibling newChild.previousSibling = oldChild.previousSibling oldChild.nextSibling = None oldChild.previousSibling = None if newChild.previousSibling: newChild.previousSibling.nextSibling = newChild if newChild.nextSibling: newChild.nextSibling.previousSibling = newChild return oldChild def removeChild(self, oldChild): try: self.childNodes.remove(oldChild) except ValueError: raise xml.dom.NotFoundErr() if oldChild.nextSibling is not None: oldChild.nextSibling.previousSibling = oldChild.previousSibling if oldChild.previousSibling is not None: oldChild.previousSibling.nextSibling = oldChild.nextSibling oldChild.nextSibling = oldChild.previousSibling = None if oldChild.nodeType in _nodeTypes_with_children: _clear_id_cache(self) oldChild.parentNode = None return oldChild def normalize(self): L = [] for child in self.childNodes: if child.nodeType == Node.TEXT_NODE: if not child.data: # empty text node; discard if L: L[-1].nextSibling = child.nextSibling if child.nextSibling: child.nextSibling.previousSibling = child.previousSibling child.unlink() elif L and L[-1].nodeType == child.nodeType: # collapse text node node = L[-1] node.data = node.data + child.data node.nextSibling = child.nextSibling if child.nextSibling: child.nextSibling.previousSibling = node child.unlink() else: L.append(child) else: L.append(child) if child.nodeType == Node.ELEMENT_NODE: child.normalize() self.childNodes[:] = L def cloneNode(self, deep): return _clone_node(self, deep, self.ownerDocument or self) def isSupported(self, feature, version): return self.ownerDocument.implementation.hasFeature(feature, version) def _get_localName(self): # Overridden in Element and Attr where localName can be Non-Null return None # Node interfaces from Level 3 (WD 9 April 2002) def isSameNode(self, other): return self is other def getInterface(self, feature): if self.isSupported(feature, None): return self else: return None # The "user data" functions use a dictionary that is only present # if some user data has been set, so be careful not to assume it # exists. def getUserData(self, key): try: return self._user_data[key][0] except (AttributeError, KeyError): return None def setUserData(self, key, data, handler): old = None try: d = self._user_data except AttributeError: d = {} self._user_data = d if key in d: old = d[key][0] if data is None: # ignore handlers passed for None handler = None if old is not None: del d[key] else: d[key] = (data, handler) return old def _call_user_data_handler(self, operation, src, dst): if hasattr(self, "_user_data"): for key, (data, handler) in list(self._user_data.items()): if handler is not None: handler.handle(operation, key, data, src, dst) # minidom-specific API: def unlink(self): self.parentNode = self.ownerDocument = None if self.childNodes: for child in self.childNodes: child.unlink() self.childNodes = NodeList() self.previousSibling = None self.nextSibling = None # A Node is its own context manager, to ensure that an unlink() call occurs. # This is similar to how a file object works. def __enter__(self): return self def __exit__(self, et, ev, tb): self.unlink() defproperty(Node, "firstChild", doc="First child node, or None.") defproperty(Node, "lastChild", doc="Last child node, or None.") defproperty(Node, "localName", doc="Namespace-local name of this node.") def _append_child(self, node): # fast path with less checks; usable by DOM builders if careful childNodes = self.childNodes if childNodes: last = childNodes[-1] node.previousSibling = last last.nextSibling = node childNodes.append(node) node.parentNode = self def _in_document(node): # return True iff node is part of a document tree while node is not None: if node.nodeType == Node.DOCUMENT_NODE: return True node = node.parentNode return False def _write_data(writer, data): "Writes datachars to writer." if data: data = data.replace("&", "&amp;").replace("<", "&lt;"). \ replace("\"", "&quot;").replace(">", "&gt;") writer.write(data) def _get_elements_by_tagName_helper(parent, name, rc): for node in parent.childNodes: if node.nodeType == Node.ELEMENT_NODE and \ (name == "*" or node.tagName == name): rc.append(node) _get_elements_by_tagName_helper(node, name, rc) return rc def _get_elements_by_tagName_ns_helper(parent, nsURI, localName, rc): for node in parent.childNodes: if node.nodeType == Node.ELEMENT_NODE: if ((localName == "*" or node.localName == localName) and (nsURI == "*" or node.namespaceURI == nsURI)): rc.append(node) _get_elements_by_tagName_ns_helper(node, nsURI, localName, rc) return rc class DocumentFragment(Node): nodeType = Node.DOCUMENT_FRAGMENT_NODE nodeName = "#document-fragment" nodeValue = None attributes = None parentNode = None _child_node_types = (Node.ELEMENT_NODE, Node.TEXT_NODE, Node.CDATA_SECTION_NODE, Node.ENTITY_REFERENCE_NODE, Node.PROCESSING_INSTRUCTION_NODE, Node.COMMENT_NODE, Node.NOTATION_NODE) def __init__(self): self.childNodes = NodeList() class Attr(Node): __slots__=('_name', '_value', 'namespaceURI', '_prefix', 'childNodes', '_localName', 'ownerDocument', 'ownerElement') nodeType = Node.ATTRIBUTE_NODE attributes = None specified = False _is_id = False _child_node_types = (Node.TEXT_NODE, Node.ENTITY_REFERENCE_NODE) def __init__(self, qName, namespaceURI=EMPTY_NAMESPACE, localName=None, prefix=None): self.ownerElement = None self._name = qName self.namespaceURI = namespaceURI self._prefix = prefix self.childNodes = NodeList() # Add the single child node that represents the value of the attr self.childNodes.append(Text()) # nodeValue and value are set elsewhere def _get_localName(self): try: return self._localName except AttributeError: return self.nodeName.split(":", 1)[-1] def _get_name(self): return self.name def _get_specified(self): return self.specified def _get_name(self): return self._name def _set_name(self, value): self._name = value if self.ownerElement is not None: _clear_id_cache(self.ownerElement) nodeName = name = property(_get_name, _set_name) def _get_value(self): return self._value def _set_value(self, value): self._value = value self.childNodes[0].data = value if self.ownerElement is not None: _clear_id_cache(self.ownerElement) self.childNodes[0].data = value nodeValue = value = property(_get_value, _set_value) def _get_prefix(self): return self._prefix def _set_prefix(self, prefix): nsuri = self.namespaceURI if prefix == "xmlns": if nsuri and nsuri != XMLNS_NAMESPACE: raise xml.dom.NamespaceErr( "illegal use of 'xmlns' prefix for the wrong namespace") self._prefix = prefix if prefix is None: newName = self.localName else: newName = "%s:%s" % (prefix, self.localName) if self.ownerElement: _clear_id_cache(self.ownerElement) self.name = newName prefix = property(_get_prefix, _set_prefix) def unlink(self): # This implementation does not call the base implementation # since most of that is not needed, and the expense of the # method call is not warranted. We duplicate the removal of # children, but that's all we needed from the base class. elem = self.ownerElement if elem is not None: del elem._attrs[self.nodeName] del elem._attrsNS[(self.namespaceURI, self.localName)] if self._is_id: self._is_id = False elem._magic_id_nodes -= 1 self.ownerDocument._magic_id_count -= 1 for child in self.childNodes: child.unlink() del self.childNodes[:] def _get_isId(self): if self._is_id: return True doc = self.ownerDocument elem = self.ownerElement if doc is None or elem is None: return False info = doc._get_elem_info(elem) if info is None: return False if self.namespaceURI: return info.isIdNS(self.namespaceURI, self.localName) else: return info.isId(self.nodeName) def _get_schemaType(self): doc = self.ownerDocument elem = self.ownerElement if doc is None or elem is None: return _no_type info = doc._get_elem_info(elem) if info is None: return _no_type if self.namespaceURI: return info.getAttributeTypeNS(self.namespaceURI, self.localName) else: return info.getAttributeType(self.nodeName) defproperty(Attr, "isId", doc="True if this attribute is an ID.") defproperty(Attr, "localName", doc="Namespace-local name of this attribute.") defproperty(Attr, "schemaType", doc="Schema type for this attribute.") class NamedNodeMap(object): """The attribute list is a transient interface to the underlying dictionaries. Mutations here will change the underlying element's dictionary. Ordering is imposed artificially and does not reflect the order of attributes as found in an input document. """ __slots__ = ('_attrs', '_attrsNS', '_ownerElement') def __init__(self, attrs, attrsNS, ownerElement): self._attrs = attrs self._attrsNS = attrsNS self._ownerElement = ownerElement def _get_length(self): return len(self._attrs) def item(self, index): try: return self[list(self._attrs.keys())[index]] except IndexError: return None def items(self): L = [] for node in self._attrs.values(): L.append((node.nodeName, node.value)) return L def itemsNS(self): L = [] for node in self._attrs.values(): L.append(((node.namespaceURI, node.localName), node.value)) return L def __contains__(self, key): if isinstance(key, str): return key in self._attrs else: return key in self._attrsNS def keys(self): return self._attrs.keys() def keysNS(self): return self._attrsNS.keys() def values(self): return self._attrs.values() def get(self, name, value=None): return self._attrs.get(name, value) __len__ = _get_length def _cmp(self, other): if self._attrs is getattr(other, "_attrs", None): return 0 else: return (id(self) > id(other)) - (id(self) < id(other)) def __eq__(self, other): return self._cmp(other) == 0 def __ge__(self, other): return self._cmp(other) >= 0 def __gt__(self, other): return self._cmp(other) > 0 def __le__(self, other): return self._cmp(other) <= 0 def __lt__(self, other): return self._cmp(other) < 0 def __ne__(self, other): return self._cmp(other) != 0 def __getitem__(self, attname_or_tuple): if isinstance(attname_or_tuple, tuple): return self._attrsNS[attname_or_tuple] else: return self._attrs[attname_or_tuple] # same as set def __setitem__(self, attname, value): if isinstance(value, str): try: node = self._attrs[attname] except KeyError: node = Attr(attname) node.ownerDocument = self._ownerElement.ownerDocument self.setNamedItem(node) node.value = value else: if not isinstance(value, Attr): raise TypeError("value must be a string or Attr object") node = value self.setNamedItem(node) def getNamedItem(self, name): try: return self._attrs[name] except KeyError: return None def getNamedItemNS(self, namespaceURI, localName): try: return self._attrsNS[(namespaceURI, localName)] except KeyError: return None def removeNamedItem(self, name): n = self.getNamedItem(name) if n is not None: _clear_id_cache(self._ownerElement) del self._attrs[n.nodeName] del self._attrsNS[(n.namespaceURI, n.localName)] if hasattr(n, 'ownerElement'): n.ownerElement = None return n else: raise xml.dom.NotFoundErr() def removeNamedItemNS(self, namespaceURI, localName): n = self.getNamedItemNS(namespaceURI, localName) if n is not None: _clear_id_cache(self._ownerElement) del self._attrsNS[(n.namespaceURI, n.localName)] del self._attrs[n.nodeName] if hasattr(n, 'ownerElement'): n.ownerElement = None return n else: raise xml.dom.NotFoundErr() def setNamedItem(self, node): if not isinstance(node, Attr): raise xml.dom.HierarchyRequestErr( "%s cannot be child of %s" % (repr(node), repr(self))) old = self._attrs.get(node.name) if old: old.unlink() self._attrs[node.name] = node self._attrsNS[(node.namespaceURI, node.localName)] = node node.ownerElement = self._ownerElement _clear_id_cache(node.ownerElement) return old def setNamedItemNS(self, node): return self.setNamedItem(node) def __delitem__(self, attname_or_tuple): node = self[attname_or_tuple] _clear_id_cache(node.ownerElement) node.unlink() def __getstate__(self): return self._attrs, self._attrsNS, self._ownerElement def __setstate__(self, state): self._attrs, self._attrsNS, self._ownerElement = state defproperty(NamedNodeMap, "length", doc="Number of nodes in the NamedNodeMap.") AttributeList = NamedNodeMap class TypeInfo(object): __slots__ = 'namespace', 'name' def __init__(self, namespace, name): self.namespace = namespace self.name = name def __repr__(self): if self.namespace: return "<TypeInfo %r (from %r)>" % (self.name, self.namespace) else: return "<TypeInfo %r>" % self.name def _get_name(self): return self.name def _get_namespace(self): return self.namespace _no_type = TypeInfo(None, None) class Element(Node): __slots__=('ownerDocument', 'parentNode', 'tagName', 'nodeName', 'prefix', 'namespaceURI', '_localName', 'childNodes', '_attrs', '_attrsNS', 'nextSibling', 'previousSibling') nodeType = Node.ELEMENT_NODE nodeValue = None schemaType = _no_type _magic_id_nodes = 0 _child_node_types = (Node.ELEMENT_NODE, Node.PROCESSING_INSTRUCTION_NODE, Node.COMMENT_NODE, Node.TEXT_NODE, Node.CDATA_SECTION_NODE, Node.ENTITY_REFERENCE_NODE) def __init__(self, tagName, namespaceURI=EMPTY_NAMESPACE, prefix=None, localName=None): self.parentNode = None self.tagName = self.nodeName = tagName self.prefix = prefix self.namespaceURI = namespaceURI self.childNodes = NodeList() self.nextSibling = self.previousSibling = None # Attribute dictionaries are lazily created # attributes are double-indexed: # tagName -> Attribute # URI,localName -> Attribute # in the future: consider lazy generation # of attribute objects this is too tricky # for now because of headaches with # namespaces. self._attrs = None self._attrsNS = None def _ensure_attributes(self): if self._attrs is None: self._attrs = {} self._attrsNS = {} def _get_localName(self): try: return self._localName except AttributeError: return self.tagName.split(":", 1)[-1] def _get_tagName(self): return self.tagName def unlink(self): if self._attrs is not None: for attr in list(self._attrs.values()): attr.unlink() self._attrs = None self._attrsNS = None Node.unlink(self) def getAttribute(self, attname): if self._attrs is None: return "" try: return self._attrs[attname].value except KeyError: return "" def getAttributeNS(self, namespaceURI, localName): if self._attrsNS is None: return "" try: return self._attrsNS[(namespaceURI, localName)].value except KeyError: return "" def setAttribute(self, attname, value): attr = self.getAttributeNode(attname) if attr is None: attr = Attr(attname) attr.value = value # also sets nodeValue attr.ownerDocument = self.ownerDocument self.setAttributeNode(attr) elif value != attr.value: attr.value = value if attr.isId: _clear_id_cache(self) def setAttributeNS(self, namespaceURI, qualifiedName, value): prefix, localname = _nssplit(qualifiedName) attr = self.getAttributeNodeNS(namespaceURI, localname) if attr is None: attr = Attr(qualifiedName, namespaceURI, localname, prefix) attr.value = value attr.ownerDocument = self.ownerDocument self.setAttributeNode(attr) else: if value != attr.value: attr.value = value if attr.isId: _clear_id_cache(self) if attr.prefix != prefix: attr.prefix = prefix attr.nodeName = qualifiedName def getAttributeNode(self, attrname): if self._attrs is None: return None return self._attrs.get(attrname) def getAttributeNodeNS(self, namespaceURI, localName): if self._attrsNS is None: return None return self._attrsNS.get((namespaceURI, localName)) def setAttributeNode(self, attr): if attr.ownerElement not in (None, self): raise xml.dom.InuseAttributeErr("attribute node already owned") self._ensure_attributes() old1 = self._attrs.get(attr.name, None) if old1 is not None: self.removeAttributeNode(old1) old2 = self._attrsNS.get((attr.namespaceURI, attr.localName), None) if old2 is not None and old2 is not old1: self.removeAttributeNode(old2) _set_attribute_node(self, attr) if old1 is not attr: # It might have already been part of this node, in which case # it doesn't represent a change, and should not be returned. return old1 if old2 is not attr: return old2 setAttributeNodeNS = setAttributeNode def removeAttribute(self, name): if self._attrsNS is None: raise xml.dom.NotFoundErr() try: attr = self._attrs[name] except KeyError: raise xml.dom.NotFoundErr() self.removeAttributeNode(attr) def removeAttributeNS(self, namespaceURI, localName): if self._attrsNS is None: raise xml.dom.NotFoundErr() try: attr = self._attrsNS[(namespaceURI, localName)] except KeyError: raise xml.dom.NotFoundErr() self.removeAttributeNode(attr) def removeAttributeNode(self, node): if node is None: raise xml.dom.NotFoundErr() try: self._attrs[node.name] except KeyError: raise xml.dom.NotFoundErr() _clear_id_cache(self) node.unlink() # Restore this since the node is still useful and otherwise # unlinked node.ownerDocument = self.ownerDocument removeAttributeNodeNS = removeAttributeNode def hasAttribute(self, name): if self._attrs is None: return False return name in self._attrs def hasAttributeNS(self, namespaceURI, localName): if self._attrsNS is None: return False return (namespaceURI, localName) in self._attrsNS def getElementsByTagName(self, name): return _get_elements_by_tagName_helper(self, name, NodeList()) def getElementsByTagNameNS(self, namespaceURI, localName): return _get_elements_by_tagName_ns_helper( self, namespaceURI, localName, NodeList()) def __repr__(self): return "<DOM Element: %s at %#x>" % (self.tagName, id(self)) def writexml(self, writer, indent="", addindent="", newl=""): # indent = current indentation # addindent = indentation to add to higher levels # newl = newline string writer.write(indent+"<" + self.tagName) attrs = self._get_attributes() a_names = sorted(attrs.keys()) for a_name in a_names: writer.write(" %s=\"" % a_name) _write_data(writer, attrs[a_name].value) writer.write("\"") if self.childNodes: writer.write(">") if (len(self.childNodes) == 1 and self.childNodes[0].nodeType == Node.TEXT_NODE): self.childNodes[0].writexml(writer, '', '', '') else: writer.write(newl) for node in self.childNodes: node.writexml(writer, indent+addindent, addindent, newl) writer.write(indent) writer.write("</%s>%s" % (self.tagName, newl)) else: writer.write("/>%s"%(newl)) def _get_attributes(self): self._ensure_attributes() return NamedNodeMap(self._attrs, self._attrsNS, self) def hasAttributes(self): if self._attrs: return True else: return False # DOM Level 3 attributes, based on the 22 Oct 2002 draft def setIdAttribute(self, name): idAttr = self.getAttributeNode(name) self.setIdAttributeNode(idAttr) def setIdAttributeNS(self, namespaceURI, localName): idAttr = self.getAttributeNodeNS(namespaceURI, localName) self.setIdAttributeNode(idAttr) def setIdAttributeNode(self, idAttr): if idAttr is None or not self.isSameNode(idAttr.ownerElement): raise xml.dom.NotFoundErr() if _get_containing_entref(self) is not None: raise xml.dom.NoModificationAllowedErr() if not idAttr._is_id: idAttr._is_id = True self._magic_id_nodes += 1 self.ownerDocument._magic_id_count += 1 _clear_id_cache(self) defproperty(Element, "attributes", doc="NamedNodeMap of attributes on the element.") defproperty(Element, "localName", doc="Namespace-local name of this element.") def _set_attribute_node(element, attr): _clear_id_cache(element) element._ensure_attributes() element._attrs[attr.name] = attr element._attrsNS[(attr.namespaceURI, attr.localName)] = attr # This creates a circular reference, but Element.unlink() # breaks the cycle since the references to the attribute # dictionaries are tossed. attr.ownerElement = element class Childless: """Mixin that makes childless-ness easy to implement and avoids the complexity of the Node methods that deal with children. """ __slots__ = () attributes = None childNodes = EmptyNodeList() firstChild = None lastChild = None def _get_firstChild(self): return None def _get_lastChild(self): return None def appendChild(self, node): raise xml.dom.HierarchyRequestErr( self.nodeName + " nodes cannot have children") def hasChildNodes(self): return False def insertBefore(self, newChild, refChild): raise xml.dom.HierarchyRequestErr( self.nodeName + " nodes do not have children") def removeChild(self, oldChild): raise xml.dom.NotFoundErr( self.nodeName + " nodes do not have children") def normalize(self): # For childless nodes, normalize() has nothing to do. pass def replaceChild(self, newChild, oldChild): raise xml.dom.HierarchyRequestErr( self.nodeName + " nodes do not have children") class ProcessingInstruction(Childless, Node): nodeType = Node.PROCESSING_INSTRUCTION_NODE __slots__ = ('target', 'data') def __init__(self, target, data): self.target = target self.data = data # nodeValue is an alias for data def _get_nodeValue(self): return self.data def _set_nodeValue(self, value): self.data = data nodeValue = property(_get_nodeValue, _set_nodeValue) # nodeName is an alias for target def _get_nodeName(self): return self.target def _set_nodeName(self, value): self.target = value nodeName = property(_get_nodeName, _set_nodeName) def writexml(self, writer, indent="", addindent="", newl=""): writer.write("%s<?%s %s?>%s" % (indent,self.target, self.data, newl)) class CharacterData(Childless, Node): __slots__=('_data', 'ownerDocument','parentNode', 'previousSibling', 'nextSibling') def __init__(self): self.ownerDocument = self.parentNode = None self.previousSibling = self.nextSibling = None self._data = '' Node.__init__(self) def _get_length(self): return len(self.data) __len__ = _get_length def _get_data(self): return self._data def _set_data(self, data): self._data = data data = nodeValue = property(_get_data, _set_data) def __repr__(self): data = self.data if len(data) > 10: dotdotdot = "..." else: dotdotdot = "" return '<DOM %s node "%r%s">' % ( self.__class__.__name__, data[0:10], dotdotdot) def substringData(self, offset, count): if offset < 0: raise xml.dom.IndexSizeErr("offset cannot be negative") if offset >= len(self.data): raise xml.dom.IndexSizeErr("offset cannot be beyond end of data") if count < 0: raise xml.dom.IndexSizeErr("count cannot be negative") return self.data[offset:offset+count] def appendData(self, arg): self.data = self.data + arg def insertData(self, offset, arg): if offset < 0: raise xml.dom.IndexSizeErr("offset cannot be negative") if offset >= len(self.data): raise xml.dom.IndexSizeErr("offset cannot be beyond end of data") if arg: self.data = "%s%s%s" % ( self.data[:offset], arg, self.data[offset:]) def deleteData(self, offset, count): if offset < 0: raise xml.dom.IndexSizeErr("offset cannot be negative") if offset >= len(self.data): raise xml.dom.IndexSizeErr("offset cannot be beyond end of data") if count < 0: raise xml.dom.IndexSizeErr("count cannot be negative") if count: self.data = self.data[:offset] + self.data[offset+count:] def replaceData(self, offset, count, arg): if offset < 0: raise xml.dom.IndexSizeErr("offset cannot be negative") if offset >= len(self.data): raise xml.dom.IndexSizeErr("offset cannot be beyond end of data") if count < 0: raise xml.dom.IndexSizeErr("count cannot be negative") if count: self.data = "%s%s%s" % ( self.data[:offset], arg, self.data[offset+count:]) defproperty(CharacterData, "length", doc="Length of the string data.") class Text(CharacterData): __slots__ = () nodeType = Node.TEXT_NODE nodeName = "#text" attributes = None def splitText(self, offset): if offset < 0 or offset > len(self.data): raise xml.dom.IndexSizeErr("illegal offset value") newText = self.__class__() newText.data = self.data[offset:] newText.ownerDocument = self.ownerDocument next = self.nextSibling if self.parentNode and self in self.parentNode.childNodes: if next is None: self.parentNode.appendChild(newText) else: self.parentNode.insertBefore(newText, next) self.data = self.data[:offset] return newText def writexml(self, writer, indent="", addindent="", newl=""): _write_data(writer, "%s%s%s" % (indent, self.data, newl)) # DOM Level 3 (WD 9 April 2002) def _get_wholeText(self): L = [self.data] n = self.previousSibling while n is not None: if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE): L.insert(0, n.data) n = n.previousSibling else: break n = self.nextSibling while n is not None: if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE): L.append(n.data) n = n.nextSibling else: break return ''.join(L) def replaceWholeText(self, content): # XXX This needs to be seriously changed if minidom ever # supports EntityReference nodes. parent = self.parentNode n = self.previousSibling while n is not None: if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE): next = n.previousSibling parent.removeChild(n) n = next else: break n = self.nextSibling if not content: parent.removeChild(self) while n is not None: if n.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE): next = n.nextSibling parent.removeChild(n) n = next else: break if content: self.data = content return self else: return None def _get_isWhitespaceInElementContent(self): if self.data.strip(): return False elem = _get_containing_element(self) if elem is None: return False info = self.ownerDocument._get_elem_info(elem) if info is None: return False else: return info.isElementContent() defproperty(Text, "isWhitespaceInElementContent", doc="True iff this text node contains only whitespace" " and is in element content.") defproperty(Text, "wholeText", doc="The text of all logically-adjacent text nodes.") def _get_containing_element(node): c = node.parentNode while c is not None: if c.nodeType == Node.ELEMENT_NODE: return c c = c.parentNode return None def _get_containing_entref(node): c = node.parentNode while c is not None: if c.nodeType == Node.ENTITY_REFERENCE_NODE: return c c = c.parentNode return None class Comment(CharacterData): nodeType = Node.COMMENT_NODE nodeName = "#comment" def __init__(self, data): CharacterData.__init__(self) self._data = data def writexml(self, writer, indent="", addindent="", newl=""): if "--" in self.data: raise ValueError("'--' is not allowed in a comment node") writer.write("%s<!--%s-->%s" % (indent, self.data, newl)) class CDATASection(Text): __slots__ = () nodeType = Node.CDATA_SECTION_NODE nodeName = "#cdata-section" def writexml(self, writer, indent="", addindent="", newl=""): if self.data.find("]]>") >= 0: raise ValueError("']]>' not allowed in a CDATA section") writer.write("<![CDATA[%s]]>" % self.data) class ReadOnlySequentialNamedNodeMap(object): __slots__ = '_seq', def __init__(self, seq=()): # seq should be a list or tuple self._seq = seq def __len__(self): return len(self._seq) def _get_length(self): return len(self._seq) def getNamedItem(self, name): for n in self._seq: if n.nodeName == name: return n def getNamedItemNS(self, namespaceURI, localName): for n in self._seq: if n.namespaceURI == namespaceURI and n.localName == localName: return n def __getitem__(self, name_or_tuple): if isinstance(name_or_tuple, tuple): node = self.getNamedItemNS(*name_or_tuple) else: node = self.getNamedItem(name_or_tuple) if node is None: raise KeyError(name_or_tuple) return node def item(self, index): if index < 0: return None try: return self._seq[index] except IndexError: return None def removeNamedItem(self, name): raise xml.dom.NoModificationAllowedErr( "NamedNodeMap instance is read-only") def removeNamedItemNS(self, namespaceURI, localName): raise xml.dom.NoModificationAllowedErr( "NamedNodeMap instance is read-only") def setNamedItem(self, node): raise xml.dom.NoModificationAllowedErr( "NamedNodeMap instance is read-only") def setNamedItemNS(self, node): raise xml.dom.NoModificationAllowedErr( "NamedNodeMap instance is read-only") def __getstate__(self): return [self._seq] def __setstate__(self, state): self._seq = state[0] defproperty(ReadOnlySequentialNamedNodeMap, "length", doc="Number of entries in the NamedNodeMap.") class Identified: """Mix-in class that supports the publicId and systemId attributes.""" __slots__ = 'publicId', 'systemId' def _identified_mixin_init(self, publicId, systemId): self.publicId = publicId self.systemId = systemId def _get_publicId(self): return self.publicId def _get_systemId(self): return self.systemId class DocumentType(Identified, Childless, Node): nodeType = Node.DOCUMENT_TYPE_NODE nodeValue = None name = None publicId = None systemId = None internalSubset = None def __init__(self, qualifiedName): self.entities = ReadOnlySequentialNamedNodeMap() self.notations = ReadOnlySequentialNamedNodeMap() if qualifiedName: prefix, localname = _nssplit(qualifiedName) self.name = localname self.nodeName = self.name def _get_internalSubset(self): return self.internalSubset def cloneNode(self, deep): if self.ownerDocument is None: # it's ok clone = DocumentType(None) clone.name = self.name clone.nodeName = self.name operation = xml.dom.UserDataHandler.NODE_CLONED if deep: clone.entities._seq = [] clone.notations._seq = [] for n in self.notations._seq: notation = Notation(n.nodeName, n.publicId, n.systemId) clone.notations._seq.append(notation) n._call_user_data_handler(operation, n, notation) for e in self.entities._seq: entity = Entity(e.nodeName, e.publicId, e.systemId, e.notationName) entity.actualEncoding = e.actualEncoding entity.encoding = e.encoding entity.version = e.version clone.entities._seq.append(entity) e._call_user_data_handler(operation, n, entity) self._call_user_data_handler(operation, self, clone) return clone else: return None def writexml(self, writer, indent="", addindent="", newl=""): writer.write("<!DOCTYPE ") writer.write(self.name) if self.publicId: writer.write("%s PUBLIC '%s'%s '%s'" % (newl, self.publicId, newl, self.systemId)) elif self.systemId: writer.write("%s SYSTEM '%s'" % (newl, self.systemId)) if self.internalSubset is not None: writer.write(" [") writer.write(self.internalSubset) writer.write("]") writer.write(">"+newl) class Entity(Identified, Node): attributes = None nodeType = Node.ENTITY_NODE nodeValue = None actualEncoding = None encoding = None version = None def __init__(self, name, publicId, systemId, notation): self.nodeName = name self.notationName = notation self.childNodes = NodeList() self._identified_mixin_init(publicId, systemId) def _get_actualEncoding(self): return self.actualEncoding def _get_encoding(self): return self.encoding def _get_version(self): return self.version def appendChild(self, newChild): raise xml.dom.HierarchyRequestErr( "cannot append children to an entity node") def insertBefore(self, newChild, refChild): raise xml.dom.HierarchyRequestErr( "cannot insert children below an entity node") def removeChild(self, oldChild): raise xml.dom.HierarchyRequestErr( "cannot remove children from an entity node") def replaceChild(self, newChild, oldChild): raise xml.dom.HierarchyRequestErr( "cannot replace children of an entity node") class Notation(Identified, Childless, Node): nodeType = Node.NOTATION_NODE nodeValue = None def __init__(self, name, publicId, systemId): self.nodeName = name self._identified_mixin_init(publicId, systemId) class DOMImplementation(DOMImplementationLS): _features = [("core", "1.0"), ("core", "2.0"), ("core", None), ("xml", "1.0"), ("xml", "2.0"), ("xml", None), ("ls-load", "3.0"), ("ls-load", None), ] def hasFeature(self, feature, version): if version == "": version = None return (feature.lower(), version) in self._features def createDocument(self, namespaceURI, qualifiedName, doctype): if doctype and doctype.parentNode is not None: raise xml.dom.WrongDocumentErr( "doctype object owned by another DOM tree") doc = self._create_document() add_root_element = not (namespaceURI is None and qualifiedName is None and doctype is None) if not qualifiedName and add_root_element: # The spec is unclear what to raise here; SyntaxErr # would be the other obvious candidate. Since Xerces raises # InvalidCharacterErr, and since SyntaxErr is not listed # for createDocument, that seems to be the better choice. # XXX: need to check for illegal characters here and in # createElement. # DOM Level III clears this up when talking about the return value # of this function. If namespaceURI, qName and DocType are # Null the document is returned without a document element # Otherwise if doctype or namespaceURI are not None # Then we go back to the above problem raise xml.dom.InvalidCharacterErr("Element with no name") if add_root_element: prefix, localname = _nssplit(qualifiedName) if prefix == "xml" \ and namespaceURI != "http://www.w3.org/XML/1998/namespace": raise xml.dom.NamespaceErr("illegal use of 'xml' prefix") if prefix and not namespaceURI: raise xml.dom.NamespaceErr( "illegal use of prefix without namespaces") element = doc.createElementNS(namespaceURI, qualifiedName) if doctype: doc.appendChild(doctype) doc.appendChild(element) if doctype: doctype.parentNode = doctype.ownerDocument = doc doc.doctype = doctype doc.implementation = self return doc def createDocumentType(self, qualifiedName, publicId, systemId): doctype = DocumentType(qualifiedName) doctype.publicId = publicId doctype.systemId = systemId return doctype # DOM Level 3 (WD 9 April 2002) def getInterface(self, feature): if self.hasFeature(feature, None): return self else: return None # internal def _create_document(self): return Document() class ElementInfo(object): """Object that represents content-model information for an element. This implementation is not expected to be used in practice; DOM builders should provide implementations which do the right thing using information available to it. """ __slots__ = 'tagName', def __init__(self, name): self.tagName = name def getAttributeType(self, aname): return _no_type def getAttributeTypeNS(self, namespaceURI, localName): return _no_type def isElementContent(self): return False def isEmpty(self): """Returns true iff this element is declared to have an EMPTY content model.""" return False def isId(self, aname): """Returns true iff the named attribute is a DTD-style ID.""" return False def isIdNS(self, namespaceURI, localName): """Returns true iff the identified attribute is a DTD-style ID.""" return False def __getstate__(self): return self.tagName def __setstate__(self, state): self.tagName = state def _clear_id_cache(node): if node.nodeType == Node.DOCUMENT_NODE: node._id_cache.clear() node._id_search_stack = None elif _in_document(node): node.ownerDocument._id_cache.clear() node.ownerDocument._id_search_stack= None class Document(Node, DocumentLS): __slots__ = ('_elem_info', 'doctype', '_id_search_stack', 'childNodes', '_id_cache') _child_node_types = (Node.ELEMENT_NODE, Node.PROCESSING_INSTRUCTION_NODE, Node.COMMENT_NODE, Node.DOCUMENT_TYPE_NODE) implementation = DOMImplementation() nodeType = Node.DOCUMENT_NODE nodeName = "#document" nodeValue = None attributes = None parentNode = None previousSibling = nextSibling = None # Document attributes from Level 3 (WD 9 April 2002) actualEncoding = None encoding = None standalone = None version = None strictErrorChecking = False errorHandler = None documentURI = None _magic_id_count = 0 def __init__(self): self.doctype = None self.childNodes = NodeList() # mapping of (namespaceURI, localName) -> ElementInfo # and tagName -> ElementInfo self._elem_info = {} self._id_cache = {} self._id_search_stack = None def _get_elem_info(self, element): if element.namespaceURI: key = element.namespaceURI, element.localName else: key = element.tagName return self._elem_info.get(key) def _get_actualEncoding(self): return self.actualEncoding def _get_doctype(self): return self.doctype def _get_documentURI(self): return self.documentURI def _get_encoding(self): return self.encoding def _get_errorHandler(self): return self.errorHandler def _get_standalone(self): return self.standalone def _get_strictErrorChecking(self): return self.strictErrorChecking def _get_version(self): return self.version def appendChild(self, node): if node.nodeType not in self._child_node_types: raise xml.dom.HierarchyRequestErr( "%s cannot be child of %s" % (repr(node), repr(self))) if node.parentNode is not None: # This needs to be done before the next test since this # may *be* the document element, in which case it should # end up re-ordered to the end. node.parentNode.removeChild(node) if node.nodeType == Node.ELEMENT_NODE \ and self._get_documentElement(): raise xml.dom.HierarchyRequestErr( "two document elements disallowed") return Node.appendChild(self, node) def removeChild(self, oldChild): try: self.childNodes.remove(oldChild) except ValueError: raise xml.dom.NotFoundErr() oldChild.nextSibling = oldChild.previousSibling = None oldChild.parentNode = None if self.documentElement is oldChild: self.documentElement = None return oldChild def _get_documentElement(self): for node in self.childNodes: if node.nodeType == Node.ELEMENT_NODE: return node def unlink(self): if self.doctype is not None: self.doctype.unlink() self.doctype = None Node.unlink(self) def cloneNode(self, deep): if not deep: return None clone = self.implementation.createDocument(None, None, None) clone.encoding = self.encoding clone.standalone = self.standalone clone.version = self.version for n in self.childNodes: childclone = _clone_node(n, deep, clone) assert childclone.ownerDocument.isSameNode(clone) clone.childNodes.append(childclone) if childclone.nodeType == Node.DOCUMENT_NODE: assert clone.documentElement is None elif childclone.nodeType == Node.DOCUMENT_TYPE_NODE: assert clone.doctype is None clone.doctype = childclone childclone.parentNode = clone self._call_user_data_handler(xml.dom.UserDataHandler.NODE_CLONED, self, clone) return clone def createDocumentFragment(self): d = DocumentFragment() d.ownerDocument = self return d def createElement(self, tagName): e = Element(tagName) e.ownerDocument = self return e def createTextNode(self, data): if not isinstance(data, str): raise TypeError("node contents must be a string") t = Text() t.data = data t.ownerDocument = self return t def createCDATASection(self, data): if not isinstance(data, str): raise TypeError("node contents must be a string") c = CDATASection() c.data = data c.ownerDocument = self return c def createComment(self, data): c = Comment(data) c.ownerDocument = self return c def createProcessingInstruction(self, target, data): p = ProcessingInstruction(target, data) p.ownerDocument = self return p def createAttribute(self, qName): a = Attr(qName) a.ownerDocument = self a.value = "" return a def createElementNS(self, namespaceURI, qualifiedName): prefix, localName = _nssplit(qualifiedName) e = Element(qualifiedName, namespaceURI, prefix) e.ownerDocument = self return e def createAttributeNS(self, namespaceURI, qualifiedName): prefix, localName = _nssplit(qualifiedName) a = Attr(qualifiedName, namespaceURI, localName, prefix) a.ownerDocument = self a.value = "" return a # A couple of implementation-specific helpers to create node types # not supported by the W3C DOM specs: def _create_entity(self, name, publicId, systemId, notationName): e = Entity(name, publicId, systemId, notationName) e.ownerDocument = self return e def _create_notation(self, name, publicId, systemId): n = Notation(name, publicId, systemId) n.ownerDocument = self return n def getElementById(self, id): if id in self._id_cache: return self._id_cache[id] if not (self._elem_info or self._magic_id_count): return None stack = self._id_search_stack if stack is None: # we never searched before, or the cache has been cleared stack = [self.documentElement] self._id_search_stack = stack elif not stack: # Previous search was completed and cache is still valid; # no matching node. return None result = None while stack: node = stack.pop() # add child elements to stack for continued searching stack.extend([child for child in node.childNodes if child.nodeType in _nodeTypes_with_children]) # check this node info = self._get_elem_info(node) if info: # We have to process all ID attributes before # returning in order to get all the attributes set to # be IDs using Element.setIdAttribute*(). for attr in node.attributes.values(): if attr.namespaceURI: if info.isIdNS(attr.namespaceURI, attr.localName): self._id_cache[attr.value] = node if attr.value == id: result = node elif not node._magic_id_nodes: break elif info.isId(attr.name): self._id_cache[attr.value] = node if attr.value == id: result = node elif not node._magic_id_nodes: break elif attr._is_id: self._id_cache[attr.value] = node if attr.value == id: result = node elif node._magic_id_nodes == 1: break elif node._magic_id_nodes: for attr in node.attributes.values(): if attr._is_id: self._id_cache[attr.value] = node if attr.value == id: result = node if result is not None: break return result def getElementsByTagName(self, name): return _get_elements_by_tagName_helper(self, name, NodeList()) def getElementsByTagNameNS(self, namespaceURI, localName): return _get_elements_by_tagName_ns_helper( self, namespaceURI, localName, NodeList()) def isSupported(self, feature, version): return self.implementation.hasFeature(feature, version) def importNode(self, node, deep): if node.nodeType == Node.DOCUMENT_NODE: raise xml.dom.NotSupportedErr("cannot import document nodes") elif node.nodeType == Node.DOCUMENT_TYPE_NODE: raise xml.dom.NotSupportedErr("cannot import document type nodes") return _clone_node(node, deep, self) def writexml(self, writer, indent="", addindent="", newl="", encoding=None): if encoding is None: writer.write('<?xml version="1.0" ?>'+newl) else: writer.write('<?xml version="1.0" encoding="%s"?>%s' % ( encoding, newl)) for node in self.childNodes: node.writexml(writer, indent, addindent, newl) # DOM Level 3 (WD 9 April 2002) def renameNode(self, n, namespaceURI, name): if n.ownerDocument is not self: raise xml.dom.WrongDocumentErr( "cannot rename nodes from other documents;\n" "expected %s,\nfound %s" % (self, n.ownerDocument)) if n.nodeType not in (Node.ELEMENT_NODE, Node.ATTRIBUTE_NODE): raise xml.dom.NotSupportedErr( "renameNode() only applies to element and attribute nodes") if namespaceURI != EMPTY_NAMESPACE: if ':' in name: prefix, localName = name.split(':', 1) if ( prefix == "xmlns" and namespaceURI != xml.dom.XMLNS_NAMESPACE): raise xml.dom.NamespaceErr( "illegal use of 'xmlns' prefix") else: if ( name == "xmlns" and namespaceURI != xml.dom.XMLNS_NAMESPACE and n.nodeType == Node.ATTRIBUTE_NODE): raise xml.dom.NamespaceErr( "illegal use of the 'xmlns' attribute") prefix = None localName = name else: prefix = None localName = None if n.nodeType == Node.ATTRIBUTE_NODE: element = n.ownerElement if element is not None: is_id = n._is_id element.removeAttributeNode(n) else: element = None n.prefix = prefix n._localName = localName n.namespaceURI = namespaceURI n.nodeName = name if n.nodeType == Node.ELEMENT_NODE: n.tagName = name else: # attribute node n.name = name if element is not None: element.setAttributeNode(n) if is_id: element.setIdAttributeNode(n) # It's not clear from a semantic perspective whether we should # call the user data handlers for the NODE_RENAMED event since # we're re-using the existing node. The draft spec has been # interpreted as meaning "no, don't call the handler unless a # new node is created." return n defproperty(Document, "documentElement", doc="Top-level element of this document.") def _clone_node(node, deep, newOwnerDocument): """ Clone a node and give it the new owner document. Called by Node.cloneNode and Document.importNode """ if node.ownerDocument.isSameNode(newOwnerDocument): operation = xml.dom.UserDataHandler.NODE_CLONED else: operation = xml.dom.UserDataHandler.NODE_IMPORTED if node.nodeType == Node.ELEMENT_NODE: clone = newOwnerDocument.createElementNS(node.namespaceURI, node.nodeName) for attr in node.attributes.values(): clone.setAttributeNS(attr.namespaceURI, attr.nodeName, attr.value) a = clone.getAttributeNodeNS(attr.namespaceURI, attr.localName) a.specified = attr.specified if deep: for child in node.childNodes: c = _clone_node(child, deep, newOwnerDocument) clone.appendChild(c) elif node.nodeType == Node.DOCUMENT_FRAGMENT_NODE: clone = newOwnerDocument.createDocumentFragment() if deep: for child in node.childNodes: c = _clone_node(child, deep, newOwnerDocument) clone.appendChild(c) elif node.nodeType == Node.TEXT_NODE: clone = newOwnerDocument.createTextNode(node.data) elif node.nodeType == Node.CDATA_SECTION_NODE: clone = newOwnerDocument.createCDATASection(node.data) elif node.nodeType == Node.PROCESSING_INSTRUCTION_NODE: clone = newOwnerDocument.createProcessingInstruction(node.target, node.data) elif node.nodeType == Node.COMMENT_NODE: clone = newOwnerDocument.createComment(node.data) elif node.nodeType == Node.ATTRIBUTE_NODE: clone = newOwnerDocument.createAttributeNS(node.namespaceURI, node.nodeName) clone.specified = True clone.value = node.value elif node.nodeType == Node.DOCUMENT_TYPE_NODE: assert node.ownerDocument is not newOwnerDocument operation = xml.dom.UserDataHandler.NODE_IMPORTED clone = newOwnerDocument.implementation.createDocumentType( node.name, node.publicId, node.systemId) clone.ownerDocument = newOwnerDocument if deep: clone.entities._seq = [] clone.notations._seq = [] for n in node.notations._seq: notation = Notation(n.nodeName, n.publicId, n.systemId) notation.ownerDocument = newOwnerDocument clone.notations._seq.append(notation) if hasattr(n, '_call_user_data_handler'): n._call_user_data_handler(operation, n, notation) for e in node.entities._seq: entity = Entity(e.nodeName, e.publicId, e.systemId, e.notationName) entity.actualEncoding = e.actualEncoding entity.encoding = e.encoding entity.version = e.version entity.ownerDocument = newOwnerDocument clone.entities._seq.append(entity) if hasattr(e, '_call_user_data_handler'): e._call_user_data_handler(operation, n, entity) else: # Note the cloning of Document and DocumentType nodes is # implementation specific. minidom handles those cases # directly in the cloneNode() methods. raise xml.dom.NotSupportedErr("Cannot clone node %s" % repr(node)) # Check for _call_user_data_handler() since this could conceivably # used with other DOM implementations (one of the FourThought # DOMs, perhaps?). if hasattr(node, '_call_user_data_handler'): node._call_user_data_handler(operation, node, clone) return clone def _nssplit(qualifiedName): fields = qualifiedName.split(':', 1) if len(fields) == 2: return fields else: return (None, fields[0]) def _do_pulldom_parse(func, args, kwargs): events = func(*args, **kwargs) toktype, rootNode = events.getEvent() events.expandNode(rootNode) events.clear() return rootNode def parse(file, parser=None, bufsize=None): """Parse a file into a DOM by filename or file object.""" if parser is None and not bufsize: from xml.dom import expatbuilder return expatbuilder.parse(file) else: from xml.dom import pulldom return _do_pulldom_parse(pulldom.parse, (file,), {'parser': parser, 'bufsize': bufsize}) def parseString(string, parser=None): """Parse a file into a DOM from a string.""" if parser is None: from xml.dom import expatbuilder return expatbuilder.parseString(string) else: from xml.dom import pulldom return _do_pulldom_parse(pulldom.parseString, (string,), {'parser': parser}) def getDOMImplementation(features=None): if features: if isinstance(features, str): features = domreg._parse_feature_string(features) for f, v in features: if not Document.implementation.hasFeature(f, v): return None return Document.implementation
gpl-3.0
meteorfox/PerfKitBenchmarker
perfkitbenchmarker/linux_packages/ycsb.py
1
28681
# Copyright 2016 PerfKitBenchmarker Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Install, execute, and parse results from YCSB. YCSB (the Yahoo! Cloud Serving Benchmark) is a common method of comparing NoSQL database performance. https://github.com/brianfrankcooper/YCSB For PerfKitBenchmarker, we wrap YCSB to: * Pre-load a database with a fixed number of records. * Execute a collection of workloads under a staircase load. * Parse the results into PerfKitBenchmarker samples. The 'YCSBExecutor' class handles executing YCSB on a collection of client VMs. Generally, clients just need this class. For example, to run against HBase 1.0: >>> executor = ycsb.YCSBExecutor('hbase-10') >>> samples = executor.LoadAndRun(loader_vms) By default, this runs YCSB workloads A and B against the database, 32 threads per client VM, with an initial database size of 1GB (1k records). Each workload runs for at most 30 minutes. """ import bisect import collections import copy import csv import io import itertools import math import re import logging import operator import os import posixpath import time from perfkitbenchmarker import data from perfkitbenchmarker import events from perfkitbenchmarker import flags from perfkitbenchmarker import sample from perfkitbenchmarker import vm_util from perfkitbenchmarker.linux_packages import INSTALL_DIR FLAGS = flags.FLAGS YCSB_VERSION = '0.9.0' YCSB_TAR_URL = ('https://github.com/brianfrankcooper/YCSB/releases/' 'download/{0}/ycsb-{0}.tar.gz').format(YCSB_VERSION) YCSB_DIR = posixpath.join(INSTALL_DIR, 'ycsb') YCSB_EXE = posixpath.join(YCSB_DIR, 'bin', 'ycsb') _DEFAULT_PERCENTILES = 50, 75, 90, 95, 99, 99.9 # Binary operators to aggregate reported statistics. # Statistics with operator 'None' will be dropped. AGGREGATE_OPERATORS = { 'Operations': operator.add, 'RunTime(ms)': max, 'Return=0': operator.add, 'Return=-1': operator.add, 'Return=-2': operator.add, 'Return=-3': operator.add, 'Return=OK': operator.add, 'Return=ERROR': operator.add, 'LatencyVariance(ms)': None, 'AverageLatency(ms)': None, # Requires both average and # of ops. 'Throughput(ops/sec)': operator.add, '95thPercentileLatency(ms)': None, # Calculated across clients. '99thPercentileLatency(ms)': None, # Calculated across clients. 'MinLatency(ms)': min, 'MaxLatency(ms)': max} flags.DEFINE_boolean('ycsb_histogram', False, 'Include individual ' 'histogram results from YCSB (will increase sample ' 'count).') flags.DEFINE_boolean('ycsb_load_samples', True, 'Include samples ' 'from pre-populating database.') flags.DEFINE_boolean('ycsb_include_individual_results', False, 'Include results from each client VM, rather than just ' 'combined results.') flags.DEFINE_boolean('ycsb_reload_database', True, 'Reload database, othewise skip load stage. ' 'Note, this flag is only used if the database ' 'is already loaded.') flags.DEFINE_integer('ycsb_client_vms', 1, 'Number of YCSB client VMs.', lower_bound=1) flags.DEFINE_list('ycsb_workload_files', ['workloada', 'workloadb'], 'Path to YCSB workload file to use during *run* ' 'stage only. Comma-separated list') flags.DEFINE_list('ycsb_load_parameters', [], 'Passed to YCSB during the load stage. Comma-separated list ' 'of "key=value" pairs.') flags.DEFINE_list('ycsb_run_parameters', [], 'Passed to YCSB during the load stage. Comma-separated list ' 'of "key=value" pairs.') flags.DEFINE_list('ycsb_threads_per_client', ['32'], 'Number of threads per ' 'loader during the benchmark run. Specify a list to vary the ' 'number of clients.') flags.DEFINE_integer('ycsb_preload_threads', None, 'Number of threads per ' 'loader during the initial data population stage. ' 'Default value depends on the target DB.') flags.DEFINE_integer('ycsb_record_count', 1000000, 'Pre-load with a total ' 'dataset of records total.') flags.DEFINE_integer('ycsb_operation_count', 1000000, 'Number of operations ' '*per client VM*.') flags.DEFINE_integer('ycsb_timelimit', 1800, 'Maximum amount of time to run ' 'each workload / client count combination. Set to 0 for ' 'unlimited time.') # Default loading thread count for non-batching backends. DEFAULT_PRELOAD_THREADS = 32 def _GetThreadsPerLoaderList(): """Returns the list of client counts per VM to use in staircase load.""" return [int(thread_count) for thread_count in FLAGS.ycsb_threads_per_client] def _GetWorkloadFileList(): """Returns the list of workload files to run. Returns: In order of preference: * The argument to --ycsb_workload_files. * Bundled YCSB workloads A and B. """ return [data.ResourcePath(workload) for workload in FLAGS.ycsb_workload_files] def CheckPrerequisites(): for workload_file in _GetWorkloadFileList(): if not os.path.exists(workload_file): raise IOError('Missing workload file: {0}'.format(workload_file)) def _Install(vm): """Installs the YCSB package on the VM.""" vm.Install('openjdk') vm.Install('curl') vm.RemoteCommand(('mkdir -p {0} && curl -L {1} | ' 'tar -C {0} --strip-components=1 -xzf -').format( YCSB_DIR, YCSB_TAR_URL)) def YumInstall(vm): """Installs the YCSB package on the VM.""" _Install(vm) def AptInstall(vm): """Installs the YCSB package on the VM.""" _Install(vm) def ParseResults(ycsb_result_string, data_type='histogram'): """Parse YCSB results. Example input: YCSB Client 0.1 Command line: -db com.yahoo.ycsb.db.HBaseClient -P /tmp/pkb/workloada [OVERALL], RunTime(ms), 1800413.0 [OVERALL], Throughput(ops/sec), 2740.503428935472 [UPDATE], Operations, 2468054 [UPDATE], AverageLatency(us), 2218.8513395574005 [UPDATE], MinLatency(us), 554 [UPDATE], MaxLatency(us), 352634 [UPDATE], 95thPercentileLatency(ms), 4 [UPDATE], 99thPercentileLatency(ms), 7 [UPDATE], Return=0, 2468054 [UPDATE], 0, 398998 [UPDATE], 1, 1015682 [UPDATE], 2, 532078 ... Args: ycsb_result_string: str. Text output from YCSB. data_type: Either 'histogram' or 'timeseries'. Returns: A dictionary with keys: client: containing YCSB version information. command_line: Command line executed. groups: list of operation group descriptions, each with schema: group: group name (e.g., update, insert, overall) statistics: dict mapping from statistic name to value histogram: list of (ms_lower_bound, count) tuples, e.g.: [(0, 530), (19, 1)] indicates that 530 ops took between 0ms and 1ms, and 1 took between 19ms and 20ms. Empty bins are not reported. """ # TODO: YCSB 0.9.0 output client and command line string to stderr, so # we need to support it in the future. lines = [] client_string = 'YCSB' command_line = 'unknown' fp = io.BytesIO(ycsb_result_string) result_string = next(fp).strip() def IsHeadOfResults(line): return line.startswith('YCSB Client 0.') or line.startswith('[OVERALL]') while not IsHeadOfResults(result_string): result_string = next(fp).strip() if result_string.startswith('YCSB Client 0.'): client_string = result_string command_line = next(fp).strip() if not command_line.startswith('Command line:'): raise IOError('Unexpected second line: {0}'.format(command_line)) elif result_string.startswith('[OVERALL]'): # YCSB > 0.7.0. lines.append(result_string) else: # Received unexpected header raise IOError('Unexpected header: {0}'.format(client_string)) # Some databases print additional output to stdout. # YCSB results start with [<OPERATION_NAME>]; # filter to just those lines. def LineFilter(line): return re.search(r'^\[[A-Z]+\]', line) is not None lines = itertools.chain(lines, itertools.ifilter(LineFilter, fp)) r = csv.reader(lines) by_operation = itertools.groupby(r, operator.itemgetter(0)) result = collections.OrderedDict([ ('client', client_string), ('command_line', command_line), ('groups', collections.OrderedDict())]) for operation, lines in by_operation: operation = operation[1:-1].lower() if operation == 'cleanup': continue op_result = { 'group': operation, data_type: [], 'statistics': {} } for _, name, val in lines: name = name.strip() val = val.strip() # Drop ">" from ">1000" if name.startswith('>'): name = name[1:] val = float(val) if '.' in val else int(val) if name.isdigit(): if val: op_result[data_type].append((int(name), val)) else: if '(us)' in name: name = name.replace('(us)', '(ms)') val /= 1000.0 op_result['statistics'][name] = val result['groups'][operation] = op_result return result def _CumulativeSum(xs): total = 0 for x in xs: total += x yield total def _WeightedQuantile(x, weights, p): """Weighted quantile measurement for an ordered list. This method interpolates to the higher value when the quantile is not a direct member of the list. This works well for YCSB, since latencies are floored. Args: x: List of values. weights: List of numeric weights. p: float. Desired quantile in the interval [0, 1]. Returns: float. Raises: ValueError: When 'x' and 'weights' are not the same length, or 'p' is not in the interval [0, 1]. """ if len(x) != len(weights): raise ValueError('Lengths do not match: {0} != {1}'.format( len(x), len(weights))) if p < 0 or p > 1: raise ValueError('Invalid quantile: {0}'.format(p)) n = sum(weights) target = n * float(p) cumulative = list(_CumulativeSum(weights)) # Find the first cumulative weight >= target i = bisect.bisect_left(cumulative, target) if i == len(x): return x[-1] else: return x[i] def _PercentilesFromHistogram(ycsb_histogram, percentiles=_DEFAULT_PERCENTILES): """Calculate percentiles for from a YCSB histogram. Args: ycsb_histogram: List of (time_ms, frequency) tuples. percentiles: iterable of floats, in the interval [0, 100]. Returns: dict, mapping from percentile to value. """ result = collections.OrderedDict() histogram = sorted(ycsb_histogram) for percentile in percentiles: if percentile < 0 or percentile > 100: raise ValueError('Invalid percentile: {0}'.format(percentile)) if math.modf(percentile)[0] < 1e-7: percentile = int(percentile) label = 'p{0}'.format(percentile) latencies, freqs = zip(*histogram) time_ms = _WeightedQuantile(latencies, freqs, percentile * 0.01) result[label] = time_ms return result def _CombineResults(result_list, combine_histograms=True): """Combine results from multiple YCSB clients. Reduces a list of YCSB results (the output of ParseResults) into a single result. Histogram bin counts, operation counts, and throughput are summed; RunTime is replaced by the maximum runtime of any result. Args: result_list: List of ParseResults outputs. combine_histograms: If true, histogram bins are summed across results. If not, no histogram will be returned. Defaults to True. Returns: A dictionary, as returned by ParseResults. """ def DropUnaggregated(result): """Remove statistics which 'operators' specify should not be combined.""" drop_keys = {k for k, v in AGGREGATE_OPERATORS.iteritems() if v is None} for group in result['groups'].itervalues(): for k in drop_keys: group['statistics'].pop(k, None) def CombineHistograms(hist1, hist2): h1 = dict(hist1) h2 = dict(hist2) keys = sorted(frozenset(h1) | frozenset(h2)) result = [] for k in keys: result.append((k, h1.get(k, 0) + h2.get(k, 0))) return result result = copy.deepcopy(result_list[0]) DropUnaggregated(result) for indiv in result_list[1:]: for group_name, group in indiv['groups'].iteritems(): if group_name not in result['groups']: logging.warn('Found result group "%s" in individual YCSB result, ' 'but not in accumulator.', group_name) result['groups'][group_name] = copy.deepcopy(group) continue # Combine reported statistics. # If no combining operator is defined, the statistic is skipped. # Otherwise, the aggregated value is either: # * The value in 'indiv', if the statistic is not present in 'result' or # * AGGREGATE_OPERATORS[statistic](result_value, indiv_value) for k, v in group['statistics'].iteritems(): if k not in AGGREGATE_OPERATORS: logging.warn('No operator for "%s". Skipping aggregation.', k) continue elif AGGREGATE_OPERATORS[k] is None: # Drop result['groups'][group_name]['statistics'].pop(k, None) continue elif k not in result['groups'][group_name]['statistics']: logging.warn('Found statistic "%s.%s" in individual YCSB result, ' 'but not in accumulator.', group_name, k) result['groups'][group_name]['statistics'][k] = copy.deepcopy(v) continue op = AGGREGATE_OPERATORS[k] result['groups'][group_name]['statistics'][k] = ( op(result['groups'][group_name]['statistics'][k], v)) if combine_histograms: result['groups'][group_name]['histogram'] = CombineHistograms( result['groups'][group_name]['histogram'], group['histogram']) else: result['groups'][group_name].pop('histogram', None) result['client'] = ' '.join((result['client'], indiv['client'])) result['command_line'] = ';'.join((result['command_line'], indiv['command_line'])) if 'target' in result and 'target' in indiv: result['target'] += indiv['target'] return result def _ParseWorkload(contents): """Parse a YCSB workload file. YCSB workloads are Java .properties format. http://en.wikipedia.org/wiki/.properties This function does not support all .properties syntax, in particular escaped newlines. Args: contents: str. Contents of the file. Returns: dict mapping from property key to property value for each property found in 'contents'. """ fp = io.BytesIO(contents) result = {} for line in fp: if (line.strip() and not line.lstrip().startswith('#') and not line.lstrip().startswith('!')): k, v = re.split(r'\s*[:=]\s*', line, maxsplit=1) result[k] = v.strip() return result def _CreateSamples(ycsb_result, include_histogram=True, **kwargs): """Create PKB samples from a YCSB result. Args: ycsb_result: dict. Result of ParseResults. include_histogram: bool. If True, include records for each histogram bin. **kwargs: Base metadata for each sample. Returns: List of sample.Sample objects. """ stage = 'load' if ycsb_result['command_line'].endswith('-load') else 'run' base_metadata = {'command_line': ycsb_result['command_line'], 'stage': stage} base_metadata.update(kwargs) for group_name, group in ycsb_result['groups'].iteritems(): meta = base_metadata.copy() meta['operation'] = group_name for statistic, value in group['statistics'].iteritems(): if value is None: continue unit = '' m = re.match(r'^(.*) *\((us|ms|ops/sec)\)$', statistic) if m: statistic = m.group(1) unit = m.group(2) yield sample.Sample(' '.join([group_name, statistic]), value, unit, meta) if group['histogram']: percentiles = _PercentilesFromHistogram(group['histogram']) for label, value in percentiles.iteritems(): yield sample.Sample(' '.join([group_name, label, 'latency']), value, 'ms', meta) if include_histogram: for time_ms, count in group['histogram']: yield sample.Sample( '{0}_latency_histogram_{1}_ms'.format(group_name, time_ms), count, 'count', meta) class YCSBExecutor(object): """Load data and run benchmarks using YCSB. See core/src/main/java/com/yahoo/ycsb/workloads/CoreWorkload.java for attribute descriptions. Attributes: database: str. loaded: boolean. If the database is already loaded. parameters: dict. May contain the following, plus database-specific fields (e.g., columnfamily for HBase). threads: int. target: int. fieldcount: int. fieldlengthdistribution: str. readallfields: boolean. writeallfields: boolean. readproportion: float. updateproportion: float. scanproportion: float. readmodifywriteproportion: float. requestdistribution: str. maxscanlength: int. Number of records to scan. scanlengthdistribution: str. insertorder: str. hotspotdatafraction: float. perclientparam: list. shardkeyspace: boolean. Default to False, indicates if clients should have their own keyspace. """ FLAG_ATTRIBUTES = 'cp', 'jvm-args', 'target', 'threads' def __init__(self, database, parameter_files=None, **kwargs): self.database = database self.loaded = False self.parameter_files = parameter_files or [] self.parameters = kwargs.copy() # Self-defined parameters, pop them out of self.parameters, so they # are not passed to ycsb commands self.perclientparam = self.parameters.pop('perclientparam', None) self.shardkeyspace = self.parameters.pop('shardkeyspace', False) def _BuildCommand(self, command_name, parameter_files=None, **kwargs): command = [YCSB_EXE, command_name, self.database] parameters = self.parameters.copy() parameters.update(kwargs) # These are passed as flags rather than properties, so they # are handled differently. for flag in self.FLAG_ATTRIBUTES: value = parameters.pop(flag, None) if value is not None: command.extend(('-{0}'.format(flag), str(value))) for param_file in list(self.parameter_files) + list(parameter_files or []): command.extend(('-P', param_file)) for parameter, value in parameters.iteritems(): command.extend(('-p', '{0}={1}'.format(parameter, value))) command.append('-p measurementtype=histogram') return 'cd %s; %s' % (YCSB_DIR, ' '.join(command)) @property def _default_preload_threads(self): """The default number of threads to use for pre-populating the DB.""" if FLAGS['ycsb_preload_threads'].present: return FLAGS.ycsb_preload_threads return DEFAULT_PRELOAD_THREADS def _Load(self, vm, **kwargs): """Execute 'ycsb load' on 'vm'.""" kwargs.setdefault('threads', self._default_preload_threads) kwargs.setdefault('recordcount', FLAGS.ycsb_record_count) for pv in FLAGS.ycsb_load_parameters: param, value = pv.split('=', 1) kwargs[param] = value command = self._BuildCommand('load', **kwargs) stdout, stderr = vm.RobustRemoteCommand(command) return ParseResults(str(stderr + stdout)) def _LoadThreaded(self, vms, workload_file, **kwargs): """Runs "Load" in parallel for each VM in VMs. Args: vms: List of virtual machine instances. client nodes. workload_file: YCSB Workload file to use. **kwargs: Additional key-value parameters to pass to YCSB. Returns: List of sample.Sample objects. """ results = [] remote_path = posixpath.join(INSTALL_DIR, os.path.basename(workload_file)) kwargs.setdefault('threads', self._default_preload_threads) kwargs.setdefault('recordcount', FLAGS.ycsb_record_count) with open(workload_file) as fp: workload_meta = _ParseWorkload(fp.read()) workload_meta.update(kwargs) workload_meta.update(stage='load', clients=len(vms) * kwargs['threads'], threads_per_client_vm=kwargs['threads'], workload_name=os.path.basename(workload_file)) self.workload_meta = workload_meta record_count = int(workload_meta.get('recordcount', '1000')) n_per_client = long(record_count) // len(vms) loader_counts = [n_per_client + (1 if i < (record_count % len(vms)) else 0) for i in xrange(len(vms))] def PushWorkload(vm): vm.PushFile(workload_file, remote_path) vm_util.RunThreaded(PushWorkload, vms) kwargs['parameter_files'] = [remote_path] def _Load(loader_index): start = sum(loader_counts[:loader_index]) kw = copy.deepcopy(kwargs) kw.update(insertstart=start, insertcount=loader_counts[loader_index]) if self.perclientparam is not None: kw.update(self.perclientparam[loader_index]) results.append(self._Load(vms[loader_index], **kw)) logging.info('VM %d (%s) finished', loader_index, vms[loader_index]) start = time.time() vm_util.RunThreaded(_Load, range(len(vms))) events.record_event.send( type(self).__name__, event='load', start_timestamp=start, end_timestamp=time.time(), metadata=copy.deepcopy(kwargs)) if len(results) != len(vms): raise IOError('Missing results: only {0}/{1} reported\n{2}'.format( len(results), len(vms), results)) samples = [] if FLAGS.ycsb_include_individual_results and len(results) > 1: for i, result in enumerate(results): samples.extend(_CreateSamples( result, result_type='individual', result_index=i, include_histogram=FLAGS.ycsb_histogram, **workload_meta)) combined = _CombineResults(results) samples.extend(_CreateSamples( combined, result_type='combined', include_histogram=FLAGS.ycsb_histogram, **workload_meta)) return samples def _Run(self, vm, **kwargs): """Run a single workload from a client vm.""" for pv in FLAGS.ycsb_run_parameters: param, value = pv.split('=', 1) kwargs[param] = value command = self._BuildCommand('run', **kwargs) # YCSB version greater than 0.7.0 output some of the # info we need to stderr. So we have to combine these 2 # output to get expected results. stdout, stderr = vm.RobustRemoteCommand(command) return ParseResults(str(stderr + stdout)) def _RunThreaded(self, vms, **kwargs): """Run a single workload using `vms`.""" target = kwargs.pop('target', None) if target is not None: target_per_client = target // len(vms) targets = [target_per_client + (1 if i < (target % len(vms)) else 0) for i in xrange(len(vms))] else: targets = [target for _ in vms] results = [] if self.shardkeyspace: record_count = int(self.workload_meta.get('recordcount', '1000')) n_per_client = long(record_count) // len(vms) loader_counts = [n_per_client + (1 if i < (record_count % len(vms)) else 0) for i in xrange(len(vms))] def _Run(loader_index): vm = vms[loader_index] params = copy.deepcopy(kwargs) params['target'] = targets[loader_index] if self.perclientparam is not None: params.update(self.perclientparam[loader_index]) if self.shardkeyspace: start = sum(loader_counts[:loader_index]) end = start + loader_counts[loader_index] params.update(insertstart=start, recordcount=end) results.append(self._Run(vm, **params)) logging.info('VM %d (%s) finished', loader_index, vm) vm_util.RunThreaded(_Run, range(len(vms))) if len(results) != len(vms): raise IOError('Missing results: only {0}/{1} reported\n{2}'.format( len(results), len(vms), results)) return results def RunStaircaseLoads(self, vms, workloads, **kwargs): """Run each workload in 'workloads' in succession. A staircase load is applied for each workload file, for each entry in ycsb_threads_per_client. Args: vms: List of VirtualMachine objects to generate load from. **kwargs: Additional parameters to pass to each run. See constructor for options. Returns: List of sample.Sample objects. """ all_results = [] for workload_index, workload_file in enumerate(workloads): parameters = {'operationcount': FLAGS.ycsb_operation_count, 'recordcount': FLAGS.ycsb_record_count} if FLAGS.ycsb_timelimit: parameters['maxexecutiontime'] = FLAGS.ycsb_timelimit parameters.update(kwargs) remote_path = posixpath.join(INSTALL_DIR, os.path.basename(workload_file)) with open(workload_file) as fp: workload_meta = _ParseWorkload(fp.read()) workload_meta.update(kwargs) workload_meta.update(workload_name=os.path.basename(workload_file), workload_index=workload_index, stage='run') def PushWorkload(vm): vm.PushFile(workload_file, remote_path) vm_util.RunThreaded(PushWorkload, vms) parameters['parameter_files'] = [remote_path] for client_count in _GetThreadsPerLoaderList(): parameters['threads'] = client_count start = time.time() results = self._RunThreaded(vms, **parameters) events.record_event.send( type(self).__name__, event='run', start_timestamp=start, end_timestamp=time.time(), metadata=copy.deepcopy(parameters)) client_meta = workload_meta.copy() client_meta.update(clients=len(vms) * client_count, threads_per_client_vm=client_count) if FLAGS.ycsb_include_individual_results and len(results) > 1: for i, result in enumerate(results): all_results.extend(_CreateSamples( result, result_type='individual', result_index=i, include_histogram=FLAGS.ycsb_histogram, **client_meta)) combined = _CombineResults(results) all_results.extend(_CreateSamples( combined, result_type='combined', include_histogram=FLAGS.ycsb_histogram, **client_meta)) return all_results def LoadAndRun(self, vms, workloads=None, load_kwargs=None, run_kwargs=None): """Load data using YCSB, then run each workload/client count combination. Loads data using the workload defined by 'workloads', then executes YCSB for each workload file in 'workloads', for each client count defined in FLAGS.ycsb_threads_per_client. Generally database benchmarks using YCSB should only need to call this method. Args: vms: List of virtual machines. VMs to use to generate load. workloads: List of strings. Workload files to use. If unspecified, _GetWorkloadFileList() is used. load_kwargs: dict. Additional arguments to pass to the load stage. run_kwargs: dict. Additional arguments to pass to the run stage. Returns: List of sample.Sample objects. """ workloads = workloads or _GetWorkloadFileList() load_samples = [] assert workloads, 'no workloads' if FLAGS.ycsb_reload_database or not self.loaded: load_samples += list(self._LoadThreaded( vms, workloads[0], **(load_kwargs or {}))) self.loaded = True run_samples = list(self.RunStaircaseLoads(vms, workloads, **(run_kwargs or {}))) if FLAGS.ycsb_load_samples: return load_samples + run_samples else: return run_samples
apache-2.0
dfstrauss/textmagic-sms-api-python
textmagic/gsm0338.py
3
5571
""" Check whether a string consists entirely of characters in the GSM 03.38 character set. Pass your Unicode or ASCII string to is_gsm() to determine whether all characters in the string are from the GSM 03.38 character set. """ #GSM 03.38 character set mapping to Unicode is specified here: # http://unicode.org/Public/MAPPINGS/ETSI/GSM0338.TXT #This code was translated from the C++ snippet at: # http://stackoverflow.com/questions/27599/reliable-sms-unicode-gsm-encoding-in-php #contributed by Magnus Westin: # http://stackoverflow.com/users/2957/magnus-westin UCS2_TO_GSM_LOOKUP_TABLE_SIZE = 0x100 NON_GSM = 0x80 UCS2_GCL_RANGE = 24 UCS2_GREEK_CAPITAL_LETTER_ALPHA = 0x0391 EXTEND = 0x001B # note that the ` character is mapped to ' so that all characters that can be typed on # a standard north american keyboard can be converted to the GSM default character set ucs2_to_gsm = [ # +0x0 +0x1 +0x2 +0x3 +0x4 +0x5 +0x6 +0x7 NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, # 0x00 NON_GSM, NON_GSM, 0x0a, NON_GSM, NON_GSM, 0x0d, NON_GSM, NON_GSM, # 0x08 NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, # 0x10 NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, # 0x18 0x20, 0x21, 0x22, 0x23, 0x02, 0x25, 0x26, 0x27, # 0x20 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, # 0x28 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, # 0x30 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, # 0x38 0x00, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, # 0x40 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, # 0x48 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, # 0x50 0x58, 0x59, 0x5a, EXTEND, EXTEND, EXTEND, EXTEND, 0x11, # 0x58 0x27, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, # 0x60 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, # 0x68 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, # 0x70 0x78, 0x79, 0x7a, EXTEND, EXTEND, EXTEND, EXTEND, NON_GSM, # 0x78 NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, # 0x80 NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, # 0x88 NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, # 0x90 NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, # 0x98 NON_GSM, 0x40, NON_GSM, 0x01, 0x24, 0x03, NON_GSM, 0x5f, # 0xA0 NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, # 0xA8 NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, # 0xB0 NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, 0x60, # 0xB8 NON_GSM, NON_GSM, NON_GSM, NON_GSM, 0x5b, 0x0e, 0x1c, 0x09, # 0xC0 NON_GSM, 0x1f, NON_GSM, NON_GSM, NON_GSM, NON_GSM, NON_GSM, 0x60, # 0xC8 NON_GSM, 0x5d, NON_GSM, NON_GSM, NON_GSM, NON_GSM, 0x5c, NON_GSM, # 0xD0 0x0b, NON_GSM, NON_GSM, NON_GSM, 0x5e, NON_GSM, NON_GSM, 0x1e, # 0xD8 0x7f, NON_GSM, NON_GSM, NON_GSM, 0x7b, 0x0f, 0x1d, NON_GSM, # 0xE0 0x04, 0x05, NON_GSM, NON_GSM, 0x07, NON_GSM, NON_GSM, NON_GSM, # 0xE8 NON_GSM, 0x7d, 0x08, NON_GSM, NON_GSM, NON_GSM, 0x7c, NON_GSM, # 0xF0 0x0c, 0x06, NON_GSM, NON_GSM, 0x7e, NON_GSM, NON_GSM, NON_GSM # 0xF8 ] ucs2_gcl_to_gsm = [ 0x41, # Alpha A 0x42, # Beta B 0x13, # Gamma 0x10, # Delta 0x45, # Epsilon E 0x5A, # Zeta Z 0x48, # Eta H 0x19, # Theta 0x49, # Iota I 0x4B, # Kappa K 0x14, # Lambda 0x4D, # Mu M 0x4E, # Nu N 0x1A, # Xi 0x4F, # Omicron O 0x16, # Pi 0x50, # Rho P NON_GSM, 0x18, # Sigma 0x54, # Tau T 0x59, # Upsilon Y 0x12, # Phi 0x58, # Chi X 0x17, # Psi 0x15 # Omega ] def not_gsm(char): """Is this character NOT in the GSM 03.38 character set?""" result = True ordinal = ord(char) if(ordinal < UCS2_TO_GSM_LOOKUP_TABLE_SIZE): result = (ucs2_to_gsm[ordinal] == NON_GSM) elif((ordinal >= UCS2_GREEK_CAPITAL_LETTER_ALPHA) and (ordinal <= (UCS2_GREEK_CAPITAL_LETTER_ALPHA + UCS2_GCL_RANGE))): result = (ucs2_gcl_to_gsm[ordinal - UCS2_GREEK_CAPITAL_LETTER_ALPHA] == NON_GSM) elif(ordinal == 0x20AC): # Euro sign result = False; return result; def is_gsm(string): """Does the string consist entirely of GSM03.38 characters?""" assert isinstance(string, basestring) for ch in string: if (not_gsm(ch)): return False return True
bsd-3-clause
jlmadurga/django-oscar
src/oscar/apps/offer/migrations/0001_initial.py
52
15207
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations import oscar.models.fields.autoslugfield from decimal import Decimal import oscar.models.fields from django.conf import settings class Migration(migrations.Migration): dependencies = [ ('catalogue', '0001_initial'), migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Benefit', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('type', models.CharField(verbose_name='Type', max_length=128, blank=True, choices=[('Percentage', "Discount is a percentage off of the product's value"), ('Absolute', "Discount is a fixed amount off of the product's value"), ('Multibuy', 'Discount is to give the cheapest product for free'), ('Fixed price', 'Get the products that meet the condition for a fixed price'), ('Shipping absolute', 'Discount is a fixed amount of the shipping cost'), ('Shipping fixed price', 'Get shipping for a fixed price'), ('Shipping percentage', 'Discount is a percentage off of the shipping cost')])), ('value', oscar.models.fields.PositiveDecimalField(max_digits=12, decimal_places=2, blank=True, verbose_name='Value', null=True)), ('max_affected_items', models.PositiveIntegerField(verbose_name='Max Affected Items', blank=True, help_text='Set this to prevent the discount consuming all items within the range that are in the basket.', null=True)), ('proxy_class', oscar.models.fields.NullCharField(unique=True, verbose_name='Custom class', default=None, max_length=255)), ], options={ 'verbose_name_plural': 'Benefits', 'verbose_name': 'Benefit', }, bases=(models.Model,), ), migrations.CreateModel( name='Condition', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('type', models.CharField(verbose_name='Type', max_length=128, blank=True, choices=[('Count', 'Depends on number of items in basket that are in condition range'), ('Value', 'Depends on value of items in basket that are in condition range'), ('Coverage', 'Needs to contain a set number of DISTINCT items from the condition range')])), ('value', oscar.models.fields.PositiveDecimalField(max_digits=12, decimal_places=2, blank=True, verbose_name='Value', null=True)), ('proxy_class', oscar.models.fields.NullCharField(unique=True, verbose_name='Custom class', default=None, max_length=255)), ], options={ 'verbose_name_plural': 'Conditions', 'verbose_name': 'Condition', }, bases=(models.Model,), ), migrations.CreateModel( name='ConditionalOffer', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(verbose_name='Name', unique=True, max_length=128, help_text="This is displayed within the customer's basket")), ('slug', oscar.models.fields.autoslugfield.AutoSlugField(populate_from='name', unique=True, verbose_name='Slug', max_length=128, editable=False, blank=True)), ('description', models.TextField(verbose_name='Description', help_text='This is displayed on the offer browsing page', blank=True)), ('offer_type', models.CharField(default='Site', max_length=128, verbose_name='Type', choices=[('Site', 'Site offer - available to all users'), ('Voucher', 'Voucher offer - only available after entering the appropriate voucher code'), ('User', 'User offer - available to certain types of user'), ('Session', 'Session offer - temporary offer, available for a user for the duration of their session')])), ('status', models.CharField(default='Open', max_length=64, verbose_name='Status')), ('priority', models.IntegerField(default=0, verbose_name='Priority', help_text='The highest priority offers are applied first')), ('start_datetime', models.DateTimeField(blank=True, verbose_name='Start date', null=True)), ('end_datetime', models.DateTimeField(verbose_name='End date', blank=True, help_text="Offers are active until the end of the 'end date'", null=True)), ('max_global_applications', models.PositiveIntegerField(verbose_name='Max global applications', blank=True, help_text='The number of times this offer can be used before it is unavailable', null=True)), ('max_user_applications', models.PositiveIntegerField(verbose_name='Max user applications', blank=True, help_text='The number of times a single user can use this offer', null=True)), ('max_basket_applications', models.PositiveIntegerField(verbose_name='Max basket applications', blank=True, help_text='The number of times this offer can be applied to a basket (and order)', null=True)), ('max_discount', models.DecimalField(verbose_name='Max discount', max_digits=12, decimal_places=2, null=True, help_text='When an offer has given more discount to orders than this threshold, then the offer becomes unavailable', blank=True)), ('total_discount', models.DecimalField(default=Decimal('0.00'), max_digits=12, decimal_places=2, verbose_name='Total Discount')), ('num_applications', models.PositiveIntegerField(default=0, verbose_name='Number of applications')), ('num_orders', models.PositiveIntegerField(default=0, verbose_name='Number of Orders')), ('redirect_url', oscar.models.fields.ExtendedURLField(verbose_name='URL redirect (optional)', blank=True)), ('date_created', models.DateTimeField(auto_now_add=True, verbose_name='Date Created')), ('benefit', models.ForeignKey(verbose_name='Benefit', to='offer.Benefit')), ('condition', models.ForeignKey(verbose_name='Condition', to='offer.Condition')), ], options={ 'ordering': ['-priority'], 'verbose_name_plural': 'Conditional offers', 'verbose_name': 'Conditional offer', }, bases=(models.Model,), ), migrations.CreateModel( name='Range', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(unique=True, max_length=128, verbose_name='Name')), ('slug', oscar.models.fields.autoslugfield.AutoSlugField(populate_from='name', unique=True, verbose_name='Slug', max_length=128, editable=False, blank=True)), ('description', models.TextField(blank=True)), ('is_public', models.BooleanField(default=False, verbose_name='Is public?', help_text='Public ranges have a customer-facing page')), ('includes_all_products', models.BooleanField(default=False, verbose_name='Includes all products?')), ('proxy_class', oscar.models.fields.NullCharField(unique=True, verbose_name='Custom class', default=None, max_length=255)), ('date_created', models.DateTimeField(auto_now_add=True, verbose_name='Date Created')), ('classes', models.ManyToManyField(related_name='classes', verbose_name='Product Types', to='catalogue.ProductClass', blank=True)), ('excluded_products', models.ManyToManyField(related_name='excludes', verbose_name='Excluded Products', to='catalogue.Product', blank=True)), ('included_categories', models.ManyToManyField(related_name='includes', verbose_name='Included Categories', to='catalogue.Category', blank=True)), ], options={ 'verbose_name_plural': 'Ranges', 'verbose_name': 'Range', }, bases=(models.Model,), ), migrations.CreateModel( name='RangeProduct', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('display_order', models.IntegerField(default=0)), ('product', models.ForeignKey(to='catalogue.Product')), ('range', models.ForeignKey(to='offer.Range')), ], options={ }, bases=(models.Model,), ), migrations.CreateModel( name='RangeProductFileUpload', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('filepath', models.CharField(max_length=255, verbose_name='File Path')), ('size', models.PositiveIntegerField(verbose_name='Size')), ('date_uploaded', models.DateTimeField(auto_now_add=True, verbose_name='Date Uploaded')), ('status', models.CharField(default='Pending', max_length=32, verbose_name='Status', choices=[('Pending', 'Pending'), ('Failed', 'Failed'), ('Processed', 'Processed')])), ('error_message', models.CharField(max_length=255, verbose_name='Error Message', blank=True)), ('date_processed', models.DateTimeField(verbose_name='Date Processed', null=True)), ('num_new_skus', models.PositiveIntegerField(verbose_name='Number of New SKUs', null=True)), ('num_unknown_skus', models.PositiveIntegerField(verbose_name='Number of Unknown SKUs', null=True)), ('num_duplicate_skus', models.PositiveIntegerField(verbose_name='Number of Duplicate SKUs', null=True)), ('range', models.ForeignKey(verbose_name='Range', related_name='file_uploads', to='offer.Range')), ('uploaded_by', models.ForeignKey(verbose_name='Uploaded By', to=settings.AUTH_USER_MODEL)), ], options={ 'ordering': ('-date_uploaded',), 'verbose_name_plural': 'Range Product Uploaded Files', 'verbose_name': 'Range Product Uploaded File', }, bases=(models.Model,), ), migrations.AlterUniqueTogether( name='rangeproduct', unique_together=set([('range', 'product')]), ), migrations.AddField( model_name='range', name='included_products', field=models.ManyToManyField(related_name='includes', verbose_name='Included Products', to='catalogue.Product', through='offer.RangeProduct', blank=True), preserve_default=True, ), migrations.AddField( model_name='condition', name='range', field=models.ForeignKey(null=True, verbose_name='Range', to='offer.Range', blank=True), preserve_default=True, ), migrations.AddField( model_name='benefit', name='range', field=models.ForeignKey(null=True, verbose_name='Range', to='offer.Range', blank=True), preserve_default=True, ), migrations.CreateModel( name='AbsoluteDiscountBenefit', fields=[ ], options={ 'verbose_name_plural': 'Absolute discount benefits', 'verbose_name': 'Absolute discount benefit', 'proxy': True, }, bases=('offer.benefit',), ), migrations.CreateModel( name='CountCondition', fields=[ ], options={ 'verbose_name_plural': 'Count conditions', 'verbose_name': 'Count condition', 'proxy': True, }, bases=('offer.condition',), ), migrations.CreateModel( name='CoverageCondition', fields=[ ], options={ 'verbose_name_plural': 'Coverage Conditions', 'verbose_name': 'Coverage Condition', 'proxy': True, }, bases=('offer.condition',), ), migrations.CreateModel( name='FixedPriceBenefit', fields=[ ], options={ 'verbose_name_plural': 'Fixed price benefits', 'verbose_name': 'Fixed price benefit', 'proxy': True, }, bases=('offer.benefit',), ), migrations.CreateModel( name='MultibuyDiscountBenefit', fields=[ ], options={ 'verbose_name_plural': 'Multibuy discount benefits', 'verbose_name': 'Multibuy discount benefit', 'proxy': True, }, bases=('offer.benefit',), ), migrations.CreateModel( name='PercentageDiscountBenefit', fields=[ ], options={ 'verbose_name_plural': 'Percentage discount benefits', 'verbose_name': 'Percentage discount benefit', 'proxy': True, }, bases=('offer.benefit',), ), migrations.CreateModel( name='ShippingBenefit', fields=[ ], options={ 'proxy': True, }, bases=('offer.benefit',), ), migrations.CreateModel( name='ShippingAbsoluteDiscountBenefit', fields=[ ], options={ 'verbose_name_plural': 'Shipping absolute discount benefits', 'verbose_name': 'Shipping absolute discount benefit', 'proxy': True, }, bases=('offer.shippingbenefit',), ), migrations.CreateModel( name='ShippingFixedPriceBenefit', fields=[ ], options={ 'verbose_name_plural': 'Fixed price shipping benefits', 'verbose_name': 'Fixed price shipping benefit', 'proxy': True, }, bases=('offer.shippingbenefit',), ), migrations.CreateModel( name='ShippingPercentageDiscountBenefit', fields=[ ], options={ 'verbose_name_plural': 'Shipping percentage discount benefits', 'verbose_name': 'Shipping percentage discount benefit', 'proxy': True, }, bases=('offer.shippingbenefit',), ), migrations.CreateModel( name='ValueCondition', fields=[ ], options={ 'verbose_name_plural': 'Value conditions', 'verbose_name': 'Value condition', 'proxy': True, }, bases=('offer.condition',), ), ]
bsd-3-clause
jsoref/django
django/db/backends/postgresql/schema.py
202
4100
import psycopg2 from django.db.backends.base.schema import BaseDatabaseSchemaEditor class DatabaseSchemaEditor(BaseDatabaseSchemaEditor): sql_alter_column_type = "ALTER COLUMN %(column)s TYPE %(type)s USING %(column)s::%(type)s" sql_create_sequence = "CREATE SEQUENCE %(sequence)s" sql_delete_sequence = "DROP SEQUENCE IF EXISTS %(sequence)s CASCADE" sql_set_sequence_max = "SELECT setval('%(sequence)s', MAX(%(column)s)) FROM %(table)s" sql_create_varchar_index = "CREATE INDEX %(name)s ON %(table)s (%(columns)s varchar_pattern_ops)%(extra)s" sql_create_text_index = "CREATE INDEX %(name)s ON %(table)s (%(columns)s text_pattern_ops)%(extra)s" def quote_value(self, value): return psycopg2.extensions.adapt(value) def _model_indexes_sql(self, model): output = super(DatabaseSchemaEditor, self)._model_indexes_sql(model) if not model._meta.managed or model._meta.proxy or model._meta.swapped: return output for field in model._meta.local_fields: db_type = field.db_type(connection=self.connection) if db_type is not None and (field.db_index or field.unique): # Fields with database column types of `varchar` and `text` need # a second index that specifies their operator class, which is # needed when performing correct LIKE queries outside the # C locale. See #12234. # # The same doesn't apply to array fields such as varchar[size] # and text[size], so skip them. if '[' in db_type: continue if db_type.startswith('varchar'): output.append(self._create_index_sql( model, [field], suffix='_like', sql=self.sql_create_varchar_index)) elif db_type.startswith('text'): output.append(self._create_index_sql( model, [field], suffix='_like', sql=self.sql_create_text_index)) return output def _alter_column_type_sql(self, table, old_field, new_field, new_type): """ Makes ALTER TYPE with SERIAL make sense. """ if new_type.lower() == "serial": column = new_field.column sequence_name = "%s_%s_seq" % (table, column) return ( ( self.sql_alter_column_type % { "column": self.quote_name(column), "type": "integer", }, [], ), [ ( self.sql_delete_sequence % { "sequence": self.quote_name(sequence_name), }, [], ), ( self.sql_create_sequence % { "sequence": self.quote_name(sequence_name), }, [], ), ( self.sql_alter_column % { "table": self.quote_name(table), "changes": self.sql_alter_column_default % { "column": self.quote_name(column), "default": "nextval('%s')" % self.quote_name(sequence_name), } }, [], ), ( self.sql_set_sequence_max % { "table": self.quote_name(table), "column": self.quote_name(column), "sequence": self.quote_name(sequence_name), }, [], ), ], ) else: return super(DatabaseSchemaEditor, self)._alter_column_type_sql( table, old_field, new_field, new_type )
bsd-3-clause
erjohnso/ansible
lib/ansible/modules/notification/hipchat.py
49
6387
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['stableinterface'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: hipchat version_added: "1.2" short_description: Send a message to Hipchat. description: - Send a message to a Hipchat room, with options to control the formatting. options: token: description: - API token. required: true room: description: - ID or name of the room. required: true from: description: - Name the message will appear to be sent from. Max length is 15 characters - above this it will be truncated. required: false default: Ansible msg: description: - The message body. required: true default: null color: description: - Background color for the message. required: false default: yellow choices: [ "yellow", "red", "green", "purple", "gray", "random" ] msg_format: description: - Message format. required: false default: text choices: [ "text", "html" ] notify: description: - If true, a notification will be triggered for users in the room. required: false default: 'yes' choices: [ "yes", "no" ] validate_certs: description: - If C(no), SSL certificates will not be validated. This should only be used on personally controlled sites using self-signed certificates. required: false default: 'yes' choices: ['yes', 'no'] version_added: 1.5.1 api: description: - API url if using a self-hosted hipchat server. For Hipchat API version 2 use the default URI with C(/v2) instead of C(/v1). required: false default: 'https://api.hipchat.com/v1' version_added: 1.6.0 requirements: [ ] author: "WAKAYAMA Shirou (@shirou), BOURDEL Paul (@pb8226)" ''' EXAMPLES = ''' - hipchat: room: notif msg: Ansible task finished # Use Hipchat API version 2 - hipchat: api: https://api.hipchat.com/v2/ token: OAUTH2_TOKEN room: notify msg: Ansible task finished ''' # =========================================== # HipChat module specific support methods. # import json import traceback from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.six.moves.urllib.parse import urlencode from ansible.module_utils.six.moves.urllib.request import pathname2url from ansible.module_utils._text import to_native from ansible.module_utils.urls import fetch_url DEFAULT_URI = "https://api.hipchat.com/v1" MSG_URI_V1 = "/rooms/message" NOTIFY_URI_V2 = "/room/{id_or_name}/notification" def send_msg_v1(module, token, room, msg_from, msg, msg_format='text', color='yellow', notify=False, api=MSG_URI_V1): '''sending message to hipchat v1 server''' params = {} params['room_id'] = room params['from'] = msg_from[:15] # max length is 15 params['message'] = msg params['message_format'] = msg_format params['color'] = color params['api'] = api params['notify'] = int(notify) url = api + MSG_URI_V1 + "?auth_token=%s" % (token) data = urlencode(params) if module.check_mode: # In check mode, exit before actually sending the message module.exit_json(changed=False) response, info = fetch_url(module, url, data=data) if info['status'] == 200: return response.read() else: module.fail_json(msg="failed to send message, return status=%s" % str(info['status'])) def send_msg_v2(module, token, room, msg_from, msg, msg_format='text', color='yellow', notify=False, api=NOTIFY_URI_V2): '''sending message to hipchat v2 server''' headers = {'Authorization': 'Bearer %s' % token, 'Content-Type': 'application/json'} body = dict() body['message'] = msg body['color'] = color body['message_format'] = msg_format body['notify'] = notify POST_URL = api + NOTIFY_URI_V2 url = POST_URL.replace('{id_or_name}', pathname2url(room)) data = json.dumps(body) if module.check_mode: # In check mode, exit before actually sending the message module.exit_json(changed=False) response, info = fetch_url(module, url, data=data, headers=headers, method='POST') # https://www.hipchat.com/docs/apiv2/method/send_room_notification shows # 204 to be the expected result code. if info['status'] in [200, 204]: return response.read() else: module.fail_json(msg="failed to send message, return status=%s" % str(info['status'])) # =========================================== # Module execution. # def main(): module = AnsibleModule( argument_spec=dict( token=dict(required=True, no_log=True), room=dict(required=True), msg=dict(required=True), msg_from=dict(default="Ansible", aliases=['from']), color=dict(default="yellow", choices=["yellow", "red", "green", "purple", "gray", "random"]), msg_format=dict(default="text", choices=["text", "html"]), notify=dict(default=True, type='bool'), validate_certs=dict(default='yes', type='bool'), api=dict(default=DEFAULT_URI), ), supports_check_mode=True ) token = module.params["token"] room = str(module.params["room"]) msg = module.params["msg"] msg_from = module.params["msg_from"] color = module.params["color"] msg_format = module.params["msg_format"] notify = module.params["notify"] api = module.params["api"] try: if api.find('/v2') != -1: send_msg_v2(module, token, room, msg_from, msg, msg_format, color, notify, api) else: send_msg_v1(module, token, room, msg_from, msg, msg_format, color, notify, api) except Exception as e: module.fail_json(msg="unable to send msg: %s" % to_native(e), exception=traceback.format_exc()) changed = True module.exit_json(changed=changed, room=room, msg_from=msg_from, msg=msg) if __name__ == '__main__': main()
gpl-3.0
jcrist/pydy
pydy/viz/shapes.py
4
19419
#!/usr/bin/env python __all__ = ['Cube', 'Cylinder', 'Cone', 'Sphere', 'Circle', 'Plane', 'Tetrahedron', 'Octahedron', 'Icosahedron', 'Torus', 'TorusKnot', 'Tube'] import numpy as np # This is a list of ColorKeywords from THREE.js THREE_COLORKEYWORDS = ['aliceblue', 'antiquewhite', 'aqua', 'aquamarine', 'azure', 'beige', 'bisque', 'black', 'blanchedalmond', 'blue', 'blueviolet', 'brown', 'burlywood', 'cadetblue', 'chartreuse', 'chocolate', 'coral', 'cornflowerblue', 'cornsilk', 'crimson', 'cyan', 'darkblue', 'darkcyan', 'darkgoldenrod', 'darkgray', 'darkgreen', 'darkgrey', 'darkkhaki', 'darkmagenta', 'darkolivegreen', 'darkorange', 'darkorchid', 'darkred', 'darksalmon', 'darkseagreen', 'darkslateblue', 'darkslategray', 'darkslategrey', 'darkturquoise', 'darkviolet', 'deeppink', 'deepskyblue', 'dimgray', 'dimgrey', 'dodgerblue', 'firebrick', 'floralwhite', 'forestgreen', 'fuchsia', 'gainsboro', 'ghostwhite', 'gold', 'goldenrod', 'gray', 'green', 'greenyellow', 'grey', 'honeydew', 'hotpink', 'indianred', 'indigo', 'ivory', 'khaki', 'lavender', 'lavenderblush', 'lawngreen', 'lemonchiffon', 'lightblue', 'lightcoral', 'lightcyan', 'lightgoldenrodyellow', 'lightgray', 'lightgreen', 'lightgrey', 'lightpink', 'lightsalmon', 'lightseagreen', 'lightskyblue', 'lightslategray', 'lightslategrey', 'lightsteelblue', 'lightyellow', 'lime', 'limegreen', 'linen', 'magenta', 'maroon', 'mediumaquamarine', 'mediumblue', 'mediumorchid', 'mediumpurple', 'mediumseagreen', 'mediumslateblue', 'mediumspringgreen', 'mediumturquoise', 'mediumvioletred', 'midnightblue', 'mintcream', 'mistyrose', 'moccasin', 'navajowhite', 'navy', 'oldlace', 'olive', 'olivedrab', 'orange', 'orangered', 'orchid', 'palegoldenrod', 'palegreen', 'paleturquoise', 'palevioletred', 'papayawhip', 'peachpuff', 'peru', 'pink', 'plum', 'powderblue', 'purple', 'red', 'rosybrown', 'royalblue', 'saddlebrown', 'salmon', 'sandybrown', 'seagreen', 'seashell', 'sienna', 'silver', 'skyblue', 'slateblue', 'slategray', 'slategrey', 'snow', 'springgreen', 'steelblue', 'tan', 'teal', 'thistle', 'tomato', 'turquoise', 'violet', 'wheat', 'white', 'whitesmoke', 'yellow', 'yellowgreen'] MATERIALS = ["default", "checkerboard", "metal", "dirt", "foil", "water", "grass"] class Shape(object): """Instantiates a shape. This is primarily used as a superclass for more specific shapes like Cube, Cylinder, Sphere etc. Shapes must be associated with a reference frame and a point using the VisualizationFrame class. Parameters ========== name : str, optional A name assigned to the shape. color : str, optional A color string from list of colors in THREE_COLORKEYWORDS Examples ======== >>> from pydy.viz.shapes import Shape >>> s = Shape() >>> s.name 'unnamed' >>> s.color 'grey' >>> s.name = 'my-shape1' >>> s.name 'my-shape1' >>> s.color = 'blue' >>> s.color 'blue' >>> a = Shape(name='my-shape2', color='red') >>> a.name 'my-shape2' >>> a.color 'red' """ def __init__(self, name='unnamed', color='grey', material="default"): self.name = name self.color = color self.material = material self.geometry_attrs = [] def __str__(self): attributes = ([self.__class__.__name__, self.name, 'color:' + self.color, 'material:' + self.material] + sorted([attr + ':{}'.format(getattr(self, attr)) for attr in self.geometry_attrs])) return ' '.join(['{}'] * len(attributes)).format(*attributes) def __repr__(self): return self.__class__.__name__ @property def name(self): """Returns the name attribute of the shape.""" return self._name @name.setter def name(self, new_name): """Sets the name attribute of the shape.""" if not isinstance(new_name, str): raise TypeError("'name' should be a valid str object.") else: self._name = new_name @property def color(self): """Returns the color attribute of the shape.""" return self._color @color.setter def color(self, new_color): """Sets the color attributes of the shape. This should be a valid three.js color keyword string.""" if new_color not in THREE_COLORKEYWORDS: msg = "'color' should be a valid Three.js colors string:\n{}" raise ValueError(msg.format('\n'.join(THREE_COLORKEYWORDS))) else: self._color = new_color @property def material(self): """Returns the material attribute of the shape.""" return self._material @material.setter def material(self, new_material): """Sets the material attribute of the shape, i.e. its shine, brightness, opacity etc.. The material should be a valid material from the listed MATERIALS. If a shape is attributed as "red" color, and "water" material, ideally it should have opacity and brightness properties like that of a red fluid. """ if new_material.lower() not in MATERIALS: msg = "'material' is not valid. Choose from:\n{}" raise ValueError(msg.format('\n'.join(MATERIALS))) else: self._material = new_material def generate_dict(self, constant_map={}): """Returns a dictionary containing all the data associated with the Shape. Parameters ========== constant_map : dictionary If any of the shape's geometry are defined as SymPy expressions, then this dictionary should map all SymPy Symbol's found in the expressions to floats. """ data_dict = {} data_dict['name'] = self.name data_dict['color'] = self.color data_dict['material'] = self.material data_dict['type'] = self.__repr__() for geom in self.geometry_attrs: atr = getattr(self, geom) try: data_dict[geom] = float(atr.subs(constant_map)) except AttributeError: # not a SymPy expression data_dict[geom] = atr except TypeError: # can't convert expression to float raise TypeError('{} is an expression, you '.format(atr) + 'must provide a mapping to numerical values.') return data_dict class Cube(Shape): """Instantiates a cube of a given size. Parameters ========== length : float or SymPy expression The length of the cube. Examples ======== >>> from pydy.viz.shapes import Cube >>> s = Cube(10.0) >>> s.name 'unnamed' >>> s.color 'grey' >>>s.length 10.0 >>> s.name = 'my-shape1' >>> s.name 'my-shape1' >>> s.color = 'blue' >>> s.color 'blue' >>> s.length = 12.0 >>> s.length 12.0 >>> a = Cube('my-shape2', 'red', length=10) >>> a.name 'my-shape2' >>> a.color 'red' >>> a.length 10.0 """ def __init__(self, length, **kwargs): super(Cube, self).__init__(**kwargs) self.geometry_attrs.append('length') self.length = length class Cylinder(Shape): """Instantiates a cylinder with given length and radius. Parameters ========== length : float or SymPy expression The length of the cylinder. radius : float or SymPy expression The radius of the cylinder. Examples ======== >>> from pydy.viz.shapes import Cylinder >>> s = Cylinder(10.0, 5.0) >>> s.name 'unnamed' >>> s.color 'grey' >>> s.length 10.0 >>> s.radius 5.0 >>> s.name = 'my-shape1' >>> s.name 'my-shape1' >>> s.color = 'blue' >>> s.color 'blue' >>> s.length = 12.0 >>> s.length 12.0 >>> s.radius = 6.0 >>> s.radius 6.0 >>> a = Cylinder(10.0, 5.0, name='my-shape2', color='red') >>> a.name 'my-shape2' >>> a.color 'red' >>> a.length 10.0 >>> a.radius 5.0 """ def __init__(self, length, radius, **kwargs): super(Cylinder, self).__init__(**kwargs) self.geometry_attrs += ['length', 'radius'] self.length = length self.radius = radius class Cone(Shape): """Instantiates a cone with given length and base radius. Parameters ========== length : float or SymPy expression The length of the cone. radius : float or SymPy expression The base radius of the cone. Examples ======== >>> from pydy.viz.shapes import Cone >>> s = Cone(10.0, 5.0) >>> s.name 'unnamed' >>> s.color 'grey' >>> s.length 10.0 >>> s.radius 5.0 >>> s.name = 'my-shape1' >>> s.name 'my-shape1' >>> s.color = 'blue' >>> s.color 'blue' >>> s.length = 12.0 >>> s.length 12.0 >>> s.radius = 6.0 >>> s.radius 6.0 >>> a = Cone(10.0, 5.0, name='my-shape2', color='red') >>> a.name 'my-shape2' >>> a.color 'red' >>> a.length 10.0 >>> a.radius 5.0 """ def __init__(self, length, radius, **kwargs): super(Cone, self).__init__(**kwargs) self.geometry_attrs += ['length', 'radius'] self.length = length self.radius = radius class Sphere(Shape): """Instantiates a sphere with a given radius. Parameters ========== radius : float or SymPy expression The radius of the sphere. Examples ======== >>> from pydy.viz.shapes import Sphere >>> s = Sphere(10.0) >>> s.name 'unnamed' >>> s.color 'grey' >>>s.radius 10.0 >>> s.name = 'my-shape1' >>> s.name 'my-shape1' >>> s.color = 'blue' >>> s.color 'blue' >>> s.radius = 12.0 >>> s.radius 12.0 >>> a = Sphere(10.0, name='my-shape2', color='red') >>> a.name 'my-shape2' >>> a.color 'red' >>> a.radius 10.0 """ def __init__(self, radius=10.0, **kwargs): super(Sphere, self).__init__(**kwargs) self.geometry_attrs += ['radius'] self.radius = radius class Circle(Sphere): """Instantiates a circle with a given radius. Parameters ========== radius : float or SymPy Expression The radius of the circle. Examples ======== >>> from pydy.viz.shapes import Circle >>> s = Circle(10.0) >>> s.name 'unnamed' >>> s.color 'grey' >>>s.radius 10.0 >>> s.name = 'my-shape1' >>> s.name 'my-shape1' >>> s.color = 'blue' >>> s.color 'blue' >>> s.radius = 12.0 >>> s.radius 12.0 >>> a = Circle(10.0, name='my-shape2', color='red') >>> a.name 'my-shape2' >>> a.color 'red' >>> a.radius 10.0 """ class Plane(Shape): """Instantiates a plane with a given length and width. Parameters ========== length : float or SymPy expression The length of the plane. width : float or SymPy expression The width of the plane. Examples ======== >>> from pydy.viz.shapes import Plane >>> s = Plane(10.0, 5.0) >>> s.name 'unnamed' >>> s.color 'grey' >>> s.length 10.0 >>> s.width 5.0 >>> s.name = 'my-shape1' >>> s.name 'my-shape1' >>> s.color = 'blue' >>> s.color 'blue' >>> s.length = 12.0 >>> s.length 12.0 >>> s.width = 6.0 >>> s.width 6.0 >>> a = Plane(10.0, 5.0, name='my-shape2', color='red') >>> a.name 'my-shape2' >>> a.color 'red' >>> a.length 10.0 >>> a.width 5.0 """ def __init__(self, length=10.0, width=5.0, **kwargs): super(Plane, self).__init__(**kwargs) self.geometry_attrs += ['length', 'width'] self.length = length self.width = width class Tetrahedron(Sphere): """Instantiates a Tetrahedron inscribed in a given radius circle. Parameters ========== radius : float or SymPy expression The radius of the circum-scribing sphere of around the tetrahedron. Examples ======== >>> from pydy.viz.shapes import Tetrahedron >>> s = Tetrahedron(10.0) >>> s.name 'unnamed' >>> s.color 'grey' >>>s.radius 10.0 >>> s.name = 'my-shape1' >>> s.name 'my-shape1' >>> s.color = 'blue' >>> s.color 'blue' >>> s.radius = 12.0 >>> s.radius 12.0 >>> a = Tetrahedron(10.0, name='my-shape2', color='red') >>> a.name 'my-shape2' >>> a.color 'red' >>> a.radius 10.0 """ class Octahedron(Sphere): """Instantiaties an Octahedron inscribed in a circle of the given radius. Parameters ========== radius : float or SymPy expression. The radius of the circum-scribing sphere around the octahedron. Examples ======== >>> from pydy.viz.shapes import Octahedron >>> s = Octahedron(10.0) >>> s.name 'unnamed' >>> s.color 'grey' >>>s.radius 10.0 >>> s.name = 'my-shape1' >>> s.name 'my-shape1' >>> s.color = 'blue' >>> s.color 'blue' >>> s.radius = 12.0 >>> s.radius 12.0 >>> a = Octahedron(10.0, name='my-shape2', color='red') >>> a.name 'my-shape2' >>> a.color 'red' >>> a.radius 10.0 """ class Icosahedron(Sphere): """Instantiates an icosahedron inscribed in a sphere of the given radius. Parameters ========== radius : float or a SymPy expression Radius of the circum-scribing sphere for Icosahedron Examples ======== >>> from pydy.viz.shapes import Icosahedron >>> s = Icosahedron(10) >>> s.name 'unnamed' >>> s.color 'grey' >>>s.radius 10.0 >>>#These can be changed later too .. >>> s.name = 'my-shape1' >>> s.name 'my-shape1' >>> s.color = 'blue' >>> s.color 'blue' >>> s.radius = 12.0 >>> s.radius 12 >>> a = Icosahedron(10.0, name='my-shape2', color='red') >>> a.name 'my-shape2' >>> a.color 'red' >>> a.radius 10.0 """ class Torus(Shape): """Instantiates a torus with a given radius and section radius. Parameters ========== radius : float or SymPy expression The radius of the torus. tube_radius : float or SymPy expression The radius of the torus tube. Examples ======== >>> from pydy.viz.shapes import Torus >>> s = Torus(10.0, 5.0) >>> s.name 'unnamed' >>> s.color 'grey' >>> s.radius 10.0 >>> s.tube_radius 5.0 >>> s.name = 'my-shape1' >>> s.name 'my-shape1' >>> s.color = 'blue' >>> s.color 'blue' >>> s.radius = 12.0 >>> s.radius 12.0 >>> s.tube_radius = 6.0 >>> s.tube_radius 6.0 >>> a = Torus(10.0, 5.0, name='my-shape2', color='red') >>> a.name 'my-shape2' >>> a.color 'red' >>> a.radius 10.0 >>> a.tube_radius 5.0 """ def __init__(self, radius, tube_radius, **kwargs): super(Torus, self).__init__(**kwargs) self.geometry_attrs += ['radius', 'tube_radius'] self.radius = radius self.tube_radius = tube_radius @property def radius(self): return self._radius @radius.setter def radius(self, new_radius): self._radius = new_radius @property def tube_radius(self): return self._tube_radius @tube_radius.setter def tube_radius(self, new_tube_radius): self._tube_radius = new_tube_radius class TorusKnot(Torus): """Instantiates a torus knot with given radius and section radius. Parameters ========== radius : float or SymPy expression The radius of the torus knot. tube_radius : float or SymPy expression The radius of the torus knot tube. Examples ======== >>> from pydy.viz.shapes import TorusKnot >>> s = TorusKnot(10.0, 5.0) >>> s.name 'unnamed' >>> s.color 'grey' >>> s.radius 10.0 >>> s.tube_radius 5.0 >>> s.name = 'my-shape1' >>> s.name 'my-shape1' >>> s.color = 'blue' >>> s.color 'blue' >>> s.radius = 12.0 >>> s.radius 12.0 >>> s.tube_radius = 6.0 >>> s.tube_radius 6.0 >>> a = TorusKnot(10.0, 5.0, name='my-shape2', color='red') >>> a.name 'my-shape2' >>> a.color 'red' >>> a.radius 10.0 >>> a.tube_radius 5.0 """ class Tube(Shape): """Instantiates a tube that sweeps along a path. Parameters ========== radius : float or SymPy expression The radius of the tube. points : array_like, shape(n, 3) An array of n (x, y, z) coordinates representing points that the tube's center line should follow. Examples ======== >>> from pydy.viz.shapes import Tube >>> points = [[1.0, 2.0, 1.0], [2.0, 1.0, 1.0], [2.0, 3.0, 4.0]] >>> s = Tube(10.0, points) >>> s.name 'unnamed' >>> s.color 'grey' >>> s.points [[1.0, 2.0, 1.0], [2.0, 1.0, 1.0], [2.0, 3.0, 4.0]] >>> s.name = 'my-shape1' >>> s.name 'my-shape1' >>> s.color = 'blue' >>> s.color 'blue' >>> s.radius = 14.0 >>> s.radius 14.0 >>> s.points = [[2.0, 1.0, 4.0], [1.0, 2.0, 4.0], ... [2.0, 3.0, 1.0], [1.0, 1.0, 3.0]] >>> s.points [[2.0, 1.0, 4.0], [1.0, 2.0, 4.0], [2.0, 3.0, 1.0], [1.0, 1.0, 3.0]] >>> a = Tube(12.0, points, name='my-shape2', color='red') >>> a.name 'my-shape2' >>> a.color 'red' >>> a.radius 12.0 >>> a.points [[1.0, 2.0, 1.0], [2.0, 1.0, 1.0], [2.0, 3.0, 4.0]] """ def __init__(self, radius, points, **kwargs): super(Tube, self).__init__(**kwargs) self.geometry_attrs += ['radius', 'points'] self.radius = radius self.points = points @property def points(self): return self._points @points.setter def points(self, new_points): self._points = np.asarray(new_points)
bsd-3-clause
vene/ambra
ambra/cross_validation.py
1
9371
import numbers import time import numpy as np from sklearn.utils import safe_indexing from sklearn.base import is_classifier, clone from sklearn.metrics.scorer import check_scoring from sklearn.externals.joblib import Parallel, delayed, logger from ambra.backports import _num_samples, indexable from sklearn.cross_validation import check_cv def _safe_split(estimator, X, y, indices, train_indices=None): """Create subset of dataset and properly handle kernels.""" if hasattr(estimator, 'kernel') and callable(estimator.kernel): # cannot compute the kernel values with custom function raise ValueError("Cannot use a custom kernel function. " "Precompute the kernel matrix instead.") if not hasattr(X, "shape"): if getattr(estimator, "_pairwise", False): raise ValueError("Precomputed kernels or affinity matrices have " "to be passed as arrays or sparse matrices.") X_subset = [X[idx] for idx in indices] else: if getattr(estimator, "_pairwise", False): # X is a precomputed square kernel matrix if X.shape[0] != X.shape[1]: raise ValueError("X should be a square kernel matrix") if train_indices is None: X_subset = X[np.ix_(indices, indices)] else: X_subset = X[np.ix_(indices, train_indices)] else: X_subset = safe_indexing(X, indices) if y is not None: y_subset = safe_indexing(y, indices) else: y_subset = None return X_subset, y_subset def _score(estimator, X_test, y_test, scorer, **params): """Compute the score of an estimator on a given test set.""" if y_test is None: score = scorer(estimator, X_test, **params) else: score = scorer(estimator, X_test, y_test, **params) if not isinstance(score, numbers.Number): raise ValueError("scoring must return a number, got %s (%s) instead." % (str(score), type(score))) return score def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1, verbose=0, fit_params=None, pre_dispatch='2*n_jobs', scorer_params=None): """Evaluate a score by cross-validation Parameters ---------- estimator : estimator object implementing 'fit' The object to use to fit the data. X : array-like The data to fit. Can be, for example a list, or an array at least 2d. y : array-like, optional, default: None The target variable to try to predict in the case of supervised learning. scoring : string, callable or None, optional, default: None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. cv : cross-validation generator or int, optional, default: None A cross-validation generator to use. If int, determines the number of folds in StratifiedKFold if y is binary or multiclass and estimator is a classifier, or the number of folds in KFold otherwise. If None, it is equivalent to cv=3. n_jobs : integer, optional The number of CPUs to use to do the computation. -1 means 'all CPUs'. verbose : integer, optional The verbosity level. fit_params : dict, optional Parameters to pass to the fit method of the estimator. pre_dispatch : int, or string, optional Controls the number of jobs that get dispatched during parallel execution. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: - None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A string, giving an expression as a function of n_jobs, as in '2*n_jobs' scorer_params : dict, optional Parameters to pass to the scorer. Can be used for sample weights and sample groups. Returns ------- scores : array of float, shape=(len(list(cv)),) Array of scores of the estimator for each run of the cross validation. """ X, y = indexable(X, y) cv = check_cv(cv, X, y, classifier=is_classifier(estimator)) scorer = check_scoring(estimator, scoring=scoring) # We clone the estimator to make sure that all the folds are # independent, and that it is pickle-able. parallel = Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch) scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer, train, test, verbose, None, fit_params, scorer_params) for train, test in cv) return np.array(scores)[:, 0] def _fit_and_score(estimator, X, y, scorer, train, test, verbose, parameters, fit_params, scorer_params, return_train_score=False, return_parameters=False): """Fit estimator and compute scores for a given dataset split. Parameters ---------- estimator : estimator object implementing 'fit' The object to use to fit the data. X : array-like of shape at least 2D The data to fit. y : array-like or None The target variable to try to predict in the case of supervised learning. scoring : callable A scorer callable object / function with signature ``scorer(estimator, X, y)``. train : array-like, shape = (n_train_samples,) Indices of training samples. test : array-like, shape = (n_test_samples,) Indices of test samples. verbose : integer The verbosity level. parameters : dict or None Parameters to be set on the estimator. fit_params : dict or None Parameters that will be passed to ``estimator.fit``. scorer_params : dict or None Parameters that will be passed to the scorer. return_train_score : boolean, optional, default: False Compute and return score on training set. return_parameters : boolean, optional, default: False Return parameters that has been used for the estimator. Returns ------- train_score : float, optional Score on training set, returned only if `return_train_score` is `True`. test_score : float Score on test set. n_test_samples : int Number of test samples. scoring_time : float Time spent for fitting and scoring in seconds. parameters : dict or None, optional The parameters that have been evaluated. """ if verbose > 1: if parameters is None: msg = "no parameters to be set" else: msg = '%s' % (', '.join('%s=%s' % (k, v) for k, v in parameters.items())) print("[CV] %s %s" % (msg, (64 - len(msg)) * '.')) # Adjust lenght of sample weights n_samples = _num_samples(X) fit_params = fit_params if fit_params is not None else {} fit_params = dict([(k, np.asarray(v)[train] if hasattr(v, '__len__') and len(v) == n_samples else v) for k, v in fit_params.items()]) # Same, but take both slices scorer_params = scorer_params if scorer_params is not None else {} train_scorer_params = dict([(k, np.asarray(v)[train] if hasattr(v, '__len__') and len(v) == n_samples else v) for k, v in scorer_params.items()]) test_scorer_params = dict([(k, np.asarray(v)[test] if hasattr(v, '__len__') and len(v) == n_samples else v) for k, v in scorer_params.items()]) if parameters is not None: estimator.set_params(**parameters) start_time = time.time() X_train, y_train = _safe_split(estimator, X, y, train) X_test, y_test = _safe_split(estimator, X, y, test, train) if y_train is None: estimator.fit(X_train, **fit_params) else: estimator.fit(X_train, y_train, **fit_params) test_score = _score(estimator, X_test, y_test, scorer, **test_scorer_params) if return_train_score: train_score = _score(estimator, X_train, y_train, scorer, **train_scorer_params) scoring_time = time.time() - start_time if verbose > 2: msg += ", score=%f" % test_score if verbose > 1: end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time)) print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg)) ret = [train_score] if return_train_score else [] ret.extend([test_score, _num_samples(X_test), scoring_time]) if return_parameters: ret.append(parameters) return ret
bsd-2-clause
michaelkirk/QGIS
tests/src/python/test_qgscolorscheme.py
11
3130
# -*- coding: utf-8 -*- """QGIS Unit tests for QgsColorScheme. .. note:: This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. """ __author__ = 'Nyall Dawson' __date__ = '25/07/2014' __copyright__ = 'Copyright 2014, The QGIS Project' # This will get replaced with a git SHA1 when you do a git archive __revision__ = '$Format:%H$' import qgis from utilities import unittest, TestCase from qgis.core import QgsColorScheme from PyQt4.QtGui import QColor #Make a dummy color scheme for testing class DummyColorScheme(QgsColorScheme): def __init__(self, parent=None): QgsColorScheme.__init__(self) def schemeName(self): return "Dummy scheme" def fetchColors(self,context='', baseColor=QColor()): if ( context == "testscheme" ): return [[QColor(255, 255, 0),'schemetest']] elif baseColor.isValid(): return [[baseColor,'base']] else: return [[QColor( 255, 0, 0 ),'red'],[QColor( 0, 255, 0 ), None]] def clone(self): return DummyColorScheme() class TestQgsColorScheme(TestCase): def testCreateScheme(self): """Test creating a new color scheme""" dummyScheme = DummyColorScheme() self.assertTrue(dummyScheme) def testGetSchemeName(self): """Test getting color scheme name""" dummyScheme = DummyColorScheme() self.assertEqual(dummyScheme.schemeName(), "Dummy scheme") def testColorsNoBase(self): """Test getting colors without passing a base color""" dummyScheme = DummyColorScheme() colors = dummyScheme.fetchColors() self.assertEqual( len(colors), 2 ) self.assertEqual( colors[0][0], QColor( 255, 0, 0 ) ) self.assertEqual( colors[0][1], 'red') self.assertEqual( colors[1][0], QColor( 0, 255, 0 ) ) self.assertEqual( colors[1][1], None) def testColorsWithBase(self): """Test getting colors with a base color""" dummyScheme = DummyColorScheme() testColor = QColor( 0, 0, 255 ) colors = dummyScheme.fetchColors( None, testColor ) self.assertEqual( len(colors), 1 ) self.assertEqual( colors[0][0], testColor ) self.assertEqual( colors[0][1], 'base') def testColorsWithScheme(self): """Test getting colors when specifying a scheme""" dummyScheme = DummyColorScheme() colors = dummyScheme.fetchColors( 'testscheme' ) self.assertEqual( len(colors), 1 ) self.assertEqual( colors[0][0], QColor( 255, 255, 0 ) ) self.assertEqual( colors[0][1], 'schemetest') def testClone(self): """Test cloning a color scheme""" dummyScheme = DummyColorScheme() colors = dummyScheme.fetchColors() dummySchemeClone = dummyScheme.clone() colorsClone = dummySchemeClone.fetchColors() self.assertEqual( colors, colorsClone ) if __name__ == "__main__": unittest.main()
gpl-2.0
kupiakos/pybcd
bcdelement.py
1
3754
from common import * from elements import * from bcddevice import BCDDevice class BCDElement: obj = None _type = None _changed = False _identifier = None _enum = None _value = None fmt = None def __init__(self, obj, type): self.obj = obj self._type = type self._type_info = element_info(self._type) self.fmt = self._type_info[1] self._enum = None self._value = None def __hash__(self): return hash((self._type, tuple(self.value))) def __str__(self): return 'BCDElement<%s=%s>' % (self.identifier, str(self.value)) def __repr__(self): return str(self) def _find_identifier(self): self._identifier = None self._enum = None cls, fmt, subtype = self._type_info v = None if cls == ElementClass.Application: v = alias_dict[cls][self.obj._type_info[2]].get(subtype) else: v = alias_dict[cls].get(subtype) if v is None: v = (fmt, 'custom:%x' % int(self._type, 16)) elif len(v) == 3: self._enum = v[2] self._identifier = v[1] def _load_value(self): self._value = self.obj._nav.value('Element', path='Elements/' + self._type) @property def identifier(self): if self._identifier is None: self._find_identifier() return self._identifier @property def value(self): if self._value is None: self._load_value() return element_transform[self._type_info[1]][1](self._value) @value.setter def value(self, val): raise NotImplementedError('value setting not done yet') if self.name in element_transform: v = element_transform[self.name][0](self, val) else: v = val self._value = v def dump(self, tab='', verbose=False): p = print if self.identifier.startswith('custom:'): p = printwarn iv = self.value if self._enum: if iv not in self._enum.reverse_mapping: p = printwarn else: iv = self._enum.reverse_mapping[iv] v = element_transform_str.get(self.fmt, identity)(iv) vl = None # the value list, if it exists # handle the first of an objectlist if isinstance(iv, list) and len(v) > 0: vl = v[1:] v = v[0] # test if the guid exists if isguid(v): import random if v not in self.obj.bcd: p = printerror if not verbose: v = self.obj.bcd.guid_to_known(v) identifier = self.identifier if verbose: identifier = '%s<%s>' % (self.identifier, self._type) # print the identifier (printelementname if p is print else p)( tab + identifier.ljust(DUMP_SPACING + int(verbose)*10), end='') if isinstance(v, BCDDevice): v = v.friendly(verbose) # print the value (or first value if we're a list) (p)(v) if vl: # do listy stuff for g in vl: p = print if isguid(g): if g in self.obj.bcd: if not verbose: g = self.obj.bcd.guid_to_known(g) else: p = printerror p(tab + ' ' * (DUMP_SPACING + int(verbose)*10) + g) # END OF LINE.
mit
ceph/radosgw-agent
radosgw_agent/util/log.py
1
2537
import logging import sys def get_dev_logger(name='dev.radosgw_agent'): """ A simple utility to be able to log things that are meant for developer-eyes and not for user facing. All developer logs must be prepended with `dev` so this utility ensures that is the case. To use it:: dev_log = get_dev_logger(__name__) Or:: dev_log = get_dev_logger('dev.custom_name') """ if not name.startswith('dev'): return logging.getLogger('%s.%s' % ('dev', name)) return logging.getLogger(name) BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8) COLORS = { 'WARNING': YELLOW, 'INFO': WHITE, 'DEBUG': BLUE, 'CRITICAL': RED, 'ERROR': RED, 'FATAL': RED, } RESET_SEQ = "\033[0m" COLOR_SEQ = "\033[1;%dm" BOLD_SEQ = "\033[1m" BASE_COLOR_FORMAT = "%(asctime)s %(process)d\ [$BOLD%(name)s$RESET][%(color_levelname)-17s] %(message)s" BASE_FORMAT = "%(asctime)s %(process)d [%(name)s][%(levelname)-6s] %(message)s" def supports_color(): """ Returns True if the running system's terminal supports color, and False otherwise. """ unsupported_platform = (sys.platform in ('win32', 'Pocket PC')) # isatty is not always implemented is_a_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty() if unsupported_platform or not is_a_tty: return False return True def color_message(message): message = message.replace("$RESET", RESET_SEQ).replace("$BOLD", BOLD_SEQ) return message class ColoredFormatter(logging.Formatter): """ A very basic logging formatter that not only applies color to the levels of the ouput but will also truncate the level names so that they do not alter the visuals of logging when presented on the terminal. """ def __init__(self, msg): logging.Formatter.__init__(self, msg) def format(self, record): levelname = record.levelname truncated_level = record.levelname[:6] levelname_color = COLOR_SEQ % (30 + COLORS[levelname]) + truncated_level + RESET_SEQ record.color_levelname = levelname_color return logging.Formatter.format(self, record) def color_format(): """ Main entry point to get a colored formatter, it will use the BASE_FORMAT by default and fall back to no colors if the system does not support it """ str_format = BASE_COLOR_FORMAT if supports_color() else BASE_FORMAT color_format = color_message(str_format) return ColoredFormatter(color_format)
mit
jmccrae/wn-rdf
stresstest.py
1
1210
import multiprocessing import sqlite3 import time import urllib def do_query(uri): t1 = time.time() try: for line in urllib.urlopen(uri): pass print "Got %s in %f" % (uri, time.time() - t1) except Exception as e: print "Failed on %s: %s" % (uri, str(e)) if __name__ == "__main__": conn = sqlite3.connect("wordnet_3.1+.db") cursor = conn.cursor() uris = [] cursor.execute("select synsetid, wordid, casedwordid from senses order by random() limit 100") for synsetid, wordid, casedwordid in cursor.fetchall(): cursor.execute("select pos from synsets where synsetid=?",(synsetid,)) pos, = cursor.fetchone() if casedwordid: cursor.execute("select cased from casedwords where casedwordid=?", (casedwordid,)) word, = cursor.fetchone() else: cursor.execute("select lemma from words where wordid=?", (wordid,)) word, = cursor.fetchone() uris += ["http://localhost:8051/wn31-%09d-%s" % (synsetid, pos)] uris += ["http://localhost:8051/%s-%s" % (word, pos)] print "Starting to query" pool = multiprocessing.Pool(20) pool.map(do_query, uris)
bsd-3-clause
tectronics/mythbox
resources/test/mythboxtest/mythtv/test_domain.py
5
34567
# # MythBox for XBMC - http://mythbox.googlecode.com # Copyright (C) 2011 [email protected] # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # import datetime import time import unittest2 as unittest import mythboxtest import copy from mockito import Mock, when, verify, any from mythbox.settings import MythSettings from mythbox.mythtv import protocol from mythbox.mythtv.protocol import protocols from mythbox.mythtv.domain import ctime2MythTime, dbTime2MythTime, Backend, \ Channel, CommercialBreak, Job, UserJob, TVProgram, Program, RecordedProgram, \ RecordingSchedule, Tuner, StatusException, frames2seconds, seconds2frames from mythbox.mythtv.enums import CheckForDupesIn, CheckForDupesUsing, FlagMask, \ EpisodeFilter, JobStatus, JobType from mythbox.platform import Platform log = mythboxtest.getLogger('mythbox.unittest') def pdata(pdict={}, protocolVersion=56): ''' make creating fake program data easy with a sparse dict in : {'title':'Seinfeld', 'description':'Real funny!'} out: [] of data to pass into RecordedProgram constructor ''' p = protocols[protocolVersion] d = ['0'] * p.recordSize() for k,v in pdict.items(): try: d[p.recordFields().index(k)] = v except ValueError: log.warning('%s is not valid field in protocol %d' % (k,protocolVersion)) return d def socketTime(h, m, s): # return raw value that myth passes over socket for date=today and time=h,m,s (in local timezone) return time.mktime(datetime.datetime.combine(datetime.date.today(), datetime.time(h,m,s)).timetuple()) def socketDateTime(year, month, day, h, m, s): return time.mktime(datetime.datetime.combine(datetime.date(year, month, day), datetime.time(h,m,s)).timetuple()) class ModuleFunctionsTest(unittest.TestCase): def test_ctime2MythTime_MinDateStringReturnsMinDate(self): t = ctime2MythTime('0') log.debug('MythTime = %s' % t) self.assertEqual('19691231180000', t) def test_ctime2MythTime_MinDateIntReturnsMinDate(self): t = ctime2MythTime(0) log.debug('MythTime = %s' % t) self.assertEqual('19691231180000', t) def test_ctime2MythTime_BadInputRaisesValueError(self): # PLATFORM ISSUE: Throws exception on windows but returns 19691231175959 on linux try: t = ctime2MythTime(-1) log.warn('Expected failure for time = -1 : %s' % t) except ValueError, ve: log.debug('Pass: %s' % ve) def test_dbTime2MythTime_ShouldConvertTimeDeltaToString(self): td = datetime.timedelta(seconds=1000) mt = dbTime2MythTime(td) log.debug('MythTime = %s' % mt) self.assertEqual('001640', mt) def test_frames2seconds(self): s = frames2seconds(1000, 29.97) log.debug('1000 frames @ 29.97fps = %s seconds' % s) self.assertEqual(33.37, s) s = frames2seconds(0, 29.97) log.debug('0 frames @ 29.97fps = %s seconds' % s) self.assertEqual(0.0, s) s = frames2seconds(99999999L, 29.97) log.debug('99999999L frames @ 29.97fps = %s seconds' % s) self.assertEqual(3336669.97, s) def test_seconds2frames(self): s = seconds2frames(33.37, 29.97) log.debug('33.37 seconds @ 29.97fps = %s frames' % s) self.assertEqual(1000L, s) s = seconds2frames(0, 29.97) log.debug('0 seconds @ 29.97fps = %s frames' % s) self.assertEqual(0L, s) s = seconds2frames(3336669.97, 29.97) log.debug('3336669.97 seconds @ 29.97fps = %s frames' % s) self.assertEqual(99999999L, s) class CheckForDupesUsingTest(unittest.TestCase): def test_access_to_static_constants_works(self): self.assertEqual(145, CheckForDupesUsing.translations[CheckForDupesUsing.NONE]) class ProgramTest(unittest.TestCase): def setUp(self): self.translator = Mock() def test_constructor(self): p = Program(self.translator) self.assertFalse(p is None) class ChannelTest(unittest.TestCase): def test_constructor(self): channel = Channel({'chanid':9, 'channum':'23_1', 'callsign':'WXYZ', 'name':'NBC9', 'icon':'nbc.jpg', 'cardid':4}) log.debug(channel) self.assertTrue(channel) def test_constructor_IconMissing(self): channel = Channel({'chanid':9, 'channum':'23_1', 'callsign':'WXYZ', 'name':'NBC9', 'cardid':4}) log.debug(channel) self.assertTrue(channel.getIconPath() is None) def test_getSortableChannelNumber_When_channel_number_is_already_sortable_Then_return_channel_number(self): channel = Channel({'chanid':9, 'channum':'23', 'callsign':'WXYZ', 'name':'NBC9', 'cardid':4}) log.debug('Sortable channel number = %s' % channel.getSortableChannelNumber()) self.assertEqual(23, channel.getSortableChannelNumber()) def test_getSortableChannelNumber_When_channel_number_contains_underscore_Then_return_channel_number_as_float(self): number = Channel({'chanid':9, 'channum':'23_4', 'callsign':'WXYZ', 'name':'NBC9', 'cardid':4}).getSortableChannelNumber() log.debug('Sortable channel number = %s' % number) self.assertEqual(23.4, number) def test_getSortableChannelNumber_When_channel_number_contains_dot_Then_return_channel_number_as_float(self): number = Channel({'chanid':9, 'channum':'23.4', 'callsign':'WXYZ', 'name':'NBC9', 'cardid':4}).getSortableChannelNumber() log.debug('Sortable channel number = %s' % number) self.assertEqual(23.4, number) def test_getSortableChannelNumber_When_channel_number_doesnt_seem_like_a_number_Then_return_channel_id(self): number = Channel({'chanid':9, 'channum':'23/4', 'callsign':'WXYZ', 'name':'NBC9', 'cardid':4}).getSortableChannelNumber() log.debug('Sortable channel number = %s' % number) self.assertEqual(9, number) class TVProgramTest(unittest.TestCase): def setUp(self): self.data = { 'title' : 'Bonanza', 'subtitle' : 'The Shootout', 'description' : 'Yee haw!', 'starttime' : datetime.datetime(2008, 11, 21, 14), 'endtime' : datetime.datetime(2008, 11, 21, 14), 'channum' : '23', 'hdtv' : True } self.translator = Mock() self.platform = Platform() self.protocol = protocol.Protocol23056() self.settings = MythSettings(self.platform, self.translator) def test_constructor(self): program = TVProgram(self.data, self.translator) self.assertTrue(program is not None) self.assertTrue(program.isHD()) def test_starttimeAsTime(self): program = TVProgram(self.data, self.translator) time = program.starttimeAsTime() log.debug('startTime = %s' % time) self.assertTrue(time) def test_starttime_TypeInDataDictIsADateTime(self): p = TVProgram({'starttime': datetime.datetime(2008, 11, 21, 14)}, self.translator) self.assertEqual('20081121140000', p.starttime()) def test_eq_Make_sure_bidirectional_equivalence_to_RecordedProgram_works(self): tv = TVProgram(self.data, self.translator) recorded = RecordedProgram(pdata({'channum':'23','starttime': socketDateTime(2008, 11, 21, 14, 0, 0)}), Mock(), Mock(), Mock(), self.protocol, Mock()) self.assertTrue(tv == recorded) self.assertTrue(recorded == tv) self.assertTrue(tv in [recorded]) self.assertTrue(recorded in [tv]) self.assertTrue({tv:tv}.has_key(recorded)) self.assertTrue({recorded:recorded}.has_key(tv)) class RecordedProgramTest(unittest.TestCase): def setUp(self): self.conn = Mock() self.settings = Mock() self.translator = Mock() self.platform = Mock() self.protocol = protocol.Protocol23056() self.pkwargs = {'settings':self.settings, 'translator': self.translator, 'platform':self.platform, 'protocol':self.protocol, 'conn':self.conn} def test_hashable(self): p1 = RecordedProgram(pdata({'channum':'99', 'starttime':999999}), **self.pkwargs) p2 = RecordedProgram(pdata({'channum':'101', 'starttime':888888}), **self.pkwargs) mydict = {p1:'p1', p2:'p2'} self.assertTrue(p1 in mydict) self.assertTrue(p2 in mydict) self.assertEqual('p1', mydict[p1]) self.assertEqual('p2', mydict[p2]) def test_hasBookmark_False(self): p = RecordedProgram(pdata(), **self.pkwargs) p.setProgramFlags(FlagMask.FL_AUTOEXP) self.assertFalse(p.isBookmarked()) self.assertTrue(p.isAutoExpire()) def test_hasBookmark_True(self): p = RecordedProgram(pdata(), **self.pkwargs) p.setProgramFlags(FlagMask.FL_BOOKMARK | FlagMask.FL_AUTOEXP) self.assertTrue(p.isBookmarked()) self.assertTrue(p.isAutoExpire()) def test_hasCommercials_True(self): p = RecordedProgram(pdata({'programflags':FlagMask.FL_COMMFLAG | FlagMask.FL_AUTOEXP}), **self.pkwargs) commBreaks = [] commBreaks.append(CommercialBreak(120,180)) when(self.conn).getCommercialBreaks(p).thenReturn(commBreaks) log.debug('comms = %s' % len(p.getCommercials())) self.assertTrue(p.hasCommercials()) #verify(self.conn).getCommercialBreaks(p) def test_hasCommercials_False(self): p = RecordedProgram(pdata({'programflags':FlagMask.FL_COMMFLAG | FlagMask.FL_AUTOEXP}), **self.pkwargs) commBreaks = [] when(self.conn).getCommercialBreaks(p).thenReturn(commBreaks) log.debug('comms = %s' % len(p.getCommercials())) self.assertFalse(p.hasCommercials()) def test_getCommercials_ReturnsOneCommercial(self): p = RecordedProgram(pdata({'programflags':FlagMask.FL_COMMFLAG | FlagMask.FL_AUTOEXP}), **self.pkwargs) commBreaks = [] commBreaks.append(CommercialBreak(120,180)) when(self.conn).getCommercialBreaks(p).thenReturn(commBreaks) result = p.getCommercials() log.debug('commercials = %s'%result) self.assertEqual(commBreaks, result) verify(self.conn).getCommercialBreaks(p) def test_eq_True_self(self): p = RecordedProgram(pdata({'channum':'99', 'starttime':999999}), **self.pkwargs) self.assertEqual(p, p) def test_eq_True_same_channelId_and_startttime(self): data = pdata({'channum':'99', 'starttime':999999}) p1 = RecordedProgram(data, **self.pkwargs) p2 = RecordedProgram(data[:], **self.pkwargs) self.assertEqual(p1, p2) self.assertEqual(p2, p1) def test_eq_False_different_channelNumber_and_startttime(self): p1 = RecordedProgram(pdata({'channum':'11', 'starttime':999999}), **self.pkwargs) p2 = RecordedProgram(pdata({'channum':'101', 'starttime':777777}), **self.pkwargs) self.assertNotEquals(p1, p2) self.assertNotEquals(p2, p1) def test_eq_False_different_channelNumber_same_startttime(self): p1 = RecordedProgram(pdata({'channum':'99', 'starttime':999999}), **self.pkwargs) p2 = RecordedProgram(pdata({'channum':'101', 'starttime':999999}), **self.pkwargs) self.assertNotEquals(p1, p2) self.assertNotEquals(p2, p1) def test_formattedAirTime(self): # 9:00pm 9:30pm p = RecordedProgram(pdata({'starttime':socketTime(21, 0, 0), 'endtime':socketTime(21, 30, 0)}), **self.pkwargs) self.assertEqual('9:00 - 9:30PM', p.formattedAirTime(short=False)) self.assertEqual('9 - 9:30PM', p.formattedAirTime(short=True)) self.assertEqual('9 - 9:30PM', p.formattedAirTime()) def test_getDuration_When_duration_is_half_hour_Then_return_30mins(self): # 6:30pm 7:00pm self.assertEqual(30, RecordedProgram(pdata({'starttime':socketTime(18, 30, 0), 'endtime':socketTime(19, 0, 0)}), **self.pkwargs).getDuration()) def test_getDuration_When_2_hour_duration_spans_midnight_into_next_day_Then_return_120mins(self): # 10/10/2009 11pm 10/11/2009 1am self.assertEqual(120, RecordedProgram(pdata({'starttime':socketDateTime(2009, 10, 10, 23, 0, 0), 'endtime':socketDateTime(2009, 10, 11, 1, 0, 0)}), **self.pkwargs).getDuration()) def test_getDuration_When_start_and_end_times_same_Then_return_0mins(self): self.assertEqual(0, RecordedProgram(pdata({'starttime': socketTime(18, 30, 0), 'endtime': socketTime(18, 30, 0)}), **self.pkwargs).getDuration()) def test_formattedStartTime_1pm(self): s = RecordedProgram(pdata({'starttime':socketTime(13, 0, 0)}), self.settings, self.translator, self.platform, self.protocol, self.conn).formattedStartTime() log.debug('startime = %s' % s) self.assertEqual('1:00 PM', s) def test_formattedDuration(self): data = [ {'start' : socketTime(18, 30, 0), 'end' : socketTime(20, 30, 0), 'expected' : '2 hrs'}, {'start' : socketTime(18, 30, 0), 'end' : socketTime(19, 30, 0), 'expected' : '1 hr'}, {'start' : socketTime(18, 30, 0), 'end' : socketTime(18, 31, 0), 'expected' : '1 m'}, {'start' : socketTime(18, 30, 0), 'end' : socketTime(19, 0, 0), 'expected' : '30 m'}, {'start' : socketTime(18, 30, 0), 'end' : socketTime(20, 0, 0), 'expected' : '1 hr 30 m'}, {'start' : socketTime(18, 30, 0), 'end' : socketTime(21, 0, 0), 'expected' : '2 hrs 30 m'}, {'start' : socketTime(18, 30, 0), 'end' : socketTime(19, 31, 0), 'expected' : '1 hr 1 m'}, {'start' : socketTime(18, 30, 0), 'end' : socketTime(20, 31, 0), 'expected' : '2 hrs 1 m'}] for d in data: s = RecordedProgram(pdata({'starttime':d['start'], 'endtime':d['end']}), **self.pkwargs).formattedDuration() log.debug('Duration = %s' % s) self.assertEqual(d['expected'], s) def test_originalAirDate_When_missing_Returns_None(self): rp = RecordedProgram(pdata({'airdate':'','hasairdate':0}), **self.pkwargs) self.assertFalse(rp.hasOriginalAirDate()) self.assertEqual('', rp.originalAirDate()) def test_originalAirDate_When_available_Returns_date_as_string(self): rp = RecordedProgram(pdata({'airdate': '2008-10-10', 'hasairdate':1}), **self.pkwargs) self.assertEqual('2008-10-10', rp.originalAirDate()) self.assertTrue(rp.hasOriginalAirDate()) class TunerTest(unittest.TestCase): def setUp(self): self.db = Mock() self.conn = Mock() self.translator = Mock() self.domainCache = Mock() self.tuner = Tuner(4, 'mrbun', 1000, 6000, 'HDHOMERUN', self.domainCache, self.conn, self.db, self.translator) def test_toString(self): log.debug('tuner = %s'%self.tuner) self.assertFalse(self.tuner is None) def test_isWatchingOrRecording_CardIdle(self): when(self.conn).getTunerShowing('Seinfeld').thenReturn(-1) self.assertFalse(self.tuner.isWatchingOrRecording('Seinfeld')) def test_isWatchingOrRecording_CardNotIdleButShowDoesntMatch(self): when(self.conn).getTunerShowing('Seinfeld').thenReturn(-1) self.assertFalse(self.tuner.isWatchingOrRecording('Seinfeld')) def test_isWatchingOrRecording_CardNotIdleAndShowMatches(self): when(self.conn).getTunerShowing('Seinfeld').thenReturn(self.tuner.tunerId) self.assertTrue(self.tuner.isWatchingOrRecording('Seinfeld')) def test_isRecording_True(self): when(self.conn).isTunerRecording(any()).thenReturn(True) result = self.tuner.isRecording() log.debug('isRecording_True = %s'%result) self.assertTrue(result) verify(self.conn).isTunerRecording(any()) def test_isRecording_False(self): when(self.conn).isTunerRecording(any()).thenReturn(False) self.assertFalse(self.tuner.isRecording()) verify(self.conn).isTunerRecording(any()) def test_hasChannel_True(self): channels = [] for x in range(0,5): channels.append(Channel( {'chanid':x, 'channum':'%d'%x, 'callsign':'WXYZ', 'name':'NBC9', 'icon':'nbc.jpg', 'cardid':4})) when(self.domainCache).getChannels().thenReturn(channels) self.assertTrue(self.tuner.hasChannel(Channel(dict(channum='3')))) def test_hasChannel_False(self): channels = [] for x in range(0,5): channels.append(Channel( {'chanid':x, 'channum':'%d'%x, 'callsign':'WXYZ', 'name':'NBC9', 'icon':'nbc.jpg', 'cardid':4})) when(self.domainCache).getChannels().thenReturn(channels) self.assertFalse(self.tuner.hasChannel(Channel(dict(channum='6')))) def test_getChannels_CachingWorks(self): channels = [] for x in range(0,5): channels.append(Channel( {'chanid':x, 'channum':'%d'%x, 'callsign':'WXYZ', 'name':'NBC9', 'icon':'nbc.jpg', 'cardid':4})) when(self.domainCache).getChannels().thenReturn(channels) for x in range(10): channels = self.tuner.getChannels() verify(self.domainCache, 1).getChannels() class CommercialBreakTest(unittest.TestCase): def test_constructor(self): commercial = CommercialBreak(100, 200) self.assertTrue(commercial is not None) def test_constructor_StartAfterEndFailsAssertion(self): try: CommercialBreak(200, 100) except AssertionError, ae: log.debug('Error = %s' % ae) def test_isDuring_True(self): commercial = CommercialBreak(100, 200) self.assertTrue(commercial.isDuring(150)) def test_isDuring_BeforeCommercialReturnsFalse(self): commercial = CommercialBreak(100, 200) self.assertFalse(commercial.isDuring(50)) def test_isDuring_AfterCommercialReturnsFalse(self): commercial = CommercialBreak(100, 200) self.assertFalse(commercial.isDuring(350)) class RecordingScheduleTest(unittest.TestCase): def test_starttime_DataFromNativeMySQL(self): data = {'starttime': datetime.timedelta(seconds=(1 * 60 * 60) + (2 * 60) + 3)} schedule = RecordingSchedule(data, Mock()) self.assertEqual('010203', schedule.starttime()) def test_endtime_DataFromNativeMySQL(self): data = {'endtime': datetime.timedelta(seconds=(1 * 60 * 60) + (2 * 60) + 3)} schedule = RecordingSchedule(data, Mock()) self.assertEqual('010203', schedule.endtime()) def test_startdate_DataFromNativeMySQL(self): data = {'startdate': datetime.date(2008, 11, 12)} schedule = RecordingSchedule(data, Mock()) self.assertEqual('20081112', schedule.startdate()) def test_enddate_DataFromNativeMySQL(self): data = {'enddate': datetime.date(2008, 11, 12)} schedule = RecordingSchedule(data, Mock()) self.assertEqual('20081112', schedule.enddate()) def test_episodeFilter_and_checkForDupesIn_read_from_and_written_to_dupin_field_correctly(self): data = {'dupin': CheckForDupesIn.ALL_RECORDINGS | EpisodeFilter.EXCLUDE_REPEATS_AND_GENERICS} schedule = RecordingSchedule(data, Mock()) self.assertEqual(EpisodeFilter.EXCLUDE_REPEATS_AND_GENERICS, schedule.getEpisodeFilter()) schedule.setEpisodeFilter(EpisodeFilter.NEW_EPISODES_ONLY) self.assertEqual(EpisodeFilter.NEW_EPISODES_ONLY, schedule.getEpisodeFilter()) self.assertEqual(CheckForDupesIn.ALL_RECORDINGS, schedule.getCheckForDupesIn()) schedule.setCheckForDupesIn(CheckForDupesIn.PREVIOUS_RECORDINGS) self.assertEqual(EpisodeFilter.NEW_EPISODES_ONLY, schedule.getEpisodeFilter()) self.assertEqual(CheckForDupesIn.PREVIOUS_RECORDINGS, schedule.getCheckForDupesIn()) schedule.setEpisodeFilter(EpisodeFilter.NONE) self.assertEqual(EpisodeFilter.NONE, schedule.getEpisodeFilter()) self.assertEqual(CheckForDupesIn.PREVIOUS_RECORDINGS, schedule.getCheckForDupesIn()) def test_hashable(self): s1 = RecordingSchedule({'recordid' : 1}, Mock()) s2 = RecordingSchedule({'recordid' : 2}, Mock()) d = {s1:'schedule1',} self.assertIn(s1, d) self.assertEqual('schedule1', d[s1]) self.assertNotIn(s2, d) class UserJobTest(unittest.TestCase): def test_isActive_When_command_not_none_Then_return_true(self): self.assertTrue(UserJob(JobType.USERJOB1, 'Send to Ipad', 'HandBrakeCLI blah blal blah').isActive()) def test_isActive_When_command_empty_The_return_false(self): self.assertFalse(UserJob(JobType.USERJOB1, 'Send to Ipad', '').isActive()) def test_isActive_When_command_none_Then_return_false(self): self.assertFalse(UserJob(JobType.USERJOB1, 'Send to Ipad', None).isActive()) class JobTest(unittest.TestCase): def setUp(self): self.translator = Mock() self.protocol = protocol.Protocol56() def test_moveToFrontOfQueue_Raises_Exeption_When_Job_Not_Queued(self): job = self.createJob(jobStatus=JobStatus.FINISHED) try: job.moveToFrontOfQueue() except StatusException, se: log.debug(se) self.assertTrue('queue' in str(se)) def test_moveToFrontOfQueue_From_10_Of_10(self): # Setup db = Mock() conn = Mock() jobs = [] for i in xrange(1, 11): job = self.createJob(conn=conn, db=db, id=i, jobStatus=JobStatus.QUEUED, jobType=JobType.COMMFLAG) job.scheduledRunTime = datetime.datetime.now().replace(year=(2000+i)) jobs.append(job) when(db).getJobs(jobStatus=JobStatus.QUEUED).thenReturn(jobs) when(db).getJobs(jobStatus=JobStatus.QUEUED).thenReturn(jobs) job = copy.copy(jobs[-1]) # Test job.moveToFrontOfQueue() # Verify for i, j in enumerate(jobs[:-1]): log.debug('job %s = %s' % (i, j)) self.assertTrue(2000 + (i+2), j.scheduledRunTime.year) log.debug('current job = %s' % job) self.assertTrue(2001, job.scheduledRunTime.year) def test_moveToFrontOfQueue_From_5_Of_10(self): # Setup jobPos = 4 # zero based index db = Mock() conn = Mock() jobs = [] for i in xrange(1, 11): job = self.createJob(conn=conn, db=db, id=i, jobStatus=JobStatus.QUEUED, jobType=JobType.COMMFLAG) job.scheduledRunTime = datetime.datetime.now().replace(year=(2000+i)) jobs.append(job) #log.debug('%s' % job) when(db).getJobs(jobStatus=JobStatus.QUEUED).thenReturn(jobs) when(db).getJobs(jobStatus=JobStatus.QUEUED).thenReturn(jobs) job = copy.copy(jobs[jobPos]) # Test job.moveToFrontOfQueue() # Verify # pushed back [1:4] for i, j in enumerate(jobs[:jobPos]): log.debug('job %s = %s' % (i, j)) self.assertTrue(2000 + (i+2), j.scheduledRunTime.year) # moved to first in queue log.debug('current job = %s' % job) self.assertTrue(2001, job.scheduledRunTime.year) # unaffected jobs [5,10] for i, j in enumerate(jobs[jobPos+1:]): log.debug('job %s = %s' % (i, j)) self.assertTrue(2000 + (i+2), j.scheduledRunTime.year) def test_moveToFrontOfQueue_From_2_Of_2(self): # Setup db = Mock() conn = Mock() jobs = [] for i in xrange(1, 3): job = self.createJob(conn=conn, db=db, id=i, jobStatus=JobStatus.QUEUED, jobType=JobType.COMMFLAG) job.scheduledRunTime = datetime.datetime.now().replace(year=(2000+i)) jobs.append(job) when(db).getJobs(jobStatus=JobStatus.QUEUED).thenReturn(jobs) when(db).getJobs(jobStatus=JobStatus.QUEUED).thenReturn(jobs) job = copy.copy(jobs[1]) # Test job.moveToFrontOfQueue() # Verify for i, j in enumerate(jobs[:-1]): log.debug('job %s = %s' % (i, j)) self.assertEqual(2000 + (i+2), j.scheduledRunTime.year) log.debug('current job = %s' % job) self.assertEqual(2001, job.scheduledRunTime.year) def test_getPositionInQueue_Position_Is_7_of_10(self): # Setup db = Mock() conn = Mock() jobs = [] for i in xrange(1, 11): jobs.append(self.createJob(id=i, jobStatus=JobStatus.QUEUED, jobType=JobType.COMMFLAG)) when(db).getJobs(jobStatus=JobStatus.QUEUED).thenReturn(jobs) when(db).getJobs(jobStatus=JobStatus.QUEUED).thenReturn(jobs) job = self.createJob(conn=conn, db=db, id=7, jobStatus=JobStatus.QUEUED, jobType=JobType.COMMFLAG) # Test pos, numJobs = job.getPositionInQueue() # Verify log.debug('Job is %d of %d' % (pos, numJobs)) self.assertEqual(7, pos) self.assertEqual(10, numJobs) def test_getPositionInQueue_Position_Is_1_of_1(self): # Setup db = Mock() conn = Mock() job = self.createJob(conn=conn, db=db, jobStatus=JobStatus.QUEUED, jobType=JobType.COMMFLAG) when(db).getJobs(jobStatus=JobStatus.QUEUED).thenReturn([job]) when(db).getJobs(jobStatus=JobStatus.QUEUED).thenReturn([job]) # Test pos, numJobs = job.getPositionInQueue() # Verify log.debug('Job is %d of %d' % (pos, numJobs)) self.assertEqual(1, pos) self.assertEqual(1, numJobs) def test_getPositionInQueue_RaisesException_JobStatus_Not_Queued(self): when(self.translator).get(JobStatus.translations[JobStatus.FINISHED]).thenReturn('Finished') job = self.createJob(jobStatus=JobStatus.FINISHED) try: job.getPositionInQueue() self.fail('Expected StatusException since Finished jobs should not be in the queue') except StatusException, se: log.debug(se) self.assertTrue('Finished' in str(se)) def test_getPercentComplete_Finished_Job_Returns_100(self): job = self.createJob(jobStatus=JobStatus.FINISHED) self.assertEqual(100, job.getPercentComplete()) def test_getPercentComplete_Pending_Job_Returns_0(self): job = self.createJob(jobStatus=JobStatus.PENDING) self.assertEqual(0, job.getPercentComplete()) def test_getPercentComplete_Running_Job_Returns_57(self): job = self.createJob(jobStatus=JobStatus.RUNNING, jobType=JobType.COMMFLAG) job.comment = "76% Completed @ 13.9645 fps." self.assertEqual(76, job.getPercentComplete()) def test_getPercentComplete_Raises_StatusException_WhenRunningButPercentCompletionNotAvailableYet(self): job = self.createJob(jobStatus=JobStatus.RUNNING, jobType=JobType.COMMFLAG) job.comment = "Logo detection" try: job.getPercentComplete() except StatusException, se: log.debug("%s" % se) def test_getCommFlagRate_Running_Job_Returns_FPS(self): job = self.createJob(jobStatus=JobStatus.RUNNING, jobType=JobType.COMMFLAG) job.comment = "76% Completed @ 13.9645 fps." rate = job.getCommFlagRate() log.debug('Comm flag rate = %s' % rate) self.assertAlmostEqual(13.9645, rate) def test_getCommFlagRate_Raises_StatusException_WhenRunningButCommFlagRateNotAvailableYet(self): job = self.createJob(jobStatus=JobStatus.RUNNING, jobType=JobType.COMMFLAG) job.comment = "Logo detection" try: job.getCommFlagRate() except StatusException, se: log.debug("%s" % se) def test_str_ShouldConvertToString(self): when(self.translator).get(JobStatus.translations[JobStatus.QUEUED]).thenReturn('Queued') when(self.translator).get(JobType.translations[JobType.SYSTEMJOB]).thenReturn('System') s = "%s"%self.createJob(jobStatus=JobStatus.QUEUED, jobType=JobType.SYSTEMJOB) log.debug('job = %s' % s) self.assertTrue('System' in s) self.assertTrue('Queued' in s) def test_isJobFor_ShouldReturnTrue(self): # Setup job = self.createJob() job.startTime = datetime.datetime(2009, 12, 5, 10, 20, 00) job.channelId = 1999 data = [''] * self.protocol.recordSize() data[4] = 1999 data[11] = time.mktime(datetime.datetime(2009, 12, 5, 10, 20, 00).timetuple()) program = RecordedProgram(data=data, settings=Mock(), translator=Mock(), platform=Mock(), protocol=self.protocol, conn=Mock()) # Test & verify self.assertTrue(job.isJobFor(program)) def test_isJobFor_ShouldReturnFalse_TimesDontMatch(self): # Setup job = self.createJob() job.startTime = datetime.datetime(2008, 11, 4, 23, 45, 00) job.channelId = 1999 data = [''] * self.protocol.recordSize() data[4] = 1999 data[11] = time.mktime(datetime.datetime(2009, 12, 5, 10, 20, 00).timetuple()) program = RecordedProgram(data=data, settings=Mock(), translator=Mock(), platform=Mock(), protocol=self.protocol, conn=Mock()) # Test & verify self.assertFalse(job.isJobFor(program)) def test_isJobFor_ShouldReturnFalse_ChannelIds_DontMatch(self): # Setup job = self.createJob() job.startTime = datetime.datetime(2008, 11, 4, 23, 45, 00) job.channelId = 200 data = [''] * self.protocol.recordSize() data[4] = 1999 data[11] = time.mktime(datetime.datetime(2008, 11, 4, 23, 45, 00).timetuple()) program = RecordedProgram(data=data, settings=Mock(), translator=Mock(), platform=Mock(), protocol=self.protocol, conn=Mock()) # Test & verify self.assertFalse(job.isJobFor(program)) def test_eq_TrueForSameObjectInstance(self): job = self.createJob() self.assertTrue(job == job) def test_eq_TrueForJobsWithTheSameId(self): job1 = self.createJob(id=99) job2 = self.createJob(id=99) self.assertTrue(job1 == job2) def test_eq_FalseForJobsWithDifferentIds(self): job1 = self.createJob(id=99) job2 = self.createJob(id=100) self.assertFalse(job1 == job2) def test_eq_FalseForInvalidType(self): job1 = self.createJob(id=99) job2 = "i am not of type Job" self.assertFalse(job1 == job2) def test_eq_FalseForNone(self): job1 = self.createJob(id=99) job2 = None self.assertFalse(job1 == job2) def test_isUserJob(self): self.assertTrue(self.createJob(jobType=JobType.USERJOB & JobType.USERJOB1).isUserJob()) self.assertTrue(self.createJob(jobType=JobType.USERJOB & JobType.USERJOB2).isUserJob()) self.assertTrue(self.createJob(jobType=JobType.USERJOB & JobType.USERJOB3).isUserJob()) self.assertTrue(self.createJob(jobType=JobType.USERJOB & JobType.USERJOB4).isUserJob()) self.assertFalse(self.createJob(jobType=JobType.COMMFLAG).isUserJob()) self.assertFalse(self.createJob(jobType=JobType.SYSTEMJOB).isUserJob()) self.assertFalse(self.createJob(jobType=JobType.TRANSCODE).isUserJob()) def createJob(self, conn=Mock(), db=Mock(), domainCache=Mock(), id=1, jobType=JobType.COMMFLAG, jobStatus=JobStatus.FINISHED): return Job( id=id, channelId=2, startTime=None, insertTime=None, jobType=jobType, cmds=None, flags=None, jobStatus=jobStatus, statusTime=None, hostname='localhost', comment=None, scheduledRunTime=None, translator=self.translator, conn=conn, db=db, domainCache=domainCache) class BackendTest(unittest.TestCase): def test_eq_True_by_reference(self): be = Backend('htpc', '127.0.0.1', '6543', True) self.assertTrue(be == be) def test_eq_by_value(self): bes = [Backend('htpc', '127.0.0.1', '6543', True), Backend('htpc', '127.0.0.1', '6543', False), Backend('htpc', '127.0.0.1', '8888', True), Backend('htpc', '127.0.0.2', '6543', True), Backend('slave', '127.0.0.1', '6543', True)] for i, be1 in enumerate(bes): for j, be2 in enumerate(bes): if i == j: self.assertTrue(be1 == be2) else: self.assertFalse(be1 == be2) def test_eq_False_by_type(self): self.assertFalse(Backend('slave', '127.0.0.1', '6543', True) == 'a string') self.assertFalse(Backend('slave', '127.0.0.1', '6543', True) == None) if __name__ == '__main__': import logging.config logging.config.fileConfig('mythbox_log.ini') unittest.main()
gpl-2.0
Pablo126/SSBW
Tarea4/tarea4/lib/python3.5/site-packages/setuptools/namespaces.py
99
3181
import os from distutils import log import itertools from six.moves import map flatten = itertools.chain.from_iterable class Installer: nspkg_ext = '-nspkg.pth' def install_namespaces(self): nsp = self._get_all_ns_packages() if not nsp: return filename, ext = os.path.splitext(self._get_target()) filename += self.nspkg_ext self.outputs.append(filename) log.info("Installing %s", filename) lines = map(self._gen_nspkg_line, nsp) if self.dry_run: # always generate the lines, even in dry run list(lines) return with open(filename, 'wt') as f: f.writelines(lines) def uninstall_namespaces(self): filename, ext = os.path.splitext(self._get_target()) filename += self.nspkg_ext if not os.path.exists(filename): return log.info("Removing %s", filename) os.remove(filename) def _get_target(self): return self.target _nspkg_tmpl = ( "import sys, types, os", "has_mfs = sys.version_info > (3, 5)", "p = os.path.join(%(root)s, *%(pth)r)", "importlib = has_mfs and __import__('importlib.util')", "has_mfs and __import__('importlib.machinery')", "m = has_mfs and " "sys.modules.setdefault(%(pkg)r, " "importlib.util.module_from_spec(" "importlib.machinery.PathFinder.find_spec(%(pkg)r, " "[os.path.dirname(p)])))", "m = m or " "sys.modules.setdefault(%(pkg)r, types.ModuleType(%(pkg)r))", "mp = (m or []) and m.__dict__.setdefault('__path__',[])", "(p not in mp) and mp.append(p)", ) "lines for the namespace installer" _nspkg_tmpl_multi = ( 'm and setattr(sys.modules[%(parent)r], %(child)r, m)', ) "additional line(s) when a parent package is indicated" def _get_root(self): return "sys._getframe(1).f_locals['sitedir']" def _gen_nspkg_line(self, pkg): # ensure pkg is not a unicode string under Python 2.7 pkg = str(pkg) pth = tuple(pkg.split('.')) root = self._get_root() tmpl_lines = self._nspkg_tmpl parent, sep, child = pkg.rpartition('.') if parent: tmpl_lines += self._nspkg_tmpl_multi return ';'.join(tmpl_lines) % locals() + '\n' def _get_all_ns_packages(self): """Return sorted list of all package namespaces""" pkgs = self.distribution.namespace_packages or [] return sorted(flatten(map(self._pkg_names, pkgs))) @staticmethod def _pkg_names(pkg): """ Given a namespace package, yield the components of that package. >>> names = Installer._pkg_names('a.b.c') >>> set(names) == set(['a', 'a.b', 'a.b.c']) True """ parts = pkg.split('.') while parts: yield '.'.join(parts) parts.pop() class DevelopInstaller(Installer): def _get_root(self): return repr(str(self.egg_path)) def _get_target(self): return self.egg_link
gpl-3.0
jomolinare/kobocat
onadata/apps/main/migrations/0007_replace_special_chars_and_whitespace_in_usernames.py
13
7266
# encoding: utf-8 import datetime from south.db import db from south.v2 import DataMigration from django.db import models import re class Migration(DataMigration): def forwards(self, orm): def update_username_for_user(user, match_on, sub_with): regex = re.compile(match_on) if regex.search(user.username): user.username = regex.sub(sub_with, user.username) user.save() # remove whitespace and replace special chars with underscores for user in orm['auth.User'].objects.all(): update_username_for_user(user, "\s", "") update_username_for_user(user, "\W", "_") def backwards(self, orm): pass models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'main.metadata': { 'Meta': {'object_name': 'MetaData'}, 'data_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True'}), 'data_file_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}), 'data_type': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'data_value': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'xform': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['logger.XForm']"}) }, 'main.userprofile': { 'Meta': {'object_name': 'UserProfile'}, 'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), 'country': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}), 'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), 'home_page': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), 'organization': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), 'require_auth': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'twitter': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), 'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"}) }, 'logger.xform': { 'Meta': {'ordering': "('id_string',)", 'unique_together': "(('user', 'id_string'),)", 'object_name': 'XForm'}, 'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {'default': "u''", 'null': 'True'}), 'downloadable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'has_start_time': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'id_string': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}), 'json': ('django.db.models.fields.TextField', [], {'default': "u''"}), 'shared': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'shared_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'xforms'", 'null': 'True', 'to': "orm['auth.User']"}), 'xls': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True'}), 'xml': ('django.db.models.fields.TextField', [], {}) } } complete_apps = ['main']
bsd-2-clause
hpcloud-mon/tempest
tempest/api/compute/servers/test_servers.py
4
5628
# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest_lib.common.utils import data_utils from tempest.api.compute import base from tempest import test class ServersTestJSON(base.BaseV2ComputeTest): @classmethod def setup_clients(cls): super(ServersTestJSON, cls).setup_clients() cls.client = cls.servers_client def tearDown(self): self.clear_servers() super(ServersTestJSON, self).tearDown() @test.attr(type='gate') @test.idempotent_id('b92d5ec7-b1dd-44a2-87e4-45e888c46ef0') def test_create_server_with_admin_password(self): # If an admin password is provided on server creation, the server's # root password should be set to that password. server = self.create_test_server(adminPass='testpassword') # Verify the password is set correctly in the response self.assertEqual('testpassword', server['adminPass']) @test.attr(type='gate') @test.idempotent_id('8fea6be7-065e-47cf-89b8-496e6f96c699') def test_create_with_existing_server_name(self): # Creating a server with a name that already exists is allowed # TODO(sdague): clear out try, we do cleanup one layer up server_name = data_utils.rand_name('server') server = self.create_test_server(name=server_name, wait_until='ACTIVE') id1 = server['id'] server = self.create_test_server(name=server_name, wait_until='ACTIVE') id2 = server['id'] self.assertNotEqual(id1, id2, "Did not create a new server") server = self.client.get_server(id1) name1 = server['name'] server = self.client.get_server(id2) name2 = server['name'] self.assertEqual(name1, name2) @test.attr(type='gate') @test.idempotent_id('f9e15296-d7f9-4e62-b53f-a04e89160833') def test_create_specify_keypair(self): # Specify a keypair while creating a server key_name = data_utils.rand_name('key') self.keypairs_client.create_keypair(key_name) self.keypairs_client.list_keypairs() server = self.create_test_server(key_name=key_name) self.client.wait_for_server_status(server['id'], 'ACTIVE') server = self.client.get_server(server['id']) self.assertEqual(key_name, server['key_name']) def _update_server_name(self, server_id, status): # The server name should be changed to the the provided value new_name = data_utils.rand_name('server') # Update the server with a new name self.client.update_server(server_id, name=new_name) self.client.wait_for_server_status(server_id, status) # Verify the name of the server has changed server = self.client.get_server(server_id) self.assertEqual(new_name, server['name']) return server @test.attr(type='gate') @test.idempotent_id('5e6ccff8-349d-4852-a8b3-055df7988dd2') def test_update_server_name(self): # The server name should be changed to the the provided value server = self.create_test_server(wait_until='ACTIVE') self._update_server_name(server['id'], 'ACTIVE') @test.attr(type='gate') @test.idempotent_id('6ac19cb1-27a3-40ec-b350-810bdc04c08e') def test_update_server_name_in_stop_state(self): # The server name should be changed to the the provided value server = self.create_test_server(wait_until='ACTIVE') self.client.stop(server['id']) self.client.wait_for_server_status(server['id'], 'SHUTOFF') updated_server = self._update_server_name(server['id'], 'SHUTOFF') self.assertNotIn('progress', updated_server) @test.attr(type='gate') @test.idempotent_id('89b90870-bc13-4b73-96af-f9d4f2b70077') def test_update_access_server_address(self): # The server's access addresses should reflect the provided values server = self.create_test_server(wait_until='ACTIVE') # Update the IPv4 and IPv6 access addresses self.client.update_server(server['id'], accessIPv4='1.1.1.1', accessIPv6='::babe:202:202') self.client.wait_for_server_status(server['id'], 'ACTIVE') # Verify the access addresses have been updated server = self.client.get_server(server['id']) self.assertEqual('1.1.1.1', server['accessIPv4']) self.assertEqual('::babe:202:202', server['accessIPv6']) @test.attr(type='gate') @test.idempotent_id('38fb1d02-c3c5-41de-91d3-9bc2025a75eb') def test_create_server_with_ipv6_addr_only(self): # Create a server without an IPv4 address(only IPv6 address). server = self.create_test_server(accessIPv6='2001:2001::3') self.client.wait_for_server_status(server['id'], 'ACTIVE') server = self.client.get_server(server['id']) self.assertEqual('2001:2001::3', server['accessIPv6'])
apache-2.0
jchevin/MissionPlanner-master
packages/IronPython.StdLib.2.7.4/content/Lib/distutils/command/install_headers.py
251
1346
"""distutils.command.install_headers Implements the Distutils 'install_headers' command, to install C/C++ header files to the Python include directory.""" __revision__ = "$Id$" from distutils.core import Command # XXX force is never used class install_headers(Command): description = "install C/C++ header files" user_options = [('install-dir=', 'd', "directory to install header files to"), ('force', 'f', "force installation (overwrite existing files)"), ] boolean_options = ['force'] def initialize_options(self): self.install_dir = None self.force = 0 self.outfiles = [] def finalize_options(self): self.set_undefined_options('install', ('install_headers', 'install_dir'), ('force', 'force')) def run(self): headers = self.distribution.headers if not headers: return self.mkpath(self.install_dir) for header in headers: (out, _) = self.copy_file(header, self.install_dir) self.outfiles.append(out) def get_inputs(self): return self.distribution.headers or [] def get_outputs(self): return self.outfiles # class install_headers
gpl-3.0
acutesoftware/AIKIF
aikif/.z_prototype/create_word_lists.py
1
5833
# coding: utf-8 # create_word_lists.py written by Duncan Murray 3/2/2014 # creates a simple list of verbs, nouns and adjectives for # simple 'bag of words' parsing. # First implementation uses the following dataset: # WordNet 3.1 Copyright 2011 by Princeton University. import os import sys from xml.dom.minidom import parse, parseString import xml sys.path.append('..//..//aspytk') #import as_util_data as dat import lib_data as dat import lib_file as fle import xml.etree.ElementTree as ET ontologyClassificationFile = 'S:\\DATA\\opendata\\ontology\\wikipedia_categories\\dbpedia-ontology.owl.bz2.owl.bz2.owl' ipFolder = 'S://DATA//opendata//datasets//dict//' opFolder = '..//data//ref//' # os.getcwd() def SaveListFirstWordOnly(msg, ipfile, opFile): numRecs = 0 opList = [] rawList = dat.ReadFileToList(ipfile) for line in rawList: if line[0:1] != ' ': numRecs = numRecs + 1 noun = line[0:line.find(' ')] opList.append(noun) dat.SaveListToFile(opList, opFile) print ('Saved ' + str(numRecs) + ' ' + msg) return numRecs def ExtractListOfWords(): # ---- get the list of nouns, adverbs and adjectives ---- numRecs = SaveListFirstWordOnly('nouns', ipFolder + 'index.noun', opFolder + 'nounList.txt') numRecs = SaveListFirstWordOnly('adverbs', ipFolder + 'index.adv', opFolder + 'advList.txt') numRecs = SaveListFirstWordOnly('adjectives', ipFolder + 'index.adj', opFolder + 'adjList.txt') # ---- get the list of verbs ---- (need some stemming here) numRecs = 0 verbList = [] rawList = dat.ReadFileToList(ipFolder + 'index.verb') for line in rawList: if line[0:1] != ' ': numRecs = numRecs + 1 verb = line[0:line.find(' ')] verbList.append(verb) verbList.append(verb + 's') # turns play to plays - TODO, use stemming algorithm verbList.append(verb + 'ed') dat.SaveListToFile(verbList, opFolder + 'verbList.txt') print ('Saved ' + str(numRecs) + ' verbs') def ExtractCat(fname, opFile): #Look at the wikipedia categories - no - far too detailed #S:\DATA\opendata\ontology\wikipedia_categories\articlecategories_en.csv.bz2.csv.bz2.csv #ipFile = 'S:\\DATA\\opendata\\ontology\\wikipedia_categories\\articlecategories_en.csv.bz2.csv.bz2.csv' # ipFile = 'S:\\DATA\\opendata\\ontology\\wikipedia_categories\\articlecategories_en.csv.bz2.csv.bz2.csv' # headFile = 'S:\\DATA\\opendata\\ontology\\wikipedia_categories\\articlecategories_head.csv' # sampleFile = 'S:\\DATA\\opendata\\ontology\\wikipedia_categories\\articlecategories_sample.csv' # dat.unix_head(ipFile, headFile, 5000) # dat.getPercentRandomRecords(ipFile, sampleFile, 1) # Later # infoboxproperties_en.csv.bz2.csv.bz2 = list of properties or labels (name, age, BestBowlerFirstInnings, PilotName, ProducerAge, etc) # ontology is in RDF format print(fname + ' = ' + str(dat.countLinesInFile(fname)) + ' rows' ) # dom = xml.dom.minidom.parse( fname ) # parse an XML file #print (dom1.toxml()) # dom.findall('owl:DatatypeProperty', namespaces=namespaces) # for node in dom.getElementsByTagName('DatatypeProperty'): # visit every node <bar /> numFound = 0 categories = [] dom = parse( ontologyClassificationFile ) for node in dom.getElementsByTagName('rdfs:label'): # visit every node <bar /> #print (node.toxml()) numFound = numFound + 1 cat = dat.StriptHTMLtags(node.toxml()) print(cat) categories.append(cat) #print ('subclasses = ') #for sub in node.findall('subClassOf'): # print (sub.toxml()) dat.SaveListToFile(categories, opFile) return numFound def GetOntologyExtract(fname, txt): numFound = 0 # see http://stackoverflow.com/questions/14853243/parsing-xml-with-namespace-in-python-elementtree namespaces = {'owl': 'http://www.w3.org/2002/07/owl#'} # add more as needed #namespaces = {'rdfs': 'http://www.w3.org/2000/01/rdf-schema#'} print('Extracting ' + txt ) tree = ET.parse(fname) doc = tree.getroot() #nodes = doc.findall('owl:Class', namespaces=namespaces) nodes = doc.findall(txt, namespaces=namespaces) #nodes = doc.findall('rdfs:label', namespaces=namespaces) print('found ' + str(len(nodes)) + ' nodes\n ' ) for node in nodes: numFound = numFound + 1 for itm in node.items(): print (itm[1][28:]) #print(node.tail) # find_in_tree(root,"myNode") #print(node.attrib) #print(len(node)) #print(node.get('rdfs')) #print('node.text= ' + node.text) #print('node.tag = ' + node.tag) return numFound #-----------------------------------------------------------# # Build a Master Word List # #-----------------------------------------------------------# # Idea is to concatenate unique words from all lists and also # names from contacts or baby names list (+country names, etc) # to create one huge list of ALL words used in english sentences. # Then, in a table each word has flags like isVerb, isNoun, isAdv, # isAdjective, isName, isPerson, isPlace # Once this is built then ANY sentence can be parsed into a list # of ids instead of storing strings you store a series of numbers. # MAIN opFile = '..\\data\\ref\\ontology_list.txt' print('create_word_list.py - script to extract lists for AIKIF') print('Reading - ' + ontologyClassificationFile) #ExtractListOfWords() print('Extracted ' + str(ExtractCat(ontologyClassificationFile, opFile)) + ' nodes to ' + opFile) #print('Found ' + str(GetOntologyExtract(ontologyClassificationFile, 'owl:Class')) + ' nodes') print('Done..')
gpl-3.0
Jet-Streaming/framework
deps/v8/test/simdjs/testcfg.py
3
2290
# Copyright 2014 the V8 project authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import os import shutil import sys from testrunner.local import testsuite from testrunner.objects import testcase SIMDJS_SUITE_PATH = ["data", "src"] class SimdJsTestSuite(testsuite.TestSuite): def __init__(self, name, root): super(SimdJsTestSuite, self).__init__(name, root) self.testroot = os.path.join(self.root, *SIMDJS_SUITE_PATH) self.ParseTestRecord = None def ListTests(self, context): tests = [ testcase.TestCase(self, 'shell_test_runner'), ] for filename in os.listdir(os.path.join(self.testroot, 'benchmarks')): if (not filename.endswith('.js') or filename in ['run.js', 'run_browser.js', 'base.js']): continue name = filename.rsplit('.')[0] tests.append( testcase.TestCase(self, 'benchmarks/' + name)) return tests def GetFlagsForTestCase(self, testcase, context): return (testcase.flags + context.mode_flags + [os.path.join(self.root, "harness-adapt.js"), "--harmony", "--harmony-simd", os.path.join(self.testroot, testcase.path + ".js"), os.path.join(self.root, "harness-finish.js")]) def GetSourceForTest(self, testcase): filename = os.path.join(self.testroot, testcase.path + ".js") with open(filename) as f: return f.read() def IsNegativeTest(self, testcase): return False def IsFailureOutput(self, testcase): if testcase.output.exit_code != 0: return True return "FAILED!" in testcase.output.stdout def DownloadData(self): print "SimdJs download is deprecated. It's part of DEPS." # Clean up old directories and archive files. directory_old_name = os.path.join(self.root, "data.old") if os.path.exists(directory_old_name): shutil.rmtree(directory_old_name) archive_files = [f for f in os.listdir(self.root) if f.startswith("ecmascript_simd-")] if len(archive_files) > 0: print "Clobber outdated test archives ..." for f in archive_files: os.remove(os.path.join(self.root, f)) def GetSuite(name, root): return SimdJsTestSuite(name, root)
mpl-2.0
JavML/django
django/middleware/csrf.py
118
9930
""" Cross Site Request Forgery Middleware. This module provides a middleware that implements protection against request forgeries from other sites. """ from __future__ import unicode_literals import logging import re from django.conf import settings from django.core.urlresolvers import get_callable from django.utils.cache import patch_vary_headers from django.utils.crypto import constant_time_compare, get_random_string from django.utils.encoding import force_text from django.utils.http import is_same_domain from django.utils.six.moves.urllib.parse import urlparse logger = logging.getLogger('django.request') REASON_NO_REFERER = "Referer checking failed - no Referer." REASON_BAD_REFERER = "Referer checking failed - %s does not match any trusted origins." REASON_NO_CSRF_COOKIE = "CSRF cookie not set." REASON_BAD_TOKEN = "CSRF token missing or incorrect." REASON_MALFORMED_REFERER = "Referer checking failed - Referer is malformed." REASON_INSECURE_REFERER = "Referer checking failed - Referer is insecure while host is secure." CSRF_KEY_LENGTH = 32 def _get_failure_view(): """ Returns the view to be used for CSRF rejections """ return get_callable(settings.CSRF_FAILURE_VIEW) def _get_new_csrf_key(): return get_random_string(CSRF_KEY_LENGTH) def get_token(request): """ Returns the CSRF token required for a POST form. The token is an alphanumeric value. A new token is created if one is not already set. A side effect of calling this function is to make the csrf_protect decorator and the CsrfViewMiddleware add a CSRF cookie and a 'Vary: Cookie' header to the outgoing response. For this reason, you may need to use this function lazily, as is done by the csrf context processor. """ if "CSRF_COOKIE" not in request.META: request.META["CSRF_COOKIE"] = _get_new_csrf_key() request.META["CSRF_COOKIE_USED"] = True return request.META["CSRF_COOKIE"] def rotate_token(request): """ Changes the CSRF token in use for a request - should be done on login for security purposes. """ request.META.update({ "CSRF_COOKIE_USED": True, "CSRF_COOKIE": _get_new_csrf_key(), }) def _sanitize_token(token): # Allow only alphanum if len(token) > CSRF_KEY_LENGTH: return _get_new_csrf_key() token = re.sub('[^a-zA-Z0-9]+', '', force_text(token)) if token == "": # In case the cookie has been truncated to nothing at some point. return _get_new_csrf_key() return token class CsrfViewMiddleware(object): """ Middleware that requires a present and correct csrfmiddlewaretoken for POST requests that have a CSRF cookie, and sets an outgoing CSRF cookie. This middleware should be used in conjunction with the csrf_token template tag. """ # The _accept and _reject methods currently only exist for the sake of the # requires_csrf_token decorator. def _accept(self, request): # Avoid checking the request twice by adding a custom attribute to # request. This will be relevant when both decorator and middleware # are used. request.csrf_processing_done = True return None def _reject(self, request, reason): logger.warning('Forbidden (%s): %s', reason, request.path, extra={ 'status_code': 403, 'request': request, } ) return _get_failure_view()(request, reason=reason) def process_view(self, request, callback, callback_args, callback_kwargs): if getattr(request, 'csrf_processing_done', False): return None try: csrf_token = _sanitize_token( request.COOKIES[settings.CSRF_COOKIE_NAME]) # Use same token next time request.META['CSRF_COOKIE'] = csrf_token except KeyError: csrf_token = None # Wait until request.META["CSRF_COOKIE"] has been manipulated before # bailing out, so that get_token still works if getattr(callback, 'csrf_exempt', False): return None # Assume that anything not defined as 'safe' by RFC2616 needs protection if request.method not in ('GET', 'HEAD', 'OPTIONS', 'TRACE'): if getattr(request, '_dont_enforce_csrf_checks', False): # Mechanism to turn off CSRF checks for test suite. # It comes after the creation of CSRF cookies, so that # everything else continues to work exactly the same # (e.g. cookies are sent, etc.), but before any # branches that call reject(). return self._accept(request) if request.is_secure(): # Suppose user visits http://example.com/ # An active network attacker (man-in-the-middle, MITM) sends a # POST form that targets https://example.com/detonate-bomb/ and # submits it via JavaScript. # # The attacker will need to provide a CSRF cookie and token, but # that's no problem for a MITM and the session-independent # nonce we're using. So the MITM can circumvent the CSRF # protection. This is true for any HTTP connection, but anyone # using HTTPS expects better! For this reason, for # https://example.com/ we need additional protection that treats # http://example.com/ as completely untrusted. Under HTTPS, # Barth et al. found that the Referer header is missing for # same-domain requests in only about 0.2% of cases or less, so # we can use strict Referer checking. referer = force_text( request.META.get('HTTP_REFERER'), strings_only=True, errors='replace' ) if referer is None: return self._reject(request, REASON_NO_REFERER) referer = urlparse(referer) # Make sure we have a valid URL for Referer. if '' in (referer.scheme, referer.netloc): return self._reject(request, REASON_MALFORMED_REFERER) # Ensure that our Referer is also secure. if referer.scheme != 'https': return self._reject(request, REASON_INSECURE_REFERER) # If there isn't a CSRF_COOKIE_DOMAIN, assume we need an exact # match on host:port. If not, obey the cookie rules. if settings.CSRF_COOKIE_DOMAIN is None: # request.get_host() includes the port. good_referer = request.get_host() else: good_referer = settings.CSRF_COOKIE_DOMAIN server_port = request.META['SERVER_PORT'] if server_port not in ('443', '80'): good_referer = '%s:%s' % (good_referer, server_port) # Here we generate a list of all acceptable HTTP referers, # including the current host since that has been validated # upstream. good_hosts = list(settings.CSRF_TRUSTED_ORIGINS) good_hosts.append(good_referer) if not any(is_same_domain(referer.netloc, host) for host in good_hosts): reason = REASON_BAD_REFERER % referer.geturl() return self._reject(request, reason) if csrf_token is None: # No CSRF cookie. For POST requests, we insist on a CSRF cookie, # and in this way we can avoid all CSRF attacks, including login # CSRF. return self._reject(request, REASON_NO_CSRF_COOKIE) # Check non-cookie token for match. request_csrf_token = "" if request.method == "POST": try: request_csrf_token = request.POST.get('csrfmiddlewaretoken', '') except IOError: # Handle a broken connection before we've completed reading # the POST data. process_view shouldn't raise any # exceptions, so we'll ignore and serve the user a 403 # (assuming they're still listening, which they probably # aren't because of the error). pass if request_csrf_token == "": # Fall back to X-CSRFToken, to make things easier for AJAX, # and possible for PUT/DELETE. request_csrf_token = request.META.get(settings.CSRF_HEADER_NAME, '') if not constant_time_compare(request_csrf_token, csrf_token): return self._reject(request, REASON_BAD_TOKEN) return self._accept(request) def process_response(self, request, response): if getattr(response, 'csrf_processing_done', False): return response if not request.META.get("CSRF_COOKIE_USED", False): return response # Set the CSRF cookie even if it's already set, so we renew # the expiry timer. response.set_cookie(settings.CSRF_COOKIE_NAME, request.META["CSRF_COOKIE"], max_age=settings.CSRF_COOKIE_AGE, domain=settings.CSRF_COOKIE_DOMAIN, path=settings.CSRF_COOKIE_PATH, secure=settings.CSRF_COOKIE_SECURE, httponly=settings.CSRF_COOKIE_HTTPONLY ) # Content varies with the CSRF cookie, so set the Vary header. patch_vary_headers(response, ('Cookie',)) response.csrf_processing_done = True return response
bsd-3-clause
Infinidat/gitpy
gitpy/tag.py
1
1690
# Copyright (c) 2009, Rotem Yaari <[email protected]> # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of organization nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY Rotem Yaari ''AS IS'' AND ANY # EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL Rotem Yaari BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from .ref import Ref class Tag(Ref): def __repr__(self): return "<tag %s>" % (self.name,) class LocalTag(Tag): pass class RemoteTag(Tag): pass
bsd-3-clause
dsully/SoCo
soco/alarms.py
6
11801
# -*- coding: utf-8 -*- """This module contains classes relating to Sonos Alarms.""" from __future__ import unicode_literals import logging import re import weakref from datetime import datetime from .core import PLAY_MODES from .xml import XML log = logging.getLogger(__name__) # pylint: disable=C0103 TIME_FORMAT = "%H:%M:%S" def is_valid_recurrence(text): """Check that ``text`` is a valid recurrence string. A valid recurrence string is ``DAILY``, ``ONCE``, ``WEEKDAYS``, ``WEEKENDS`` or of the form ``ON_DDDDDD`` where ``D`` is a number from 0-7 representing a day of the week (Sunday is 0), e.g. ``ON_034`` meaning Sunday, Wednesday and Thursday Args: text (str): the recurrence string to check. Returns: bool: `True` if the recurrence string is valid, else `False`. Examples: >>> from soco.alarms import is_valid_recurrence >>> is_valid_recurrence('WEEKENDS') True >>> is_valid_recurrence('') False >>> is_valid_recurrence('ON_132') # Mon, Tue, Wed True >>> is_valid_recurrence('ON_777') # Sat True >>> is_valid_recurrence('ON_3421') # Mon, Tue, Wed, Thur True >>> is_valid_recurrence('ON_123456789') # Too many digits False """ if text in ("DAILY", "ONCE", "WEEKDAYS", "WEEKENDS"): return True return re.search(r'^ON_[0-7]{1,7}$', text) is not None class Alarm(object): """A class representing a Sonos Alarm. Alarms may be created or updated and saved to, or removed from the Sonos system. An alarm is not automatically saved. Call `save()` to do that. Example: >>> device = soco.discovery.any_soco() >>> # create an alarm with default properties >>> alarm = Alarm(device) >>> print alarm.volume 20 >>> print get_alarms() set([]) >>> # save the alarm to the Sonos system >>> alarm.save() >>> print get_alarms() set([<Alarm id:88@15:26:15 at 0x107abb090>]) >>> # update the alarm >>> alarm.recurrence = "ONCE" >>> # Save it again for the change to take effect >>> alarm.save() >>> # Remove it >>> alarm.remove() >>> print get_alarms() set([]) """ # pylint: disable=too-many-instance-attributes _all_alarms = weakref.WeakValueDictionary() # pylint: disable=too-many-arguments def __init__( self, zone, start_time=None, duration=None, recurrence='DAILY', enabled=True, program_uri=None, program_metadata='', play_mode='NORMAL', volume=20, include_linked_zones=False): """ Args: zone (`SoCo`): The soco instance which will play the alarm. start_time (`datetime.time`, optional): The alarm's start time. Specify hours, minutes and seconds only. Defaults to the current time. duration (`datetime.time`, optional): The alarm's duration. Specify hours, minutes and seconds only. May be `None` for unlimited duration. Defaults to `None`. recurrence (str, optional): A string representing how often the alarm should be triggered. Can be ``DAILY``, ``ONCE``, ``WEEKDAYS``, ``WEEKENDS`` or of the form ``ON_DDDDDD`` where ``D`` is a number from 0-7 representing a day of the week (Sunday is 0), e.g. ``ON_034`` meaning Sunday, Wednesday and Thursday. Defaults to ``DAILY``. enabled (bool, optional): `True` if alarm is enabled, `False` otherwise. Defaults to `True`. program_uri(str, optional): The uri to play. If `None`, the built-in Sonos chime sound will be used. Defaults to `None`. program_metadata (str, optional): The metadata associated with `program_uri`. Defaults to ''. play_mode(str, optional): The play mode for the alarm. Can be one of ``NORMAL``, ``SHUFFLE_NOREPEAT``, ``SHUFFLE``, ``REPEAT_ALL``. Defaults to ``NORMAL``. volume (int, optional): The alarm's volume (0-100). Defaults to 20. include_linked_zones (bool, optional): `True` if the alarm should be played on the other speakers in the same group, `False` otherwise. Defaults to `False`. """ super(Alarm, self).__init__() self.zone = zone if start_time is None: start_time = datetime.now().time() #: `datetime.time`: The alarm's start time. self.start_time = start_time #: `datetime.time`: The alarm's duration. self.duration = duration self._recurrence = recurrence #: `bool`: `True` if the alarm is enabled, else `False`. self.enabled = enabled #: self.program_uri = program_uri #: `str`: The uri to play. self.program_metadata = program_metadata self._play_mode = play_mode self._volume = volume #: `bool`: `True` if the alarm should be played on the other speakers #: in the same group, `False` otherwise. self.include_linked_zones = include_linked_zones self._alarm_id = None def __repr__(self): middle = str(self.start_time.strftime(TIME_FORMAT)) return "<{0} id:{1}@{2} at {3}>".format( self.__class__.__name__, self._alarm_id, middle, hex(id(self))) @property def play_mode(self): """ `str`: The play mode for the alarm. Can be one of ``NORMAL``, ``SHUFFLE_NOREPEAT``, ``SHUFFLE``, ``REPEAT_ALL``. """ return self._play_mode @play_mode.setter def play_mode(self, play_mode): """See `playmode`.""" play_mode = play_mode.upper() if play_mode not in PLAY_MODES: raise KeyError("'%s' is not a valid play mode" % play_mode) self._play_mode = play_mode @property def volume(self): """`int`: The alarm's volume (0-100).""" return self._volume @volume.setter def volume(self, volume): """See `volume`.""" # max 100 volume = int(volume) self._volume = max(0, min(volume, 100)) # Coerce in range @property def recurrence(self): """`str`: How often the alarm should be triggered. Can be ``DAILY``, ``ONCE``, ``WEEKDAYS``, ``WEEKENDS`` or of the form ``ON_DDDDDDD`` where ``D`` is a number from 0-7 representing a day of the week (Sunday is 0), e.g. ``ON_034`` meaning Sunday, Wednesday and Thursday. """ return self._recurrence @recurrence.setter def recurrence(self, recurrence): """See `recurrence`.""" if not is_valid_recurrence(recurrence): raise KeyError("'%s' is not a valid recurrence value" % recurrence) self._recurrence = recurrence def save(self): """Save the alarm to the Sonos system. Raises: ~soco.exceptions.SoCoUPnPException: if the alarm cannot be created because there is already an alarm for this room at the specified time. """ # pylint: disable=bad-continuation args = [ ('StartLocalTime', self.start_time.strftime(TIME_FORMAT)), ('Duration', '' if self.duration is None else self.duration.strftime(TIME_FORMAT)), ('Recurrence', self.recurrence), ('Enabled', '1' if self.enabled else '0'), ('RoomUUID', self.zone.uid), ('ProgramURI', "x-rincon-buzzer:0" if self.program_uri is None else self.program_uri), ('ProgramMetaData', self.program_metadata), ('PlayMode', self.play_mode), ('Volume', self.volume), ('IncludeLinkedZones', '1' if self.include_linked_zones else '0') ] if self._alarm_id is None: response = self.zone.alarmClock.CreateAlarm(args) self._alarm_id = response['AssignedID'] Alarm._all_alarms[self._alarm_id] = self else: # The alarm has been saved before. Update it instead. args.insert(0, ('ID', self._alarm_id)) self.zone.alarmClock.UpdateAlarm(args) def remove(self): """Remove the alarm from the Sonos system. There is no need to call `save`. The Python instance is not deleted, and can be saved back to Sonos again if desired. """ self.zone.alarmClock.DestroyAlarm([ ('ID', self._alarm_id) ]) alarm_id = self._alarm_id try: del Alarm._all_alarms[alarm_id] except KeyError: pass self._alarm_id = None def get_alarms(soco=None): """Get a set of all alarms known to the Sonos system. Args: soco (`SoCo`, optional): a SoCo instance to query. If None, a random instance is used. Defaults to `None`. Returns: set: A set of `Alarm` instances Note: Any existing `Alarm` instance will have its attributes updated to those currently stored on the Sonos system. """ # Get a soco instance to query. It doesn't matter which. if soco is None: soco = soco.discovery.any_soco() response = soco.alarmClock.ListAlarms() alarm_list = response['CurrentAlarmList'] tree = XML.fromstring(alarm_list.encode('utf-8')) # An alarm list looks like this: # <Alarms> # <Alarm ID="14" StartTime="07:00:00" # Duration="02:00:00" Recurrence="DAILY" Enabled="1" # RoomUUID="RINCON_000ZZZZZZ1400" # ProgramURI="x-rincon-buzzer:0" ProgramMetaData="" # PlayMode="SHUFFLE_NOREPEAT" Volume="25" # IncludeLinkedZones="0"/> # <Alarm ID="15" StartTime="07:00:00" # Duration="02:00:00" Recurrence="DAILY" Enabled="1" # RoomUUID="RINCON_000ZZZZZZ01400" # ProgramURI="x-rincon-buzzer:0" ProgramMetaData="" # PlayMode="SHUFFLE_NOREPEAT" Volume="25" # IncludeLinkedZones="0"/> # </Alarms> # pylint: disable=protected-access alarms = tree.findall('Alarm') result = set() for alarm in alarms: values = alarm.attrib alarm_id = values['ID'] # If an instance already exists for this ID, update and return it. # Otherwise, create a new one and populate its values if Alarm._all_alarms.get(alarm_id): instance = Alarm._all_alarms.get(alarm_id) else: instance = Alarm(None) instance._alarm_id = alarm_id Alarm._all_alarms[instance._alarm_id] = instance instance.start_time = datetime.strptime( values['StartTime'], "%H:%M:%S").time() # NB StartTime, not # StartLocalTime, which is used by CreateAlarm instance.duration = None if values['Duration'] == '' else\ datetime.strptime(values['Duration'], "%H:%M:%S").time() instance.recurrence = values['Recurrence'] instance.enabled = values['Enabled'] == '1' instance.zone = [zone for zone in soco.all_zones if zone.uid == values['RoomUUID']][0] instance.program_uri = None if values['ProgramURI'] ==\ "x-rincon-buzzer:0" else values['ProgramURI'] instance.program_metadata = values['ProgramMetaData'] instance.play_mode = values['PlayMode'] instance.volume = values['Volume'] instance.include_linked_zones = values['IncludeLinkedZones'] == '1' result.add(instance) return result
mit
phantasien/falkor
deps/bastian/tools/gyp/pylib/gyp/ordered_dict.py
2354
10366
# Unmodified from http://code.activestate.com/recipes/576693/ # other than to add MIT license header (as specified on page, but not in code). # Linked from Python documentation here: # http://docs.python.org/2/library/collections.html#collections.OrderedDict # # This should be deleted once Py2.7 is available on all bots, see # http://crbug.com/241769. # # Copyright (c) 2009 Raymond Hettinger. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy. # Passes Python2.7's test suite and incorporates all the latest updates. try: from thread import get_ident as _get_ident except ImportError: from dummy_thread import get_ident as _get_ident try: from _abcoll import KeysView, ValuesView, ItemsView except ImportError: pass class OrderedDict(dict): 'Dictionary that remembers insertion order' # An inherited dict maps keys to values. # The inherited dict provides __getitem__, __len__, __contains__, and get. # The remaining methods are order-aware. # Big-O running times for all methods are the same as for regular dictionaries. # The internal self.__map dictionary maps keys to links in a doubly linked list. # The circular doubly linked list starts and ends with a sentinel element. # The sentinel element never gets deleted (this simplifies the algorithm). # Each link is stored as a list of length three: [PREV, NEXT, KEY]. def __init__(self, *args, **kwds): '''Initialize an ordered dictionary. Signature is the same as for regular dictionaries, but keyword arguments are not recommended because their insertion order is arbitrary. ''' if len(args) > 1: raise TypeError('expected at most 1 arguments, got %d' % len(args)) try: self.__root except AttributeError: self.__root = root = [] # sentinel node root[:] = [root, root, None] self.__map = {} self.__update(*args, **kwds) def __setitem__(self, key, value, dict_setitem=dict.__setitem__): 'od.__setitem__(i, y) <==> od[i]=y' # Setting a new item creates a new link which goes at the end of the linked # list, and the inherited dictionary is updated with the new key/value pair. if key not in self: root = self.__root last = root[0] last[1] = root[0] = self.__map[key] = [last, root, key] dict_setitem(self, key, value) def __delitem__(self, key, dict_delitem=dict.__delitem__): 'od.__delitem__(y) <==> del od[y]' # Deleting an existing item uses self.__map to find the link which is # then removed by updating the links in the predecessor and successor nodes. dict_delitem(self, key) link_prev, link_next, key = self.__map.pop(key) link_prev[1] = link_next link_next[0] = link_prev def __iter__(self): 'od.__iter__() <==> iter(od)' root = self.__root curr = root[1] while curr is not root: yield curr[2] curr = curr[1] def __reversed__(self): 'od.__reversed__() <==> reversed(od)' root = self.__root curr = root[0] while curr is not root: yield curr[2] curr = curr[0] def clear(self): 'od.clear() -> None. Remove all items from od.' try: for node in self.__map.itervalues(): del node[:] root = self.__root root[:] = [root, root, None] self.__map.clear() except AttributeError: pass dict.clear(self) def popitem(self, last=True): '''od.popitem() -> (k, v), return and remove a (key, value) pair. Pairs are returned in LIFO order if last is true or FIFO order if false. ''' if not self: raise KeyError('dictionary is empty') root = self.__root if last: link = root[0] link_prev = link[0] link_prev[1] = root root[0] = link_prev else: link = root[1] link_next = link[1] root[1] = link_next link_next[0] = root key = link[2] del self.__map[key] value = dict.pop(self, key) return key, value # -- the following methods do not depend on the internal structure -- def keys(self): 'od.keys() -> list of keys in od' return list(self) def values(self): 'od.values() -> list of values in od' return [self[key] for key in self] def items(self): 'od.items() -> list of (key, value) pairs in od' return [(key, self[key]) for key in self] def iterkeys(self): 'od.iterkeys() -> an iterator over the keys in od' return iter(self) def itervalues(self): 'od.itervalues -> an iterator over the values in od' for k in self: yield self[k] def iteritems(self): 'od.iteritems -> an iterator over the (key, value) items in od' for k in self: yield (k, self[k]) # Suppress 'OrderedDict.update: Method has no argument': # pylint: disable=E0211 def update(*args, **kwds): '''od.update(E, **F) -> None. Update od from dict/iterable E and F. If E is a dict instance, does: for k in E: od[k] = E[k] If E has a .keys() method, does: for k in E.keys(): od[k] = E[k] Or if E is an iterable of items, does: for k, v in E: od[k] = v In either case, this is followed by: for k, v in F.items(): od[k] = v ''' if len(args) > 2: raise TypeError('update() takes at most 2 positional ' 'arguments (%d given)' % (len(args),)) elif not args: raise TypeError('update() takes at least 1 argument (0 given)') self = args[0] # Make progressively weaker assumptions about "other" other = () if len(args) == 2: other = args[1] if isinstance(other, dict): for key in other: self[key] = other[key] elif hasattr(other, 'keys'): for key in other.keys(): self[key] = other[key] else: for key, value in other: self[key] = value for key, value in kwds.items(): self[key] = value __update = update # let subclasses override update without breaking __init__ __marker = object() def pop(self, key, default=__marker): '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value. If key is not found, d is returned if given, otherwise KeyError is raised. ''' if key in self: result = self[key] del self[key] return result if default is self.__marker: raise KeyError(key) return default def setdefault(self, key, default=None): 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od' if key in self: return self[key] self[key] = default return default def __repr__(self, _repr_running={}): 'od.__repr__() <==> repr(od)' call_key = id(self), _get_ident() if call_key in _repr_running: return '...' _repr_running[call_key] = 1 try: if not self: return '%s()' % (self.__class__.__name__,) return '%s(%r)' % (self.__class__.__name__, self.items()) finally: del _repr_running[call_key] def __reduce__(self): 'Return state information for pickling' items = [[k, self[k]] for k in self] inst_dict = vars(self).copy() for k in vars(OrderedDict()): inst_dict.pop(k, None) if inst_dict: return (self.__class__, (items,), inst_dict) return self.__class__, (items,) def copy(self): 'od.copy() -> a shallow copy of od' return self.__class__(self) @classmethod def fromkeys(cls, iterable, value=None): '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S and values equal to v (which defaults to None). ''' d = cls() for key in iterable: d[key] = value return d def __eq__(self, other): '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive while comparison to a regular mapping is order-insensitive. ''' if isinstance(other, OrderedDict): return len(self)==len(other) and self.items() == other.items() return dict.__eq__(self, other) def __ne__(self, other): return not self == other # -- the following methods are only used in Python 2.7 -- def viewkeys(self): "od.viewkeys() -> a set-like object providing a view on od's keys" return KeysView(self) def viewvalues(self): "od.viewvalues() -> an object providing a view on od's values" return ValuesView(self) def viewitems(self): "od.viewitems() -> a set-like object providing a view on od's items" return ItemsView(self)
mit
sebrandon1/bitcoin
test/functional/test_framework/address.py
19
2850
#!/usr/bin/env python3 # Copyright (c) 2016-2017 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Encode and decode BASE58, P2PKH and P2SH addresses.""" from .script import hash256, hash160, sha256, CScript, OP_0 from .util import bytes_to_hex_str, hex_str_to_bytes from . import segwit_addr chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz' def byte_to_base58(b, version): result = '' str = bytes_to_hex_str(b) str = bytes_to_hex_str(chr(version).encode('latin-1')) + str checksum = bytes_to_hex_str(hash256(hex_str_to_bytes(str))) str += checksum[:8] value = int('0x'+str,0) while value > 0: result = chars[value % 58] + result value //= 58 while (str[:2] == '00'): result = chars[0] + result str = str[2:] return result # TODO: def base58_decode def keyhash_to_p2pkh(hash, main = False): assert (len(hash) == 20) version = 0 if main else 111 return byte_to_base58(hash, version) def scripthash_to_p2sh(hash, main = False): assert (len(hash) == 20) version = 5 if main else 196 return byte_to_base58(hash, version) def key_to_p2pkh(key, main = False): key = check_key(key) return keyhash_to_p2pkh(hash160(key), main) def script_to_p2sh(script, main = False): script = check_script(script) return scripthash_to_p2sh(hash160(script), main) def key_to_p2sh_p2wpkh(key, main = False): key = check_key(key) p2shscript = CScript([OP_0, hash160(key)]) return script_to_p2sh(p2shscript, main) def program_to_witness(version, program, main = False): if (type(program) is str): program = hex_str_to_bytes(program) assert 0 <= version <= 16 assert 2 <= len(program) <= 40 assert version > 0 or len(program) in [20, 32] return segwit_addr.encode("bc" if main else "bcrt", version, program) def script_to_p2wsh(script, main = False): script = check_script(script) return program_to_witness(0, sha256(script), main) def key_to_p2wpkh(key, main = False): key = check_key(key) return program_to_witness(0, hash160(key), main) def script_to_p2sh_p2wsh(script, main = False): script = check_script(script) p2shscript = CScript([OP_0, sha256(script)]) return script_to_p2sh(p2shscript, main) def check_key(key): if (type(key) is str): key = hex_str_to_bytes(key) # Assuming this is hex string if (type(key) is bytes and (len(key) == 33 or len(key) == 65)): return key assert(False) def check_script(script): if (type(script) is str): script = hex_str_to_bytes(script) # Assuming this is hex string if (type(script) is bytes or type(script) is CScript): return script assert(False)
mit
awkspace/ansible
test/units/modules/storage/netapp/test_na_ontap_unix_user.py
43
11306
# (c) 2018, NetApp, Inc # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) ''' unit test template for ONTAP Ansible module ''' from __future__ import print_function import json import pytest from units.compat import unittest from units.compat.mock import patch, Mock from ansible.module_utils import basic from ansible.module_utils._text import to_bytes import ansible.module_utils.netapp as netapp_utils from ansible.modules.storage.netapp.na_ontap_unix_user \ import NetAppOntapUnixUser as user_module # module under test if not netapp_utils.has_netapp_lib(): pytestmark = pytest.mark.skip('skipping as missing required netapp_lib') def set_module_args(args): """prepare arguments so that they will be picked up during module creation""" args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access class AnsibleExitJson(Exception): """Exception class to be raised by module.exit_json and caught by the test case""" pass class AnsibleFailJson(Exception): """Exception class to be raised by module.fail_json and caught by the test case""" pass def exit_json(*args, **kwargs): # pylint: disable=unused-argument """function to patch over exit_json; package return data into an exception""" if 'changed' not in kwargs: kwargs['changed'] = False raise AnsibleExitJson(kwargs) def fail_json(*args, **kwargs): # pylint: disable=unused-argument """function to patch over fail_json; package return data into an exception""" kwargs['failed'] = True raise AnsibleFailJson(kwargs) class MockONTAPConnection(object): ''' mock server connection to ONTAP host ''' def __init__(self, kind=None, data=None): ''' save arguments ''' self.kind = kind self.params = data self.xml_in = None self.xml_out = None def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument ''' mock invoke_successfully returning xml data ''' self.xml_in = xml if self.kind == 'user': xml = self.build_user_info(self.params) elif self.kind == 'user-fail': raise netapp_utils.zapi.NaApiError(code='TEST', message="This exception is from the unit test") self.xml_out = xml return xml @staticmethod def build_user_info(data): ''' build xml data for vserser-info ''' xml = netapp_utils.zapi.NaElement('xml') attributes = \ {'attributes-list': {'unix-user-info': {'user-id': data['id'], 'group-id': data['group_id'], 'full-name': data['full_name']}}, 'num-records': 1} xml.translate_struct(attributes) return xml class TestMyModule(unittest.TestCase): ''' a group of related Unit Tests ''' def setUp(self): self.mock_module_helper = patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json) self.mock_module_helper.start() self.addCleanup(self.mock_module_helper.stop) self.server = MockONTAPConnection() self.mock_user = { 'name': 'test', 'id': '11', 'group_id': '12', 'vserver': 'something', 'full_name': 'Test User' } def mock_args(self): return { 'name': self.mock_user['name'], 'group_id': self.mock_user['group_id'], 'id': self.mock_user['id'], 'vserver': self.mock_user['vserver'], 'full_name': self.mock_user['full_name'], 'hostname': 'test', 'username': 'test_user', 'password': 'test_pass!' } def get_user_mock_object(self, kind=None, data=None): """ Helper method to return an na_ontap_unix_user object :param kind: passes this param to MockONTAPConnection() :return: na_ontap_unix_user object """ obj = user_module() obj.autosupport_log = Mock(return_value=None) if data is None: data = self.mock_user obj.server = MockONTAPConnection(kind=kind, data=data) return obj def test_module_fail_when_required_args_missing(self): ''' required arguments are reported as errors ''' with pytest.raises(AnsibleFailJson) as exc: set_module_args({}) user_module() def test_get_nonexistent_user(self): ''' Test if get_unix_user returns None for non-existent user ''' set_module_args(self.mock_args()) result = self.get_user_mock_object().get_unix_user() assert result is None def test_get_existing_user(self): ''' Test if get_unix_user returns details for existing user ''' set_module_args(self.mock_args()) result = self.get_user_mock_object('user').get_unix_user() assert result['full_name'] == self.mock_user['full_name'] def test_get_xml(self): set_module_args(self.mock_args()) obj = self.get_user_mock_object('user') result = obj.get_unix_user() assert obj.server.xml_in['query'] assert obj.server.xml_in['query']['unix-user-info'] user_info = obj.server.xml_in['query']['unix-user-info'] assert user_info['user-name'] == self.mock_user['name'] assert user_info['vserver'] == self.mock_user['vserver'] def test_create_error_missing_params(self): data = self.mock_args() del data['group_id'] set_module_args(data) with pytest.raises(AnsibleFailJson) as exc: self.get_user_mock_object('user').create_unix_user() assert 'Error: Missing one or more required parameters for create: (group_id, id)' == exc.value.args[0]['msg'] @patch('ansible.modules.storage.netapp.na_ontap_unix_user.NetAppOntapUnixUser.create_unix_user') def test_create_called(self, create_user): set_module_args(self.mock_args()) with pytest.raises(AnsibleExitJson) as exc: self.get_user_mock_object().apply() assert exc.value.args[0]['changed'] create_user.assert_called_with() def test_create_xml(self): '''Test create ZAPI element''' set_module_args(self.mock_args()) create = self.get_user_mock_object() with pytest.raises(AnsibleExitJson) as exc: create.apply() mock_key = { 'user-name': 'name', 'group-id': 'group_id', 'user-id': 'id', 'full-name': 'full_name' } for key in ['user-name', 'user-id', 'group-id', 'full-name']: assert create.server.xml_in[key] == self.mock_user[mock_key[key]] def test_create_wihtout_full_name(self): '''Test create ZAPI element''' data = self.mock_args() del data['full_name'] set_module_args(data) create = self.get_user_mock_object() with pytest.raises(AnsibleExitJson) as exc: create.apply() with pytest.raises(KeyError): create.server.xml_in['full-name'] @patch('ansible.modules.storage.netapp.na_ontap_unix_user.NetAppOntapUnixUser.modify_unix_user') @patch('ansible.modules.storage.netapp.na_ontap_unix_user.NetAppOntapUnixUser.delete_unix_user') def test_delete_called(self, delete_user, modify_user): ''' Test delete existing user ''' data = self.mock_args() data['state'] = 'absent' set_module_args(data) with pytest.raises(AnsibleExitJson) as exc: self.get_user_mock_object('user').apply() assert exc.value.args[0]['changed'] delete_user.assert_called_with() assert modify_user.call_count == 0 @patch('ansible.modules.storage.netapp.na_ontap_unix_user.NetAppOntapUnixUser.get_unix_user') @patch('ansible.modules.storage.netapp.na_ontap_unix_user.NetAppOntapUnixUser.modify_unix_user') def test_modify_called(self, modify_user, get_user): ''' Test modify user group_id ''' data = self.mock_args() data['group_id'] = 20 set_module_args(data) get_user.return_value = {'group_id': 10} obj = self.get_user_mock_object('user') with pytest.raises(AnsibleExitJson) as exc: obj.apply() get_user.assert_called_with() modify_user.assert_called_with({'group_id': 20}) def test_modify_only_id(self): ''' Test modify user id ''' set_module_args(self.mock_args()) modify = self.get_user_mock_object('user') modify.modify_unix_user({'id': 123}) assert modify.server.xml_in['user-id'] == '123' with pytest.raises(KeyError): modify.server.xml_in['group-id'] with pytest.raises(KeyError): modify.server.xml_in['full-name'] def test_modify_xml(self): ''' Test modify user full_name ''' set_module_args(self.mock_args()) modify = self.get_user_mock_object('user') modify.modify_unix_user({'full_name': 'New Name', 'group_id': '25'}) assert modify.server.xml_in['user-name'] == self.mock_user['name'] assert modify.server.xml_in['full-name'] == 'New Name' assert modify.server.xml_in['group-id'] == '25' @patch('ansible.modules.storage.netapp.na_ontap_unix_user.NetAppOntapUnixUser.create_unix_user') @patch('ansible.modules.storage.netapp.na_ontap_unix_user.NetAppOntapUnixUser.delete_unix_user') @patch('ansible.modules.storage.netapp.na_ontap_unix_user.NetAppOntapUnixUser.modify_unix_user') def test_do_nothing(self, modify, delete, create): ''' changed is False and none of the opetaion methods are called''' data = self.mock_args() data['state'] = 'absent' set_module_args(data) obj = self.get_user_mock_object() with pytest.raises(AnsibleExitJson) as exc: obj.apply() create.assert_not_called() delete.assert_not_called() modify.assert_not_called() def test_get_exception(self): set_module_args(self.mock_args()) with pytest.raises(AnsibleFailJson) as exc: self.get_user_mock_object('user-fail').get_unix_user() assert 'Error getting UNIX user' in exc.value.args[0]['msg'] def test_create_exception(self): set_module_args(self.mock_args()) with pytest.raises(AnsibleFailJson) as exc: self.get_user_mock_object('user-fail').create_unix_user() assert 'Error creating UNIX user' in exc.value.args[0]['msg'] def test_modify_exception(self): set_module_args(self.mock_args()) with pytest.raises(AnsibleFailJson) as exc: self.get_user_mock_object('user-fail').modify_unix_user({'id': '123'}) assert 'Error modifying UNIX user' in exc.value.args[0]['msg'] def test_delete_exception(self): set_module_args(self.mock_args()) with pytest.raises(AnsibleFailJson) as exc: self.get_user_mock_object('user-fail').delete_unix_user() assert 'Error removing UNIX user' in exc.value.args[0]['msg']
gpl-3.0
shish/sdog
sdog/monitor.py
1
4065
#!/usr/bin/env python import socket import os import subprocess from optparse import OptionParser from time import time, sleep from select import select import sys try: from setproctitle import setproctitle except ImportError: def setproctitle(title): pass def main(argv=sys.argv): parser = OptionParser(usage="%prog [sdog options] -- daemon-to-run [daemon options]") parser.add_option("-t", "--timeout", dest="timeout", type=int, default=10, help="Maximum seconds between pings", metavar="N") parser.add_option("-r", "--respawn", dest="respawn", type=int, default=1, help="Delay between respawns", metavar="N") parser.add_option("-T", "--title", dest="title", help="Set process title", metavar="NAME") parser.add_option("-s", "--socket", dest="soc_loc", # FIXME: probably (almost certainly) insecure, # need tmpfile.NamedTemporaryFile() for sockets default="/tmp/sdog-%d.sock" % os.getpid(), help="Path to socket", metavar="FILE") parser.add_option("-v", "--verbose", dest="verbose", default=False, action="store_true", help="Verbose mode") (options, args) = parser.parse_args() if args: launch(options, args) else: parser.error("Need to specify a program to launch") def launch(options, args): c = Child(options, args) try: c.watch() finally: if os.path.exists(options.soc_loc): os.unlink(options.soc_loc) class Child(object): def __init__(self, opts, args): self.opts = opts self.args = args self.proc = None self.ready = False self.sock = None self.last_ok = 0 def watch(self): self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) self.sock.bind(self.opts.soc_loc) while True: try: self.poll() except Exception as e: print "SDog Error:", e self.proc = None sleep(5) def status(self, status): if self.opts.verbose: print status setproctitle("sdog: %s: %s" % (self.opts.title or self.args[0], status)) def poll(self): if not self.proc: self.status("spawning: %s" % self.args) env = os.environ.copy() env["NOTIFY_SOCKET"] = self.opts.soc_loc self.proc = subprocess.Popen(self.args, env=env) self.status("launched subprocess with PID: %d" % self.proc.pid) self.last_ok = time() self.ready = False return status = self.proc.poll() if status is not None: self.status("Process exited with status code %d, respawning after %d seconds" % (status, self.opts.respawn)) self.proc = None sleep(self.opts.respawn) return rs, ws, xs = select([self.sock], [], [], 1.0) if rs: packet, addr = self.sock.recvfrom(1024) for line in packet.split("\n"): k, _, v = line.partition("=") #print "Got message: ", k, v if k == "WATCHDOG" and v == "1": self.last_ok = time() if k == "READY" and v == "1" and not self.ready: self.status("Daemon is ready") self.ready = True if k == "STATUS": self.status(v) if k == "ERRNO": self.errno = v if k == "BUSERROR": self.buserror = v if k == "MAINPID": self.mainpid = v if time() > self.last_ok + self.opts.timeout: self.status("No OK message for %d seconds, killing child" % (time() - self.last_ok)) self.proc.kill() self.proc = None if __name__ == "__main__": sys.exit(main(sys.argv))
mit
40223105/2015cd_midterm
static/Brython3.1.1-20150328-091302/Lib/reprlib.py
923
5110
"""Redo the builtin repr() (representation) but with limits on most sizes.""" __all__ = ["Repr", "repr", "recursive_repr"] import builtins from itertools import islice try: from _thread import get_ident except ImportError: from _dummy_thread import get_ident def recursive_repr(fillvalue='...'): 'Decorator to make a repr function return fillvalue for a recursive call' def decorating_function(user_function): repr_running = set() def wrapper(self): key = id(self), get_ident() if key in repr_running: return fillvalue repr_running.add(key) try: result = user_function(self) finally: repr_running.discard(key) return result # Can't use functools.wraps() here because of bootstrap issues wrapper.__module__ = getattr(user_function, '__module__') wrapper.__doc__ = getattr(user_function, '__doc__') wrapper.__name__ = getattr(user_function, '__name__') wrapper.__annotations__ = getattr(user_function, '__annotations__', {}) return wrapper return decorating_function class Repr: def __init__(self): self.maxlevel = 6 self.maxtuple = 6 self.maxlist = 6 self.maxarray = 5 self.maxdict = 4 self.maxset = 6 self.maxfrozenset = 6 self.maxdeque = 6 self.maxstring = 30 self.maxlong = 40 self.maxother = 30 def repr(self, x): return self.repr1(x, self.maxlevel) def repr1(self, x, level): typename = type(x).__name__ if ' ' in typename: parts = typename.split() typename = '_'.join(parts) if hasattr(self, 'repr_' + typename): return getattr(self, 'repr_' + typename)(x, level) else: return self.repr_instance(x, level) def _repr_iterable(self, x, level, left, right, maxiter, trail=''): n = len(x) if level <= 0 and n: s = '...' else: newlevel = level - 1 repr1 = self.repr1 pieces = [repr1(elem, newlevel) for elem in islice(x, maxiter)] if n > maxiter: pieces.append('...') s = ', '.join(pieces) if n == 1 and trail: right = trail + right return '%s%s%s' % (left, s, right) def repr_tuple(self, x, level): return self._repr_iterable(x, level, '(', ')', self.maxtuple, ',') def repr_list(self, x, level): return self._repr_iterable(x, level, '[', ']', self.maxlist) def repr_array(self, x, level): header = "array('%s', [" % x.typecode return self._repr_iterable(x, level, header, '])', self.maxarray) def repr_set(self, x, level): x = _possibly_sorted(x) return self._repr_iterable(x, level, 'set([', '])', self.maxset) def repr_frozenset(self, x, level): x = _possibly_sorted(x) return self._repr_iterable(x, level, 'frozenset([', '])', self.maxfrozenset) def repr_deque(self, x, level): return self._repr_iterable(x, level, 'deque([', '])', self.maxdeque) def repr_dict(self, x, level): n = len(x) if n == 0: return '{}' if level <= 0: return '{...}' newlevel = level - 1 repr1 = self.repr1 pieces = [] for key in islice(_possibly_sorted(x), self.maxdict): keyrepr = repr1(key, newlevel) valrepr = repr1(x[key], newlevel) pieces.append('%s: %s' % (keyrepr, valrepr)) if n > self.maxdict: pieces.append('...') s = ', '.join(pieces) return '{%s}' % (s,) def repr_str(self, x, level): s = builtins.repr(x[:self.maxstring]) if len(s) > self.maxstring: i = max(0, (self.maxstring-3)//2) j = max(0, self.maxstring-3-i) s = builtins.repr(x[:i] + x[len(x)-j:]) s = s[:i] + '...' + s[len(s)-j:] return s def repr_int(self, x, level): s = builtins.repr(x) # XXX Hope this isn't too slow... if len(s) > self.maxlong: i = max(0, (self.maxlong-3)//2) j = max(0, self.maxlong-3-i) s = s[:i] + '...' + s[len(s)-j:] return s def repr_instance(self, x, level): try: s = builtins.repr(x) # Bugs in x.__repr__() can cause arbitrary # exceptions -- then make up something except Exception: return '<%s instance at %x>' % (x.__class__.__name__, id(x)) if len(s) > self.maxother: i = max(0, (self.maxother-3)//2) j = max(0, self.maxother-3-i) s = s[:i] + '...' + s[len(s)-j:] return s def _possibly_sorted(x): # Since not all sequences of items can be sorted and comparison # functions may raise arbitrary exceptions, return an unsorted # sequence in that case. try: return sorted(x) except Exception: return list(x) aRepr = Repr() repr = aRepr.repr
gpl-3.0
rakeshmi/cinder
cinder/tests/unit/test_replication.py
5
5069
# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests for Volume replication code. """ import mock from oslo_config import cfg from oslo_utils import importutils from cinder import context from cinder import db from cinder import exception from cinder import test from cinder.tests.unit import utils as test_utils from cinder.volume import driver CONF = cfg.CONF class VolumeReplicationTestCase(test.TestCase): def setUp(self): super(VolumeReplicationTestCase, self).setUp() self.ctxt = context.RequestContext('user', 'fake', False) self.adm_ctxt = context.RequestContext('admin', 'fake', True) self.manager = importutils.import_object(CONF.volume_manager) self.manager.host = 'test_host' self.manager.stats = {'allocated_capacity_gb': 0} self.driver_patcher = mock.patch.object(self.manager, 'driver', spec=driver.VolumeDriver) self.driver = self.driver_patcher.start() @mock.patch('cinder.utils.require_driver_initialized') def test_promote_replica_uninit_driver(self, _init): """Test promote replication when driver is not initialized.""" _init.side_effect = exception.DriverNotInitialized vol = test_utils.create_volume(self.ctxt, status='available', replication_status='active') self.driver.promote_replica.return_value = None self.assertRaises(exception.DriverNotInitialized, self.manager.promote_replica, self.adm_ctxt, vol['id']) def test_promote_replica(self): """Test promote replication.""" vol = test_utils.create_volume(self.ctxt, status='available', replication_status='active') self.driver.promote_replica.return_value = \ {'replication_status': 'inactive'} self.manager.promote_replica(self.adm_ctxt, vol['id']) vol_after = db.volume_get(self.ctxt, vol['id']) self.assertEqual(vol_after['replication_status'], 'inactive') def test_promote_replica_fail(self): """Test promote replication when promote fails.""" vol = test_utils.create_volume(self.ctxt, status='available', replication_status='active') self.driver.promote_replica.side_effect = exception.CinderException self.assertRaises(exception.CinderException, self.manager.promote_replica, self.adm_ctxt, vol['id']) def test_reenable_replication(self): """Test reenable replication.""" vol = test_utils.create_volume(self.ctxt, status='available', replication_status='error') self.driver.reenable_replication.return_value = \ {'replication_status': 'copying'} self.manager.reenable_replication(self.adm_ctxt, vol['id']) vol_after = db.volume_get(self.ctxt, vol['id']) self.assertEqual(vol_after['replication_status'], 'copying') @mock.patch('cinder.utils.require_driver_initialized') def test_reenable_replication_uninit_driver(self, _init): """Test reenable replication when driver is not initialized.""" _init.side_effect = exception.DriverNotInitialized vol = test_utils.create_volume(self.ctxt, status='available', replication_status='error') self.assertRaises(exception.DriverNotInitialized, self.manager.reenable_replication, self.adm_ctxt, vol['id']) def test_reenable_replication_fail(self): """Test promote replication when driver is not initialized.""" vol = test_utils.create_volume(self.ctxt, status='available', replication_status='error') self.driver.reenable_replication.side_effect = \ exception.CinderException self.assertRaises(exception.CinderException, self.manager.reenable_replication, self.adm_ctxt, vol['id'])
apache-2.0
benfinke/ns_python
build/lib/nssrc/com/citrix/netscaler/nitro/resource/stat/system/system_stats.py
3
18080
# # Copyright (c) 2008-2015 Citrix Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License") # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response from nssrc.com.citrix.netscaler.nitro.service.options import options from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util class system_stats(base_resource) : def __init__(self) : self._clearstats = "" self._voltagev12n = 0 self._voltagev5n = 0 self._cpuusage = 0 self._rescpuusage = 0 self._slavecpuusage = 0 self._mastercpuusage = 0 self._auxvolt7 = 0 self._auxvolt6 = 0 self._auxvolt5 = 0 self._auxvolt4 = 0 self._auxvolt3 = 0 self._auxvolt2 = 0 self._auxvolt1 = 0 self._auxvolt0 = 0 self._voltagevsen2 = 0 self._voltagev5sb = 0 self._voltagevtt = 0 self._voltagevbat = 0 self._voltagev12p = 0 self._voltagev5p = 0 self._voltagev33stby = 0 self._voltagev33main = 0 self._voltagevcc1 = 0 self._voltagevcc0 = 0 self._numcpus = 0 self._memusagepcnt = 0 self._memuseinmb = 0 self._mgmtcpuusagepcnt = 0 self._pktcpuusagepcnt = 0 self._cpuusagepcnt = 0 self._rescpuusagepcnt = 0 self._starttime = "" self._disk0perusage = 0 self._disk1perusage = 0 self._cpufan0speed = 0 self._cpufan1speed = 0 self._systemfanspeed = 0 self._fan0speed = 0 self._fanspeed = 0 self._cpu0temp = 0 self._cpu1temp = 0 self._internaltemp = 0 self._powersupply1status = "" self._powersupply2status = "" self._disk0size = 0 self._disk0used = 0 self._disk0avail = 0 self._disk1size = 0 self._disk1used = 0 self._disk1avail = 0 self._fan2speed = 0 self._fan3speed = 0 self._fan4speed = 0 self._fan5speed = 0 self._auxtemp0 = 0 self._auxtemp1 = 0 self._auxtemp2 = 0 self._auxtemp3 = 0 self._powersupply3status = "" self._powersupply4status = "" self._timesincestart = "" self._memsizemb = 0 @property def clearstats(self) : ur"""Clear the statsistics / counters.<br/>Possible values = basic, full. """ try : return self._clearstats except Exception as e: raise e @clearstats.setter def clearstats(self, clearstats) : ur"""Clear the statsistics / counters """ try : self._clearstats = clearstats except Exception as e: raise e @property def voltagevbat(self) : ur"""Onboard battery power supply output. 9800 and 9950 platforms display standard value of 5.0V. """ try : return self._voltagevbat except Exception as e: raise e @property def auxvolt2(self) : ur"""Voltage of a device connected to health monitoring chip through pin 2. """ try : return self._auxvolt2 except Exception as e: raise e @property def voltagev5n(self) : ur"""Power supply -5V output. Acceptable range is -5.50 through -4.50 volts. 9800 and 9960 platforms display standard value of -5.0V. """ try : return self._voltagev5n except Exception as e: raise e @property def auxvolt7(self) : ur"""Voltage of a device connected to health monitoring chip through pin 7. """ try : return self._auxvolt7 except Exception as e: raise e @property def cpu0temp(self) : ur"""CPU 0 temperature. 9800 and 9960 platforms display internal chip temperature. This is a critical counter. You can configure CPU 0 Temperature by using the Set snmp alarm TEMPERATURE-HIGH command to set the upper limit. """ try : return self._cpu0temp except Exception as e: raise e @property def fan0speed(self) : ur"""System fan 1 speed. For new platforms associated pin is connected to CPU supporting fans. For platforms in which it is not connected, it will point to System Fan. """ try : return self._fan0speed except Exception as e: raise e @property def disk1perusage(self) : ur"""Used space in /var partition of the disk, as a percentage. This is a critical counter. You can configure /var Used (%) by using the Set snmp alarm DISK-USAGE-HIGH command. """ try : return self._disk1perusage except Exception as e: raise e @property def rescpuusagepcnt(self) : ur"""Average CPU utilization percentage. Not applicable for a single-CPU system. """ try : return self._rescpuusagepcnt except Exception as e: raise e @property def disk1used(self) : ur"""Used space in /var partition of the hard disk. """ try : return self._disk1used except Exception as e: raise e @property def disk1avail(self) : ur"""Available space in /var partition of the hard disk. """ try : return self._disk1avail except Exception as e: raise e @property def fan4speed(self) : ur"""Speed of Fan 2 if associated pin is connected to health monitoring chip. """ try : return self._fan4speed except Exception as e: raise e @property def rescpuusage(self) : ur"""Shows average CPU utilization percentage if more than 1 CPU is present. """ try : return self._rescpuusage except Exception as e: raise e @property def powersupply3status(self) : ur"""Power supply 3 failure status. """ try : return self._powersupply3status except Exception as e: raise e @property def auxvolt5(self) : ur"""Voltage of a device connected to health monitoring chip through pin 5. """ try : return self._auxvolt5 except Exception as e: raise e @property def auxvolt3(self) : ur"""Voltage of a device connected to health monitoring chip through pin 3. """ try : return self._auxvolt3 except Exception as e: raise e @property def disk0perusage(self) : ur"""Used space in /flash partition of the disk, as a percentage. This is a critical counter. You can configure /flash Used (%) by using the Set snmp alarm DISK-USAGE-HIGH command. """ try : return self._disk0perusage except Exception as e: raise e @property def fan2speed(self) : ur"""Speed of Fan 0 if associated pin is connected to health monitoring chip. """ try : return self._fan2speed except Exception as e: raise e @property def powersupply4status(self) : ur"""Power supply 4 failure status. """ try : return self._powersupply4status except Exception as e: raise e @property def auxvolt1(self) : ur"""Voltage of a device connected to health monitoring chip through pin 1. """ try : return self._auxvolt1 except Exception as e: raise e @property def fanspeed(self) : ur"""System fan 2 speed. For new platforms associated pin is connected to CPU supporting fans. For platforms in which it is not connected, it will point to System Fan. """ try : return self._fanspeed except Exception as e: raise e @property def fan5speed(self) : ur"""Speed of Fan 3 if associated pin is connected to health monitoring chip. """ try : return self._fan5speed except Exception as e: raise e @property def disk0size(self) : ur"""Size of /flash partition of the hard disk. """ try : return self._disk0size except Exception as e: raise e @property def mgmtcpuusagepcnt(self) : ur"""Management CPU utilization percentage. """ try : return self._mgmtcpuusagepcnt except Exception as e: raise e @property def cpuusage(self) : ur"""CPU utilization percentage. """ try : return self._cpuusage except Exception as e: raise e @property def voltagev5sb(self) : ur"""Power Supply 5V Standby Voltage. Currently only 13k Platforms will have valid value for this counter and for older platforms this will be 0. """ try : return self._voltagev5sb except Exception as e: raise e @property def disk0used(self) : ur"""Used space in /flash partition of the hard disk. """ try : return self._disk0used except Exception as e: raise e @property def powersupply1status(self) : ur"""Power supply 1 failure status. """ try : return self._powersupply1status except Exception as e: raise e @property def cpufan0speed(self) : ur"""CPU Fan 0 speed. Acceptable range is 3000 through 6000 RPM. This is a critical counter. You can configure CPU Fan 0 Speed by using the Set snmp alarm FAN-SPEED-LOW command to set the lower limit. """ try : return self._cpufan0speed except Exception as e: raise e @property def disk1size(self) : ur"""Size of /var partition of the hard disk. """ try : return self._disk1size except Exception as e: raise e @property def auxtemp1(self) : ur"""Temperature of a device connected to health monitoring chip through pin 1. """ try : return self._auxtemp1 except Exception as e: raise e @property def numcpus(self) : ur"""The number of CPUs on the NetScaler appliance. """ try : return self._numcpus except Exception as e: raise e @property def pktcpuusagepcnt(self) : ur"""Average CPU utilization percentage for all packet engines excluding management PE. """ try : return self._pktcpuusagepcnt except Exception as e: raise e @property def voltagev5p(self) : ur"""Power supply +5V output. Acceptable range is 4.50 through 5.50 volts. """ try : return self._voltagev5p except Exception as e: raise e @property def voltagevsen2(self) : ur"""Voltage Sensor 2 Input. Currently only 13k Platforms will have valid value for this counter and for older platforms this will be 0. """ try : return self._voltagevsen2 except Exception as e: raise e @property def auxvolt0(self) : ur"""Voltage of a device connected to health monitoring chip through pin 0. """ try : return self._auxvolt0 except Exception as e: raise e @property def auxtemp2(self) : ur"""Temperature of a device connected to health monitoring chip through pin 2. """ try : return self._auxtemp2 except Exception as e: raise e @property def memsizemb(self) : ur"""Total amount of system memory, in megabytes. """ try : return self._memsizemb except Exception as e: raise e @property def voltagev33main(self) : ur"""Main power supply +3.3V output. Acceptable range is 2.970 through 3.630 volts. This is a critical counter. You can configure Main 3.3V Supply Voltage, by using the Set snmp alarm VOLTAGE-LOW command to set the lower limit and the Set snmp alarm VOLTAGE-HIGH command to set the upper limit. """ try : return self._voltagev33main except Exception as e: raise e @property def cpu1temp(self) : ur"""CPU 1 temperature. 9800 and 9960 platforms display internal chip temperature. 7000, 9010 and 10010 platforms display CPU 0 temperature. This is a critical counter. You can configure CPU 1 Temperature by using the Set snmp alarm TEMPERATURE-HIGH command to set the upper limit. """ try : return self._cpu1temp except Exception as e: raise e @property def voltagev12n(self) : ur"""Power supply -12V output. Acceptable range is -13.20 through -10.80 volts. 9800 and 9960 platforms display standard value of -12.0V. """ try : return self._voltagev12n except Exception as e: raise e @property def memuseinmb(self) : ur"""Main memory currently in use, in megabytes. """ try : return self._memuseinmb except Exception as e: raise e @property def auxtemp3(self) : ur"""Temperature of a device connected to health monitoring chip through pin 3. """ try : return self._auxtemp3 except Exception as e: raise e @property def internaltemp(self) : ur"""Internal temperature of health monitoring chip. This is a critical counter. You can configure Internal Temperature by using the Set snmp alarm TEMPERATURE-HIGH command to set the upper limit. """ try : return self._internaltemp except Exception as e: raise e @property def voltagev12p(self) : ur"""Power supply +12V output. Acceptable range is 10.80 through 13.20 volts. """ try : return self._voltagev12p except Exception as e: raise e @property def disk0avail(self) : ur"""Available space in /flash partition of the hard disk. """ try : return self._disk0avail except Exception as e: raise e @property def voltagev33stby(self) : ur"""Standby power supply +3.3V output. Acceptable range is 2.970 through 3.630 volts. 9800 and 9960 platforms display standard value of 3.3V. You can configure Standby 3.3V Supply Voltage by using the Set snmp alarm VOLTAGE-LOW command to set the lower limit and the Set snmp alarm VOLTAGE-HIGH command to set the upper limit. """ try : return self._voltagev33stby except Exception as e: raise e @property def voltagevcc1(self) : ur"""CPU core 1 voltage. Acceptable range is 1.080 through 1.650 volts. If CPU 1 is not connected to the health monitoring chip, display shows voltage of CPU 0. """ try : return self._voltagevcc1 except Exception as e: raise e @property def fan3speed(self) : ur"""Speed of Fan 1 if associated pin is connected to health monitoring chip. """ try : return self._fan3speed except Exception as e: raise e @property def voltagevtt(self) : ur"""Intel CPU Vtt power. Currently only 13k Platforms will have valid value for this counter and for older platforms this will be 0. """ try : return self._voltagevtt except Exception as e: raise e @property def auxtemp0(self) : ur"""Temperature of a device connected to health monitoring chip through pin 0. """ try : return self._auxtemp0 except Exception as e: raise e @property def cpufan1speed(self) : ur"""CPU Fan 1 speed. Acceptable range is 3000 through 6000 RPM. 7000 platform displays speed of CPU fan 0. This is a critical counter. You can configure CPU Fan 1 Speed by using the Set snmp alarm FAN-SPEED-LOW command to set the lower limit. """ try : return self._cpufan1speed except Exception as e: raise e @property def voltagevcc0(self) : ur"""CPU core 0 voltage. Acceptable range is 1.080 through 1.650 volts. """ try : return self._voltagevcc0 except Exception as e: raise e @property def auxvolt4(self) : ur"""Voltage of a device connected to health monitoring chip through pin 4. """ try : return self._auxvolt4 except Exception as e: raise e @property def starttime(self) : ur"""Time when the NetScaler appliance was last started. """ try : return self._starttime except Exception as e: raise e @property def systemfanspeed(self) : ur"""System fan speed. Acceptable range is 3000 through 6000 RPM. This is a critical counter. You can configure System Fan Speed by using the Set snmp alarm FAN-SPEED-LOW command to set the lower limit. """ try : return self._systemfanspeed except Exception as e: raise e @property def cpuusagepcnt(self) : ur"""CPU utilization percentage. """ try : return self._cpuusagepcnt except Exception as e: raise e @property def mastercpuusage(self) : ur"""CPU 0 (currently the master CPU) utilization, as percentage of capacity. """ try : return self._mastercpuusage except Exception as e: raise e @property def timesincestart(self) : ur"""Seconds since the NetScaler appliance started. """ try : return self._timesincestart except Exception as e: raise e @property def auxvolt6(self) : ur"""Voltage of a device connected to health monitoring chip through pin 6. """ try : return self._auxvolt6 except Exception as e: raise e @property def slavecpuusage(self) : ur"""CPU 1 (currently the slave CPU) utilization, as percentage of capacity. Not applicable for a single-CPU system. """ try : return self._slavecpuusage except Exception as e: raise e @property def memusagepcnt(self) : ur"""Percentage of memory utilization on NetScaler. """ try : return self._memusagepcnt except Exception as e: raise e @property def powersupply2status(self) : ur"""Power supply 2 failure status. """ try : return self._powersupply2status except Exception as e: raise e def _get_nitro_response(self, service, response) : ur""" converts nitro response into object and returns the object array in case of get request. """ try : result = service.payload_formatter.string_to_resource(system_response, response, self.__class__.__name__.replace('_stats','')) if(result.errorcode != 0) : if (result.errorcode == 444) : service.clear_session(self) if result.severity : if (result.severity == "ERROR") : raise nitro_exception(result.errorcode, str(result.message), str(result.severity)) else : raise nitro_exception(result.errorcode, str(result.message), str(result.severity)) return result.system except Exception as e : raise e def _get_object_name(self) : ur""" Returns the value of object identifier argument """ try : return 0 except Exception as e : raise e @classmethod def get(cls, service, name="", option_="") : ur""" Use this API to fetch the statistics of all system_stats resources that are configured on netscaler. """ try : obj = system_stats() if not name : response = obj.stat_resources(service, option_) return response except Exception as e: raise e class Clearstats: basic = "basic" full = "full" class system_response(base_response) : def __init__(self, length=1) : self.system = [] self.errorcode = 0 self.message = "" self.severity = "" self.sessionid = "" self.system = [system_stats() for _ in range(length)]
apache-2.0
yannrouillard/weboob
weboob/applications/boobill/boobill.py
2
8189
# -*- coding: utf-8 -*- # Copyright(C) 2012-2013 Florent Fourcot # # This file is part of weboob. # # weboob is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # weboob is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with weboob. If not, see <http://www.gnu.org/licenses/>. import sys from decimal import Decimal from weboob.capabilities.bill import ICapBill, Detail, Subscription from weboob.tools.application.repl import ReplApplication, defaultcount from weboob.tools.application.formatters.iformatter import PrettyFormatter from weboob.tools.application.base import MoreResultsAvailable from weboob.core import CallErrors __all__ = ['Boobill'] class SubscriptionsFormatter(PrettyFormatter): MANDATORY_FIELDS = ('id', 'label') def get_title(self, obj): if obj.renewdate: return u"%s - %s" % (obj.label, obj.renewdate.strftime('%d/%m/%y')) return obj.label class Boobill(ReplApplication): APPNAME = 'boobill' VERSION = '0.i' COPYRIGHT = 'Copyright(C) 2012 Florent Fourcot' DESCRIPTION = 'Console application allowing to get and download bills.' SHORT_DESCRIPTION = "get and download bills" CAPS = ICapBill COLLECTION_OBJECTS = (Subscription, ) EXTRA_FORMATTERS = {'subscriptions': SubscriptionsFormatter, } DEFAULT_FORMATTER = 'table' COMMANDS_FORMATTERS = {'subscriptions': 'subscriptions', 'ls': 'subscriptions', } def main(self, argv): self.load_config() return ReplApplication.main(self, argv) def exec_method(self, id, method): l = [] id, backend_name = self.parse_id(id) if not id: for subscrib in self.get_object_list('iter_subscription'): l.append((subscrib.id, subscrib.backend)) else: l.append((id, backend_name)) more_results = [] not_implemented = [] self.start_format() for id, backend in l: names = (backend,) if backend is not None else None try: for backend, result in self.do(method, id, backends=names): self.format(result) except CallErrors as errors: for backend, error, backtrace in errors: if isinstance(error, MoreResultsAvailable): more_results.append(id + u'@' + backend.name) elif isinstance(error, NotImplementedError): if backend not in not_implemented: not_implemented.append(backend) else: self.bcall_error_handler(backend, error, backtrace) if len(more_results) > 0: print >>sys.stderr, 'Hint: There are more results available for %s (use option -n or count command)' % (', '.join(more_results)) for backend in not_implemented: print >>sys.stderr, u'Error(%s): This feature is not supported yet by this backend.' % backend.name def do_subscriptions(self, line): """ subscriptions List all subscriptions. """ self.start_format() for subscription in self.get_object_list('iter_subscription'): self.format(subscription) def do_details(self, id): """ details [ID] Get details of subscriptions. If no ID given, display all details of all backends. """ l = [] id, backend_name = self.parse_id(id) if not id: for subscrib in self.get_object_list('iter_subscription'): l.append((subscrib.id, subscrib.backend)) else: l.append((id, backend_name)) for id, backend in l: names = (backend,) if backend is not None else None # XXX: should be generated by backend? -Flo # XXX: no, but you should do it in a specific formatter -romain # TODO: do it, and use exec_method here. Code is obsolete mysum = Detail() mysum.label = u"Sum" mysum.infos = u"Generated by boobill" mysum.price = Decimal("0.") self.start_format() for backend, detail in self.do('get_details', id, backends=names): self.format(detail) mysum.price = detail.price + mysum.price self.format(mysum) def do_balance(self, id): """ balance [ID] Get balance of subscriptions. If no ID given, display balance of all backends. """ self.exec_method(id, 'get_balance') @defaultcount(10) def do_history(self, id): """ history [ID] Get the history of subscriptions. If no ID given, display histories of all backends. """ self.exec_method(id, 'iter_bills_history') @defaultcount(10) def do_bills(self, id): """ bills [ID] Get the list of bills documents for subscriptions. If no ID given, display bills of all backends """ self.exec_method(id, 'iter_bills') def do_download(self, line): """ download [ID | all] [FILENAME] download ID [FILENAME] download the bill id is the identifier of the bill (hint: try bills command) FILENAME is where to write the file. If FILENAME is '-', the file is written to stdout. download all [ID] You can use special word "all" and download all bills of subscription identified by ID. If Id not given, download bills of all subscriptions. """ id, dest = self.parse_command_args(line, 2, 1) id, backend_name = self.parse_id(id) if not id: print >>sys.stderr, 'Error: please give a bill ID (hint: use bills command)' return 2 names = (backend_name,) if backend_name is not None else None # Special keywords, download all bills of all subscriptions if id == "all": if dest is None: for backend, subscription in self.do('iter_subscription', backends=names): self.download_all(subscription.id, names) return else: self.download_all(dest, names) return if dest is None: for backend, bill in self.do('get_bill', id, backends=names): dest = id + "." + bill.format for backend, buf in self.do('download_bill', id, backends=names): if buf: if dest == "-": print buf else: try: with open(dest, 'w') as f: f.write(buf) except IOError as e: print >>sys.stderr, 'Unable to write bill in "%s": %s' % (dest, e) return 1 return def download_all(self, id, names): id, backend_name = self.parse_id(id) for backend, bill in self.do('iter_bills', id, backends=names): dest = bill.id + "." + bill.format for backend2, buf in self.do('download_bill', bill.id, backends=names): if buf: if dest == "-": print buf else: try: with open(dest, 'w') as f: f.write(buf) except IOError as e: print >>sys.stderr, 'Unable to write bill in "%s": %s' % (dest, e) return 1 return
agpl-3.0
bgxavier/nova
nova/tests/unit/virt/test_events.py
113
1178
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import time from nova import test from nova.virt import event class TestEvents(test.NoDBTestCase): def test_event_repr(self): t = time.time() uuid = '1234' lifecycle = event.EVENT_LIFECYCLE_RESUMED e = event.Event(t) self.assertEqual(str(e), "<Event: %s>" % t) e = event.InstanceEvent(uuid, timestamp=t) self.assertEqual(str(e), "<InstanceEvent: %s, %s>" % (t, uuid)) e = event.LifecycleEvent(uuid, lifecycle, timestamp=t) self.assertEqual(str(e), "<LifecycleEvent: %s, %s => Resumed>" % (t, uuid))
apache-2.0
chenjun0210/tensorflow
tensorflow/python/kernel_tests/slice_op_test.py
48
11026
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Functional tests for slice op.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from six.moves import xrange # pylint: disable=redefined-builtin from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors_impl from tensorflow.python.ops import array_ops from tensorflow.python.ops import gradients_impl from tensorflow.python.platform import test class SliceTest(test.TestCase): def testEmpty(self): inp = np.random.rand(4, 4).astype("f") for k in xrange(4): with self.test_session(use_gpu=True): a = constant_op.constant(inp, shape=[4, 4], dtype=dtypes.float32) slice_t = a[2, k:k] slice_val = slice_t.eval() self.assertAllEqual(slice_val, inp[2, k:k]) def testInt32(self): inp = np.random.rand(4, 4).astype("i") for k in xrange(4): with self.test_session(use_gpu=True): a = constant_op.constant(inp, shape=[4, 4], dtype=dtypes.int32) slice_t = a[2, k:k] slice_val = slice_t.eval() self.assertAllEqual(slice_val, inp[2, k:k]) def testSelectAll(self): for _ in range(10): with self.test_session(use_gpu=True): inp = np.random.rand(4, 4, 4, 4).astype("f") a = constant_op.constant(inp, shape=[4, 4, 4, 4], dtype=dtypes.float32) slice_explicit_t = array_ops.slice(a, [0, 0, 0, 0], [-1, -1, -1, -1]) slice_implicit_t = a[:, :, :, :] self.assertAllEqual(inp, slice_explicit_t.eval()) self.assertAllEqual(inp, slice_implicit_t.eval()) self.assertEqual(inp.shape, slice_explicit_t.get_shape()) self.assertEqual(inp.shape, slice_implicit_t.get_shape()) def testSingleDimension(self): for _ in range(10): with self.test_session(use_gpu=True): inp = np.random.rand(10).astype("f") a = constant_op.constant(inp, shape=[10], dtype=dtypes.float32) hi = np.random.randint(0, 9) scalar_t = a[hi] scalar_val = scalar_t.eval() self.assertAllEqual(scalar_val, inp[hi]) if hi > 0: lo = np.random.randint(0, hi) else: lo = 0 slice_t = a[lo:hi] slice_val = slice_t.eval() self.assertAllEqual(slice_val, inp[lo:hi]) def testScalarInput(self): input_val = 0 with self.test_session() as sess: # Test with constant input; shape inference fails. with self.assertRaisesWithPredicateMatch(ValueError, "out of range"): constant_op.constant(input_val)[:].get_shape() # Test evaluating with non-constant input; kernel execution fails. input_t = array_ops.placeholder(dtypes.int32) slice_t = input_t[:] with self.assertRaisesWithPredicateMatch(errors_impl.InvalidArgumentError, "out of range"): sess.run([slice_t], feed_dict={input_t: input_val}) def testInvalidIndex(self): input_val = [1, 2] with self.test_session() as sess: # Test with constant input; shape inference fails. with self.assertRaisesWithPredicateMatch(ValueError, "out of range"): constant_op.constant(input_val)[1:, 1:].get_shape() # Test evaluating with non-constant input; kernel execution fails. input_t = array_ops.placeholder(dtypes.int32) slice_t = input_t[1:, 1:] with self.assertRaisesWithPredicateMatch(errors_impl.InvalidArgumentError, "out of range"): sess.run([slice_t], feed_dict={input_t: input_val}) def _testSliceMatrixDim0(self, x, begin, size): with self.test_session(use_gpu=True): tf_ans = array_ops.slice(x, [begin, 0], [size, x.shape[1]]).eval() np_ans = x[begin:begin + size, :] self.assertAllEqual(tf_ans, np_ans) def testSliceMatrixDim0(self): x = np.random.rand(8, 4).astype("f") self._testSliceMatrixDim0(x, 1, 2) self._testSliceMatrixDim0(x, 3, 3) y = np.random.rand(8, 7).astype("f") # 7 * sizeof(float) is not aligned self._testSliceMatrixDim0(y, 1, 2) self._testSliceMatrixDim0(y, 3, 3) def testSingleElementAll(self): for _ in range(10): with self.test_session(use_gpu=True): inp = np.random.rand(4, 4).astype("f") a = constant_op.constant(inp, shape=[4, 4], dtype=dtypes.float32) x, y = np.random.randint(0, 3, size=2).tolist() slice_t = a[x, 0:y] slice_val = slice_t.eval() self.assertAllEqual(slice_val, inp[x, 0:y]) def testSimple(self): with self.test_session(use_gpu=True) as sess: inp = np.random.rand(4, 4).astype("f") a = constant_op.constant( [float(x) for x in inp.ravel(order="C")], shape=[4, 4], dtype=dtypes.float32) slice_t = array_ops.slice(a, [0, 0], [2, 2]) slice2_t = a[:2, :2] slice_val, slice2_val = sess.run([slice_t, slice2_t]) self.assertAllEqual(slice_val, inp[:2, :2]) self.assertAllEqual(slice2_val, inp[:2, :2]) self.assertEqual(slice_val.shape, slice_t.get_shape()) self.assertEqual(slice2_val.shape, slice2_t.get_shape()) def testComplex(self): with self.test_session(use_gpu=True): inp = np.random.rand(4, 10, 10, 4).astype("f") a = constant_op.constant(inp, dtype=dtypes.float32) x = np.random.randint(0, 9) z = np.random.randint(0, 9) if z > 0: y = np.random.randint(0, z) else: y = 0 slice_t = a[:, x, y:z, :] self.assertAllEqual(slice_t.eval(), inp[:, x, y:z, :]) def testRandom(self): # Random dims of rank 6 input_shape = np.random.randint(0, 20, size=6) inp = np.random.rand(*input_shape).astype("f") with self.test_session(use_gpu=True) as sess: a = constant_op.constant( [float(x) for x in inp.ravel(order="C")], shape=input_shape, dtype=dtypes.float32) indices = [0 if x == 0 else np.random.randint(x) for x in input_shape] sizes = [ np.random.randint(0, input_shape[i] - indices[i] + 1) for i in range(6) ] slice_t = array_ops.slice(a, indices, sizes) slice2_t = a[indices[0]:indices[0] + sizes[0], indices[1]:indices[ 1] + sizes[1], indices[2]:indices[2] + sizes[2], indices[3]:indices[3] + sizes[3], indices[4]:indices[4] + sizes[4], indices[5]: indices[5] + sizes[5]] slice_val, slice2_val = sess.run([slice_t, slice2_t]) expected_val = inp[indices[0]:indices[0] + sizes[0], indices[1]:indices[ 1] + sizes[1], indices[2]:indices[2] + sizes[2], indices[3]:indices[ 3] + sizes[3], indices[4]:indices[4] + sizes[4], indices[5]:indices[ 5] + sizes[5]] self.assertAllEqual(slice_val, expected_val) self.assertAllEqual(slice2_val, expected_val) self.assertEqual(expected_val.shape, slice_t.get_shape()) self.assertEqual(expected_val.shape, slice2_t.get_shape()) def _testGradientSlice(self, input_shape, slice_begin, slice_size): with self.test_session(use_gpu=True): num_inputs = np.prod(input_shape) num_grads = np.prod(slice_size) inp = np.random.rand(num_inputs).astype("f").reshape(input_shape) a = constant_op.constant( [float(x) for x in inp.ravel(order="C")], shape=input_shape, dtype=dtypes.float32) slice_t = array_ops.slice(a, slice_begin, slice_size) grads = np.random.rand(num_grads).astype("f").reshape(slice_size) grad_tensor = constant_op.constant(grads) grad = gradients_impl.gradients(slice_t, [a], grad_tensor)[0] result = grad.eval() # Create a zero tensor of the input shape ane place # the grads into the right location to compare against TensorFlow. np_ans = np.zeros(input_shape) slices = [] for i in xrange(len(input_shape)): slices.append(slice(slice_begin[i], slice_begin[i] + slice_size[i])) np_ans[slices] = grads self.assertAllClose(np_ans, result) def _testGradientVariableSize(self): with self.test_session(use_gpu=True): inp = constant_op.constant([1.0, 2.0, 3.0], name="in") out = array_ops.slice(inp, [1], [-1]) grad_actual = gradients_impl.gradients(out, inp)[0].eval() self.assertAllClose([0., 1., 1.], grad_actual) def testGradientsAll(self): # Slice the middle square out of a 4x4 input self._testGradientSlice([4, 4], [1, 1], [2, 2]) # Slice the upper left square out of a 4x4 input self._testGradientSlice([4, 4], [0, 0], [2, 2]) # Slice a non-square input starting from (2,1) self._testGradientSlice([4, 4], [2, 1], [1, 2]) # Slice a 3D tensor self._testGradientSlice([3, 3, 3], [0, 1, 0], [2, 1, 1]) # Use -1 as a slice dimension. self._testGradientVariableSize() def testNotIterable(self): # NOTE(mrry): If we register __getitem__ as an overloaded # operator, Python will valiantly attempt to iterate over the # Tensor from 0 to infinity. This test ensures that this # unintended behavior is prevented. c = constant_op.constant(5.0) with self.assertRaisesWithPredicateMatch( TypeError, lambda e: "'Tensor' object is not iterable" in str(e)): for _ in c: pass def testComputedShape(self): # NOTE(mrry): We cannot currently handle partially-known values, # because `tf.slice()` uses -1 to specify a wildcard size, and # this can't be handled using the # `tensor_util.constant_value_as_shape()` trick. a = constant_op.constant([[1, 2, 3], [4, 5, 6]]) begin = constant_op.constant(0) size = constant_op.constant(1) b = array_ops.slice(a, [begin, 0], [size, 2]) self.assertEqual([1, 2], b.get_shape()) begin = array_ops.placeholder(dtypes.int32, shape=()) c = array_ops.slice(a, [begin, 0], [-1, 2]) self.assertEqual([None, 2], c.get_shape().as_list()) def testSliceOfSlice(self): with self.test_session(use_gpu=True): a = constant_op.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]) b = a[1:, :] c = b[:-1, :] d = c[1, :] res = 2 * d - c[1, :] + a[2, :] - 2 * b[-2, :] self.assertAllEqual([0, 0, 0], res.eval()) if __name__ == "__main__": test.main()
apache-2.0
youssef-emad/shogun
examples/undocumented/python_modular/classifier_multiclass_ecoc.py
24
2869
#!/usr/bin/env python import re import time from tools.multiclass_shared import prepare_data # run with toy data [traindat, label_traindat, testdat, label_testdat] = prepare_data() # run with opt-digits if available #[traindat, label_traindat, testdat, label_testdat] = prepare_data(False) parameter_list = [[traindat,testdat,label_traindat,label_testdat,2.1,1,1e-5]] def classifier_multiclass_ecoc (fm_train_real=traindat,fm_test_real=testdat,label_train_multiclass=label_traindat,label_test_multiclass=label_testdat,lawidth=2.1,C=1,epsilon=1e-5): import modshogun from modshogun import ECOCStrategy, LibLinear, L2R_L2LOSS_SVC, LinearMulticlassMachine from modshogun import MulticlassAccuracy from modshogun import RealFeatures, MulticlassLabels def nonabstract_class(name): try: getattr(modshogun, name)() except TypeError: return False return True encoders = [x for x in dir(modshogun) if re.match(r'ECOC.+Encoder', x) and nonabstract_class(x)] decoders = [x for x in dir(modshogun) if re.match(r'ECOC.+Decoder', x) and nonabstract_class(x)] fea_train = RealFeatures(fm_train_real) fea_test = RealFeatures(fm_test_real) gnd_train = MulticlassLabels(label_train_multiclass) if label_test_multiclass is None: gnd_test = None else: gnd_test = MulticlassLabels(label_test_multiclass) base_classifier = LibLinear(L2R_L2LOSS_SVC) base_classifier.set_bias_enabled(True) #print('Testing with %d encoders and %d decoders' % (len(encoders), len(decoders))) #print('-' * 70) #format_str = '%%15s + %%-10s %%-10%s %%-10%s %%-10%s' #print((format_str % ('s', 's', 's')) % ('encoder', 'decoder', 'codelen', 'time', 'accuracy')) def run_ecoc(ier, idr): encoder = getattr(modshogun, encoders[ier])() decoder = getattr(modshogun, decoders[idr])() # whether encoder is data dependent if hasattr(encoder, 'set_labels'): encoder.set_labels(gnd_train) encoder.set_features(fea_train) strategy = ECOCStrategy(encoder, decoder) classifier = LinearMulticlassMachine(strategy, fea_train, base_classifier, gnd_train) classifier.train() label_pred = classifier.apply(fea_test) if gnd_test is not None: evaluator = MulticlassAccuracy() acc = evaluator.evaluate(label_pred, gnd_test) else: acc = None return (classifier.get_num_machines(), acc) for ier in range(len(encoders)): for idr in range(len(decoders)): t_begin = time.clock() (codelen, acc) = run_ecoc(ier, idr) if acc is None: acc_fmt = 's' acc = 'N/A' else: acc_fmt = '.4f' t_elapse = time.clock() - t_begin #print((format_str % ('d', '.3f', acc_fmt)) % # (encoders[ier][4:-7], decoders[idr][4:-7], codelen, t_elapse, acc)) if __name__=='__main__': print('MulticlassECOC') classifier_multiclass_ecoc(*parameter_list[0])
gpl-3.0
Phonebooth/depot_tools
third_party/coverage/results.py
49
10023
"""Results of coverage measurement.""" import os from coverage.backward import iitems, set, sorted # pylint: disable=W0622 from coverage.misc import format_lines, join_regex, NoSource from coverage.parser import CodeParser class Analysis(object): """The results of analyzing a code unit.""" def __init__(self, cov, code_unit): self.coverage = cov self.code_unit = code_unit self.filename = self.code_unit.filename actual_filename, source = self.find_source(self.filename) self.parser = CodeParser( text=source, filename=actual_filename, exclude=self.coverage._exclude_regex('exclude') ) self.statements, self.excluded = self.parser.parse_source() # Identify missing statements. executed = self.coverage.data.executed_lines(self.filename) exec1 = self.parser.first_lines(executed) self.missing = sorted(set(self.statements) - set(exec1)) if self.coverage.data.has_arcs(): self.no_branch = self.parser.lines_matching( join_regex(self.coverage.config.partial_list), join_regex(self.coverage.config.partial_always_list) ) n_branches = self.total_branches() mba = self.missing_branch_arcs() n_partial_branches = sum( [len(v) for k,v in iitems(mba) if k not in self.missing] ) n_missing_branches = sum([len(v) for k,v in iitems(mba)]) else: n_branches = n_partial_branches = n_missing_branches = 0 self.no_branch = set() self.numbers = Numbers( n_files=1, n_statements=len(self.statements), n_excluded=len(self.excluded), n_missing=len(self.missing), n_branches=n_branches, n_partial_branches=n_partial_branches, n_missing_branches=n_missing_branches, ) def find_source(self, filename): """Find the source for `filename`. Returns two values: the actual filename, and the source. The source returned depends on which of these cases holds: * The filename seems to be a non-source file: returns None * The filename is a source file, and actually exists: returns None. * The filename is a source file, and is in a zip file or egg: returns the source. * The filename is a source file, but couldn't be found: raises `NoSource`. """ source = None base, ext = os.path.splitext(filename) TRY_EXTS = { '.py': ['.py', '.pyw'], '.pyw': ['.pyw'], } try_exts = TRY_EXTS.get(ext) if not try_exts: return filename, None for try_ext in try_exts: try_filename = base + try_ext if os.path.exists(try_filename): return try_filename, None source = self.coverage.file_locator.get_zip_data(try_filename) if source: return try_filename, source raise NoSource("No source for code: '%s'" % filename) def missing_formatted(self): """The missing line numbers, formatted nicely. Returns a string like "1-2, 5-11, 13-14". """ return format_lines(self.statements, self.missing) def has_arcs(self): """Were arcs measured in this result?""" return self.coverage.data.has_arcs() def arc_possibilities(self): """Returns a sorted list of the arcs in the code.""" arcs = self.parser.arcs() return arcs def arcs_executed(self): """Returns a sorted list of the arcs actually executed in the code.""" executed = self.coverage.data.executed_arcs(self.filename) m2fl = self.parser.first_line executed = [(m2fl(l1), m2fl(l2)) for (l1,l2) in executed] return sorted(executed) def arcs_missing(self): """Returns a sorted list of the arcs in the code not executed.""" possible = self.arc_possibilities() executed = self.arcs_executed() missing = [ p for p in possible if p not in executed and p[0] not in self.no_branch ] return sorted(missing) def arcs_unpredicted(self): """Returns a sorted list of the executed arcs missing from the code.""" possible = self.arc_possibilities() executed = self.arcs_executed() # Exclude arcs here which connect a line to itself. They can occur # in executed data in some cases. This is where they can cause # trouble, and here is where it's the least burden to remove them. unpredicted = [ e for e in executed if e not in possible and e[0] != e[1] ] return sorted(unpredicted) def branch_lines(self): """Returns a list of line numbers that have more than one exit.""" exit_counts = self.parser.exit_counts() return [l1 for l1,count in iitems(exit_counts) if count > 1] def total_branches(self): """How many total branches are there?""" exit_counts = self.parser.exit_counts() return sum([count for count in exit_counts.values() if count > 1]) def missing_branch_arcs(self): """Return arcs that weren't executed from branch lines. Returns {l1:[l2a,l2b,...], ...} """ missing = self.arcs_missing() branch_lines = set(self.branch_lines()) mba = {} for l1, l2 in missing: if l1 in branch_lines: if l1 not in mba: mba[l1] = [] mba[l1].append(l2) return mba def branch_stats(self): """Get stats about branches. Returns a dict mapping line numbers to a tuple: (total_exits, taken_exits). """ exit_counts = self.parser.exit_counts() missing_arcs = self.missing_branch_arcs() stats = {} for lnum in self.branch_lines(): exits = exit_counts[lnum] try: missing = len(missing_arcs[lnum]) except KeyError: missing = 0 stats[lnum] = (exits, exits - missing) return stats class Numbers(object): """The numerical results of measuring coverage. This holds the basic statistics from `Analysis`, and is used to roll up statistics across files. """ # A global to determine the precision on coverage percentages, the number # of decimal places. _precision = 0 _near0 = 1.0 # These will change when _precision is changed. _near100 = 99.0 def __init__(self, n_files=0, n_statements=0, n_excluded=0, n_missing=0, n_branches=0, n_partial_branches=0, n_missing_branches=0 ): self.n_files = n_files self.n_statements = n_statements self.n_excluded = n_excluded self.n_missing = n_missing self.n_branches = n_branches self.n_partial_branches = n_partial_branches self.n_missing_branches = n_missing_branches def set_precision(cls, precision): """Set the number of decimal places used to report percentages.""" assert 0 <= precision < 10 cls._precision = precision cls._near0 = 1.0 / 10**precision cls._near100 = 100.0 - cls._near0 set_precision = classmethod(set_precision) def _get_n_executed(self): """Returns the number of executed statements.""" return self.n_statements - self.n_missing n_executed = property(_get_n_executed) def _get_n_executed_branches(self): """Returns the number of executed branches.""" return self.n_branches - self.n_missing_branches n_executed_branches = property(_get_n_executed_branches) def _get_pc_covered(self): """Returns a single percentage value for coverage.""" if self.n_statements > 0: pc_cov = (100.0 * (self.n_executed + self.n_executed_branches) / (self.n_statements + self.n_branches)) else: pc_cov = 100.0 return pc_cov pc_covered = property(_get_pc_covered) def _get_pc_covered_str(self): """Returns the percent covered, as a string, without a percent sign. Note that "0" is only returned when the value is truly zero, and "100" is only returned when the value is truly 100. Rounding can never result in either "0" or "100". """ pc = self.pc_covered if 0 < pc < self._near0: pc = self._near0 elif self._near100 < pc < 100: pc = self._near100 else: pc = round(pc, self._precision) return "%.*f" % (self._precision, pc) pc_covered_str = property(_get_pc_covered_str) def pc_str_width(cls): """How many characters wide can pc_covered_str be?""" width = 3 # "100" if cls._precision > 0: width += 1 + cls._precision return width pc_str_width = classmethod(pc_str_width) def __add__(self, other): nums = Numbers() nums.n_files = self.n_files + other.n_files nums.n_statements = self.n_statements + other.n_statements nums.n_excluded = self.n_excluded + other.n_excluded nums.n_missing = self.n_missing + other.n_missing nums.n_branches = self.n_branches + other.n_branches nums.n_partial_branches = ( self.n_partial_branches + other.n_partial_branches ) nums.n_missing_branches = ( self.n_missing_branches + other.n_missing_branches ) return nums def __radd__(self, other): # Implementing 0+Numbers allows us to sum() a list of Numbers. if other == 0: return self return NotImplemented
bsd-3-clause
Telthor/cppDonorSimulation
donorTests/lib/googletest-master/googletest/test/gtest_filter_unittest.py
364
21325
#!/usr/bin/env python # # Copyright 2005 Google Inc. All Rights Reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Unit test for Google Test test filters. A user can specify which test(s) in a Google Test program to run via either the GTEST_FILTER environment variable or the --gtest_filter flag. This script tests such functionality by invoking gtest_filter_unittest_ (a program written with Google Test) with different environments and command line flags. Note that test sharding may also influence which tests are filtered. Therefore, we test that here also. """ __author__ = '[email protected] (Zhanyong Wan)' import os import re try: from sets import Set as set # For Python 2.3 compatibility except ImportError: pass import sys import gtest_test_utils # Constants. # Checks if this platform can pass empty environment variables to child # processes. We set an env variable to an empty string and invoke a python # script in a subprocess to print whether the variable is STILL in # os.environ. We then use 'eval' to parse the child's output so that an # exception is thrown if the input is anything other than 'True' nor 'False'. os.environ['EMPTY_VAR'] = '' child = gtest_test_utils.Subprocess( [sys.executable, '-c', 'import os; print(\'EMPTY_VAR\' in os.environ)']) CAN_PASS_EMPTY_ENV = eval(child.output) # Check if this platform can unset environment variables in child processes. # We set an env variable to a non-empty string, unset it, and invoke # a python script in a subprocess to print whether the variable # is NO LONGER in os.environ. # We use 'eval' to parse the child's output so that an exception # is thrown if the input is neither 'True' nor 'False'. os.environ['UNSET_VAR'] = 'X' del os.environ['UNSET_VAR'] child = gtest_test_utils.Subprocess( [sys.executable, '-c', 'import os; print(\'UNSET_VAR\' not in os.environ)']) CAN_UNSET_ENV = eval(child.output) # Checks if we should test with an empty filter. This doesn't # make sense on platforms that cannot pass empty env variables (Win32) # and on platforms that cannot unset variables (since we cannot tell # the difference between "" and NULL -- Borland and Solaris < 5.10) CAN_TEST_EMPTY_FILTER = (CAN_PASS_EMPTY_ENV and CAN_UNSET_ENV) # The environment variable for specifying the test filters. FILTER_ENV_VAR = 'GTEST_FILTER' # The environment variables for test sharding. TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS' SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX' SHARD_STATUS_FILE_ENV_VAR = 'GTEST_SHARD_STATUS_FILE' # The command line flag for specifying the test filters. FILTER_FLAG = 'gtest_filter' # The command line flag for including disabled tests. ALSO_RUN_DISABED_TESTS_FLAG = 'gtest_also_run_disabled_tests' # Command to run the gtest_filter_unittest_ program. COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_filter_unittest_') # Regex for determining whether parameterized tests are enabled in the binary. PARAM_TEST_REGEX = re.compile(r'/ParamTest') # Regex for parsing test case names from Google Test's output. TEST_CASE_REGEX = re.compile(r'^\[\-+\] \d+ tests? from (\w+(/\w+)?)') # Regex for parsing test names from Google Test's output. TEST_REGEX = re.compile(r'^\[\s*RUN\s*\].*\.(\w+(/\w+)?)') # The command line flag to tell Google Test to output the list of tests it # will run. LIST_TESTS_FLAG = '--gtest_list_tests' # Indicates whether Google Test supports death tests. SUPPORTS_DEATH_TESTS = 'HasDeathTest' in gtest_test_utils.Subprocess( [COMMAND, LIST_TESTS_FLAG]).output # Full names of all tests in gtest_filter_unittests_. PARAM_TESTS = [ 'SeqP/ParamTest.TestX/0', 'SeqP/ParamTest.TestX/1', 'SeqP/ParamTest.TestY/0', 'SeqP/ParamTest.TestY/1', 'SeqQ/ParamTest.TestX/0', 'SeqQ/ParamTest.TestX/1', 'SeqQ/ParamTest.TestY/0', 'SeqQ/ParamTest.TestY/1', ] DISABLED_TESTS = [ 'BarTest.DISABLED_TestFour', 'BarTest.DISABLED_TestFive', 'BazTest.DISABLED_TestC', 'DISABLED_FoobarTest.Test1', 'DISABLED_FoobarTest.DISABLED_Test2', 'DISABLED_FoobarbazTest.TestA', ] if SUPPORTS_DEATH_TESTS: DEATH_TESTS = [ 'HasDeathTest.Test1', 'HasDeathTest.Test2', ] else: DEATH_TESTS = [] # All the non-disabled tests. ACTIVE_TESTS = [ 'FooTest.Abc', 'FooTest.Xyz', 'BarTest.TestOne', 'BarTest.TestTwo', 'BarTest.TestThree', 'BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB', ] + DEATH_TESTS + PARAM_TESTS param_tests_present = None # Utilities. environ = os.environ.copy() def SetEnvVar(env_var, value): """Sets the env variable to 'value'; unsets it when 'value' is None.""" if value is not None: environ[env_var] = value elif env_var in environ: del environ[env_var] def RunAndReturnOutput(args = None): """Runs the test program and returns its output.""" return gtest_test_utils.Subprocess([COMMAND] + (args or []), env=environ).output def RunAndExtractTestList(args = None): """Runs the test program and returns its exit code and a list of tests run.""" p = gtest_test_utils.Subprocess([COMMAND] + (args or []), env=environ) tests_run = [] test_case = '' test = '' for line in p.output.split('\n'): match = TEST_CASE_REGEX.match(line) if match is not None: test_case = match.group(1) else: match = TEST_REGEX.match(line) if match is not None: test = match.group(1) tests_run.append(test_case + '.' + test) return (tests_run, p.exit_code) def InvokeWithModifiedEnv(extra_env, function, *args, **kwargs): """Runs the given function and arguments in a modified environment.""" try: original_env = environ.copy() environ.update(extra_env) return function(*args, **kwargs) finally: environ.clear() environ.update(original_env) def RunWithSharding(total_shards, shard_index, command): """Runs a test program shard and returns exit code and a list of tests run.""" extra_env = {SHARD_INDEX_ENV_VAR: str(shard_index), TOTAL_SHARDS_ENV_VAR: str(total_shards)} return InvokeWithModifiedEnv(extra_env, RunAndExtractTestList, command) # The unit test. class GTestFilterUnitTest(gtest_test_utils.TestCase): """Tests the env variable or the command line flag to filter tests.""" # Utilities. def AssertSetEqual(self, lhs, rhs): """Asserts that two sets are equal.""" for elem in lhs: self.assert_(elem in rhs, '%s in %s' % (elem, rhs)) for elem in rhs: self.assert_(elem in lhs, '%s in %s' % (elem, lhs)) def AssertPartitionIsValid(self, set_var, list_of_sets): """Asserts that list_of_sets is a valid partition of set_var.""" full_partition = [] for slice_var in list_of_sets: full_partition.extend(slice_var) self.assertEqual(len(set_var), len(full_partition)) self.assertEqual(set(set_var), set(full_partition)) def AdjustForParameterizedTests(self, tests_to_run): """Adjust tests_to_run in case value parameterized tests are disabled.""" global param_tests_present if not param_tests_present: return list(set(tests_to_run) - set(PARAM_TESTS)) else: return tests_to_run def RunAndVerify(self, gtest_filter, tests_to_run): """Checks that the binary runs correct set of tests for a given filter.""" tests_to_run = self.AdjustForParameterizedTests(tests_to_run) # First, tests using the environment variable. # Windows removes empty variables from the environment when passing it # to a new process. This means it is impossible to pass an empty filter # into a process using the environment variable. However, we can still # test the case when the variable is not supplied (i.e., gtest_filter is # None). # pylint: disable-msg=C6403 if CAN_TEST_EMPTY_FILTER or gtest_filter != '': SetEnvVar(FILTER_ENV_VAR, gtest_filter) tests_run = RunAndExtractTestList()[0] SetEnvVar(FILTER_ENV_VAR, None) self.AssertSetEqual(tests_run, tests_to_run) # pylint: enable-msg=C6403 # Next, tests using the command line flag. if gtest_filter is None: args = [] else: args = ['--%s=%s' % (FILTER_FLAG, gtest_filter)] tests_run = RunAndExtractTestList(args)[0] self.AssertSetEqual(tests_run, tests_to_run) def RunAndVerifyWithSharding(self, gtest_filter, total_shards, tests_to_run, args=None, check_exit_0=False): """Checks that binary runs correct tests for the given filter and shard. Runs all shards of gtest_filter_unittest_ with the given filter, and verifies that the right set of tests were run. The union of tests run on each shard should be identical to tests_to_run, without duplicates. Args: gtest_filter: A filter to apply to the tests. total_shards: A total number of shards to split test run into. tests_to_run: A set of tests expected to run. args : Arguments to pass to the to the test binary. check_exit_0: When set to a true value, make sure that all shards return 0. """ tests_to_run = self.AdjustForParameterizedTests(tests_to_run) # Windows removes empty variables from the environment when passing it # to a new process. This means it is impossible to pass an empty filter # into a process using the environment variable. However, we can still # test the case when the variable is not supplied (i.e., gtest_filter is # None). # pylint: disable-msg=C6403 if CAN_TEST_EMPTY_FILTER or gtest_filter != '': SetEnvVar(FILTER_ENV_VAR, gtest_filter) partition = [] for i in range(0, total_shards): (tests_run, exit_code) = RunWithSharding(total_shards, i, args) if check_exit_0: self.assertEqual(0, exit_code) partition.append(tests_run) self.AssertPartitionIsValid(tests_to_run, partition) SetEnvVar(FILTER_ENV_VAR, None) # pylint: enable-msg=C6403 def RunAndVerifyAllowingDisabled(self, gtest_filter, tests_to_run): """Checks that the binary runs correct set of tests for the given filter. Runs gtest_filter_unittest_ with the given filter, and enables disabled tests. Verifies that the right set of tests were run. Args: gtest_filter: A filter to apply to the tests. tests_to_run: A set of tests expected to run. """ tests_to_run = self.AdjustForParameterizedTests(tests_to_run) # Construct the command line. args = ['--%s' % ALSO_RUN_DISABED_TESTS_FLAG] if gtest_filter is not None: args.append('--%s=%s' % (FILTER_FLAG, gtest_filter)) tests_run = RunAndExtractTestList(args)[0] self.AssertSetEqual(tests_run, tests_to_run) def setUp(self): """Sets up test case. Determines whether value-parameterized tests are enabled in the binary and sets the flags accordingly. """ global param_tests_present if param_tests_present is None: param_tests_present = PARAM_TEST_REGEX.search( RunAndReturnOutput()) is not None def testDefaultBehavior(self): """Tests the behavior of not specifying the filter.""" self.RunAndVerify(None, ACTIVE_TESTS) def testDefaultBehaviorWithShards(self): """Tests the behavior without the filter, with sharding enabled.""" self.RunAndVerifyWithSharding(None, 1, ACTIVE_TESTS) self.RunAndVerifyWithSharding(None, 2, ACTIVE_TESTS) self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) - 1, ACTIVE_TESTS) self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS), ACTIVE_TESTS) self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) + 1, ACTIVE_TESTS) def testEmptyFilter(self): """Tests an empty filter.""" self.RunAndVerify('', []) self.RunAndVerifyWithSharding('', 1, []) self.RunAndVerifyWithSharding('', 2, []) def testBadFilter(self): """Tests a filter that matches nothing.""" self.RunAndVerify('BadFilter', []) self.RunAndVerifyAllowingDisabled('BadFilter', []) def testFullName(self): """Tests filtering by full name.""" self.RunAndVerify('FooTest.Xyz', ['FooTest.Xyz']) self.RunAndVerifyAllowingDisabled('FooTest.Xyz', ['FooTest.Xyz']) self.RunAndVerifyWithSharding('FooTest.Xyz', 5, ['FooTest.Xyz']) def testUniversalFilters(self): """Tests filters that match everything.""" self.RunAndVerify('*', ACTIVE_TESTS) self.RunAndVerify('*.*', ACTIVE_TESTS) self.RunAndVerifyWithSharding('*.*', len(ACTIVE_TESTS) - 3, ACTIVE_TESTS) self.RunAndVerifyAllowingDisabled('*', ACTIVE_TESTS + DISABLED_TESTS) self.RunAndVerifyAllowingDisabled('*.*', ACTIVE_TESTS + DISABLED_TESTS) def testFilterByTestCase(self): """Tests filtering by test case name.""" self.RunAndVerify('FooTest.*', ['FooTest.Abc', 'FooTest.Xyz']) BAZ_TESTS = ['BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB'] self.RunAndVerify('BazTest.*', BAZ_TESTS) self.RunAndVerifyAllowingDisabled('BazTest.*', BAZ_TESTS + ['BazTest.DISABLED_TestC']) def testFilterByTest(self): """Tests filtering by test name.""" self.RunAndVerify('*.TestOne', ['BarTest.TestOne', 'BazTest.TestOne']) def testFilterDisabledTests(self): """Select only the disabled tests to run.""" self.RunAndVerify('DISABLED_FoobarTest.Test1', []) self.RunAndVerifyAllowingDisabled('DISABLED_FoobarTest.Test1', ['DISABLED_FoobarTest.Test1']) self.RunAndVerify('*DISABLED_*', []) self.RunAndVerifyAllowingDisabled('*DISABLED_*', DISABLED_TESTS) self.RunAndVerify('*.DISABLED_*', []) self.RunAndVerifyAllowingDisabled('*.DISABLED_*', [ 'BarTest.DISABLED_TestFour', 'BarTest.DISABLED_TestFive', 'BazTest.DISABLED_TestC', 'DISABLED_FoobarTest.DISABLED_Test2', ]) self.RunAndVerify('DISABLED_*', []) self.RunAndVerifyAllowingDisabled('DISABLED_*', [ 'DISABLED_FoobarTest.Test1', 'DISABLED_FoobarTest.DISABLED_Test2', 'DISABLED_FoobarbazTest.TestA', ]) def testWildcardInTestCaseName(self): """Tests using wildcard in the test case name.""" self.RunAndVerify('*a*.*', [ 'BarTest.TestOne', 'BarTest.TestTwo', 'BarTest.TestThree', 'BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB', ] + DEATH_TESTS + PARAM_TESTS) def testWildcardInTestName(self): """Tests using wildcard in the test name.""" self.RunAndVerify('*.*A*', ['FooTest.Abc', 'BazTest.TestA']) def testFilterWithoutDot(self): """Tests a filter that has no '.' in it.""" self.RunAndVerify('*z*', [ 'FooTest.Xyz', 'BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB', ]) def testTwoPatterns(self): """Tests filters that consist of two patterns.""" self.RunAndVerify('Foo*.*:*A*', [ 'FooTest.Abc', 'FooTest.Xyz', 'BazTest.TestA', ]) # An empty pattern + a non-empty one self.RunAndVerify(':*A*', ['FooTest.Abc', 'BazTest.TestA']) def testThreePatterns(self): """Tests filters that consist of three patterns.""" self.RunAndVerify('*oo*:*A*:*One', [ 'FooTest.Abc', 'FooTest.Xyz', 'BarTest.TestOne', 'BazTest.TestOne', 'BazTest.TestA', ]) # The 2nd pattern is empty. self.RunAndVerify('*oo*::*One', [ 'FooTest.Abc', 'FooTest.Xyz', 'BarTest.TestOne', 'BazTest.TestOne', ]) # The last 2 patterns are empty. self.RunAndVerify('*oo*::', [ 'FooTest.Abc', 'FooTest.Xyz', ]) def testNegativeFilters(self): self.RunAndVerify('*-BazTest.TestOne', [ 'FooTest.Abc', 'FooTest.Xyz', 'BarTest.TestOne', 'BarTest.TestTwo', 'BarTest.TestThree', 'BazTest.TestA', 'BazTest.TestB', ] + DEATH_TESTS + PARAM_TESTS) self.RunAndVerify('*-FooTest.Abc:BazTest.*', [ 'FooTest.Xyz', 'BarTest.TestOne', 'BarTest.TestTwo', 'BarTest.TestThree', ] + DEATH_TESTS + PARAM_TESTS) self.RunAndVerify('BarTest.*-BarTest.TestOne', [ 'BarTest.TestTwo', 'BarTest.TestThree', ]) # Tests without leading '*'. self.RunAndVerify('-FooTest.Abc:FooTest.Xyz:BazTest.*', [ 'BarTest.TestOne', 'BarTest.TestTwo', 'BarTest.TestThree', ] + DEATH_TESTS + PARAM_TESTS) # Value parameterized tests. self.RunAndVerify('*/*', PARAM_TESTS) # Value parameterized tests filtering by the sequence name. self.RunAndVerify('SeqP/*', [ 'SeqP/ParamTest.TestX/0', 'SeqP/ParamTest.TestX/1', 'SeqP/ParamTest.TestY/0', 'SeqP/ParamTest.TestY/1', ]) # Value parameterized tests filtering by the test name. self.RunAndVerify('*/0', [ 'SeqP/ParamTest.TestX/0', 'SeqP/ParamTest.TestY/0', 'SeqQ/ParamTest.TestX/0', 'SeqQ/ParamTest.TestY/0', ]) def testFlagOverridesEnvVar(self): """Tests that the filter flag overrides the filtering env. variable.""" SetEnvVar(FILTER_ENV_VAR, 'Foo*') args = ['--%s=%s' % (FILTER_FLAG, '*One')] tests_run = RunAndExtractTestList(args)[0] SetEnvVar(FILTER_ENV_VAR, None) self.AssertSetEqual(tests_run, ['BarTest.TestOne', 'BazTest.TestOne']) def testShardStatusFileIsCreated(self): """Tests that the shard file is created if specified in the environment.""" shard_status_file = os.path.join(gtest_test_utils.GetTempDir(), 'shard_status_file') self.assert_(not os.path.exists(shard_status_file)) extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file} try: InvokeWithModifiedEnv(extra_env, RunAndReturnOutput) finally: self.assert_(os.path.exists(shard_status_file)) os.remove(shard_status_file) def testShardStatusFileIsCreatedWithListTests(self): """Tests that the shard file is created with the "list_tests" flag.""" shard_status_file = os.path.join(gtest_test_utils.GetTempDir(), 'shard_status_file2') self.assert_(not os.path.exists(shard_status_file)) extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file} try: output = InvokeWithModifiedEnv(extra_env, RunAndReturnOutput, [LIST_TESTS_FLAG]) finally: # This assertion ensures that Google Test enumerated the tests as # opposed to running them. self.assert_('[==========]' not in output, 'Unexpected output during test enumeration.\n' 'Please ensure that LIST_TESTS_FLAG is assigned the\n' 'correct flag value for listing Google Test tests.') self.assert_(os.path.exists(shard_status_file)) os.remove(shard_status_file) if SUPPORTS_DEATH_TESTS: def testShardingWorksWithDeathTests(self): """Tests integration with death tests and sharding.""" gtest_filter = 'HasDeathTest.*:SeqP/*' expected_tests = [ 'HasDeathTest.Test1', 'HasDeathTest.Test2', 'SeqP/ParamTest.TestX/0', 'SeqP/ParamTest.TestX/1', 'SeqP/ParamTest.TestY/0', 'SeqP/ParamTest.TestY/1', ] for flag in ['--gtest_death_test_style=threadsafe', '--gtest_death_test_style=fast']: self.RunAndVerifyWithSharding(gtest_filter, 3, expected_tests, check_exit_0=True, args=[flag]) self.RunAndVerifyWithSharding(gtest_filter, 5, expected_tests, check_exit_0=True, args=[flag]) if __name__ == '__main__': gtest_test_utils.Main()
mit
sodafree/backend
build/lib.linux-i686-2.7/django/conf/locale/sl/formats.py
257
1834
# -*- encoding: utf-8 -*- # This file is distributed under the same license as the Django package. # # The *_FORMAT strings use the Django date format syntax, # see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date DATE_FORMAT = 'd. F Y' TIME_FORMAT = 'H:i:s' DATETIME_FORMAT = 'j. F Y. H:i' YEAR_MONTH_FORMAT = 'F Y' MONTH_DAY_FORMAT = 'j. F' SHORT_DATE_FORMAT = 'j. M. Y' SHORT_DATETIME_FORMAT = 'j.n.Y. H:i' FIRST_DAY_OF_WEEK = 0 # The *_INPUT_FORMATS strings use the Python strftime format syntax, # see http://docs.python.org/library/datetime.html#strftime-strptime-behavior DATE_INPUT_FORMATS = ( '%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06' '%d-%m-%Y', # '25-10-2006' '%d. %m. %Y', '%d. %m. %y', # '25. 10. 2006', '25. 10. 06' ) TIME_INPUT_FORMATS = ( '%H:%M:%S', # '14:30:59' '%H:%M', # '14:30' ) DATETIME_INPUT_FORMATS = ( '%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59' '%d.%m.%Y %H:%M', # '25.10.2006 14:30' '%d.%m.%Y', # '25.10.2006' '%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59' '%d.%m.%y %H:%M', # '25.10.06 14:30' '%d.%m.%y', # '25.10.06' '%d-%m-%Y %H:%M:%S', # '25-10-2006 14:30:59' '%d-%m-%Y %H:%M', # '25-10-2006 14:30' '%d-%m-%Y', # '25-10-2006' '%d. %m. %Y %H:%M:%S', # '25. 10. 2006 14:30:59' '%d. %m. %Y %H:%M', # '25. 10. 2006 14:30' '%d. %m. %Y', # '25. 10. 2006' '%d. %m. %y %H:%M:%S', # '25. 10. 06 14:30:59' '%d. %m. %y %H:%M', # '25. 10. 06 14:30' '%d. %m. %y', # '25. 10. 06' ) DECIMAL_SEPARATOR = ',' THOUSAND_SEPARATOR = '.' NUMBER_GROUPING = 3
bsd-3-clause
stansonhealth/ansible-modules-core
cloud/openstack/_nova_keypair.py
41
5486
#!/usr/bin/python #coding: utf-8 -*- # (c) 2013, Benno Joy <[email protected]> # (c) 2013, John Dewey <[email protected]> # # This module is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this software. If not, see <http://www.gnu.org/licenses/>. import time try: from novaclient.v1_1 import client as nova_client from novaclient import exceptions as exc HAS_NOVACLIENT = True except ImportError: HAS_NOVACLIENT = False DOCUMENTATION = ''' --- module: nova_keypair version_added: "1.2" author: - "Benno Joy (@bennojoy)" - "Michael DeHaan" deprecated: Deprecated in 2.0. Use os_keypair instead short_description: Add/Delete key pair from nova description: - Add or Remove key pair from nova . options: login_username: description: - login username to authenticate to keystone required: true default: admin login_password: description: - Password of login user required: true default: 'yes' login_tenant_name: description: - The tenant name of the login user required: true default: 'yes' auth_url: description: - The keystone url for authentication required: false default: 'http://127.0.0.1:35357/v2.0/' region_name: description: - Name of the region required: false default: None state: description: - Indicate desired state of the resource choices: ['present', 'absent'] default: present name: description: - Name that has to be given to the key pair required: true default: None public_key: description: - The public key that would be uploaded to nova and injected to vm's upon creation required: false default: None requirements: - "python >= 2.6" - "python-novaclient" ''' EXAMPLES = ''' # Creates a key pair with the running users public key - nova_keypair: state=present login_username=admin login_password=admin login_tenant_name=admin name=ansible_key public_key={{ lookup('file','~/.ssh/id_rsa.pub') }} # Creates a new key pair and the private key returned after the run. - nova_keypair: state=present login_username=admin login_password=admin login_tenant_name=admin name=ansible_key ''' def main(): argument_spec = openstack_argument_spec() argument_spec.update(dict( name = dict(required=True), public_key = dict(default=None), state = dict(default='present', choices=['absent', 'present']) )) module = AnsibleModule(argument_spec=argument_spec) if not HAS_NOVACLIENT: module.fail_json(msg='python-novaclient is required for this module to work') nova = nova_client.Client(module.params['login_username'], module.params['login_password'], module.params['login_tenant_name'], module.params['auth_url'], region_name=module.params['region_name'], service_type='compute') try: nova.authenticate() except exc.Unauthorized as e: module.fail_json(msg = "Invalid OpenStack Nova credentials.: %s" % e.message) except exc.AuthorizationFailure as e: module.fail_json(msg = "Unable to authorize user: %s" % e.message) if module.params['state'] == 'present': for key in nova.keypairs.list(): if key.name == module.params['name']: if module.params['public_key'] and (module.params['public_key'] != key.public_key ): module.fail_json(msg = "name {} present but key hash not the same as offered. Delete key first.".format(key['name'])) else: module.exit_json(changed = False, result = "Key present") try: key = nova.keypairs.create(module.params['name'], module.params['public_key']) except Exception as e: module.exit_json(msg = "Error in creating the keypair: %s" % e.message) if not module.params['public_key']: module.exit_json(changed = True, key = key.private_key) module.exit_json(changed = True, key = None) if module.params['state'] == 'absent': for key in nova.keypairs.list(): if key.name == module.params['name']: try: nova.keypairs.delete(module.params['name']) except Exception as e: module.fail_json(msg = "The keypair deletion has failed: %s" % e.message) module.exit_json( changed = True, result = "deleted") module.exit_json(changed = False, result = "not present") # this is magic, see lib/ansible/module.params['common.py from ansible.module_utils.basic import * from ansible.module_utils.openstack import * if __name__ == '__main__': main()
gpl-3.0
drmrd/ansible
lib/ansible/modules/cloud/google/gcp_healthcheck.py
48
15302
#!/usr/bin/python # Copyright 2017 Google Inc. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: gcp_healthcheck version_added: "2.4" short_description: Create, Update or Destroy a Healthcheck. description: - Create, Update or Destroy a Healthcheck. Currently only HTTP and HTTPS Healthchecks are supported. Healthchecks are used to monitor individual instances, managed instance groups and/or backend services. Healtchecks are reusable. - Visit U(https://cloud.google.com/compute/docs/load-balancing/health-checks) for an overview of Healthchecks on GCP. - See U(https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks) for API details on HTTP Healthchecks. - See U(https://cloud.google.com/compute/docs/reference/latest/httpsHealthChecks) for more details on the HTTPS Healtcheck API. requirements: - "python >= 2.6" - "google-api-python-client >= 1.6.2" - "google-auth >= 0.9.0" - "google-auth-httplib2 >= 0.0.2" notes: - Only supports HTTP and HTTPS Healthchecks currently. author: - "Tom Melendez (@supertom) <[email protected]>" options: check_interval: description: - How often (in seconds) to send a health check. default: 5 healthcheck_name: description: - Name of the Healthcheck. required: true healthcheck_type: description: - Type of Healthcheck. required: true choices: ["HTTP", "HTTPS"] host_header: description: - The value of the host header in the health check request. If left empty, the public IP on behalf of which this health check is performed will be used. required: true default: "" port: description: - The TCP port number for the health check request. The default value is 443 for HTTPS and 80 for HTTP. request_path: description: - The request path of the HTTPS health check request. required: false default: "/" state: description: State of the Healthcheck. required: true choices: ["present", "absent"] timeout: description: - How long (in seconds) to wait for a response before claiming failure. It is invalid for timeout to have a greater value than check_interval. default: 5 unhealthy_threshold: description: - A so-far healthy instance will be marked unhealthy after this many consecutive failures. default: 2 healthy_threshold: description: - A so-far unhealthy instance will be marked healthy after this many consecutive successes. default: 2 service_account_email: description: - service account email service_account_permissions: version_added: "2.0" description: - service account permissions (see U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create), --scopes section for detailed information) choices: [ "bigquery", "cloud-platform", "compute-ro", "compute-rw", "useraccounts-ro", "useraccounts-rw", "datastore", "logging-write", "monitoring", "sql-admin", "storage-full", "storage-ro", "storage-rw", "taskqueue", "userinfo-email" ] credentials_file: description: - Path to the JSON file associated with the service account email project_id: description: - Your GCP project ID ''' EXAMPLES = ''' - name: Create Minimum HealthCheck gcp_healthcheck: service_account_email: "{{ service_account_email }}" credentials_file: "{{ credentials_file }}" project_id: "{{ project_id }}" healthcheck_name: my-healthcheck healthcheck_type: HTTP state: present - name: Create HTTP HealthCheck gcp_healthcheck: service_account_email: "{{ service_account_email }}" credentials_file: "{{ credentials_file }}" project_id: "{{ project_id }}" healthcheck_name: my-healthcheck healthcheck_type: HTTP host: my-host request_path: /hc check_interval: 10 timeout: 30 unhealthy_threshhold: 2 healthy_threshhold: 1 state: present - name: Create HTTPS HealthCheck gcp_healthcheck: service_account_email: "{{ service_account_email }}" credentials_file: "{{ credentials_file }}" project_id: "{{ project_id }}" healthcheck_name: "{{ https_healthcheck }}" healthcheck_type: HTTPS host_header: my-host request_path: /hc check_interval: 5 timeout: 5 unhealthy_threshold: 2 healthy_threshold: 1 state: present ''' RETURN = ''' state: description: state of the Healthcheck returned: Always. type: str sample: present healthcheck_name: description: Name of the Healthcheck returned: Always type: str sample: my-url-map healthcheck_type: description: Type of the Healthcheck returned: Always type: str sample: HTTP healthcheck: description: GCP Healthcheck dictionary returned: Always. Refer to GCP documentation for detailed field descriptions. type: dict sample: { "name": "my-hc", "port": 443, "requestPath": "/foo" } ''' from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.gcp import get_google_api_client, GCPUtils USER_AGENT_PRODUCT = 'ansible-healthcheck' USER_AGENT_VERSION = '0.0.1' def _validate_healthcheck_params(params): """ Validate healthcheck params. Simple validation has already assumed by AnsibleModule. :param params: Ansible dictionary containing configuration. :type params: ``dict`` :return: True or raises ValueError :rtype: ``bool`` or `class:ValueError` """ if params['timeout'] > params['check_interval']: raise ValueError("timeout (%s) is greater than check_interval (%s)" % ( params['timeout'], params['check_interval'])) return (True, '') def _build_healthcheck_dict(params): """ Reformat services in Ansible Params for GCP. :param params: Params from AnsibleModule object :type params: ``dict`` :param project_id: The GCP project ID. :type project_id: ``str`` :return: dictionary suitable for submission to GCP HealthCheck (HTTP/HTTPS) API. :rtype ``dict`` """ gcp_dict = GCPUtils.params_to_gcp_dict(params, 'healthcheck_name') if 'timeout' in gcp_dict: gcp_dict['timeoutSec'] = gcp_dict['timeout'] del gcp_dict['timeout'] if 'checkInterval' in gcp_dict: gcp_dict['checkIntervalSec'] = gcp_dict['checkInterval'] del gcp_dict['checkInterval'] if 'hostHeader' in gcp_dict: gcp_dict['host'] = gcp_dict['hostHeader'] del gcp_dict['hostHeader'] if 'healthcheckType' in gcp_dict: del gcp_dict['healthcheckType'] return gcp_dict def _get_req_resource(client, resource_type): if resource_type == 'HTTPS': return (client.httpsHealthChecks(), 'httpsHealthCheck') else: return (client.httpHealthChecks(), 'httpHealthCheck') def get_healthcheck(client, name, project_id=None, resource_type='HTTP'): """ Get a Healthcheck from GCP. :param client: An initialized GCE Compute Disovery resource. :type client: :class: `googleapiclient.discovery.Resource` :param name: Name of the Url Map. :type name: ``str`` :param project_id: The GCP project ID. :type project_id: ``str`` :return: A dict resp from the respective GCP 'get' request. :rtype: ``dict`` """ try: resource, entity_name = _get_req_resource(client, resource_type) args = {'project': project_id, entity_name: name} req = resource.get(**args) return GCPUtils.execute_api_client_req(req, raise_404=False) except: raise def create_healthcheck(client, params, project_id, resource_type='HTTP'): """ Create a new Healthcheck. :param client: An initialized GCE Compute Disovery resource. :type client: :class: `googleapiclient.discovery.Resource` :param params: Dictionary of arguments from AnsibleModule. :type params: ``dict`` :return: Tuple with changed status and response dict :rtype: ``tuple`` in the format of (bool, dict) """ gcp_dict = _build_healthcheck_dict(params) try: resource, _ = _get_req_resource(client, resource_type) args = {'project': project_id, 'body': gcp_dict} req = resource.insert(**args) return_data = GCPUtils.execute_api_client_req(req, client, raw=False) if not return_data: return_data = get_healthcheck(client, name=params['healthcheck_name'], project_id=project_id) return (True, return_data) except: raise def delete_healthcheck(client, name, project_id, resource_type='HTTP'): """ Delete a Healthcheck. :param client: An initialized GCE Compute Disover resource. :type client: :class: `googleapiclient.discovery.Resource` :param name: Name of the Url Map. :type name: ``str`` :param project_id: The GCP project ID. :type project_id: ``str`` :return: Tuple with changed status and response dict :rtype: ``tuple`` in the format of (bool, dict) """ try: resource, entity_name = _get_req_resource(client, resource_type) args = {'project': project_id, entity_name: name} req = resource.delete(**args) return_data = GCPUtils.execute_api_client_req(req, client) return (True, return_data) except: raise def update_healthcheck(client, healthcheck, params, name, project_id, resource_type='HTTP'): """ Update a Healthcheck. If the healthcheck has not changed, the update will not occur. :param client: An initialized GCE Compute Disovery resource. :type client: :class: `googleapiclient.discovery.Resource` :param healthcheck: Name of the Url Map. :type healthcheck: ``dict`` :param params: Dictionary of arguments from AnsibleModule. :type params: ``dict`` :param name: Name of the Url Map. :type name: ``str`` :param project_id: The GCP project ID. :type project_id: ``str`` :return: Tuple with changed status and response dict :rtype: ``tuple`` in the format of (bool, dict) """ gcp_dict = _build_healthcheck_dict(params) ans = GCPUtils.are_params_equal(healthcheck, gcp_dict) if ans: return (False, 'no update necessary') try: resource, entity_name = _get_req_resource(client, resource_type) args = {'project': project_id, entity_name: name, 'body': gcp_dict} req = resource.update(**args) return_data = GCPUtils.execute_api_client_req( req, client=client, raw=False) return (True, return_data) except: raise def main(): module = AnsibleModule(argument_spec=dict( healthcheck_name=dict(required=True), healthcheck_type=dict(required=True, choices=['HTTP', 'HTTPS']), request_path=dict(required=False, default='/'), check_interval=dict(required=False, type='int', default=5), healthy_threshold=dict(required=False, type='int', default=2), unhealthy_threshold=dict(required=False, type='int', default=2), host_header=dict(required=False, type='str', default=''), timeout=dict(required=False, type='int', default=5), port=dict(required=False, type='int'), state=dict(choices=['absent', 'present'], default='present'), service_account_email=dict(), service_account_permissions=dict(type='list'), credentials_file=dict(), project_id=dict(), ), ) client, conn_params = get_google_api_client(module, 'compute', user_agent_product=USER_AGENT_PRODUCT, user_agent_version=USER_AGENT_VERSION) params = {} params['healthcheck_name'] = module.params.get('healthcheck_name') params['healthcheck_type'] = module.params.get('healthcheck_type') params['request_path'] = module.params.get('request_path') params['check_interval'] = module.params.get('check_interval') params['healthy_threshold'] = module.params.get('healthy_threshold') params['unhealthy_threshold'] = module.params.get('unhealthy_threshold') params['host_header'] = module.params.get('host_header') params['timeout'] = module.params.get('timeout') params['port'] = module.params.get('port', None) params['state'] = module.params.get('state') if not params['port']: params['port'] = 80 if params['healthcheck_type'] == 'HTTPS': params['port'] = 443 try: _validate_healthcheck_params(params) except Exception as e: module.fail_json(msg=e.message, changed=False) changed = False json_output = {'state': params['state']} healthcheck = get_healthcheck(client, name=params['healthcheck_name'], project_id=conn_params['project_id'], resource_type=params['healthcheck_type']) if not healthcheck: if params['state'] == 'absent': # Doesn't exist in GCE, and state==absent. changed = False module.fail_json( msg="Cannot delete unknown healthcheck: %s" % (params['healthcheck_name'])) else: # Create changed, json_output['healthcheck'] = create_healthcheck(client, params=params, project_id=conn_params['project_id'], resource_type=params['healthcheck_type']) elif params['state'] == 'absent': # Delete changed, json_output['healthcheck'] = delete_healthcheck(client, name=params['healthcheck_name'], project_id=conn_params['project_id'], resource_type=params['healthcheck_type']) else: changed, json_output['healthcheck'] = update_healthcheck(client, healthcheck=healthcheck, params=params, name=params['healthcheck_name'], project_id=conn_params['project_id'], resource_type=params['healthcheck_type']) json_output['changed'] = changed json_output.update(params) module.exit_json(**json_output) if __name__ == '__main__': main()
gpl-3.0
Dino0631/RedRain-Bot
lib/pip/_vendor/requests/packages/urllib3/exceptions.py
515
5599
from __future__ import absolute_import # Base Exceptions class HTTPError(Exception): "Base exception used by this module." pass class HTTPWarning(Warning): "Base warning used by this module." pass class PoolError(HTTPError): "Base exception for errors caused within a pool." def __init__(self, pool, message): self.pool = pool HTTPError.__init__(self, "%s: %s" % (pool, message)) def __reduce__(self): # For pickling purposes. return self.__class__, (None, None) class RequestError(PoolError): "Base exception for PoolErrors that have associated URLs." def __init__(self, pool, url, message): self.url = url PoolError.__init__(self, pool, message) def __reduce__(self): # For pickling purposes. return self.__class__, (None, self.url, None) class SSLError(HTTPError): "Raised when SSL certificate fails in an HTTPS connection." pass class ProxyError(HTTPError): "Raised when the connection to a proxy fails." pass class DecodeError(HTTPError): "Raised when automatic decoding based on Content-Type fails." pass class ProtocolError(HTTPError): "Raised when something unexpected happens mid-request/response." pass #: Renamed to ProtocolError but aliased for backwards compatibility. ConnectionError = ProtocolError # Leaf Exceptions class MaxRetryError(RequestError): """Raised when the maximum number of retries is exceeded. :param pool: The connection pool :type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool` :param string url: The requested Url :param exceptions.Exception reason: The underlying error """ def __init__(self, pool, url, reason=None): self.reason = reason message = "Max retries exceeded with url: %s (Caused by %r)" % ( url, reason) RequestError.__init__(self, pool, url, message) class HostChangedError(RequestError): "Raised when an existing pool gets a request for a foreign host." def __init__(self, pool, url, retries=3): message = "Tried to open a foreign host with url: %s" % url RequestError.__init__(self, pool, url, message) self.retries = retries class TimeoutStateError(HTTPError): """ Raised when passing an invalid state to a timeout """ pass class TimeoutError(HTTPError): """ Raised when a socket timeout error occurs. Catching this error will catch both :exc:`ReadTimeoutErrors <ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`. """ pass class ReadTimeoutError(TimeoutError, RequestError): "Raised when a socket timeout occurs while receiving data from a server" pass # This timeout error does not have a URL attached and needs to inherit from the # base HTTPError class ConnectTimeoutError(TimeoutError): "Raised when a socket timeout occurs while connecting to a server" pass class NewConnectionError(ConnectTimeoutError, PoolError): "Raised when we fail to establish a new connection. Usually ECONNREFUSED." pass class EmptyPoolError(PoolError): "Raised when a pool runs out of connections and no more are allowed." pass class ClosedPoolError(PoolError): "Raised when a request enters a pool after the pool has been closed." pass class LocationValueError(ValueError, HTTPError): "Raised when there is something wrong with a given URL input." pass class LocationParseError(LocationValueError): "Raised when get_host or similar fails to parse the URL input." def __init__(self, location): message = "Failed to parse: %s" % location HTTPError.__init__(self, message) self.location = location class ResponseError(HTTPError): "Used as a container for an error reason supplied in a MaxRetryError." GENERIC_ERROR = 'too many error responses' SPECIFIC_ERROR = 'too many {status_code} error responses' class SecurityWarning(HTTPWarning): "Warned when perfoming security reducing actions" pass class SubjectAltNameWarning(SecurityWarning): "Warned when connecting to a host with a certificate missing a SAN." pass class InsecureRequestWarning(SecurityWarning): "Warned when making an unverified HTTPS request." pass class SystemTimeWarning(SecurityWarning): "Warned when system time is suspected to be wrong" pass class InsecurePlatformWarning(SecurityWarning): "Warned when certain SSL configuration is not available on a platform." pass class SNIMissingWarning(HTTPWarning): "Warned when making a HTTPS request without SNI available." pass class DependencyWarning(HTTPWarning): """ Warned when an attempt is made to import a module with missing optional dependencies. """ pass class ResponseNotChunked(ProtocolError, ValueError): "Response needs to be chunked in order to read it as chunks." pass class ProxySchemeUnknown(AssertionError, ValueError): "ProxyManager does not support the supplied scheme" # TODO(t-8ch): Stop inheriting from AssertionError in v2.0. def __init__(self, scheme): message = "Not supported proxy scheme %s" % scheme super(ProxySchemeUnknown, self).__init__(message) class HeaderParsingError(HTTPError): "Raised by assert_header_parsing, but we convert it to a log.warning statement." def __init__(self, defects, unparsed_data): message = '%s, unparsed data: %r' % (defects or 'Unknown', unparsed_data) super(HeaderParsingError, self).__init__(message)
gpl-3.0
40423106/2016fallcadp_ag4
publishconf.py
251
1705
#!/usr/bin/env python # -*- coding: utf-8 -*- # from __future__ import unicode_literals # This file is only used if you use `make publish` or # explicitly specify it as your config file. import os import sys sys.path.append(os.curdir) from pelicanconf import * # 因為 publishconf.py 在 pelicanconf.py 之後, 因此若兩處有相同變數的設定, 將以較後讀入的 publishconf.py 中的設定為主. # 將所有靜態 html 檔案移到 blog 子目錄 SITEURL = 'blog' # 此設定用於將資料送到 gh-pages, 因此使用絕對 URL 設定 (嘗試用 相對目錄設定) RELATIVE_URLS = True # 為了要讓 local 與 gh-pages 上都能夠使用 Tipue search, 可能要採用不同的 theme THEME = 'theme/pelican-bootstrap3' #BOOTSTRAP_THEME = 'readable' #BOOTSTRAP_THEME = 'readable-old' BOOTSTRAP_THEME = 'united' #PYGMENTS_STYLE = 'paraiso-drak' #PYGMENTS_STYLE = 'fruity' # 為了同時兼容 render_math, 必須放棄 fruity PYGMENTS_STYLE = 'monokai' FEED_ALL_ATOM = 'feeds/all.atom.xml' CATEGORY_FEED_ATOM = 'feeds/%s.atom.xml' DELETE_OUTPUT_DIRECTORY = True # Following items are often useful when publishing # 必須使用各網誌用戶各自 disqus 討論版設定 #DISQUS_SITENAME = "" #GOOGLE_ANALYTICS = "" # 設定網誌以 md 檔案建立的 file system date 為準, 無需自行設定 DEFAULT_DATE = 'fs' # 遠端的 code hightlight MD_EXTENSIONS = ['fenced_code', 'extra', 'codehilite(linenums=True)'] # 若要依照日期存檔呼叫 #ARTICLE_URL = 'posts/{date:%Y}/{date:%m}/{date:%d}/{slug}/' #ARTICLE_SAVE_AS = 'posts/{date:%Y}/{date:%m}/{date:%d}/{slug}/index.html' PAGE_URL = 'pages/{slug}/' PAGE_SAVE_AS = 'pages/{slug}/index.html' SHOW_ARTICLE_AUTHOR = True
agpl-3.0
citrix-openstack-build/swift
swift/container/sync.py
2
19536
# Copyright (c) 2010-2012 OpenStack, LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from swift import gettext_ as _ from time import ctime, time from random import random, shuffle from struct import unpack_from from eventlet import sleep, Timeout import swift.common.db from swift.container import server as container_server from swiftclient import ClientException, delete_object, put_object, \ quote from swift.container.backend import ContainerBroker from swift.common.direct_client import direct_get_object from swift.common.ring import Ring from swift.common.utils import get_logger, config_true_value, \ validate_sync_to, whataremyips, FileLikeIter from swift.common.ondisk import audit_location_generator, hash_path from swift.common.daemon import Daemon from swift.common.http import HTTP_UNAUTHORIZED, HTTP_NOT_FOUND class ContainerSync(Daemon): """ Daemon to sync syncable containers. This is done by scanning the local devices for container databases and checking for x-container-sync-to and x-container-sync-key metadata values. If they exist, newer rows since the last sync will trigger PUTs or DELETEs to the other container. .. note:: Container sync will sync object POSTs only if the proxy server is set to use "object_post_as_copy = true" which is the default. So-called fast object posts, "object_post_as_copy = false" do not update the container listings and therefore can't be detected for synchronization. The actual syncing is slightly more complicated to make use of the three (or number-of-replicas) main nodes for a container without each trying to do the exact same work but also without missing work if one node happens to be down. Two sync points are kept per container database. All rows between the two sync points trigger updates. Any rows newer than both sync points cause updates depending on the node's position for the container (primary nodes do one third, etc. depending on the replica count of course). After a sync run, the first sync point is set to the newest ROWID known and the second sync point is set to newest ROWID for which all updates have been sent. An example may help. Assume replica count is 3 and perfectly matching ROWIDs starting at 1. First sync run, database has 6 rows: * SyncPoint1 starts as -1. * SyncPoint2 starts as -1. * No rows between points, so no "all updates" rows. * Six rows newer than SyncPoint1, so a third of the rows are sent by node 1, another third by node 2, remaining third by node 3. * SyncPoint1 is set as 6 (the newest ROWID known). * SyncPoint2 is left as -1 since no "all updates" rows were synced. Next sync run, database has 12 rows: * SyncPoint1 starts as 6. * SyncPoint2 starts as -1. * The rows between -1 and 6 all trigger updates (most of which should short-circuit on the remote end as having already been done). * Six more rows newer than SyncPoint1, so a third of the rows are sent by node 1, another third by node 2, remaining third by node 3. * SyncPoint1 is set as 12 (the newest ROWID known). * SyncPoint2 is set as 6 (the newest "all updates" ROWID). In this way, under normal circumstances each node sends its share of updates each run and just sends a batch of older updates to ensure nothing was missed. :param conf: The dict of configuration values from the [container-sync] section of the container-server.conf :param container_ring: If None, the <swift_dir>/container.ring.gz will be loaded. This is overridden by unit tests. :param object_ring: If None, the <swift_dir>/object.ring.gz will be loaded. This is overridden by unit tests. """ def __init__(self, conf, container_ring=None, object_ring=None): #: The dict of configuration values from the [container-sync] section #: of the container-server.conf. self.conf = conf #: Logger to use for container-sync log lines. self.logger = get_logger(conf, log_route='container-sync') #: Path to the local device mount points. self.devices = conf.get('devices', '/srv/node') #: Indicates whether mount points should be verified as actual mount #: points (normally true, false for tests and SAIO). self.mount_check = config_true_value(conf.get('mount_check', 'true')) #: Minimum time between full scans. This is to keep the daemon from #: running wild on near empty systems. self.interval = int(conf.get('interval', 300)) #: Maximum amount of time to spend syncing a container before moving on #: to the next one. If a conatiner sync hasn't finished in this time, #: it'll just be resumed next scan. self.container_time = int(conf.get('container_time', 60)) #: The list of hosts we're allowed to send syncs to. self.allowed_sync_hosts = [ h.strip() for h in conf.get('allowed_sync_hosts', '127.0.0.1').split(',') if h.strip()] self.proxy = conf.get('sync_proxy') #: Number of containers with sync turned on that were successfully #: synced. self.container_syncs = 0 #: Number of successful DELETEs triggered. self.container_deletes = 0 #: Number of successful PUTs triggered. self.container_puts = 0 #: Number of containers that didn't have sync turned on. self.container_skips = 0 #: Number of containers that had a failure of some type. self.container_failures = 0 #: Time of last stats report. self.reported = time() swift_dir = conf.get('swift_dir', '/etc/swift') #: swift.common.ring.Ring for locating containers. self.container_ring = container_ring or Ring(swift_dir, ring_name='container') #: swift.common.ring.Ring for locating objects. self.object_ring = object_ring or Ring(swift_dir, ring_name='object') self._myips = whataremyips() self._myport = int(conf.get('bind_port', 6001)) swift.common.db.DB_PREALLOCATION = \ config_true_value(conf.get('db_preallocation', 'f')) def run_forever(self): """ Runs container sync scans until stopped. """ sleep(random() * self.interval) while True: begin = time() all_locs = audit_location_generator(self.devices, container_server.DATADIR, '.db', mount_check=self.mount_check, logger=self.logger) for path, device, partition in all_locs: self.container_sync(path) if time() - self.reported >= 3600: # once an hour self.report() elapsed = time() - begin if elapsed < self.interval: sleep(self.interval - elapsed) def run_once(self): """ Runs a single container sync scan. """ self.logger.info(_('Begin container sync "once" mode')) begin = time() all_locs = audit_location_generator(self.devices, container_server.DATADIR, '.db', mount_check=self.mount_check, logger=self.logger) for path, device, partition in all_locs: self.container_sync(path) if time() - self.reported >= 3600: # once an hour self.report() self.report() elapsed = time() - begin self.logger.info( _('Container sync "once" mode completed: %.02fs'), elapsed) def report(self): """ Writes a report of the stats to the logger and resets the stats for the next report. """ self.logger.info( _('Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s ' 'puts], %(skip)s skipped, %(fail)s failed'), {'time': ctime(self.reported), 'sync': self.container_syncs, 'delete': self.container_deletes, 'put': self.container_puts, 'skip': self.container_skips, 'fail': self.container_failures}) self.reported = time() self.container_syncs = 0 self.container_deletes = 0 self.container_puts = 0 self.container_skips = 0 self.container_failures = 0 def container_sync(self, path): """ Checks the given path for a container database, determines if syncing is turned on for that database and, if so, sends any updates to the other container. :param path: the path to a container db """ broker = None try: broker = ContainerBroker(path) info = broker.get_info() x, nodes = self.container_ring.get_nodes(info['account'], info['container']) for ordinal, node in enumerate(nodes): if node['ip'] in self._myips and node['port'] == self._myport: break else: return if not broker.is_deleted(): sync_to = None sync_key = None sync_point1 = info['x_container_sync_point1'] sync_point2 = info['x_container_sync_point2'] for key, (value, timestamp) in broker.metadata.iteritems(): if key.lower() == 'x-container-sync-to': sync_to = value elif key.lower() == 'x-container-sync-key': sync_key = value if not sync_to or not sync_key: self.container_skips += 1 self.logger.increment('skips') return sync_to = sync_to.rstrip('/') err = validate_sync_to(sync_to, self.allowed_sync_hosts) if err: self.logger.info( _('ERROR %(db_file)s: %(validate_sync_to_err)s'), {'db_file': broker.db_file, 'validate_sync_to_err': err}) self.container_failures += 1 self.logger.increment('failures') return stop_at = time() + self.container_time next_sync_point = None while time() < stop_at and sync_point2 < sync_point1: rows = broker.get_items_since(sync_point2, 1) if not rows: break row = rows[0] if row['ROWID'] > sync_point1: break key = hash_path(info['account'], info['container'], row['name'], raw_digest=True) # This node will only initially sync out one third of the # objects (if 3 replicas, 1/4 if 4, etc.) and will skip # problematic rows as needed in case of faults. # This section will attempt to sync previously skipped # rows in case the previous attempts by any of the nodes # didn't succeed. if not self.container_sync_row(row, sync_to, sync_key, broker, info): if not next_sync_point: next_sync_point = sync_point2 sync_point2 = row['ROWID'] broker.set_x_container_sync_points(None, sync_point2) if next_sync_point: broker.set_x_container_sync_points(None, next_sync_point) while time() < stop_at: rows = broker.get_items_since(sync_point1, 1) if not rows: break row = rows[0] key = hash_path(info['account'], info['container'], row['name'], raw_digest=True) # This node will only initially sync out one third of the # objects (if 3 replicas, 1/4 if 4, etc.). It'll come back # around to the section above and attempt to sync # previously skipped rows in case the other nodes didn't # succeed or in case it failed to do so the first time. if unpack_from('>I', key)[0] % \ len(nodes) == ordinal: self.container_sync_row(row, sync_to, sync_key, broker, info) sync_point1 = row['ROWID'] broker.set_x_container_sync_points(sync_point1, None) self.container_syncs += 1 self.logger.increment('syncs') except (Exception, Timeout) as err: self.container_failures += 1 self.logger.increment('failures') self.logger.exception(_('ERROR Syncing %s'), broker.db_file if broker else path) def container_sync_row(self, row, sync_to, sync_key, broker, info): """ Sends the update the row indicates to the sync_to container. :param row: The updated row in the local database triggering the sync update. :param sync_to: The URL to the remote container. :param sync_key: The X-Container-Sync-Key to use when sending requests to the other container. :param broker: The local container database broker. :param info: The get_info result from the local container database broker. :returns: True on success """ try: start_time = time() if row['deleted']: try: delete_object(sync_to, name=row['name'], headers={'x-timestamp': row['created_at'], 'x-container-sync-key': sync_key}, proxy=self.proxy) except ClientException as err: if err.http_status != HTTP_NOT_FOUND: raise self.container_deletes += 1 self.logger.increment('deletes') self.logger.timing_since('deletes.timing', start_time) else: part, nodes = self.object_ring.get_nodes( info['account'], info['container'], row['name']) shuffle(nodes) exc = None looking_for_timestamp = float(row['created_at']) timestamp = -1 headers = body = None for node in nodes: try: these_headers, this_body = direct_get_object( node, part, info['account'], info['container'], row['name'], resp_chunk_size=65536) this_timestamp = float(these_headers['x-timestamp']) if this_timestamp > timestamp: timestamp = this_timestamp headers = these_headers body = this_body except ClientException as err: # If any errors are not 404, make sure we report the # non-404 one. We don't want to mistakenly assume the # object no longer exists just because one says so and # the others errored for some other reason. if not exc or exc.http_status == HTTP_NOT_FOUND: exc = err except (Exception, Timeout) as err: exc = err if timestamp < looking_for_timestamp: if exc: raise exc raise Exception( _('Unknown exception trying to GET: %(node)r ' '%(account)r %(container)r %(object)r'), {'node': node, 'part': part, 'account': info['account'], 'container': info['container'], 'object': row['name']}) for key in ('date', 'last-modified'): if key in headers: del headers[key] if 'etag' in headers: headers['etag'] = headers['etag'].strip('"') headers['x-timestamp'] = row['created_at'] headers['x-container-sync-key'] = sync_key put_object(sync_to, name=row['name'], headers=headers, contents=FileLikeIter(body), proxy=self.proxy) self.container_puts += 1 self.logger.increment('puts') self.logger.timing_since('puts.timing', start_time) except ClientException as err: if err.http_status == HTTP_UNAUTHORIZED: self.logger.info( _('Unauth %(sync_from)r => %(sync_to)r'), {'sync_from': '%s/%s' % (quote(info['account']), quote(info['container'])), 'sync_to': sync_to}) elif err.http_status == HTTP_NOT_FOUND: self.logger.info( _('Not found %(sync_from)r => %(sync_to)r \ - object %(obj_name)r'), {'sync_from': '%s/%s' % (quote(info['account']), quote(info['container'])), 'sync_to': sync_to, 'obj_name': row['name']}) else: self.logger.exception( _('ERROR Syncing %(db_file)s %(row)s'), {'db_file': broker.db_file, 'row': row}) self.container_failures += 1 self.logger.increment('failures') return False except (Exception, Timeout) as err: self.logger.exception( _('ERROR Syncing %(db_file)s %(row)s'), {'db_file': broker.db_file, 'row': row}) self.container_failures += 1 self.logger.increment('failures') return False return True
apache-2.0
scieloorg/scielo-manager
scielomanager/editorialmanager/notifications.py
3
3509
# coding: utf-8 import logging from django.core.exceptions import ObjectDoesNotExist from scielomanager.tools import get_users_by_group_by_collections, user_receive_emails from scielomanager import notifications logger = logging.getLogger(__name__) class IssueBoardMessage(notifications.Message): EMAIL_DATA_BY_ACTION = { 'issue_add_no_replicated_board': { 'subject_sufix': "Issue Board can't be replicated", 'template_path': 'email/issue_add_no_replicated_board.txt', }, 'issue_add_replicated_board': { 'subject_sufix': "Issue has a new replicated board", 'template_path': 'email/issue_add_replicated_board.txt', }, } def set_recipients(self, issue): editor = getattr(issue.journal, 'editor', None) if editor: if user_receive_emails(editor): self.recipients = [editor.email, ] else: logger.info("[IssueBoardMessage.set_recipients] editor (user.pk: %s) does not have a profile or decides to not receive emails." % editor.pk) else: logger.error("[IssueBoardMessage.set_recipients] Can't prepare a message, issue.journal.editor is None or empty. Issue pk == %s" % issue.pk) class BoardMembersMessage(notifications.Message): EMAIL_DATA_BY_ACTION = { 'board_add_member': { 'subject_sufix': "Member of the journal board, was added", 'template_path': 'email/board_add_member.txt', }, 'board_edit_member': { 'subject_sufix': "Member of the journal board, was edited", 'template_path': 'email/board_edit_member.txt', }, 'board_delete_member': { 'subject_sufix': "Member of the journal board, was deleted", 'template_path': 'email/board_delete_member.txt', } } def set_recipients(self): """ emails must be sent as BCC """ self.recipients = [] def set_bcc_recipients(self, member): """ recipients must belong to the same collection as member """ collections_of_board_member = member.board.issue.journal.collections.all() if collections_of_board_member: librarians = get_users_by_group_by_collections('Librarian', collections_of_board_member) else: logger.error("[BoardMembersMessage.set_bcc_recipients] Can't define the collection of member (pk: %s), to filter bcc_recipients" % member.pk) return if librarians: filtered_librarians = [librarian for librarian in librarians if user_receive_emails(librarian)] self.bcc_recipients = map(lambda u: u.email, filtered_librarians) else: logger.error("[BoardMembersMessage.set_bcc_recipients] Can't prepare a message, Can't retrieve a list of Librarian Users.") def issue_board_replica(issue, action): message = IssueBoardMessage(action=action,) message.set_recipients(issue) extra_context = {'issue': issue,} message.render_body(extra_context) return message.send_mail() def board_members_send_email_by_action(member, user, audit_log_msg, action): message = BoardMembersMessage(action=action) message.set_recipients() message.set_bcc_recipients(member) extra_context = { 'user': user, 'member': member, 'issue': member.board.issue, 'message': audit_log_msg, } message.render_body(extra_context) return message.send_mail()
bsd-2-clause
online-behaviour/machine-learning
getTweetText.py
1
1768
#!/usr/bin/python3 -W all """ getTweetText.py: extract tweet text from json file usage: getTweetText.py < file 20170418 erikt(at)xs4all.nl """ import csv import json import re import sys # command name for error messages COMMAND = sys.argv[0] patternNewline = re.compile("\n") # open csv output with sys.stdout as csvfile: outFile = csv.writer(csvfile,delimiter=",",quotechar='"') # repeat for each input line for line in sys.stdin: # convert the line to a json dictionary jsonLine = json.loads(line) # test for presence of required fields if not "id_str" in jsonLine: sys.exit(COMMAND+" missing id_str field") if not "text" in jsonLine: sys.exit(COMMAND+" missing text field") if not "user" in jsonLine: sys.exit(COMMAND+" missing user field") if not "screen_name" in jsonLine["user"]: sys.exit(COMMAND+" missing screen_name field") if not "created_at" in jsonLine["user"]: sys.exit(COMMAND+" missing created_at field") if not "in_reply_to_status_id_str" in jsonLine: sys.exit(COMMAND+" missing in_reply_to_status_id_str field") # print the text in csv format thisId = jsonLine["id_str"] replyId = jsonLine["in_reply_to_status_id_str"] if replyId == None and "retweeted_status" in jsonLine and \ "in_reply_to_status_id_str" in jsonLine["retweeted_status"]: replyId = jsonLine["retweeted_status"]["in_reply_to_status_id_str"] screenName = jsonLine["user"]["screen_name"] date = jsonLine["created_at"] text = jsonLine["text"] text = patternNewline.sub(" ",text) outFile.writerow([thisId,replyId,date,screenName,text]) csvfile.close()
apache-2.0
jlegendary/scikit-learn
sklearn/decomposition/tests/test_dict_learning.py
47
8095
import numpy as np from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_less from sklearn.utils.testing import assert_raises from sklearn.utils.testing import ignore_warnings from sklearn.decomposition import DictionaryLearning from sklearn.decomposition import MiniBatchDictionaryLearning from sklearn.decomposition import SparseCoder from sklearn.decomposition import dict_learning_online from sklearn.decomposition import sparse_encode rng_global = np.random.RandomState(0) n_samples, n_features = 10, 8 X = rng_global.randn(n_samples, n_features) def test_dict_learning_shapes(): n_components = 5 dico = DictionaryLearning(n_components, random_state=0).fit(X) assert_true(dico.components_.shape == (n_components, n_features)) def test_dict_learning_overcomplete(): n_components = 12 dico = DictionaryLearning(n_components, random_state=0).fit(X) assert_true(dico.components_.shape == (n_components, n_features)) def test_dict_learning_reconstruction(): n_components = 12 dico = DictionaryLearning(n_components, transform_algorithm='omp', transform_alpha=0.001, random_state=0) code = dico.fit(X).transform(X) assert_array_almost_equal(np.dot(code, dico.components_), X) dico.set_params(transform_algorithm='lasso_lars') code = dico.transform(X) assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2) # used to test lars here too, but there's no guarantee the number of # nonzero atoms is right. def test_dict_learning_reconstruction_parallel(): # regression test that parallel reconstruction works with n_jobs=-1 n_components = 12 dico = DictionaryLearning(n_components, transform_algorithm='omp', transform_alpha=0.001, random_state=0, n_jobs=-1) code = dico.fit(X).transform(X) assert_array_almost_equal(np.dot(code, dico.components_), X) dico.set_params(transform_algorithm='lasso_lars') code = dico.transform(X) assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2) def test_dict_learning_nonzero_coefs(): n_components = 4 dico = DictionaryLearning(n_components, transform_algorithm='lars', transform_n_nonzero_coefs=3, random_state=0) code = dico.fit(X).transform(X[1]) assert_true(len(np.flatnonzero(code)) == 3) dico.set_params(transform_algorithm='omp') code = dico.transform(X[1]) assert_equal(len(np.flatnonzero(code)), 3) def test_dict_learning_unknown_fit_algorithm(): n_components = 5 dico = DictionaryLearning(n_components, fit_algorithm='<unknown>') assert_raises(ValueError, dico.fit, X) def test_dict_learning_split(): n_components = 5 dico = DictionaryLearning(n_components, transform_algorithm='threshold', random_state=0) code = dico.fit(X).transform(X) dico.split_sign = True split_code = dico.transform(X) assert_array_equal(split_code[:, :n_components] - split_code[:, n_components:], code) def test_dict_learning_online_shapes(): rng = np.random.RandomState(0) n_components = 8 code, dictionary = dict_learning_online(X, n_components=n_components, alpha=1, random_state=rng) assert_equal(code.shape, (n_samples, n_components)) assert_equal(dictionary.shape, (n_components, n_features)) assert_equal(np.dot(code, dictionary).shape, X.shape) def test_dict_learning_online_verbosity(): n_components = 5 # test verbosity from sklearn.externals.six.moves import cStringIO as StringIO import sys old_stdout = sys.stdout try: sys.stdout = StringIO() dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=1, random_state=0) dico.fit(X) dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=2, random_state=0) dico.fit(X) dict_learning_online(X, n_components=n_components, alpha=1, verbose=1, random_state=0) dict_learning_online(X, n_components=n_components, alpha=1, verbose=2, random_state=0) finally: sys.stdout = old_stdout assert_true(dico.components_.shape == (n_components, n_features)) def test_dict_learning_online_estimator_shapes(): n_components = 5 dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0) dico.fit(X) assert_true(dico.components_.shape == (n_components, n_features)) def test_dict_learning_online_overcomplete(): n_components = 12 dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0).fit(X) assert_true(dico.components_.shape == (n_components, n_features)) def test_dict_learning_online_initialization(): n_components = 12 rng = np.random.RandomState(0) V = rng.randn(n_components, n_features) dico = MiniBatchDictionaryLearning(n_components, n_iter=0, dict_init=V, random_state=0).fit(X) assert_array_equal(dico.components_, V) def test_dict_learning_online_partial_fit(): n_components = 12 rng = np.random.RandomState(0) V = rng.randn(n_components, n_features) # random init V /= np.sum(V ** 2, axis=1)[:, np.newaxis] dict1 = MiniBatchDictionaryLearning(n_components, n_iter=10 * len(X), batch_size=1, alpha=1, shuffle=False, dict_init=V, random_state=0).fit(X) dict2 = MiniBatchDictionaryLearning(n_components, alpha=1, n_iter=1, dict_init=V, random_state=0) for i in range(10): for sample in X: dict2.partial_fit(sample) assert_true(not np.all(sparse_encode(X, dict1.components_, alpha=1) == 0)) assert_array_almost_equal(dict1.components_, dict2.components_, decimal=2) def test_sparse_encode_shapes(): n_components = 12 rng = np.random.RandomState(0) V = rng.randn(n_components, n_features) # random init V /= np.sum(V ** 2, axis=1)[:, np.newaxis] for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'): code = sparse_encode(X, V, algorithm=algo) assert_equal(code.shape, (n_samples, n_components)) def test_sparse_encode_error(): n_components = 12 rng = np.random.RandomState(0) V = rng.randn(n_components, n_features) # random init V /= np.sum(V ** 2, axis=1)[:, np.newaxis] code = sparse_encode(X, V, alpha=0.001) assert_true(not np.all(code == 0)) assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1) def test_sparse_encode_error_default_sparsity(): rng = np.random.RandomState(0) X = rng.randn(100, 64) D = rng.randn(2, 64) code = ignore_warnings(sparse_encode)(X, D, algorithm='omp', n_nonzero_coefs=None) assert_equal(code.shape, (100, 2)) def test_unknown_method(): n_components = 12 rng = np.random.RandomState(0) V = rng.randn(n_components, n_features) # random init assert_raises(ValueError, sparse_encode, X, V, algorithm="<unknown>") def test_sparse_coder_estimator(): n_components = 12 rng = np.random.RandomState(0) V = rng.randn(n_components, n_features) # random init V /= np.sum(V ** 2, axis=1)[:, np.newaxis] code = SparseCoder(dictionary=V, transform_algorithm='lasso_lars', transform_alpha=0.001).transform(X) assert_true(not np.all(code == 0)) assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
bsd-3-clause
kiritoe/pokeapi
data/v2/build.py
7
51228
# To build out the data you'll need to jump into the Django shell # # $ python manage.py shell # # and run the build script with # # $ execfile('data/v2/build.py') # # Each time the build script is run it will iterate over each table in the database, # wipe it and rewrite each row using the data found in data/v2/csv. # If you don't need all of the data just go into data/v2/build.py and # comment out everything but what you need to build the tables you're looking for. # This might be useful because some of the csv files are massive # (pokemon_moves expecially) and it can take about 30 minutes to build everything. import csv import os from django.db import migrations, connection from pokemon_v2.models import * data_location = 'data/v2/csv/' db_cursor = connection.cursor() db_vendor = connection.vendor def loadData(fileName): return csv.reader(open(data_location + fileName, 'rb'), delimiter=',') def clearTable(model): table_name = model._meta.db_table model.objects.all().delete() print 'building ' + table_name # Reset DB auto increments to start at 1 if db_vendor == 'sqlite': db_cursor.execute("DELETE FROM sqlite_sequence WHERE name = " + "'" + table_name + "'" ) else: db_cursor.execute("SELECT setval(pg_get_serial_sequence(" + "'" + table_name + "'" + ",'id'), 1, false);") ############## # LANGUAGE # ############## clearTable(Language) data = loadData('languages.csv') for index, info in enumerate(data): if index > 0: language = Language ( id = int(info[0]), iso639 = info[1], iso3166 = info[2], name = info[3], official = bool(info[4]), order = info[5], ) language.save() clearTable(LanguageName) data = loadData('language_names.csv') for index, info in enumerate(data): if index > 0: languageName = LanguageName ( language = Language.objects.get(pk = int(info[0])), local_language_id = int(info[1]), name = info[2] ) languageName.save() ############ # REGION # ############ clearTable(Region) data = loadData('regions.csv') for index, info in enumerate(data): if index > 0: model = Region ( id = int(info[0]), name = info[1] ) model.save() clearTable(RegionName) data = loadData('region_names.csv') for index, info in enumerate(data): if index > 0: model = RegionName ( region = Region.objects.get(pk = int(info[0])), language = Language.objects.get(pk = int(info[1])), name = info[2] ) model.save() ################ # GENERATION # ################ clearTable(Generation) data = loadData('generations.csv') for index, info in enumerate(data): if index > 0: model = Generation ( id = int(info[0]), region = Region.objects.get(pk = int(info[1])), name = info[2] ) model.save() clearTable(GenerationName) data = loadData('generation_names.csv') for index, info in enumerate(data): if index > 0: model = GenerationName ( generation = Generation.objects.get(pk = int(info[0])), language = Language.objects.get(pk = int(info[1])), name = info[2] ) model.save() ############# # VERSION # ############# clearTable(VersionGroup) data = loadData('version_groups.csv') for index, info in enumerate(data): if index > 0: versionGroup = VersionGroup ( id = int(info[0]), name = info[1], generation = Generation.objects.get(pk = int(info[2])), order = int(info[3]) ) versionGroup.save() clearTable(VersionGroupRegion) data = loadData('version_group_regions.csv') for index, info in enumerate(data): if index > 0: versionGroupRegion = VersionGroupRegion ( version_group = VersionGroup.objects.get(pk = int(info[0])), region = Region.objects.get(pk = int(info[1])), ) versionGroupRegion.save() clearTable(Version) data = loadData('versions.csv') for index, info in enumerate(data): if index > 0: version = Version ( id = int(info[0]), version_group = VersionGroup.objects.get(pk = int(info[1])), name = info[2] ) version.save() clearTable(VersionName) data = loadData('version_names.csv') for index, info in enumerate(data): if index > 0: versionName = VersionName ( version = Version.objects.get(pk = int(info[0])), language = Language.objects.get(pk = int(info[1])), name = info[2] ) versionName.save() ################## # DAMAGE CLASS # ################## clearTable(MoveDamageClass) data = loadData('move_damage_classes.csv') for index, info in enumerate(data): if index > 0: model = MoveDamageClass ( id = int(info[0]), name = info[1] ) model.save() clearTable(MoveDamageClassDescription) data = loadData('move_damage_class_prose.csv') for index, info in enumerate(data): if index > 0: model = MoveDamageClassDescription ( move_damage_class = MoveDamageClass.objects.get(pk = int(info[0])), language = Language.objects.get(pk = int(info[1])), description = info[2] ) model.save() ########### # STATS # ########### clearTable(Stat) data = loadData('stats.csv') for index, info in enumerate(data): if index > 0: stat = Stat ( id = int(info[0]), move_damage_class = MoveDamageClass.objects.get(pk = int(info[1])) if info[1] != '' else None, name = info[2], is_battle_only = bool(info[3]), game_index = int(info[4]) if info[4] else 0, ) stat.save() clearTable(StatName) data = loadData('stat_names.csv') for index, info in enumerate(data): if index > 0: statName = StatName ( stat = Stat.objects.get(pk = int(info[0])), language = Language.objects.get(pk = int(info[1])), name = info[2] ) statName.save() clearTable(PokeathlonStat) data = loadData('pokeathlon_stats.csv') for index, info in enumerate(data): if index > 0: stat = PokeathlonStat ( id = int(info[0]), name = info[1], ) stat.save() clearTable(PokeathlonStatName) data = loadData('pokeathlon_stat_names.csv') for index, info in enumerate(data): if index > 0: statName = PokeathlonStatName ( pokeathlon_stat = PokeathlonStat.objects.get(pk = int(info[0])), language = Language.objects.get(pk = int(info[1])), name = info[2] ) statName.save() ############### # ABILITIES # ############### clearTable(Ability) data = loadData('abilities.csv') for index, info in enumerate(data): if index > 0: ability = Ability ( id = int(info[0]), name = info[1], generation = Generation.objects.get(pk = int(info[2])), is_main_series = bool(info[3]) ) ability.save() clearTable(AbilityName) data = loadData('ability_names.csv') for index, info in enumerate(data): if index > 0: abilityName = AbilityName ( ability = Ability.objects.get(pk = int(info[0])), language = Language.objects.get(pk = int(info[1])), name = info[2] ) abilityName.save() clearTable(AbilityDescription) data = loadData('ability_prose.csv') for index, info in enumerate(data): if index > 0: abilityDesc = AbilityDescription ( ability = Ability.objects.get(pk = int(info[0])), language = Language.objects.get(pk = int(info[1])), short_effect = info[2], effect = info[3] ) abilityDesc.save() clearTable(AbilityFlavorText) data = loadData('ability_flavor_text.csv') for index, info in enumerate(data): if index > 0: abilityFlavorText = AbilityFlavorText ( ability = Ability.objects.get(pk = int(info[0])), version_group = VersionGroup.objects.get(pk = int(info[1])), language = Language.objects.get(pk = int(info[2])), flavor_text = info[3] ) abilityFlavorText.save() #################### # CHARACTERISTIC # #################### clearTable(Characteristic) data = loadData('characteristics.csv') for index, info in enumerate(data): if index > 0: model = Characteristic ( id = int(info[0]), stat = Stat.objects.get(pk = int(info[1])), gene_mod_5 = int(info[2]) ) model.save() clearTable(CharacteristicDescription) data = loadData('characteristic_text.csv') for index, info in enumerate(data): if index > 0: model = CharacteristicDescription ( characteristic = Characteristic.objects.get(pk = int(info[0])), language = Language.objects.get(pk = int(info[1])), description = info[2] ) model.save() ############### # EGG GROUP # ############### clearTable(EggGroup) data = loadData('egg_groups.csv') for index, info in enumerate(data): if index > 0: model = EggGroup ( id = int(info[0]), name = info[1] ) model.save() clearTable(EggGroupName) data = loadData('egg_group_prose.csv') for index, info in enumerate(data): if index > 0: model = EggGroupName ( egg_group = EggGroup.objects.get(pk = int(info[0])), language = Language.objects.get(pk = int(info[1])), name = info[2] ) model.save() ################# # GROWTH RATE # ################# clearTable(GrowthRate) data = loadData('growth_rates.csv') for index, info in enumerate(data): if index > 0: model = GrowthRate ( id = int(info[0]), name = info[1], formula = info[2] ) model.save() clearTable(GrowthRateDescription) data = loadData('growth_rate_prose.csv') for index, info in enumerate(data): if index > 0: model = GrowthRateDescription ( growth_rate = GrowthRate.objects.get(pk = int(info[0])), language = Language.objects.get(pk = int(info[1])), description = info[2] ) model.save() clearTable(ItemPocket) data = loadData('item_pockets.csv') for index, info in enumerate(data): if index > 0: model = ItemPocket ( id = int(info[0]), name = info[1] ) model.save() clearTable(ItemPocketName) data = loadData('item_pocket_names.csv') for index, info in enumerate(data): if index > 0: model = ItemPocketName ( item_pocket = ItemPocket.objects.get(pk = int(info[0])), language = Language.objects.get(pk = int(info[1])), name = info[2] ) model.save() clearTable(ItemFlingEffect) data = loadData('item_fling_effects.csv') for index, info in enumerate(data): if index > 0: model = ItemFlingEffect ( id = int(info[0]) ) model.save() clearTable(ItemFlingEffectDescription) data = loadData('item_fling_effect_prose.csv') for index, info in enumerate(data): if index > 0: model = ItemFlingEffectDescription ( item_fling_effect = ItemFlingEffect.objects.get(pk = int(info[0])), language = Language.objects.get(pk = int(info[1])), effect = info[2] ) model.save() clearTable(ItemCategory) data = loadData('item_categories.csv') for index, info in enumerate(data): if index > 0: model = ItemCategory ( id = int(info[0]), item_pocket = ItemPocket.objects.get(pk = int(info[1])), name = info[2] ) model.save() clearTable(ItemCategoryName) data = loadData('item_category_prose.csv') for index, info in enumerate(data): if index > 0: model = ItemCategoryName ( item_category = ItemCategory.objects.get(pk = int(info[0])), language = Language.objects.get(pk = int(info[1])), name = info[2] ) model.save() clearTable(Item) data = loadData('items.csv') for index, info in enumerate(data): if index > 0: model = Item ( id = int(info[0]), name = info[1], item_category = ItemCategory.objects.get(pk = int(info[2])), cost = int(info[3]), fling_power = int(info[4]) if info[4] != '' else None, item_fling_effect = ItemFlingEffect.objects.get(pk = int(info[5])) if info[5] != '' else None ) model.save() clearTable(ItemName) data = loadData('item_names.csv') for index, info in enumerate(data): if index > 0: model = ItemName ( item = Item.objects.get(pk = int(info[0])), language = Language.objects.get(pk = int(info[1])), name = info[2] ) model.save() clearTable(ItemDescription) data = loadData('item_prose.csv') for index, info in enumerate(data): if index > 0: model = ItemDescription ( item = Item.objects.get(pk = int(info[0])), language = Language.objects.get(pk = int(info[1])), short_effect = info[2], effect = info[3] ) model.save() clearTable(ItemGameIndex) data = loadData('item_game_indices.csv') for index, info in enumerate(data): if index > 0: model = ItemGameIndex ( item = Item.objects.get(pk = int(info[0])), generation = Generation.objects.get(pk = int(info[1])), game_index = int(info[2]) ) model.save() clearTable(ItemFlavorText) data = loadData('item_flavor_text.csv') for index, info in enumerate(data): if index > 0: model = ItemFlavorText ( item = Item.objects.get(pk = int(info[0])), version_group = VersionGroup.objects.get(pk = int(info[1])), language = Language.objects.get(pk = int(info[2])), flavor_text = info[3] ) model.save() clearTable(ItemFlag) data = loadData('item_flags.csv') for index, info in enumerate(data): if index > 0: model = ItemFlag ( id = int(info[0]), name = info[1] ) model.save() clearTable(ItemFlagDescription) data = loadData('item_flag_prose.csv') for index, info in enumerate(data): if index > 0: model = ItemFlagDescription ( item_flag = ItemFlag.objects.get(pk = int(info[0])), language = Language.objects.get(pk = int(info[1])), name = info[2], description = info[3] ) model.save() clearTable(ItemFlagMap) data = loadData('item_flag_map.csv') for index, info in enumerate(data): if index > 0: model = ItemFlagMap ( item = Item.objects.get(pk = int(info[0])), item_flag = ItemFlag.objects.get(pk = int(info[1])) ) model.save() clearTable(ItemFlagDescription) data = loadData('item_flag_prose.csv') for index, info in enumerate(data): if index > 0: model = ItemFlagDescription ( item_flag = ItemFlag.objects.get(pk = int(info[0])), language = Language.objects.get(pk = int(info[1])), name = info[2], description = info[3] ) model.save() ########### # TYPES # ########### clearTable(Type) data = loadData('types.csv') for index, info in enumerate(data): if index > 0: type = Type ( id = int(info[0]), name = info[1], generation = Generation.objects.get(pk = int(info[2])), move_damage_class = MoveDamageClass.objects.get(pk = int(info[3])) if info[3] != '' else None ) type.save() clearTable(TypeName) data = loadData('type_names.csv') for index, info in enumerate(data): if index > 0: typeName = TypeName ( type = Type.objects.get(pk = int(info[0])), language = Language.objects.get(pk = int(info[1])), name = info[2] ) typeName.save() clearTable(TypeGameIndex) data = loadData('type_game_indices.csv') for index, info in enumerate(data): if index > 0: typeGameIndex = TypeGameIndex ( type = Type.objects.get(pk = int(info[0])), generation = Generation.objects.get(pk = int(info[1])), game_index = int(info[2]) ) typeGameIndex.save() clearTable(TypeEfficacy) data = loadData('type_efficacy.csv') for index, info in enumerate(data): if index > 0: typeEfficacy = TypeEfficacy ( damage_type_id = int(info[0]), target_type_id = int(info[1]), damage_factor = int(info[2]) ) typeEfficacy.save() ########### # MOVES # ########### clearTable(MoveEffect) data = loadData('move_effects.csv') for index, info in enumerate(data): if index > 0: model = MoveEffect ( id = int(info[0]) ) model.save() clearTable(MoveEffectDescription) data = loadData('move_effect_prose.csv') for index, info in enumerate(data): if index > 0: model = MoveEffectDescription ( move_effect = MoveEffect.objects.get(pk = int(info[1])), language = Language.objects.get(pk = int(info[1])), short_effect = info[2], effect = info[3] ) model.save() clearTable(MoveEffectChange) data = loadData('move_effect_changelog.csv') for index, info in enumerate(data): if index > 0: model = MoveEffectChange ( id = int(info[0]), move_effect = MoveEffect.objects.get(pk = int(info[1])), version_group = VersionGroup.objects.get(pk = int(info[2])) ) model.save() clearTable(MoveEffectChangeDescription) data = loadData('move_effect_changelog_prose.csv') for index, info in enumerate(data): if index > 0: model = MoveEffectChangeDescription ( move_effect_change = MoveEffectChange.objects.get(pk = int(info[0])), language = Language.objects.get(pk = int(info[1])), effect = info[2] ) model.save() clearTable(MoveTarget) data = loadData('move_targets.csv') for index, info in enumerate(data): if index > 0: model = MoveTarget ( id = int(info[0]), name = info[1] ) model.save() clearTable(MoveTargetDescription) data = loadData('move_target_prose.csv') for index, info in enumerate(data): if index > 0: model = MoveTargetDescription ( move_target = MoveTarget.objects.get(pk = int(info[0])), language = Language.objects.get(pk = int(info[1])), name = info[2], description = info[3] ) model.save() clearTable(Move) data = loadData('moves.csv') for index, info in enumerate(data): if index > 0: model = Move ( id = int(info[0]), name = info[1], generation = Generation.objects.get(pk = int(info[2])), type = Type.objects.get(pk = int(info[3])), power = int(info[4]) if info[4] != '' else None, pp = int(info[5]) if info[5] != '' else None, accuracy = int(info[6]) if info[6] != '' else None, priority = int(info[7]) if info[7] != '' else None, move_target = MoveTarget.objects.get(pk = int(info[8])), move_damage_class = MoveDamageClass.objects.get(pk = int(info[9])), move_effect = MoveEffect.objects.get(pk = int(info[10])), move_effect_chance = int(info[11]) if info[11] != '' else None, contest_type_id = int(info[12]) if info[12] != '' else None, contest_effect_id = int(info[13]) if info[13] != '' else None, super_contest_effect_id = int(info[14]) if info[14] != '' else None ) model.save() clearTable(MoveName) data = loadData('move_names.csv') for index, info in enumerate(data): if index > 0: model = MoveName ( move = Move.objects.get(pk = int(info[0])), language = Language.objects.get(pk = int(info[1])), name = info[2] ) model.save() clearTable(MoveFlavorText) data = loadData('move_flavor_text.csv') for index, info in enumerate(data): if index > 0: model = MoveFlavorText ( move = Move.objects.get(pk = int(info[0])), version_group = VersionGroup.objects.get(pk = int(info[1])), language = Language.objects.get(pk = int(info[2])), flavor_text = info[3] ) model.save() clearTable(MoveChange) data = loadData('move_changelog.csv') for index, info in enumerate(data): if index > 0: model = MoveChange ( move = Move.objects.get(pk = int(info[0])), version_group = VersionGroup.objects.get(pk = int(info[1])), type = Type.objects.get(pk = int(info[2])) if info[2] != '' else None, power = int(info[3]) if info[3] != '' else None, pp = int(info[4]) if info[4] != '' else None, accuracy = int(info[5]) if info[5] != '' else None, move_effect = MoveEffect.objects.get(pk = int(info[6])) if info[6] != '' else None, move_effect_chance = int(info[7]) if info[7] != '' else None ) model.save() clearTable(MoveBattleStyle) data = loadData('move_battle_styles.csv') for index, info in enumerate(data): if index > 0: model = MoveBattleStyle ( id = int(info[0]), name = info[1] ) model.save() clearTable(MoveBattleStyleName) data = loadData('move_battle_style_prose.csv') for index, info in enumerate(data): if index > 0: model = MoveBattleStyleName ( move_battle_style = MoveBattleStyle.objects.get(pk = int(info[0])), language = Language.objects.get(pk = int(info[1])), name = info[2] ) model.save() clearTable(MoveFlag) data = loadData('move_flags.csv') for index, info in enumerate(data): if index > 0: model = MoveFlag ( id = int(info[0]), name = info[1] ) model.save() clearTable(MoveFlagMap) data = loadData('move_flag_map.csv') for index, info in enumerate(data): if index > 0: model = MoveFlagMap ( move = Move.objects.get(pk = int(info[0])), move_flag = MoveFlag.objects.get(pk = int(info[1])), ) model.save() clearTable(MoveFlagDescription) data = loadData('move_flag_prose.csv') for index, info in enumerate(data): if index > 0: model = MoveFlagDescription ( move_flag = MoveFlag.objects.get(pk = int(info[0])), language = Language.objects.get(pk = int(info[1])), name = info[2], description = info[3] ) model.save() clearTable(MoveMetaAilment) data = loadData('move_meta_ailments.csv') for index, info in enumerate(data): if index > 0: model = MoveMetaAilment ( id = int(info[0]), name = info[1] ) model.save() clearTable(MoveMetaAilmentName) data = loadData('move_meta_ailment_names.csv') for index, info in enumerate(data): if index > 0: model = MoveMetaAilmentName ( move_meta_ailment = MoveMetaAilment.objects.get(pk = int(info[0])), language = Language.objects.get(pk = int(info[1])), name = info[2] ) model.save() clearTable(MoveMetaCategory) data = loadData('move_meta_categories.csv') for index, info in enumerate(data): if index > 0: model = MoveMetaCategory ( id = int(info[0]), name = info[1] ) model.save() clearTable(MoveMetaCategoryDescription) data = loadData('move_meta_category_prose.csv') for index, info in enumerate(data): if index > 0: model = MoveMetaCategoryDescription ( move_meta_category = MoveMetaCategory.objects.get(pk = int(info[0])), language = Language.objects.get(pk = int(info[1])), description = info[2] ) model.save() clearTable(MoveMeta) data = loadData('move_meta.csv') for index, info in enumerate(data): if index > 0: model = MoveMeta ( move = Move.objects.get(pk = int(info[0])), move_meta_category = MoveMetaCategory.objects.get(pk = int(info[1])), move_meta_ailment = MoveMetaAilment.objects.get(pk = int(info[2])), min_hits = int(info[3]) if info[3] != '' else None, max_hits = int(info[4]) if info[4] != '' else None, min_turns = int(info[5]) if info[5] != '' else None, max_turns = int(info[6]) if info[6] != '' else None, drain = int(info[7]) if info[7] != '' else None, healing = int(info[8]) if info[8] != '' else None, crit_rate = int(info[9]) if info[9] != '' else None, ailment_chance = int(info[10]) if info[10] != '' else None, flinch_chance = int(info[11]) if info[11] != '' else None, stat_chance = int(info[12]) if info[12] != '' else None ) model.save() clearTable(MoveMetaStatChange) data = loadData('move_meta_stat_changes.csv') for index, info in enumerate(data): if index > 0: model = MoveMetaStatChange ( move = Move.objects.get(pk = int(info[0])), stat = Stat.objects.get(pk = int(info[1])), change = int(info[2]) ) model.save() ############# # CONTEST # ############# clearTable(ContestType) data = loadData('contest_types.csv') for index, info in enumerate(data): if index > 0: model = ContestType ( id = int(info[0]), name = info[1] ) model.save() clearTable(ContestTypeName) data = loadData('contest_type_names.csv') for index, info in enumerate(data): if index > 0: model = ContestTypeName ( contest_type = ContestType.objects.get(pk = int(info[0])), language = Language.objects.get(pk = int(info[1])), name = info[2], flavor = info[3], color = info[4] ) model.save() clearTable(ContestEffect) data = loadData('contest_effects.csv') for index, info in enumerate(data): if index > 0: model = ContestEffect ( id = int(info[0]), appeal = int(info[1]), jam = int(info[2]) ) model.save() clearTable(ContestEffectDescription) data = loadData('contest_effect_prose.csv') for index, info in enumerate(data): if index > 0: model = ContestEffectDescription ( contest_effect = ContestEffect.objects.get(pk = int(info[0])), language = Language.objects.get(pk = int(info[1])), flavor_text = info[2], effect = info[3] ) model.save() clearTable(ContestCombo) data = loadData('contest_combos.csv') for index, info in enumerate(data): if index > 0: model = ContestCombo ( first_move = Move.objects.get(pk = int(info[0])), second_move = Move.objects.get(pk = int(info[1])) ) model.save() clearTable(SuperContestEffect) data = loadData('super_contest_effects.csv') for index, info in enumerate(data): if index > 0: model = SuperContestEffect ( id = int(info[0]), appeal = int(info[1]) ) model.save() clearTable(SuperContestEffectDescription) data = loadData('super_contest_effect_prose.csv') for index, info in enumerate(data): if index > 0: model = SuperContestEffectDescription ( super_contest_effect = SuperContestEffect.objects.get(pk = int(info[0])), language = Language.objects.get(pk = int(info[1])), flavor_text = info[2] ) model.save() clearTable(SuperContestCombo) data = loadData('super_contest_combos.csv') for index, info in enumerate(data): if index > 0: model = SuperContestCombo ( first_move = Move.objects.get(pk = int(info[0])), second_move = Move.objects.get(pk = int(info[1])) ) model.save() ############# # BERRIES # ############# clearTable(BerryFirmness) data = loadData('berry_firmness.csv') for index, info in enumerate(data): if index > 0: model = BerryFirmness ( id = int(info[0]), name = info[1] ) model.save() clearTable(BerryFirmnessName) data = loadData('berry_firmness_names.csv') for index, info in enumerate(data): if index > 0: model = BerryFirmnessName ( berry_firmness = BerryFirmness.objects.get(pk = int(info[0])), language = Language.objects.get(pk = int(info[1])), name = info[2] ) model.save() clearTable(Berry) data = loadData('berries.csv') for index, info in enumerate(data): if index > 0: model = Berry ( id = int(info[0]), item = Item.objects.get(pk = int(info[1])), berry_firmness = BerryFirmness.objects.get(pk = int(info[2])), natural_gift_power = int(info[3]), nature = None, size = int(info[5]), max_harvest = int(info[6]), growth_time = int(info[7]), soil_dryness = int(info[8]), smoothness = int(info[9]) ) model.save() clearTable(BerryFlavor) data = loadData('berry_flavors.csv') for index, info in enumerate(data): if index > 0: model = BerryFlavor ( berry = Berry.objects.get(pk = int(info[0])), contest_type = ContestType.objects.get(pk = int(info[1])), flavor = int(info[2]) ) model.save() ############ # NATURE # ############ clearTable(Nature) data = loadData('natures.csv') for index, info in enumerate(data): if index > 0: nature = Nature ( id = int(info[0]), name = info[1], decreased_stat_id = Stat.objects.get(pk = int(info[2])), increased_stat_id = Stat.objects.get(pk = int(info[3])), hates_flavor_id = BerryFlavor.objects.get(pk = int(info[4])), likes_flavor_id = BerryFlavor.objects.get(pk = int(info[5])), game_index = info[6] ) nature.save() #Berry/Nature associations data = loadData('berries.csv') for index, info in enumerate(data): if index > 0: berry = Berry.objects.get(pk = int(info[0])) berry.nature = Nature.objects.get(pk = int(info[4])) berry.save() clearTable(NatureName) data = loadData('nature_names.csv') for index, info in enumerate(data): if index > 0: natureName = NatureName ( nature = Nature.objects.get(pk = int(info[0])), language = Language.objects.get(pk = int(info[1])), name = info[2] ) natureName.save() clearTable(NaturePokeathlonStat) data = loadData('nature_pokeathlon_stats.csv') for index, info in enumerate(data): if index > 0: naturePokeathlonStat = NaturePokeathlonStat ( nature = Nature.objects.get(pk = int(info[0])), pokeathlon_stat = PokeathlonStat.objects.get(pk = int(info[1])), max_change = info[2] ) naturePokeathlonStat.save() clearTable(NatureBattleStylePreference) data = loadData('nature_battle_style_preferences.csv') for index, info in enumerate(data): if index > 0: model = NatureBattleStylePreference ( nature = Nature.objects.get(pk = int(info[0])), move_battle_style_id = int(info[1]), low_hp_preference = info[2], high_hp_preference = info[3] ) model.save() ############ # GENDER # ############ clearTable(Gender) data = loadData('genders.csv') for index, info in enumerate(data): if index > 0: model = Gender ( id = int(info[0]), name = info[1] ) model.save() ################ # EXPERIENCE # ################ clearTable(Experience) data = loadData('experience.csv') for index, info in enumerate(data): if index > 0: model = Experience ( growth_rate = GrowthRate.objects.get(pk = int(info[0])), level = int(info[1]), experience = int(info[2]) ) model.save() ############## # MACHINES # ############## clearTable(Machine) data = loadData('machines.csv') for index, info in enumerate(data): if index > 0: model = Machine ( machine_number = int(info[0]), version_group = VersionGroup.objects.get(pk = int(info[1])), item = Item.objects.get(pk = int(info[2])), move = Move.objects.get(pk = int(info[3])), ) model.save() ############### # EVOLUTION # ############### clearTable(EvolutionChain) data = loadData('evolution_chains.csv') for index, info in enumerate(data): if index > 0: model = EvolutionChain ( id = int(info[0]), baby_evolution_item = Item.objects.get(pk = int(info[1])) if info[1] != '' else None, ) model.save() clearTable(EvolutionTrigger) data = loadData('evolution_triggers.csv') for index, info in enumerate(data): if index > 0: model = EvolutionTrigger ( id = int(info[0]), name = info[1] ) model.save() clearTable(EvolutionTriggerName) data = loadData('evolution_trigger_prose.csv') for index, info in enumerate(data): if index > 0: model = EvolutionTriggerName ( evolution_trigger = EvolutionTrigger.objects.get(pk = int(info[0])), language = Language.objects.get(pk = int(info[1])), name = info[2] ) model.save() ############# # POKEDEX # ############# clearTable(Pokedex) data = loadData('pokedexes.csv') for index, info in enumerate(data): if index > 0: model = Pokedex ( id = int(info[0]), region = Region.objects.get(pk = int(info[1])) if info[1] != '' else None, name = info[2], is_main_series = bool(info[3]) ) model.save() clearTable(PokedexDescription) data = loadData('pokedex_prose.csv') for index, info in enumerate(data): if index > 0: model = PokedexDescription ( pokedex = Pokedex.objects.get(pk = int(info[0])), language = Language.objects.get(pk = int(info[1])), name = info[2], description = info[3] ) model.save() clearTable(PokedexVersionGroup) data = loadData('pokedex_version_groups.csv') for index, info in enumerate(data): if index > 0: model = PokedexVersionGroup ( pokedex = Pokedex.objects.get(pk = int(info[0])), version_group = VersionGroup.objects.get(pk = int(info[1])) ) model.save() ############# # POKEMON # ############# clearTable(PokemonColor) data = loadData('pokemon_colors.csv') for index, info in enumerate(data): if index > 0: model = PokemonColor ( id = int(info[0]), name = info[1] ) model.save() clearTable(PokemonColorName) data = loadData('pokemon_color_names.csv') for index, info in enumerate(data): if index > 0: model = PokemonColorName ( pokemon_color = PokemonColor.objects.get(pk = int(info[0])), language = Language.objects.get(pk = int(info[1])), name = info[2] ) model.save() clearTable(PokemonShape) data = loadData('pokemon_shapes.csv') for index, info in enumerate(data): if index > 0: model = PokemonShape ( id = int(info[0]), name = info[1] ) model.save() clearTable(PokemonShapeName) data = loadData('pokemon_shape_prose.csv') for index, info in enumerate(data): if index > 0: model = PokemonShapeName ( pokemon_shape = PokemonShape.objects.get(pk = int(info[0])), language = Language.objects.get(pk = int(info[1])), name = info[2], awesome_name = info[3] ) model.save() clearTable(PokemonHabitat) data = loadData('pokemon_habitats.csv') for index, info in enumerate(data): if index > 0: model = PokemonHabitat ( id = int(info[0]), name = info[1] ) model.save() clearTable(PokemonSpecies) data = loadData('pokemon_species.csv') for index, info in enumerate(data): if index > 0: model = PokemonSpecies ( id = int(info[0]), name = info[1], generation = Generation.objects.get(pk = int(info[2])), evolves_from_species = None, evolution_chain = EvolutionChain.objects.get(pk = int(info[4])), pokemon_color = PokemonColor.objects.get(pk = int(info[5])), pokemon_shape = PokemonShape.objects.get(pk = int(info[6])), pokemon_habitat = PokemonHabitat.objects.get(pk = int(info[7])) if info[7] != '' else None, gender_rate = int(info[8]), capture_rate = int(info[9]), base_happiness = int(info[10]), is_baby = bool(info[11]), hatch_counter = int(info[12]), has_gender_differences = bool(info[13]), growth_rate = GrowthRate.objects.get(pk = int(info[14])), forms_switchable = bool(info[15]), order = int(info[16]) ) model.save() data = loadData('pokemon_species.csv') for index, info in enumerate(data): if index > 0: evolves = PokemonSpecies.objects.get(pk = int(info[3])) if info[3] != '' else None if evolves: species = PokemonSpecies.objects.get(pk = int(info[0])) species.evolves_from_species = evolves species.save() clearTable(PokemonSpeciesName) data = loadData('pokemon_species_names.csv') for index, info in enumerate(data): if index > 0: model = PokemonSpeciesName ( pokemon_species = PokemonSpecies.objects.get(pk = int(info[0])), language = Language.objects.get(pk = int(info[1])), name = info[2], genus = info[3] ) model.save() clearTable(PokemonSpeciesDescription) data = loadData('pokemon_species_prose.csv') for index, info in enumerate(data): if index > 0: model = PokemonSpeciesDescription ( pokemon_species = PokemonSpecies.objects.get(pk = int(info[0])), language = Language.objects.get(pk = int(info[1])), description = info[2] ) model.save() clearTable(PokemonSpeciesFlavorText) data = loadData('pokemon_species_flavor_text.csv') for index, info in enumerate(data): if index > 0: model = PokemonSpeciesFlavorText ( pokemon_species = PokemonSpecies.objects.get(pk = int(info[0])), version = Version.objects.get(pk = int(info[1])), language = Language.objects.get(pk = int(info[2])), flavor_text = info[3] ) model.save() clearTable(Pokemon) data = loadData('pokemon.csv') for index, info in enumerate(data): if index > 0: model = Pokemon ( id = int(info[0]), name = info[1], pokemon_species = PokemonSpecies.objects.get(pk = int(info[2])), height = int(info[3]), weight = int(info[4]), base_experience = int(info[5]), order = int(info[6]), is_default = bool(info[7]) ) model.save() clearTable(PokemonAbility) data = loadData('pokemon_abilities.csv') for index, info in enumerate(data): if index > 0: model = PokemonAbility ( pokemon = Pokemon.objects.get(pk = int(info[0])), ability = Ability.objects.get(pk = int(info[1])), is_hidden = bool(info[2]), slot = int(info[3]) ) model.save() clearTable(PokemonDexNumber) data = loadData('pokemon_dex_numbers.csv') for index, info in enumerate(data): if index > 0: model = PokemonDexNumber ( pokemon_species = PokemonSpecies.objects.get(pk = int(info[0])), pokedex = Pokedex.objects.get(pk = int(info[1])), pokedex_number = int(info[2]) ) model.save() clearTable(PokemonEggGroup) data = loadData('pokemon_egg_groups.csv') for index, info in enumerate(data): if index > 0: model = PokemonEggGroup ( pokemon_species = PokemonSpecies.objects.get(pk = int(info[0])), egg_group = EggGroup.objects.get(pk = int(info[1])) ) model.save() clearTable(PokemonEvolution) data = loadData('pokemon_evolution.csv') for index, info in enumerate(data): if index > 0: model = PokemonEvolution ( id = int(info[0]), evolved_species = PokemonSpecies.objects.get(pk = int(info[1])), evolution_trigger = EvolutionTrigger.objects.get(pk = int(info[2])), evolution_item = Item.objects.get(pk = int(info[3])) if info[3] != '' else None, min_level = int(info[4]) if info[4] != '' else None, gender = Gender.objects.get(pk = int(info[5])) if info[5] != '' else None, location_id = int(info[6]) if info[6] != '' else None, held_item = Item.objects.get(pk = int(info[7])) if info[7] != '' else None, time_of_day = info[8], known_move = Move.objects.get(pk = int(info[9])) if info[9] != '' else None, known_move_type = Type.objects.get(pk = int(info[10])) if info[10] != '' else None, min_happiness = int(info[11]) if info[11] != '' else None, min_beauty = int(info[12]) if info[12] != '' else None, min_affection = int(info[13]) if info[13] != '' else None, relative_physical_stats = int(info[14]) if info[14] != '' else None, party_species = PokemonSpecies.objects.get(pk = int(info[15])) if info[15] != '' else None, party_type = Type.objects.get(pk = int(info[16])) if info[16] != '' else None, trade_species = PokemonSpecies.objects.get(pk = int(info[17])) if info[17] != '' else None, needs_overworld_rain = bool(info[18]), turn_upside_down = bool(info[19]) ) model.save() clearTable(PokemonForm) data = loadData('pokemon_forms.csv') for index, info in enumerate(data): if index > 0: model = PokemonForm ( id = int(info[0]), name = info[1], form_identifier = info[2], pokemon = Pokemon.objects.get(pk = int(info[3])), introduced_in_version_group = VersionGroup.objects.get(pk = int(info[4])), is_default = bool(info[5]), is_battle_only = bool(info[6]), is_mega = bool(info[7]), form_order = int(info[8]), order = int(info[9]) ) model.save() clearTable(PokemonFormName) data = loadData('pokemon_form_names.csv') for index, info in enumerate(data): if index > 0: model = PokemonFormName ( pokemon_form = PokemonForm.objects.get(pk = int(info[0])), language = Language.objects.get(pk = int(info[1])), name = info[2], pokemon_name = info[3] ) model.save() clearTable(PokemonFormGeneration) data = loadData('pokemon_form_generations.csv') for index, info in enumerate(data): if index > 0: model = PokemonFormGeneration ( pokemon_form = PokemonForm.objects.get(pk = int(info[0])), generation = Generation.objects.get(pk = int(info[1])), game_index = int(info[2]) ) model.save() clearTable(PokemonGameIndex) data = loadData('pokemon_game_indices.csv') for index, info in enumerate(data): if index > 0: model = PokemonGameIndex ( pokemon = Pokemon.objects.get(pk = int(info[0])), version = Version.objects.get(pk = int(info[1])), game_index = int(info[2]) ) model.save() clearTable(PokemonGameIndex) data = loadData('pokemon_game_indices.csv') for index, info in enumerate(data): if index > 0: model = PokemonGameIndex ( pokemon = Pokemon.objects.get(pk = int(info[0])), version = Version.objects.get(pk = int(info[1])), game_index = int(info[2]) ) model.save() clearTable(PokemonHabitatName) data = loadData('pokemon_habitat_names.csv') for index, info in enumerate(data): if index > 0: model = PokemonHabitatName ( pokemon_habitat = PokemonHabitat.objects.get(pk = int(info[0])), language = Language.objects.get(pk = int(info[1])), name = info[2] ) model.save() clearTable(PokemonItem) data = loadData('pokemon_items.csv') for index, info in enumerate(data): if index > 0: model = PokemonItem ( pokemon = Pokemon.objects.get(pk = int(info[0])), version = Version.objects.get(pk = int(info[1])), item = Item.objects.get(pk = int(info[2])), rarity = int(info[3]) ) model.save() clearTable(PokemonMoveMethod) data = loadData('pokemon_move_methods.csv') for index, info in enumerate(data): if index > 0: model = PokemonMoveMethod ( id = int(info[0]), name = info[1] ) model.save() clearTable(PokemonMoveMethodName) data = loadData('pokemon_move_method_prose.csv') for index, info in enumerate(data): if index > 0: model = PokemonMoveMethodName ( pokemon_move_method = PokemonMoveMethod.objects.get(pk = int(info[0])), language = Language.objects.get(pk = int(info[1])), name = info[2], description = info[3] ) model.save() clearTable(PokemonMove) data = loadData('pokemon_moves.csv') for index, info in enumerate(data): if index > 0: model = PokemonMove ( pokemon = Pokemon.objects.get(pk = int(info[0])), version_group = VersionGroup.objects.get(pk = int(info[1])), move = Move.objects.get(pk = int(info[2])), pokemon_move_method = PokemonMoveMethod.objects.get(pk = int(info[3])), level = int(info[4]), order = int(info[5]) if info[5] != '' else None, ) model.save() clearTable(PokemonStat) data = loadData('pokemon_stats.csv') for index, info in enumerate(data): if index > 0: model = PokemonStat ( pokemon = Pokemon.objects.get(pk = int(info[0])), stat = Stat.objects.get(pk = int(info[1])), base_stat = int(info[2]), effort = int(info[3]) ) model.save() clearTable(PokemonType) data = loadData('pokemon_types.csv') for index, info in enumerate(data): if index > 0: model = PokemonType ( pokemon = Pokemon.objects.get(pk = int(info[0])), type = Type.objects.get(pk = int(info[1])), slot = int(info[2]) ) model.save() ############## # ENCOUNTER # ############## clearTable(Location) data = loadData('locations.csv') for index, info in enumerate(data): if index > 0: model = Location ( id = int(info[0]), region = Region.objects.get(pk = int(info[1])) if info[1] != '' else None, name = info[2] ) model.save() clearTable(LocationName) data = loadData('location_names.csv') for index, info in enumerate(data): if index > 0: model = LocationName ( location = Location.objects.get(pk = int(info[0])), language = Language.objects.get(pk = int(info[1])), name = info[2] ) model.save() clearTable(LocationGameIndex) data = loadData('location_game_indices.csv') for index, info in enumerate(data): if index > 0: model = LocationGameIndex ( location = Location.objects.get(pk = int(info[0])), generation = Generation.objects.get(pk = int(info[1])), game_index = int(info[2]) ) model.save() clearTable(LocationArea) data = loadData('location_areas.csv') for index, info in enumerate(data): if index > 0: model = LocationArea ( id = int(info[0]), location = Location.objects.get(pk = int(info[1])), game_index = int(info[2]), name = info[3] ) model.save() clearTable(LocationAreaName) data = loadData('location_area_prose.csv') for index, info in enumerate(data): if index > 0: model = LocationAreaName ( location_area = LocationArea.objects.get(pk = int(info[0])), language = Language.objects.get(pk = int(info[1])), name = info[2] ) model.save() clearTable(LocationAreaEncounterRate) data = loadData('location_area_encounter_rates.csv') for index, info in enumerate(data): if index > 0: model = LocationAreaEncounterRate ( location_area = LocationArea.objects.get(pk = int(info[0])), encounter_method = None, version = Version.objects.get(pk = int(info[2])), rate = int(info[3]) ) model.save() ############### # ENCOUNTER # ############### clearTable(EncounterMethod) data = loadData('encounter_methods.csv') for index, info in enumerate(data): if index > 0: model = EncounterMethod ( id = int(info[0]), name = info[1], order = int(info[2]) ) model.save() clearTable(EncounterMethodName) data = loadData('encounter_method_prose.csv') for index, info in enumerate(data): if index > 0: model = EncounterMethodName ( encounter_method = EncounterMethod.objects.get(pk = int(info[0])), language = Language.objects.get(pk = int(info[1])), name = info[2] ) model.save() clearTable(EncounterSlot) data = loadData('encounter_slots.csv') for index, info in enumerate(data): if index > 0: model = EncounterSlot ( id = int(info[0]), version_group = VersionGroup.objects.get(pk = int(info[1])), encounter_method = EncounterMethod.objects.get(pk = int(info[2])), slot = int(info[3]) if info[3] != '' else None, rarity = int(info[4]) ) model.save() clearTable(EncounterCondition) data = loadData('encounter_conditions.csv') for index, info in enumerate(data): if index > 0: model = EncounterCondition ( id = int(info[0]), name = info[1] ) model.save() clearTable(EncounterConditionName) data = loadData('encounter_condition_prose.csv') for index, info in enumerate(data): if index > 0: model = EncounterConditionName ( encounter_condition = EncounterCondition.objects.get(pk = int(info[0])), language = Language.objects.get(pk = int(info[1])), name = info[2] ) model.save() clearTable(Encounter) data = loadData('encounters.csv') for index, info in enumerate(data): if index > 0: model = Encounter ( id = int(info[0]), version = Version.objects.get(pk = int(info[1])), location_area = LocationArea.objects.get(pk = int(info[2])), encounter_slot = EncounterSlot.objects.get(pk = int(info[3])), pokemon = Pokemon.objects.get(pk = int(info[4])), min_level = int(info[5]), max_level = int(info[6]) ) model.save() clearTable(EncounterConditionValue) data = loadData('encounter_condition_values.csv') for index, info in enumerate(data): if index > 0: model = EncounterConditionValue ( id = int(info[0]), encounter_condition = EncounterCondition.objects.get(pk = int(info[1])), name = info[2], is_default = bool(info[3]) ) model.save() clearTable(EncounterConditionValueName) data = loadData('encounter_condition_value_prose.csv') for index, info in enumerate(data): if index > 0: model = EncounterConditionValueName ( encounter_condition_value = EncounterConditionValue.objects.get(pk = int(info[0])), language = Language.objects.get(pk = int(info[1])), name = info[2], ) model.save() clearTable(EncounterConditionValueMap) data = loadData('encounter_condition_value_map.csv') for index, info in enumerate(data): if index > 0: model = EncounterConditionValueMap ( encounter = Encounter.objects.get(pk = int(info[0])), encounter_condition_value = EncounterConditionValue.objects.get(pk = int(info[1])) ) model.save() #Location/Encounter associations data = loadData('location_area_encounter_rates.csv') for index, info in enumerate(data): if index > 0: laer = LocationAreaEncounterRate.objects.get(pk = int(info[0])) laer.encounter_method = EncounterMethod.objects.get(pk = int(info[1])) laer.save() ############## # PAL PARK # ############## clearTable(PalParkArea) data = loadData('pal_park_areas.csv') for index, info in enumerate(data): if index > 0: model = PalParkArea ( id = int(info[0]), name = info[1] ) model.save() clearTable(PalParkAreaName) data = loadData('pal_park_area_names.csv') for index, info in enumerate(data): if index > 0: model = PalParkAreaName ( pal_park_area = PalParkArea.objects.get(pk = int(info[0])), language = Language.objects.get(pk = int(info[1])), name = info[2] ) model.save() clearTable(PalPark) data = loadData('pal_park.csv') for index, info in enumerate(data): if index > 0: model = PalPark ( pokemon_species = PokemonSpecies.objects.get(pk = int(info[0])), pal_park_area = PalParkArea.objects.get(pk = int(info[1])), rate = int(info[2]) ) model.save()
bsd-3-clause
valexandersaulys/prudential_insurance_kaggle
venv/lib/python2.7/site-packages/scipy/interpolate/tests/test_ndgriddata.py
63
5962
from __future__ import division, print_function, absolute_import import numpy as np from numpy.testing import assert_equal, assert_array_equal, assert_allclose, \ run_module_suite, assert_raises from scipy.interpolate import griddata class TestGriddata(object): def test_fill_value(self): x = [(0,0), (0,1), (1,0)] y = [1, 2, 3] yi = griddata(x, y, [(1,1), (1,2), (0,0)], fill_value=-1) assert_array_equal(yi, [-1., -1, 1]) yi = griddata(x, y, [(1,1), (1,2), (0,0)]) assert_array_equal(yi, [np.nan, np.nan, 1]) def test_alternative_call(self): x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)], dtype=np.double) y = (np.arange(x.shape[0], dtype=np.double)[:,None] + np.array([0,1])[None,:]) for method in ('nearest', 'linear', 'cubic'): for rescale in (True, False): msg = repr((method, rescale)) yi = griddata((x[:,0], x[:,1]), y, (x[:,0], x[:,1]), method=method, rescale=rescale) assert_allclose(y, yi, atol=1e-14, err_msg=msg) def test_multivalue_2d(self): x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)], dtype=np.double) y = (np.arange(x.shape[0], dtype=np.double)[:,None] + np.array([0,1])[None,:]) for method in ('nearest', 'linear', 'cubic'): for rescale in (True, False): msg = repr((method, rescale)) yi = griddata(x, y, x, method=method, rescale=rescale) assert_allclose(y, yi, atol=1e-14, err_msg=msg) def test_multipoint_2d(self): x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)], dtype=np.double) y = np.arange(x.shape[0], dtype=np.double) xi = x[:,None,:] + np.array([0,0,0])[None,:,None] for method in ('nearest', 'linear', 'cubic'): for rescale in (True, False): msg = repr((method, rescale)) yi = griddata(x, y, xi, method=method, rescale=rescale) assert_equal(yi.shape, (5, 3), err_msg=msg) assert_allclose(yi, np.tile(y[:,None], (1, 3)), atol=1e-14, err_msg=msg) def test_complex_2d(self): x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)], dtype=np.double) y = np.arange(x.shape[0], dtype=np.double) y = y - 2j*y[::-1] xi = x[:,None,:] + np.array([0,0,0])[None,:,None] for method in ('nearest', 'linear', 'cubic'): for rescale in (True, False): msg = repr((method, rescale)) yi = griddata(x, y, xi, method=method, rescale=rescale) assert_equal(yi.shape, (5, 3), err_msg=msg) assert_allclose(yi, np.tile(y[:,None], (1, 3)), atol=1e-14, err_msg=msg) def test_1d(self): x = np.array([1, 2.5, 3, 4.5, 5, 6]) y = np.array([1, 2, 0, 3.9, 2, 1]) for method in ('nearest', 'linear', 'cubic'): assert_allclose(griddata(x, y, x, method=method), y, err_msg=method, atol=1e-14) assert_allclose(griddata(x.reshape(6, 1), y, x, method=method), y, err_msg=method, atol=1e-14) assert_allclose(griddata((x,), y, (x,), method=method), y, err_msg=method, atol=1e-14) def test_1d_unsorted(self): x = np.array([2.5, 1, 4.5, 5, 6, 3]) y = np.array([1, 2, 0, 3.9, 2, 1]) for method in ('nearest', 'linear', 'cubic'): assert_allclose(griddata(x, y, x, method=method), y, err_msg=method, atol=1e-10) assert_allclose(griddata(x.reshape(6, 1), y, x, method=method), y, err_msg=method, atol=1e-10) assert_allclose(griddata((x,), y, (x,), method=method), y, err_msg=method, atol=1e-10) def test_square_rescale_manual(self): points = np.array([(0,0), (0,100), (10,100), (10,0), (1, 5)], dtype=np.double) points_rescaled = np.array([(0,0), (0,1), (1,1), (1,0), (0.1, 0.05)], dtype=np.double) values = np.array([1., 2., -3., 5., 9.], dtype=np.double) xx, yy = np.broadcast_arrays(np.linspace(0, 10, 14)[:,None], np.linspace(0, 100, 14)[None,:]) xx = xx.ravel() yy = yy.ravel() xi = np.array([xx, yy]).T.copy() for method in ('nearest', 'linear', 'cubic'): msg = method zi = griddata(points_rescaled, values, xi/np.array([10, 100.]), method=method) zi_rescaled = griddata(points, values, xi, method=method, rescale=True) assert_allclose(zi, zi_rescaled, err_msg=msg, atol=1e-12) def test_xi_1d(self): # Check that 1-D xi is interpreted as a coordinate x = np.array([(0,0), (-0.5,-0.5), (-0.5,0.5), (0.5, 0.5), (0.25, 0.3)], dtype=np.double) y = np.arange(x.shape[0], dtype=np.double) y = y - 2j*y[::-1] xi = np.array([0.5, 0.5]) for method in ('nearest', 'linear', 'cubic'): p1 = griddata(x, y, xi, method=method) p2 = griddata(x, y, xi[None,:], method=method) assert_allclose(p1, p2, err_msg=method) xi1 = np.array([0.5]) xi3 = np.array([0.5, 0.5, 0.5]) assert_raises(ValueError, griddata, x, y, xi1, method=method) assert_raises(ValueError, griddata, x, y, xi3, method=method) if __name__ == "__main__": run_module_suite()
gpl-2.0
pyload/pyload
src/pyload/plugins/downloaders/SpeedyshareCom.py
2
1461
# -*- coding: utf-8 -*- # # Test links: # http://speedy.sh/ep2qY/Zapp-Brannigan.jpg import re from ..base.simple_downloader import SimpleDownloader class SpeedyshareCom(SimpleDownloader): __name__ = "SpeedyshareCom" __type__ = "downloader" __version__ = "0.11" __status__ = "testing" __pattern__ = r"https?://(?:www\.)?(speedyshare\.com|speedy\.sh)/\w+" __config__ = [ ("enabled", "bool", "Activated", True), ("use_premium", "bool", "Use premium account if available", True), ("fallback", "bool", "Fallback to free download if premium fails", True), ("chk_filesize", "bool", "Check file size", True), ("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10), ] __description__ = """Speedyshare.com downloader plugin""" __license__ = "GPLv3" __authors__ = [("zapp-brannigan", "[email protected]")] NAME_PATTERN = r"class=downloadfilename>(?P<N>.*)</span></td>" SIZE_PATTERN = r"class=sizetagtext>(?P<S>.*) (?P<U>[kKmM]?[iI]?[bB]?)</div>" OFFLINE_PATTERN = r"class=downloadfilenamenotfound>.*</span>" LINK_FREE_PATTERN = r"<a href=\'(.*)\'><img src=/gf/slowdownload\.png alt=\'Slow Download\' border=0" def setup(self): self.multi_dl = False self.chunk_limit = 1 def handle_free(self, pyfile): m = re.search(self.LINK_FREE_PATTERN, self.data) if m is None: self.link = m.group(1)
agpl-3.0
knowmetools/km-api
km_api/know_me/migrations/0006_subscription.py
1
2495
# Generated by Django 2.0.6 on 2018-10-20 23:41 from django.conf import settings from django.db import migrations, models import django.db.models.deletion import permission_utils.model_mixins class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ("know_me", "0005_kmuser_is_legacy_user"), ] operations = [ migrations.CreateModel( name="Subscription", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ( "is_active", models.BooleanField( help_text="A boolean indicating if the subscription is active.", verbose_name="is active", ), ), ( "time_created", models.DateTimeField( auto_now_add=True, help_text="The time that the subscription instance was created.", verbose_name="creation time", ), ), ( "time_updated", models.DateTimeField( auto_now=True, help_text="The time of the subscription's last update.", verbose_name="last update time", ), ), ( "user", models.OneToOneField( help_text="The user who has a Know Me subscription", on_delete=django.db.models.deletion.CASCADE, related_name="know_me_subscription", to=settings.AUTH_USER_MODEL, verbose_name="user", ), ), ], options={ "verbose_name": "Know Me subscription", "verbose_name_plural": "Know Me subscriptions", "ordering": ("time_created",), }, bases=( permission_utils.model_mixins.IsAuthenticatedMixin, models.Model, ), ) ]
apache-2.0
yawnosnorous/python-for-android
python-modules/twisted/twisted/web/test/test_distrib.py
52
12485
# Copyright (c) 2008-2010 Twisted Matrix Laboratories. # See LICENSE for details. """ Tests for L{twisted.web.distrib}. """ from os.path import abspath from xml.dom.minidom import parseString try: import pwd except ImportError: pwd = None from zope.interface.verify import verifyObject from twisted.python import log, filepath from twisted.internet import reactor, defer from twisted.trial import unittest from twisted.spread import pb from twisted.spread.banana import SIZE_LIMIT from twisted.web import http, distrib, client, resource, static, server from twisted.web.test.test_web import DummyRequest from twisted.web.test._util import _render class MySite(server.Site): def stopFactory(self): if hasattr(self, "logFile"): if self.logFile != log.logfile: self.logFile.close() del self.logFile class PBServerFactory(pb.PBServerFactory): """ A PB server factory which keeps track of the most recent protocol it created. @ivar proto: L{None} or the L{Broker} instance most recently returned from C{buildProtocol}. """ proto = None def buildProtocol(self, addr): self.proto = pb.PBServerFactory.buildProtocol(self, addr) return self.proto class DistribTest(unittest.TestCase): port1 = None port2 = None sub = None f1 = None def tearDown(self): """ Clean up all the event sources left behind by either directly by test methods or indirectly via some distrib API. """ dl = [defer.Deferred(), defer.Deferred()] if self.f1 is not None and self.f1.proto is not None: self.f1.proto.notifyOnDisconnect(lambda: dl[0].callback(None)) else: dl[0].callback(None) if self.sub is not None and self.sub.publisher is not None: self.sub.publisher.broker.notifyOnDisconnect( lambda: dl[1].callback(None)) self.sub.publisher.broker.transport.loseConnection() else: dl[1].callback(None) http._logDateTimeStop() if self.port1 is not None: dl.append(self.port1.stopListening()) if self.port2 is not None: dl.append(self.port2.stopListening()) return defer.gatherResults(dl) def testDistrib(self): # site1 is the publisher r1 = resource.Resource() r1.putChild("there", static.Data("root", "text/plain")) site1 = server.Site(r1) self.f1 = PBServerFactory(distrib.ResourcePublisher(site1)) self.port1 = reactor.listenTCP(0, self.f1) self.sub = distrib.ResourceSubscription("127.0.0.1", self.port1.getHost().port) r2 = resource.Resource() r2.putChild("here", self.sub) f2 = MySite(r2) self.port2 = reactor.listenTCP(0, f2) d = client.getPage("http://127.0.0.1:%d/here/there" % \ self.port2.getHost().port) d.addCallback(self.failUnlessEqual, 'root') return d def _requestTest(self, child, **kwargs): """ Set up a resource on a distrib site using L{ResourcePublisher} and then retrieve it from a L{ResourceSubscription} via an HTTP client. @param child: The resource to publish using distrib. @param **kwargs: Extra keyword arguments to pass to L{getPage} when requesting the resource. @return: A L{Deferred} which fires with the result of the request. """ distribRoot = resource.Resource() distribRoot.putChild("child", child) distribSite = server.Site(distribRoot) self.f1 = distribFactory = PBServerFactory( distrib.ResourcePublisher(distribSite)) distribPort = reactor.listenTCP( 0, distribFactory, interface="127.0.0.1") self.addCleanup(distribPort.stopListening) addr = distribPort.getHost() self.sub = mainRoot = distrib.ResourceSubscription( addr.host, addr.port) mainSite = server.Site(mainRoot) mainPort = reactor.listenTCP(0, mainSite, interface="127.0.0.1") self.addCleanup(mainPort.stopListening) mainAddr = mainPort.getHost() return client.getPage("http://%s:%s/child" % ( mainAddr.host, mainAddr.port), **kwargs) def test_requestHeaders(self): """ The request headers are available on the request object passed to a distributed resource's C{render} method. """ requestHeaders = {} class ReportRequestHeaders(resource.Resource): def render(self, request): requestHeaders.update(dict( request.requestHeaders.getAllRawHeaders())) return "" request = self._requestTest( ReportRequestHeaders(), headers={'foo': 'bar'}) def cbRequested(result): self.assertEquals(requestHeaders['Foo'], ['bar']) request.addCallback(cbRequested) return request def test_largeWrite(self): """ If a string longer than the Banana size limit is passed to the L{distrib.Request} passed to the remote resource, it is broken into smaller strings to be transported over the PB connection. """ class LargeWrite(resource.Resource): def render(self, request): request.write('x' * SIZE_LIMIT + 'y') request.finish() return server.NOT_DONE_YET request = self._requestTest(LargeWrite()) request.addCallback(self.assertEquals, 'x' * SIZE_LIMIT + 'y') return request def test_largeReturn(self): """ Like L{test_largeWrite}, but for the case where C{render} returns a long string rather than explicitly passing it to L{Request.write}. """ class LargeReturn(resource.Resource): def render(self, request): return 'x' * SIZE_LIMIT + 'y' request = self._requestTest(LargeReturn()) request.addCallback(self.assertEquals, 'x' * SIZE_LIMIT + 'y') return request def test_connectionLost(self): """ If there is an error issuing the request to the remote publisher, an error response is returned. """ # Using pb.Root as a publisher will cause request calls to fail with an # error every time. Just what we want to test. self.f1 = serverFactory = PBServerFactory(pb.Root()) self.port1 = serverPort = reactor.listenTCP(0, serverFactory) self.sub = subscription = distrib.ResourceSubscription( "127.0.0.1", serverPort.getHost().port) request = DummyRequest(['']) d = _render(subscription, request) def cbRendered(ignored): self.assertEqual(request.responseCode, 500) # This is the error we caused the request to fail with. It should # have been logged. self.assertEqual(len(self.flushLoggedErrors(pb.NoSuchMethod)), 1) d.addCallback(cbRendered) return d class _PasswordDatabase: def __init__(self, users): self._users = users def getpwall(self): return iter(self._users) def getpwnam(self, username): for user in self._users: if user[0] == username: return user raise KeyError() class UserDirectoryTests(unittest.TestCase): """ Tests for L{UserDirectory}, a resource for listing all user resources available on a system. """ def setUp(self): self.alice = ('alice', 'x', 123, 456, 'Alice,,,', self.mktemp(), '/bin/sh') self.bob = ('bob', 'x', 234, 567, 'Bob,,,', self.mktemp(), '/bin/sh') self.database = _PasswordDatabase([self.alice, self.bob]) self.directory = distrib.UserDirectory(self.database) def test_interface(self): """ L{UserDirectory} instances provide L{resource.IResource}. """ self.assertTrue(verifyObject(resource.IResource, self.directory)) def _404Test(self, name): """ Verify that requesting the C{name} child of C{self.directory} results in a 404 response. """ request = DummyRequest([name]) result = self.directory.getChild(name, request) d = _render(result, request) def cbRendered(ignored): self.assertEqual(request.responseCode, 404) d.addCallback(cbRendered) return d def test_getInvalidUser(self): """ L{UserDirectory.getChild} returns a resource which renders a 404 response when passed a string which does not correspond to any known user. """ return self._404Test('carol') def test_getUserWithoutResource(self): """ L{UserDirectory.getChild} returns a resource which renders a 404 response when passed a string which corresponds to a known user who has neither a user directory nor a user distrib socket. """ return self._404Test('alice') def test_getPublicHTMLChild(self): """ L{UserDirectory.getChild} returns a L{static.File} instance when passed the name of a user with a home directory containing a I{public_html} directory. """ home = filepath.FilePath(self.bob[-2]) public_html = home.child('public_html') public_html.makedirs() request = DummyRequest(['bob']) result = self.directory.getChild('bob', request) self.assertIsInstance(result, static.File) self.assertEqual(result.path, public_html.path) def test_getDistribChild(self): """ L{UserDirectory.getChild} returns a L{ResourceSubscription} instance when passed the name of a user suffixed with C{".twistd"} who has a home directory containing a I{.twistd-web-pb} socket. """ home = filepath.FilePath(self.bob[-2]) home.makedirs() web = home.child('.twistd-web-pb') request = DummyRequest(['bob']) result = self.directory.getChild('bob.twistd', request) self.assertIsInstance(result, distrib.ResourceSubscription) self.assertEqual(result.host, 'unix') self.assertEqual(abspath(result.port), web.path) def test_invalidMethod(self): """ L{UserDirectory.render} raises L{UnsupportedMethod} in response to a non-I{GET} request. """ request = DummyRequest(['']) request.method = 'POST' self.assertRaises( server.UnsupportedMethod, self.directory.render, request) def test_render(self): """ L{UserDirectory} renders a list of links to available user content in response to a I{GET} request. """ public_html = filepath.FilePath(self.alice[-2]).child('public_html') public_html.makedirs() web = filepath.FilePath(self.bob[-2]) web.makedirs() # This really only works if it's a unix socket, but the implementation # doesn't currently check for that. It probably should someday, and # then skip users with non-sockets. web.child('.twistd-web-pb').setContent("") request = DummyRequest(['']) result = _render(self.directory, request) def cbRendered(ignored): document = parseString(''.join(request.written)) # Each user should have an li with a link to their page. [alice, bob] = document.getElementsByTagName('li') self.assertEqual(alice.firstChild.tagName, 'a') self.assertEqual(alice.firstChild.getAttribute('href'), 'alice/') self.assertEqual(alice.firstChild.firstChild.data, 'Alice (file)') self.assertEqual(bob.firstChild.tagName, 'a') self.assertEqual(bob.firstChild.getAttribute('href'), 'bob.twistd/') self.assertEqual(bob.firstChild.firstChild.data, 'Bob (twistd)') result.addCallback(cbRendered) return result def test_passwordDatabase(self): """ If L{UserDirectory} is instantiated with no arguments, it uses the L{pwd} module as its password database. """ directory = distrib.UserDirectory() self.assertIdentical(directory._pwd, pwd) if pwd is None: test_passwordDatabase.skip = "pwd module required"
apache-2.0
ZHAW-INES/rioxo-uClinux-dist
user/python/python-2.4.4/Lib/hotshot/stats.py
252
2582
"""Statistics analyzer for HotShot.""" import profile import pstats import hotshot.log from hotshot.log import ENTER, EXIT def load(filename): return StatsLoader(filename).load() class StatsLoader: def __init__(self, logfn): self._logfn = logfn self._code = {} self._stack = [] self.pop_frame = self._stack.pop def load(self): # The timer selected by the profiler should never be used, so make # sure it doesn't work: p = Profile() p.get_time = _brokentimer log = hotshot.log.LogReader(self._logfn) taccum = 0 for event in log: what, (filename, lineno, funcname), tdelta = event if tdelta > 0: taccum += tdelta # We multiply taccum to convert from the microseconds we # have to the seconds that the profile/pstats module work # with; this allows the numbers to have some basis in # reality (ignoring calibration issues for now). if what == ENTER: frame = self.new_frame(filename, lineno, funcname) p.trace_dispatch_call(frame, taccum * .000001) taccum = 0 elif what == EXIT: frame = self.pop_frame() p.trace_dispatch_return(frame, taccum * .000001) taccum = 0 # no further work for line events assert not self._stack return pstats.Stats(p) def new_frame(self, *args): # args must be filename, firstlineno, funcname # our code objects are cached since we don't need to create # new ones every time try: code = self._code[args] except KeyError: code = FakeCode(*args) self._code[args] = code # frame objects are create fresh, since the back pointer will # vary considerably if self._stack: back = self._stack[-1] else: back = None frame = FakeFrame(code, back) self._stack.append(frame) return frame class Profile(profile.Profile): def simulate_cmd_complete(self): pass class FakeCode: def __init__(self, filename, firstlineno, funcname): self.co_filename = filename self.co_firstlineno = firstlineno self.co_name = self.__name__ = funcname class FakeFrame: def __init__(self, code, back): self.f_back = back self.f_code = code def _brokentimer(): raise RuntimeError, "this timer should not be called"
gpl-2.0
mrksu/flac2m
src/paths.py
1
4790
#!/usr/bin/env python3 from typing import List, Tuple import os from common import error_exit MusicDir = Tuple[str, List[str]] # A tuple of dir name and all of its files MusicMap = List[MusicDir] # List of dirs containing music def find_music(roots: List[str]) -> MusicMap: music_dirs = [] for root in roots: # Use absolute paths otherwise first letter can be lost somewhere root_abs = os.path.abspath(root) for directory in os.walk(root_abs): dir_name, cont_dirs, cont_files = directory for f in cont_files: if f.endswith(".flac"): # print("Music found: {} in {}".format(f, dir_name)) music_dirs.append((dir_name, cont_files)) break return music_dirs # This function is similar to os.path.commonpath except for the 1-element case. # I discovered os.path.common_path only after writing this, now too proud # to replace it. It was a good excercise. def greatest_common_dir(directories: List[str]) -> str: """ Compares directory paths in list and returns the part that all of them have in common; i.e. ["/usr/bin", "/usr/share"] -> "/usr" If there is only one directory, returns all except the innermost element; i.e. ["/usr/share/man"] -> "/usr/share" """ # The list of directories should never be empty assert len(directories) != 0, "No music directories to analyze" # If there is only one directory in the list, return the innermost # directory immediately containing music files if len(directories) == 1: split_dir = directories[0].split("/") all_except_containing_dir = split_dir[:-1] return "/".join(all_except_containing_dir) split_dirs = [d.split("/") for d in directories] common_elements = [] common = True index = 0 while common: first_dir = split_dirs[0] path_element = first_dir[index] for d in split_dirs: if d[index] != path_element: common = False break if common: common_elements.append(path_element) index += 1 common_path = "/".join(common_elements) return common_path def get_flac_files(all_files: List[str]) -> List[str]: flacs = [f for f in all_files if f.endswith("flac")] return flacs def get_files_to_copy(all_files: List[str], c_template: List[str]) -> List[str]: # Not a list comprehension here because this can potentially be faster # considering there should only be a few covers / copy file templates # and many actual files to_copy = [] for c in c_template: for f in all_files: if f == c: to_copy.append(f) return to_copy def subtract_common_path(full_path: str, common_path: str) -> str: assert full_path.startswith(common_path), "No common path to subtract" common_length = len(common_path) subtracted = full_path[common_length+1:] return subtracted SubsPair = Tuple[str, str] # A pair of strings to use in substitution def evaluate_substitution(subs: str) -> SubsPair: split_subs = subs.split("/") if len(split_subs) != 2: error_exit("‘{}’: invalid substitution format. "\ "Expected ‘old/new’.".format(subs)) return (split_subs[0], split_subs[1]) InOutPair = Tuple[str, str] # A pair of input path and output path InOutList = List[InOutPair] # A list of said in/out pairs def create_in_out_paths(music_map: MusicMap, out_root: str, subsf: SubsPair, subsd: SubsPair, copy=False, c_template=None) -> InOutList: all_dirs = [t[0] for t in music_map] common_path = greatest_common_dir(all_dirs) in_out_list = [] for music_dir in music_map: dir_path, files = music_dir if copy: sel_files = get_files_to_copy(files, c_template) else: sel_files = get_flac_files(files) unique_path = subtract_common_path(dir_path, common_path) # TODO: process substitutions in a separate function beforehand if subsd: old, new = subsd unique_path = unique_path.replace(old, new) for f in sel_files: if subsf: old, new = subsf f.replace(old, new) in_path = os.path.join(dir_path, f) out_path = os.path.join(os.path.abspath(out_root), unique_path, f) in_out_list.append((in_path, out_path)) return in_out_list def check_access(path, write=False): acc = os.access if write: return acc(path, os.W_OK) and acc(path, os.X_OK) else: return acc(path, os.R_OK) and acc(path, os.X_OK)
gpl-3.0
julienbou/heroku-buildpack-serpan
vendor/pip-1.3.1/pip/vcs/subversion.py
63
10620
import os import re from pip.backwardcompat import urlparse from pip import InstallationError from pip.index import Link from pip.util import rmtree, display_path, call_subprocess from pip.log import logger from pip.vcs import vcs, VersionControl _svn_xml_url_re = re.compile('url="([^"]+)"') _svn_rev_re = re.compile('committed-rev="(\d+)"') _svn_url_re = re.compile(r'URL: (.+)') _svn_revision_re = re.compile(r'Revision: (.+)') _svn_info_xml_rev_re = re.compile(r'\s*revision="(\d+)"') _svn_info_xml_url_re = re.compile(r'<url>(.*)</url>') class Subversion(VersionControl): name = 'svn' dirname = '.svn' repo_name = 'checkout' schemes = ('svn', 'svn+ssh', 'svn+http', 'svn+https', 'svn+svn') bundle_file = 'svn-checkout.txt' guide = ('# This was an svn checkout; to make it a checkout again run:\n' 'svn checkout --force -r %(rev)s %(url)s .\n') def get_info(self, location): """Returns (url, revision), where both are strings""" assert not location.rstrip('/').endswith(self.dirname), 'Bad directory: %s' % location output = call_subprocess( [self.cmd, 'info', location], show_stdout=False, extra_environ={'LANG': 'C'}) match = _svn_url_re.search(output) if not match: logger.warn('Cannot determine URL of svn checkout %s' % display_path(location)) logger.info('Output that cannot be parsed: \n%s' % output) return None, None url = match.group(1).strip() match = _svn_revision_re.search(output) if not match: logger.warn('Cannot determine revision of svn checkout %s' % display_path(location)) logger.info('Output that cannot be parsed: \n%s' % output) return url, None return url, match.group(1) def parse_vcs_bundle_file(self, content): for line in content.splitlines(): if not line.strip() or line.strip().startswith('#'): continue match = re.search(r'^-r\s*([^ ])?', line) if not match: return None, None rev = match.group(1) rest = line[match.end():].strip().split(None, 1)[0] return rest, rev return None, None def export(self, location): """Export the svn repository at the url to the destination location""" url, rev = self.get_url_rev() rev_options = get_rev_options(url, rev) logger.notify('Exporting svn repository %s to %s' % (url, location)) logger.indent += 2 try: if os.path.exists(location): # Subversion doesn't like to check out over an existing directory # --force fixes this, but was only added in svn 1.5 rmtree(location) call_subprocess( [self.cmd, 'export'] + rev_options + [url, location], filter_stdout=self._filter, show_stdout=False) finally: logger.indent -= 2 def switch(self, dest, url, rev_options): call_subprocess( [self.cmd, 'switch'] + rev_options + [url, dest]) def update(self, dest, rev_options): call_subprocess( [self.cmd, 'update'] + rev_options + [dest]) def obtain(self, dest): url, rev = self.get_url_rev() rev_options = get_rev_options(url, rev) if rev: rev_display = ' (to revision %s)' % rev else: rev_display = '' if self.check_destination(dest, url, rev_options, rev_display): logger.notify('Checking out %s%s to %s' % (url, rev_display, display_path(dest))) call_subprocess( [self.cmd, 'checkout', '-q'] + rev_options + [url, dest]) def get_location(self, dist, dependency_links): for url in dependency_links: egg_fragment = Link(url).egg_fragment if not egg_fragment: continue if '-' in egg_fragment: ## FIXME: will this work when a package has - in the name? key = '-'.join(egg_fragment.split('-')[:-1]).lower() else: key = egg_fragment if key == dist.key: return url.split('#', 1)[0] return None def get_revision(self, location): """ Return the maximum revision for all files under a given location """ # Note: taken from setuptools.command.egg_info revision = 0 for base, dirs, files in os.walk(location): if self.dirname not in dirs: dirs[:] = [] continue # no sense walking uncontrolled subdirs dirs.remove(self.dirname) entries_fn = os.path.join(base, self.dirname, 'entries') if not os.path.exists(entries_fn): ## FIXME: should we warn? continue dirurl, localrev = self._get_svn_url_rev(base) if base == location: base_url = dirurl + '/' # save the root url elif not dirurl or not dirurl.startswith(base_url): dirs[:] = [] continue # not part of the same svn tree, skip it revision = max(revision, localrev) return revision def get_url_rev(self): # hotfix the URL scheme after removing svn+ from svn+ssh:// readd it url, rev = super(Subversion, self).get_url_rev() if url.startswith('ssh://'): url = 'svn+' + url return url, rev def get_url(self, location): # In cases where the source is in a subdirectory, not alongside setup.py # we have to look up in the location until we find a real setup.py orig_location = location while not os.path.exists(os.path.join(location, 'setup.py')): last_location = location location = os.path.dirname(location) if location == last_location: # We've traversed up to the root of the filesystem without finding setup.py logger.warn("Could not find setup.py for directory %s (tried all parent directories)" % orig_location) return None return self._get_svn_url_rev(location)[0] def _get_svn_url_rev(self, location): f = open(os.path.join(location, self.dirname, 'entries')) data = f.read() f.close() if data.startswith('8') or data.startswith('9') or data.startswith('10'): data = list(map(str.splitlines, data.split('\n\x0c\n'))) del data[0][0] # get rid of the '8' url = data[0][3] revs = [int(d[9]) for d in data if len(d) > 9 and d[9]] + [0] elif data.startswith('<?xml'): match = _svn_xml_url_re.search(data) if not match: raise ValueError('Badly formatted data: %r' % data) url = match.group(1) # get repository URL revs = [int(m.group(1)) for m in _svn_rev_re.finditer(data)] + [0] else: try: # subversion >= 1.7 xml = call_subprocess([self.cmd, 'info', '--xml', location], show_stdout=False) url = _svn_info_xml_url_re.search(xml).group(1) revs = [int(m.group(1)) for m in _svn_info_xml_rev_re.finditer(xml)] except InstallationError: url, revs = None, [] if revs: rev = max(revs) else: rev = 0 return url, rev def get_tag_revs(self, svn_tag_url): stdout = call_subprocess( [self.cmd, 'ls', '-v', svn_tag_url], show_stdout=False) results = [] for line in stdout.splitlines(): parts = line.split() rev = int(parts[0]) tag = parts[-1].strip('/') results.append((tag, rev)) return results def find_tag_match(self, rev, tag_revs): best_match_rev = None best_tag = None for tag, tag_rev in tag_revs: if (tag_rev > rev and (best_match_rev is None or best_match_rev > tag_rev)): # FIXME: Is best_match > tag_rev really possible? # or is it a sign something is wacky? best_match_rev = tag_rev best_tag = tag return best_tag def get_src_requirement(self, dist, location, find_tags=False): repo = self.get_url(location) if repo is None: return None parts = repo.split('/') ## FIXME: why not project name? egg_project_name = dist.egg_name().split('-', 1)[0] rev = self.get_revision(location) if parts[-2] in ('tags', 'tag'): # It's a tag, perfect! full_egg_name = '%s-%s' % (egg_project_name, parts[-1]) elif parts[-2] in ('branches', 'branch'): # It's a branch :( full_egg_name = '%s-%s-r%s' % (dist.egg_name(), parts[-1], rev) elif parts[-1] == 'trunk': # Trunk :-/ full_egg_name = '%s-dev_r%s' % (dist.egg_name(), rev) if find_tags: tag_url = '/'.join(parts[:-1]) + '/tags' tag_revs = self.get_tag_revs(tag_url) match = self.find_tag_match(rev, tag_revs) if match: logger.notify('trunk checkout %s seems to be equivalent to tag %s' % match) repo = '%s/%s' % (tag_url, match) full_egg_name = '%s-%s' % (egg_project_name, match) else: # Don't know what it is logger.warn('svn URL does not fit normal structure (tags/branches/trunk): %s' % repo) full_egg_name = '%s-dev_r%s' % (egg_project_name, rev) return 'svn+%s@%s#egg=%s' % (repo, rev, full_egg_name) def get_rev_options(url, rev): if rev: rev_options = ['-r', rev] else: rev_options = [] r = urlparse.urlsplit(url) if hasattr(r, 'username'): # >= Python-2.5 username, password = r.username, r.password else: netloc = r[1] if '@' in netloc: auth = netloc.split('@')[0] if ':' in auth: username, password = auth.split(':', 1) else: username, password = auth, None else: username, password = None, None if username: rev_options += ['--username', username] if password: rev_options += ['--password', password] return rev_options vcs.register(Subversion)
mit
opensourcechipspark/platform_external_chromium_org
third_party/tlslite/tlslite/utils/PyCrypto_RSAKey.py
361
1814
"""PyCrypto RSA implementation.""" from cryptomath import * from RSAKey import * from Python_RSAKey import Python_RSAKey if pycryptoLoaded: from Crypto.PublicKey import RSA class PyCrypto_RSAKey(RSAKey): def __init__(self, n=0, e=0, d=0, p=0, q=0, dP=0, dQ=0, qInv=0): if not d: self.rsa = RSA.construct( (n, e) ) else: self.rsa = RSA.construct( (n, e, d, p, q) ) def __getattr__(self, name): return getattr(self.rsa, name) def hasPrivateKey(self): return self.rsa.has_private() def hash(self): return Python_RSAKey(self.n, self.e).hash() def _rawPrivateKeyOp(self, m): s = numberToString(m) byteLength = numBytes(self.n) if len(s)== byteLength: pass elif len(s) == byteLength-1: s = '\0' + s else: raise AssertionError() c = stringToNumber(self.rsa.decrypt((s,))) return c def _rawPublicKeyOp(self, c): s = numberToString(c) byteLength = numBytes(self.n) if len(s)== byteLength: pass elif len(s) == byteLength-1: s = '\0' + s else: raise AssertionError() m = stringToNumber(self.rsa.encrypt(s, None)[0]) return m def writeXMLPublicKey(self, indent=''): return Python_RSAKey(self.n, self.e).write(indent) def generate(bits): key = PyCrypto_RSAKey() def f(numBytes): return bytesToString(getRandomBytes(numBytes)) key.rsa = RSA.generate(bits, f) return key generate = staticmethod(generate)
bsd-3-clause
freedesktop-unofficial-mirror/telepathy__telepathy-idle
tests/twisted/servicetest.py
2
21869
""" Infrastructure code for testing connection managers. """ from twisted.internet import glib2reactor from twisted.internet.protocol import Protocol, Factory, ClientFactory glib2reactor.install() import sys import time import os import pprint import unittest import dbus from dbus.mainloop.glib import DBusGMainLoop DBusGMainLoop(set_as_default=True) from twisted.internet import reactor import constants as cs tp_name_prefix = cs.PREFIX tp_path_prefix = '/' + cs.PREFIX.replace('.', '/') class DictionarySupersetOf (object): """Utility class for expecting "a dictionary with at least these keys".""" def __init__(self, dictionary): self._dictionary = dictionary def __repr__(self): return "DictionarySupersetOf(%s)" % self._dictionary def __eq__(self, other): """would like to just do: return set(other.items()).issuperset(self._dictionary.items()) but it turns out that this doesn't work if you have another dict nested in the values of your dicts""" try: for k,v in self._dictionary.items(): if k not in other or other[k] != v: return False return True except TypeError: # other is not iterable return False class Event(object): def __init__(self, type, **kw): self.__dict__.update(kw) self.type = type (self.subqueue, self.subtype) = type.split ("-", 1) def __str__(self): return '\n'.join([ str(type(self)) ] + format_event(self)) def format_event(event): ret = ['- type %s' % event.type] for key in sorted(dir(event)): if key != 'type' and not key.startswith('_'): ret.append('- %s: %s' % ( key, pprint.pformat(getattr(event, key)))) if key == 'error': ret.append('%s' % getattr(event, key)) return ret class EventPattern: def __init__(self, type, **properties): self.type = type self.predicate = None if 'predicate' in properties: self.predicate = properties['predicate'] del properties['predicate'] self.properties = properties (self.subqueue, self.subtype) = type.split ("-", 1) def __repr__(self): properties = dict(self.properties) if self.predicate is not None: properties['predicate'] = self.predicate return '%s(%r, **%r)' % ( self.__class__.__name__, self.type, properties) def match(self, event): if event.type != self.type: return False for key, value in self.properties.iteritems(): try: if getattr(event, key) != value: return False except AttributeError: return False if self.predicate is None or self.predicate(event): return True return False class TimeoutError(Exception): pass class ForbiddenEventOccurred(Exception): def __init__(self, event): Exception.__init__(self) self.event = event def __str__(self): return '\n' + '\n'.join(format_event(self.event)) class BaseEventQueue: """Abstract event queue base class. Implement the wait() method to have something that works. """ def __init__(self, timeout=None): self.verbose = False self.forbidden_events = set() self.event_queues = {} if timeout is None: self.timeout = 5 else: self.timeout = timeout def log(self, s): if self.verbose: print s def log_queues(self, queues): self.log ("Waiting for event on: %s" % ", ".join(queues)) def log_event(self, event): self.log('got event:') if self.verbose: map(self.log, format_event(event)) def forbid_events(self, patterns): """ Add patterns (an iterable of EventPattern) to the set of forbidden events. If a forbidden event occurs during an expect or expect_many, the test will fail. """ self.forbidden_events.update(set(patterns)) def unforbid_events(self, patterns): """ Remove 'patterns' (an iterable of EventPattern) from the set of forbidden events. These must be the same EventPattern pointers that were passed to forbid_events. """ self.forbidden_events.difference_update(set(patterns)) def unforbid_all(self): """ Remove all patterns from the set of forbidden events. """ self.forbidden_events.clear() def _check_forbidden(self, event): for e in self.forbidden_events: if e.match(event): raise ForbiddenEventOccurred(event) def expect(self, type, **kw): """ Waits for an event matching the supplied pattern to occur, and returns it. For example, to await a D-Bus signal with particular arguments: e = q.expect('dbus-signal', signal='Badgers', args=["foo", 42]) """ pattern = EventPattern(type, **kw) t = time.time() while True: event = self.wait([pattern.subqueue]) self._check_forbidden(event) if pattern.match(event): self.log('handled, took %0.3f ms' % ((time.time() - t) * 1000.0) ) self.log('') return event self.log('not handled') self.log('') def expect_many(self, *patterns): """ Waits for events matching all of the supplied EventPattern instances to return, and returns a list of events in the same order as the patterns they matched. After a pattern is successfully matched, it is not considered for future events; if more than one unsatisfied pattern matches an event, the first "wins". Note that the expected events may occur in any order. If you're expecting a series of events in a particular order, use repeated calls to expect() instead. This method is useful when you're awaiting a number of events which may happen in any order. For instance, in telepathy-gabble, calling a D-Bus method often causes a value to be returned immediately, as well as a query to be sent to the server. Since these events may reach the test in either order, the following is incorrect and will fail if the IQ happens to reach the test first: ret = q.expect('dbus-return', method='Foo') query = q.expect('stream-iq', query_ns=ns.FOO) The following would be correct: ret, query = q.expect_many( EventPattern('dbus-return', method='Foo'), EventPattern('stream-iq', query_ns=ns.FOO), ) """ ret = [None] * len(patterns) t = time.time() while None in ret: try: queues = set() for i, pattern in enumerate(patterns): if ret[i] is None: queues.add(pattern.subqueue) event = self.wait(queues) except TimeoutError: self.log('timeout') self.log('still expecting:') for i, pattern in enumerate(patterns): if ret[i] is None: self.log(' - %r' % pattern) raise self._check_forbidden(event) for i, pattern in enumerate(patterns): if ret[i] is None and pattern.match(event): self.log('handled, took %0.3f ms' % ((time.time() - t) * 1000.0) ) self.log('') ret[i] = event break else: self.log('not handled') self.log('') return ret def demand(self, type, **kw): pattern = EventPattern(type, **kw) event = self.wait([pattern.subqueue]) if pattern.match(event): self.log('handled') self.log('') return event self.log('not handled') raise RuntimeError('expected %r, got %r' % (pattern, event)) def queues_available(self, queues): if queues == None: return self.event_queues.keys() else: available = self.event_queues.keys() return filter(lambda x: x in available, queues) def pop_next(self, queue): events = self.event_queues[queue] e = events.pop(0) if not events: self.event_queues.pop (queue) return e def append(self, event): self.log ("Adding to queue") self.log_event (event) self.event_queues[event.subqueue] = \ self.event_queues.get(event.subqueue, []) + [event] class IteratingEventQueue(BaseEventQueue): """Event queue that works by iterating the Twisted reactor.""" def __init__(self, timeout=None): BaseEventQueue.__init__(self, timeout) def wait(self, queues=None): stop = [False] def later(): stop[0] = True delayed_call = reactor.callLater(self.timeout, later) self.log_queues(queues) qa = self.queues_available(queues) while not qa and (not stop[0]): reactor.iterate(0.01) qa = self.queues_available(queues) if qa: delayed_call.cancel() e = self.pop_next (qa[0]) self.log_event (e) return e else: raise TimeoutError class TestEventQueue(BaseEventQueue): def __init__(self, events): BaseEventQueue.__init__(self) for e in events: self.append (e) def wait(self, queues = None): qa = self.queues_available(queues) if qa: return self.pop_next (qa[0]) else: raise TimeoutError class EventQueueTest(unittest.TestCase): def test_expect(self): queue = TestEventQueue([Event('test-foo'), Event('test-bar')]) assert queue.expect('test-foo').type == 'test-foo' assert queue.expect('test-bar').type == 'test-bar' def test_expect_many(self): queue = TestEventQueue([Event('test-foo'), Event('test-bar')]) bar, foo = queue.expect_many( EventPattern('test-bar'), EventPattern('test-foo')) assert bar.type == 'test-bar' assert foo.type == 'test-foo' def test_expect_many2(self): # Test that events are only matched against patterns that haven't yet # been matched. This tests a regression. queue = TestEventQueue([Event('test-foo', x=1), Event('test-foo', x=2)]) foo1, foo2 = queue.expect_many( EventPattern('test-foo'), EventPattern('test-foo')) assert foo1.type == 'test-foo' and foo1.x == 1 assert foo2.type == 'test-foo' and foo2.x == 2 def test_expect_queueing(self): queue = TestEventQueue([Event('foo-test', x=1), Event('foo-test', x=2)]) queue.append(Event('bar-test', x=1)) queue.append(Event('bar-test', x=2)) queue.append(Event('baz-test', x=1)) queue.append(Event('baz-test', x=2)) for x in xrange(1,2): e = queue.expect ('baz-test') assertEquals (x, e.x) e = queue.expect ('bar-test') assertEquals (x, e.x) e = queue.expect ('foo-test') assertEquals (x, e.x) def test_timeout(self): queue = TestEventQueue([]) self.assertRaises(TimeoutError, queue.expect, 'test-foo') def test_demand(self): queue = TestEventQueue([Event('test-foo'), Event('test-bar')]) foo = queue.demand('test-foo') assert foo.type == 'test-foo' def test_demand_fail(self): queue = TestEventQueue([Event('test-foo'), Event('test-bar')]) self.assertRaises(RuntimeError, queue.demand, 'test-bar') def unwrap(x): """Hack to unwrap D-Bus values, so that they're easier to read when printed.""" if isinstance(x, list): return map(unwrap, x) if isinstance(x, tuple): return tuple(map(unwrap, x)) if isinstance(x, dict): return dict([(unwrap(k), unwrap(v)) for k, v in x.iteritems()]) if isinstance(x, dbus.Boolean): return bool(x) for t in [unicode, str, long, int, float]: if isinstance(x, t): return t(x) return x def call_async(test, proxy, method, *args, **kw): """Call a D-Bus method asynchronously and generate an event for the resulting method return/error.""" def reply_func(*ret): test.append(Event('dbus-return', method=method, value=unwrap(ret))) def error_func(err): test.append(Event('dbus-error', method=method, error=err, name=err.get_dbus_name(), message=str(err))) method_proxy = getattr(proxy, method) kw.update({'reply_handler': reply_func, 'error_handler': error_func}) method_proxy(*args, **kw) def sync_dbus(bus, q, conn): # Dummy D-Bus method call. We can't use DBus.Peer.Ping() because libdbus # replies to that message immediately, rather than handing it up to # dbus-glib and thence Gabble, which means that Ping()ing Gabble doesn't # ensure that it's processed all D-Bus messages prior to our ping. # # This won't do the right thing unless the proxy has a unique name. assert conn.object.bus_name.startswith(':') root_object = bus.get_object(conn.object.bus_name, '/', introspect=False) call_async(q, dbus.Interface(root_object, cs.PREFIX + '.Tests'), 'DummySyncDBus') q.expect('dbus-error', method='DummySyncDBus') class ProxyWrapper: def __init__(self, object, default, others={}): self.object = object self.default_interface = dbus.Interface(object, default) self.Properties = dbus.Interface(object, dbus.PROPERTIES_IFACE) self.TpProperties = \ dbus.Interface(object, tp_name_prefix + '.Properties') self.interfaces = dict([ (name, dbus.Interface(object, iface)) for name, iface in others.iteritems()]) def __getattr__(self, name): if name in self.interfaces: return self.interfaces[name] if name in self.object.__dict__: return getattr(self.object, name) return getattr(self.default_interface, name) class ConnWrapper(ProxyWrapper): def inspect_contact_sync(self, handle): return self.inspect_contacts_sync([handle])[0] def inspect_contacts_sync(self, handles): h2asv = self.Contacts.GetContactAttributes(handles, [], True) ret = [] for h in handles: ret.append(h2asv[h][cs.ATTR_CONTACT_ID]) return ret def get_contact_handle_sync(self, identifier): return self.Contacts.GetContactByID(identifier, [])[0] def get_contact_handles_sync(self, ids): return [self.get_contact_handle_sync(i) for i in ids] def wrap_connection(conn): return ConnWrapper(conn, tp_name_prefix + '.Connection', dict([ (name, tp_name_prefix + '.Connection.Interface.' + name) for name in ['Aliasing', 'Avatars', 'Capabilities', 'Contacts', 'SimplePresence', 'Requests']] + [('Peer', 'org.freedesktop.DBus.Peer'), ('ContactCapabilities', cs.CONN_IFACE_CONTACT_CAPS), ('ContactInfo', cs.CONN_IFACE_CONTACT_INFO), ('Location', cs.CONN_IFACE_LOCATION), ('Future', tp_name_prefix + '.Connection.FUTURE'), ('MailNotification', cs.CONN_IFACE_MAIL_NOTIFICATION), ('ContactList', cs.CONN_IFACE_CONTACT_LIST), ('ContactGroups', cs.CONN_IFACE_CONTACT_GROUPS), ('PowerSaving', cs.CONN_IFACE_POWER_SAVING), ('Addressing', cs.CONN_IFACE_ADDRESSING), ])) def wrap_channel(chan, type_, extra=None): interfaces = { type_: tp_name_prefix + '.Channel.Type.' + type_, 'Group': tp_name_prefix + '.Channel.Interface.Group', } if extra: interfaces.update(dict([ (name, tp_name_prefix + '.Channel.Interface.' + name) for name in extra])) return ProxyWrapper(chan, tp_name_prefix + '.Channel', interfaces) def wrap_content(chan, extra=None): interfaces = { } if extra: interfaces.update(dict([ (name, tp_name_prefix + '.Call1.Content.Interface.' + name) for name in extra])) return ProxyWrapper(chan, tp_name_prefix + '.Call1.Content', interfaces) def make_connection(bus, event_func, name, proto, params): cm = bus.get_object( tp_name_prefix + '.ConnectionManager.%s' % name, tp_path_prefix + '/ConnectionManager/%s' % name, introspect=False) cm_iface = dbus.Interface(cm, tp_name_prefix + '.ConnectionManager') connection_name, connection_path = cm_iface.RequestConnection( proto, dbus.Dictionary(params, signature='sv')) conn = wrap_connection(bus.get_object(connection_name, connection_path)) return conn def make_channel_proxy(conn, path, iface): bus = dbus.SessionBus() chan = bus.get_object(conn.object.bus_name, path) chan = dbus.Interface(chan, tp_name_prefix + '.' + iface) return chan # block_reading can be used if the test want to choose when we start to read # data from the socket. class EventProtocol(Protocol): def __init__(self, queue=None, block_reading=False): self.queue = queue self.block_reading = block_reading def dataReceived(self, data): if self.queue is not None: self.queue.append(Event('socket-data', protocol=self, data=data)) def sendData(self, data): self.transport.write(data) def connectionMade(self): if self.block_reading: self.transport.stopReading() def connectionLost(self, reason=None): if self.queue is not None: self.queue.append(Event('socket-disconnected', protocol=self)) class EventProtocolFactory(Factory): def __init__(self, queue, block_reading=False): self.queue = queue self.block_reading = block_reading def _create_protocol(self): return EventProtocol(self.queue, self.block_reading) def buildProtocol(self, addr): proto = self._create_protocol() self.queue.append(Event('socket-connected', protocol=proto)) return proto class EventProtocolClientFactory(EventProtocolFactory, ClientFactory): pass def watch_tube_signals(q, tube): def got_signal_cb(*args, **kwargs): q.append(Event('tube-signal', path=kwargs['path'], signal=kwargs['member'], args=map(unwrap, args), tube=tube)) tube.add_signal_receiver(got_signal_cb, path_keyword='path', member_keyword='member', byte_arrays=True) def pretty(x): return pprint.pformat(unwrap(x)) def assertEquals(expected, value): if expected != value: raise AssertionError( "expected:\n%s\ngot:\n%s" % (pretty(expected), pretty(value))) def assertSameSets(expected, value): exp_set = set(expected) val_set = set(value) if exp_set != val_set: raise AssertionError( "expected contents:\n%s\ngot:\n%s" % ( pretty(exp_set), pretty(val_set))) def assertNotEquals(expected, value): if expected == value: raise AssertionError( "expected something other than:\n%s" % pretty(value)) def assertContains(element, value): if element not in value: raise AssertionError( "expected:\n%s\nin:\n%s" % (pretty(element), pretty(value))) def assertDoesNotContain(element, value): if element in value: raise AssertionError( "expected:\n%s\nnot in:\n%s" % (pretty(element), pretty(value))) def assertLength(length, value): if len(value) != length: raise AssertionError("expected: length %d, got length %d:\n%s" % ( length, len(value), pretty(value))) def assertFlagsSet(flags, value): masked = value & flags if masked != flags: raise AssertionError( "expected flags %u, of which only %u are set in %u" % ( flags, masked, value)) def assertFlagsUnset(flags, value): masked = value & flags if masked != 0: raise AssertionError( "expected none of flags %u, but %u are set in %u" % ( flags, masked, value)) def assertDBusError(name, error): if error.get_dbus_name() != name: raise AssertionError( "expected DBus error named:\n %s\ngot:\n %s\n(with message: %s)" % (name, error.get_dbus_name(), error.message)) def install_colourer(): def red(s): return '\x1b[31m%s\x1b[0m' % s def green(s): return '\x1b[32m%s\x1b[0m' % s patterns = { 'handled': green, 'not handled': red, } class Colourer: def __init__(self, fh, patterns): self.fh = fh self.patterns = patterns def write(self, s): for p, f in self.patterns.items(): if s.startswith(p): self.fh.write(f(p) + s[len(p):]) return self.fh.write(s) sys.stdout = Colourer(sys.stdout, patterns) return sys.stdout # this is just to shut up unittest. class DummyStream(object): def write(self, s): if 'CHECK_TWISTED_VERBOSE' in os.environ: print s, def flush(self): pass if __name__ == '__main__': stream = DummyStream() runner = unittest.TextTestRunner(stream=stream) unittest.main(testRunner=runner)
lgpl-2.1
ammarkhann/FinalSeniorCode
lib/python2.7/site-packages/requests/packages/chardet/mbcharsetprober.py
2924
3268
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is Mozilla Universal charset detector code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 2001 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # Shy Shalom - original C code # Proofpoint, Inc. # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### import sys from . import constants from .charsetprober import CharSetProber class MultiByteCharSetProber(CharSetProber): def __init__(self): CharSetProber.__init__(self) self._mDistributionAnalyzer = None self._mCodingSM = None self._mLastChar = [0, 0] def reset(self): CharSetProber.reset(self) if self._mCodingSM: self._mCodingSM.reset() if self._mDistributionAnalyzer: self._mDistributionAnalyzer.reset() self._mLastChar = [0, 0] def get_charset_name(self): pass def feed(self, aBuf): aLen = len(aBuf) for i in range(0, aLen): codingState = self._mCodingSM.next_state(aBuf[i]) if codingState == constants.eError: if constants._debug: sys.stderr.write(self.get_charset_name() + ' prober hit error at byte ' + str(i) + '\n') self._mState = constants.eNotMe break elif codingState == constants.eItsMe: self._mState = constants.eFoundIt break elif codingState == constants.eStart: charLen = self._mCodingSM.get_current_charlen() if i == 0: self._mLastChar[1] = aBuf[0] self._mDistributionAnalyzer.feed(self._mLastChar, charLen) else: self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1], charLen) self._mLastChar[0] = aBuf[aLen - 1] if self.get_state() == constants.eDetecting: if (self._mDistributionAnalyzer.got_enough_data() and (self.get_confidence() > constants.SHORTCUT_THRESHOLD)): self._mState = constants.eFoundIt return self.get_state() def get_confidence(self): return self._mDistributionAnalyzer.get_confidence()
mit
Ryex/airtime
python_apps/media-monitor/mm2/tests/test_owners.py
12
1265
# -*- coding: utf-8 -*- import unittest from media.monitor import owners class TestMMP(unittest.TestCase): def setUp(self): self.f = "test.mp3" def test_has_owner(self): owners.reset_owners() o = 12345 self.assertTrue( owners.add_file_owner(self.f,o) ) self.assertTrue( owners.has_owner(self.f) ) def test_add_file_owner(self): owners.reset_owners() self.assertFalse( owners.add_file_owner('testing', -1) ) self.assertTrue( owners.add_file_owner(self.f, 123) ) self.assertTrue( owners.add_file_owner(self.f, 123) ) self.assertTrue( owners.add_file_owner(self.f, 456) ) def test_remove_file_owner(self): owners.reset_owners() self.assertTrue( owners.add_file_owner(self.f, 123) ) self.assertTrue( owners.remove_file_owner(self.f) ) self.assertFalse( owners.remove_file_owner(self.f) ) def test_get_owner(self): owners.reset_owners() self.assertTrue( owners.add_file_owner(self.f, 123) ) self.assertEqual( owners.get_owner(self.f), 123, "file is owned" ) self.assertEqual( owners.get_owner("random_stuff.txt"), -1, "file is not owned" ) if __name__ == '__main__': unittest.main()
agpl-3.0
fnp/wolnelektury
src/social/models.py
1
6767
# This file is part of Wolnelektury, licensed under GNU Affero GPLv3 or later. # Copyright © Fundacja Nowoczesna Polska. See NOTICE for more information. # from random import randint from django.db import models from django.conf import settings from django.core.exceptions import ValidationError from django.urls import reverse from django.utils.translation import ugettext_lazy as _, get_language from catalogue.models import Book from wolnelektury.utils import cached_render, clear_cached_renders class BannerGroup(models.Model): name = models.CharField(_('name'), max_length=255, unique=True) created_at = models.DateTimeField(_('created at'), auto_now_add=True) class Meta: ordering = ('name',) verbose_name = _('banner group') verbose_name_plural = _('banner groups') def __str__(self): return self.name def get_absolute_url(self): """This is used for testing.""" return "%s?banner_group=%d" % (reverse('main_page'), self.id) def get_banner(self): banners = self.cite_set.all() count = banners.count() if not count: return None return banners[randint(0, count-1)] class Cite(models.Model): book = models.ForeignKey(Book, models.CASCADE, verbose_name=_('book'), null=True, blank=True) text = models.TextField(_('text'), blank=True) small = models.BooleanField(_('small'), default=False, help_text=_('Make this cite display smaller.')) vip = models.CharField(_('VIP'), max_length=128, null=True, blank=True) link = models.URLField(_('link')) video = models.URLField(_('video'), blank=True) picture = models.ImageField(_('picture'), blank=True, help_text='Najlepsze wymiary: 975 x 315 z tekstem, 487 x 315 bez tekstu.') picture_alt = models.CharField(_('picture alternative text'), max_length=255, blank=True) picture_title = models.CharField(_('picture title'), max_length=255, null=True, blank=True) picture_author = models.CharField(_('picture author'), max_length=255, blank=True, null=True) picture_link = models.URLField(_('picture link'), blank=True, null=True) picture_license = models.CharField(_('picture license name'), max_length=255, blank=True, null=True) picture_license_link = models.URLField(_('picture license link'), blank=True, null=True) sticky = models.BooleanField(_('sticky'), default=False, db_index=True, help_text=_('Sticky cites will take precedense.')) background_plain = models.BooleanField(_('plain background'), default=False) background_color = models.CharField(_('background color'), max_length=32, blank=True) image = models.ImageField( _('background image'), upload_to='social/cite', null=True, blank=True, help_text=_('Best background is 975 x 315 px and under 100kB.')) image_title = models.CharField(_('background title'), max_length=255, null=True, blank=True) image_author = models.CharField(_('background author'), max_length=255, blank=True, null=True) image_link = models.URLField(_('background link'), blank=True, null=True) image_license = models.CharField(_('background license name'), max_length=255, blank=True, null=True) image_license_link = models.URLField(_('background license link'), blank=True, null=True) created_at = models.DateTimeField(_('created at'), auto_now_add=True) group = models.ForeignKey(BannerGroup, verbose_name=_('group'), null=True, blank=True, on_delete=models.SET_NULL) class Meta: ordering = ('vip', 'text') verbose_name = _('banner') verbose_name_plural = _('banners') def __str__(self): t = [] if self.text: t.append(self.text[:60]) if self.book_id: t.append('[ks.]'[:60]) t.append(self.link[:60]) if self.vip: t.append('vip: ' + self.vip) if self.picture: t.append('[obr.]') if self.video: t.append('[vid.]') return ', '.join(t) def get_absolute_url(self): """This is used for testing.""" return "%s?banner=%d" % (reverse('main_page'), self.id) def has_box(self): return self.video or self.picture def has_body(self): return self.vip or self.text or self.book def layout(self): pieces = [] if self.has_box(): pieces.append('box') if self.has_body(): pieces.append('text') return '-'.join(pieces) def save(self, *args, **kwargs): ret = super(Cite, self).save(*args, **kwargs) self.clear_cache() return ret @cached_render('social/cite_promo.html') def main_box(self): return { 'cite': self, 'main': True, } def clear_cache(self): clear_cached_renders(self.main_box) class Carousel(models.Model): placement = models.SlugField(_('placement'), choices=[ ('main', 'main'), ]) priority = models.SmallIntegerField(_('priority'), default=0) language = models.CharField(_('language'), max_length=2, blank=True, default='', choices=settings.LANGUAGES) class Meta: # ordering = ('placement', '-priority') verbose_name = _('carousel') verbose_name_plural = _('carousels') def __str__(self): return self.placement @classmethod def get(cls, placement): carousel = cls.objects.filter(models.Q(language='') | models.Q(language=get_language()), placement=placement).order_by('-priority', '?').first() if carousel is None: carousel = cls.objects.create(placement=placement) return carousel class CarouselItem(models.Model): order = models.PositiveSmallIntegerField(_('order'), unique=True) carousel = models.ForeignKey(Carousel, models.CASCADE, verbose_name=_('carousel')) banner = models.ForeignKey(Cite, models.CASCADE, null=True, blank=True, verbose_name=_('banner')) banner_group = models.ForeignKey(BannerGroup, models.CASCADE, null=True, blank=True, verbose_name=_('banner group')) class Meta: ordering = ('order',) unique_together = [('carousel', 'order')] verbose_name = _('carousel item') verbose_name_plural = _('carousel items') def __str__(self): return str(self.banner or self.banner_group) def clean(self): if not self.banner and not self.banner_group: raise ValidationError(_('Either banner or banner group is required.')) elif self.banner and self.banner_group: raise ValidationError(_('Either banner or banner group is required.')) def get_banner(self): return self.banner or self.banner_group.get_banner()
agpl-3.0
glouppe/scikit-learn
examples/model_selection/plot_roc.py
49
5041
""" ======================================= Receiver Operating Characteristic (ROC) ======================================= Example of Receiver Operating Characteristic (ROC) metric to evaluate classifier output quality. ROC curves typically feature true positive rate on the Y axis, and false positive rate on the X axis. This means that the top left corner of the plot is the "ideal" point - a false positive rate of zero, and a true positive rate of one. This is not very realistic, but it does mean that a larger area under the curve (AUC) is usually better. The "steepness" of ROC curves is also important, since it is ideal to maximize the true positive rate while minimizing the false positive rate. Multiclass settings ------------------- ROC curves are typically used in binary classification to study the output of a classifier. In order to extend ROC curve and ROC area to multi-class or multi-label classification, it is necessary to binarize the output. One ROC curve can be drawn per label, but one can also draw a ROC curve by considering each element of the label indicator matrix as a binary prediction (micro-averaging). Another evaluation measure for multi-class classification is macro-averaging, which gives equal weight to the classification of each label. .. note:: See also :func:`sklearn.metrics.roc_auc_score`, :ref:`example_model_selection_plot_roc_crossval.py`. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from itertools import cycle from sklearn import svm, datasets from sklearn.metrics import roc_curve, auc from sklearn.model_selection import train_test_split from sklearn.preprocessing import label_binarize from sklearn.multiclass import OneVsRestClassifier from scipy import interp # Import some data to play with iris = datasets.load_iris() X = iris.data y = iris.target # Binarize the output y = label_binarize(y, classes=[0, 1, 2]) n_classes = y.shape[1] # Add noisy features to make the problem harder random_state = np.random.RandomState(0) n_samples, n_features = X.shape X = np.c_[X, random_state.randn(n_samples, 200 * n_features)] # shuffle and split training and test sets X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5, random_state=0) # Learn to predict each class against the other classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True, random_state=random_state)) y_score = classifier.fit(X_train, y_train).decision_function(X_test) # Compute ROC curve and ROC area for each class fpr = dict() tpr = dict() roc_auc = dict() for i in range(n_classes): fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i]) roc_auc[i] = auc(fpr[i], tpr[i]) # Compute micro-average ROC curve and ROC area fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel()) roc_auc["micro"] = auc(fpr["micro"], tpr["micro"]) ############################################################################## # Plot of a ROC curve for a specific class plt.figure() lw = 2 plt.plot(fpr[2], tpr[2], color='darkorange', lw=lw, label='ROC curve (area = %0.2f)' % roc_auc[2]) plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('Receiver operating characteristic example') plt.legend(loc="lower right") plt.show() ############################################################################## # Plot ROC curves for the multiclass problem # Compute macro-average ROC curve and ROC area # First aggregate all false positive rates all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)])) # Then interpolate all ROC curves at this points mean_tpr = np.zeros_like(all_fpr) for i in range(n_classes): mean_tpr += interp(all_fpr, fpr[i], tpr[i]) # Finally average it and compute AUC mean_tpr /= n_classes fpr["macro"] = all_fpr tpr["macro"] = mean_tpr roc_auc["macro"] = auc(fpr["macro"], tpr["macro"]) # Plot all ROC curves plt.figure() plt.plot(fpr["micro"], tpr["micro"], label='micro-average ROC curve (area = {0:0.2f})' ''.format(roc_auc["micro"]), color='deeppink', linestyle=':', linewidth=4) plt.plot(fpr["macro"], tpr["macro"], label='macro-average ROC curve (area = {0:0.2f})' ''.format(roc_auc["macro"]), color='navy', linestyle=':', linewidth=4) colors = cycle(['aqua', 'darkorange', 'cornflowerblue']) for i, color in zip(range(n_classes), colors): plt.plot(fpr[i], tpr[i], color=color, lw=lw, label='ROC curve of class {0} (area = {1:0.2f})' ''.format(i, roc_auc[i])) plt.plot([0, 1], [0, 1], 'k--', lw=lw) plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('Some extension of Receiver operating characteristic to multi-class') plt.legend(loc="lower right") plt.show()
bsd-3-clause
MenZil/kuma
vendor/packages/nose/commands.py
68
6310
""" nosetests setuptools command ---------------------------- The easiest way to run tests with nose is to use the `nosetests` setuptools command:: python setup.py nosetests This command has one *major* benefit over the standard `test` command: *all nose plugins are supported*. To configure the `nosetests` command, add a [nosetests] section to your setup.cfg. The [nosetests] section can contain any command line arguments that nosetests supports. The differences between issuing an option on the command line and adding it to setup.cfg are: * In setup.cfg, the -- prefix must be excluded * In setup.cfg, command line flags that take no arguments must be given an argument flag (1, T or TRUE for active, 0, F or FALSE for inactive) Here's an example [nosetests] setup.cfg section:: [nosetests] verbosity=1 detailed-errors=1 with-coverage=1 cover-package=nose debug=nose.loader pdb=1 pdb-failures=1 If you commonly run nosetests with a large number of options, using the nosetests setuptools command and configuring with setup.cfg can make running your tests much less tedious. (Note that the same options and format supported in setup.cfg are supported in all other config files, and the nosetests script will also load config files.) Another reason to run tests with the command is that the command will install packages listed in your `tests_require`, as well as doing a complete build of your package before running tests. For packages with dependencies or that build C extensions, using the setuptools command can be more convenient than building by hand and running the nosetests script. Bootstrapping ------------- If you are distributing your project and want users to be able to run tests without having to install nose themselves, add nose to the setup_requires section of your setup():: setup( # ... setup_requires=['nose>=1.0'] ) This will direct setuptools to download and activate nose during the setup process, making the ``nosetests`` command available. """ try: from setuptools import Command except ImportError: Command = nosetests = None else: from nose.config import Config, option_blacklist, user_config_files, \ flag, _bool from nose.core import TestProgram from nose.plugins import DefaultPluginManager def get_user_options(parser): """convert a optparse option list into a distutils option tuple list""" opt_list = [] for opt in parser.option_list: if opt._long_opts[0][2:] in option_blacklist: continue long_name = opt._long_opts[0][2:] if opt.action not in ('store_true', 'store_false'): long_name = long_name + "=" short_name = None if opt._short_opts: short_name = opt._short_opts[0][1:] opt_list.append((long_name, short_name, opt.help or "")) return opt_list class nosetests(Command): description = "Run unit tests using nosetests" __config = Config(files=user_config_files(), plugins=DefaultPluginManager()) __parser = __config.getParser() user_options = get_user_options(__parser) def initialize_options(self): """create the member variables, but change hyphens to underscores """ self.option_to_cmds = {} for opt in self.__parser.option_list: cmd_name = opt._long_opts[0][2:] option_name = cmd_name.replace('-', '_') self.option_to_cmds[option_name] = cmd_name setattr(self, option_name, None) self.attr = None def finalize_options(self): """nothing to do here""" pass def run(self): """ensure tests are capable of being run, then run nose.main with a reconstructed argument list""" if getattr(self.distribution, 'use_2to3', False): # If we run 2to3 we can not do this inplace: # Ensure metadata is up-to-date build_py = self.get_finalized_command('build_py') build_py.inplace = 0 build_py.run() bpy_cmd = self.get_finalized_command("build_py") build_path = bpy_cmd.build_lib # Build extensions egg_info = self.get_finalized_command('egg_info') egg_info.egg_base = build_path egg_info.run() build_ext = self.get_finalized_command('build_ext') build_ext.inplace = 0 build_ext.run() else: self.run_command('egg_info') # Build extensions in-place build_ext = self.get_finalized_command('build_ext') build_ext.inplace = 1 build_ext.run() if self.distribution.install_requires: self.distribution.fetch_build_eggs( self.distribution.install_requires) if self.distribution.tests_require: self.distribution.fetch_build_eggs( self.distribution.tests_require) ei_cmd = self.get_finalized_command("egg_info") argv = ['nosetests', '--where', ei_cmd.egg_base] for (option_name, cmd_name) in self.option_to_cmds.items(): if option_name in option_blacklist: continue value = getattr(self, option_name) if value is not None: argv.extend( self.cfgToArg(option_name.replace('_', '-'), value)) TestProgram(argv=argv, config=self.__config) def cfgToArg(self, optname, value): argv = [] long_optname = '--' + optname opt = self.__parser.get_option(long_optname) if opt.action in ('store_true', 'store_false'): if not flag(value): raise ValueError("Invalid value '%s' for '%s'" % ( value, optname)) if _bool(value): argv.append(long_optname) else: argv.extend([long_optname, value]) return argv
mpl-2.0
blackzw/openwrt_sdk_dev1
staging_dir/target-mips_r2_uClibc-0.9.33.2/usr/lib/python2.7/unittest/loader.py
152
13420
"""Loading unittests.""" import os import re import sys import traceback import types from functools import cmp_to_key as _CmpToKey from fnmatch import fnmatch from . import case, suite __unittest = True # what about .pyc or .pyo (etc) # we would need to avoid loading the same tests multiple times # from '.py', '.pyc' *and* '.pyo' VALID_MODULE_NAME = re.compile(r'[_a-z]\w*\.py$', re.IGNORECASE) def _make_failed_import_test(name, suiteClass): message = 'Failed to import test module: %s\n%s' % (name, traceback.format_exc()) return _make_failed_test('ModuleImportFailure', name, ImportError(message), suiteClass) def _make_failed_load_tests(name, exception, suiteClass): return _make_failed_test('LoadTestsFailure', name, exception, suiteClass) def _make_failed_test(classname, methodname, exception, suiteClass): def testFailure(self): raise exception attrs = {methodname: testFailure} TestClass = type(classname, (case.TestCase,), attrs) return suiteClass((TestClass(methodname),)) class TestLoader(object): """ This class is responsible for loading tests according to various criteria and returning them wrapped in a TestSuite """ testMethodPrefix = 'test' sortTestMethodsUsing = cmp suiteClass = suite.TestSuite _top_level_dir = None def loadTestsFromTestCase(self, testCaseClass): """Return a suite of all tests cases contained in testCaseClass""" if issubclass(testCaseClass, suite.TestSuite): raise TypeError("Test cases should not be derived from TestSuite." \ " Maybe you meant to derive from TestCase?") testCaseNames = self.getTestCaseNames(testCaseClass) if not testCaseNames and hasattr(testCaseClass, 'runTest'): testCaseNames = ['runTest'] loaded_suite = self.suiteClass(map(testCaseClass, testCaseNames)) return loaded_suite def loadTestsFromModule(self, module, use_load_tests=True): """Return a suite of all tests cases contained in the given module""" tests = [] for name in dir(module): obj = getattr(module, name) if isinstance(obj, type) and issubclass(obj, case.TestCase): tests.append(self.loadTestsFromTestCase(obj)) load_tests = getattr(module, 'load_tests', None) tests = self.suiteClass(tests) if use_load_tests and load_tests is not None: try: return load_tests(self, tests, None) except Exception, e: return _make_failed_load_tests(module.__name__, e, self.suiteClass) return tests def loadTestsFromName(self, name, module=None): """Return a suite of all tests cases given a string specifier. The name may resolve either to a module, a test case class, a test method within a test case class, or a callable object which returns a TestCase or TestSuite instance. The method optionally resolves the names relative to a given module. """ parts = name.split('.') if module is None: parts_copy = parts[:] while parts_copy: try: module = __import__('.'.join(parts_copy)) break except ImportError: del parts_copy[-1] if not parts_copy: raise parts = parts[1:] obj = module for part in parts: parent, obj = obj, getattr(obj, part) if isinstance(obj, types.ModuleType): return self.loadTestsFromModule(obj) elif isinstance(obj, type) and issubclass(obj, case.TestCase): return self.loadTestsFromTestCase(obj) elif (isinstance(obj, types.UnboundMethodType) and isinstance(parent, type) and issubclass(parent, case.TestCase)): return self.suiteClass([parent(obj.__name__)]) elif isinstance(obj, suite.TestSuite): return obj elif hasattr(obj, '__call__'): test = obj() if isinstance(test, suite.TestSuite): return test elif isinstance(test, case.TestCase): return self.suiteClass([test]) else: raise TypeError("calling %s returned %s, not a test" % (obj, test)) else: raise TypeError("don't know how to make test from: %s" % obj) def loadTestsFromNames(self, names, module=None): """Return a suite of all tests cases found using the given sequence of string specifiers. See 'loadTestsFromName()'. """ suites = [self.loadTestsFromName(name, module) for name in names] return self.suiteClass(suites) def getTestCaseNames(self, testCaseClass): """Return a sorted sequence of method names found within testCaseClass """ def isTestMethod(attrname, testCaseClass=testCaseClass, prefix=self.testMethodPrefix): return attrname.startswith(prefix) and \ hasattr(getattr(testCaseClass, attrname), '__call__') testFnNames = filter(isTestMethod, dir(testCaseClass)) if self.sortTestMethodsUsing: testFnNames.sort(key=_CmpToKey(self.sortTestMethodsUsing)) return testFnNames def discover(self, start_dir, pattern='test*.py', top_level_dir=None): """Find and return all test modules from the specified start directory, recursing into subdirectories to find them. Only test files that match the pattern will be loaded. (Using shell style pattern matching.) All test modules must be importable from the top level of the project. If the start directory is not the top level directory then the top level directory must be specified separately. If a test package name (directory with '__init__.py') matches the pattern then the package will be checked for a 'load_tests' function. If this exists then it will be called with loader, tests, pattern. If load_tests exists then discovery does *not* recurse into the package, load_tests is responsible for loading all tests in the package. The pattern is deliberately not stored as a loader attribute so that packages can continue discovery themselves. top_level_dir is stored so load_tests does not need to pass this argument in to loader.discover(). """ set_implicit_top = False if top_level_dir is None and self._top_level_dir is not None: # make top_level_dir optional if called from load_tests in a package top_level_dir = self._top_level_dir elif top_level_dir is None: set_implicit_top = True top_level_dir = start_dir top_level_dir = os.path.abspath(top_level_dir) if not top_level_dir in sys.path: # all test modules must be importable from the top level directory # should we *unconditionally* put the start directory in first # in sys.path to minimise likelihood of conflicts between installed # modules and development versions? sys.path.insert(0, top_level_dir) self._top_level_dir = top_level_dir is_not_importable = False if os.path.isdir(os.path.abspath(start_dir)): start_dir = os.path.abspath(start_dir) if start_dir != top_level_dir: is_not_importable = not os.path.isfile(os.path.join(start_dir, '__init__.py')) else: # support for discovery from dotted module names try: __import__(start_dir) except ImportError: is_not_importable = True else: the_module = sys.modules[start_dir] top_part = start_dir.split('.')[0] start_dir = os.path.abspath(os.path.dirname((the_module.__file__))) if set_implicit_top: self._top_level_dir = self._get_directory_containing_module(top_part) sys.path.remove(top_level_dir) if is_not_importable: raise ImportError('Start directory is not importable: %r' % start_dir) tests = list(self._find_tests(start_dir, pattern)) return self.suiteClass(tests) def _get_directory_containing_module(self, module_name): module = sys.modules[module_name] full_path = os.path.abspath(module.__file__) if os.path.basename(full_path).lower().startswith('__init__.py'): return os.path.dirname(os.path.dirname(full_path)) else: # here we have been given a module rather than a package - so # all we can do is search the *same* directory the module is in # should an exception be raised instead return os.path.dirname(full_path) def _get_name_from_path(self, path): path = os.path.splitext(os.path.normpath(path))[0] _relpath = os.path.relpath(path, self._top_level_dir) assert not os.path.isabs(_relpath), "Path must be within the project" assert not _relpath.startswith('..'), "Path must be within the project" name = _relpath.replace(os.path.sep, '.') return name def _get_module_from_name(self, name): __import__(name) return sys.modules[name] def _match_path(self, path, full_path, pattern): # override this method to use alternative matching strategy return fnmatch(path, pattern) def _find_tests(self, start_dir, pattern): """Used by discovery. Yields test suites it loads.""" paths = os.listdir(start_dir) for path in paths: full_path = os.path.join(start_dir, path) if os.path.isfile(full_path): if not VALID_MODULE_NAME.match(path): # valid Python identifiers only continue if not self._match_path(path, full_path, pattern): continue # if the test file matches, load it name = self._get_name_from_path(full_path) try: module = self._get_module_from_name(name) except: yield _make_failed_import_test(name, self.suiteClass) else: mod_file = os.path.abspath(getattr(module, '__file__', full_path)) realpath = os.path.splitext(mod_file)[0] fullpath_noext = os.path.splitext(full_path)[0] if realpath.lower() != fullpath_noext.lower(): module_dir = os.path.dirname(realpath) mod_name = os.path.splitext(os.path.basename(full_path))[0] expected_dir = os.path.dirname(full_path) msg = ("%r module incorrectly imported from %r. Expected %r. " "Is this module globally installed?") raise ImportError(msg % (mod_name, module_dir, expected_dir)) yield self.loadTestsFromModule(module) elif os.path.isdir(full_path): if not os.path.isfile(os.path.join(full_path, '__init__.py')): continue load_tests = None tests = None if fnmatch(path, pattern): # only check load_tests if the package directory itself matches the filter name = self._get_name_from_path(full_path) package = self._get_module_from_name(name) load_tests = getattr(package, 'load_tests', None) tests = self.loadTestsFromModule(package, use_load_tests=False) if load_tests is None: if tests is not None: # tests loaded from package file yield tests # recurse into the package for test in self._find_tests(full_path, pattern): yield test else: try: yield load_tests(self, tests, pattern) except Exception, e: yield _make_failed_load_tests(package.__name__, e, self.suiteClass) defaultTestLoader = TestLoader() def _makeLoader(prefix, sortUsing, suiteClass=None): loader = TestLoader() loader.sortTestMethodsUsing = sortUsing loader.testMethodPrefix = prefix if suiteClass: loader.suiteClass = suiteClass return loader def getTestCaseNames(testCaseClass, prefix, sortUsing=cmp): return _makeLoader(prefix, sortUsing).getTestCaseNames(testCaseClass) def makeSuite(testCaseClass, prefix='test', sortUsing=cmp, suiteClass=suite.TestSuite): return _makeLoader(prefix, sortUsing, suiteClass).loadTestsFromTestCase(testCaseClass) def findTestCases(module, prefix='test', sortUsing=cmp, suiteClass=suite.TestSuite): return _makeLoader(prefix, sortUsing, suiteClass).loadTestsFromModule(module)
gpl-2.0
square/pants
src/python/pants/backend/python/tasks/python_binary_create.py
2
2193
# coding=utf-8 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import (nested_scopes, generators, division, absolute_import, with_statement, print_function, unicode_literals) import os import time from pants.backend.python.python_chroot import PythonChroot from pants.backend.python.targets.python_binary import PythonBinary from pants.backend.python.tasks.python_task import PythonTask from pants.base.exceptions import TaskError class PythonBinaryCreate(PythonTask): @staticmethod def is_binary(target): return isinstance(target, PythonBinary) def __init__(self, *args, **kwargs): super(PythonBinaryCreate, self).__init__(*args, **kwargs) self._distdir = self.context.config.getdefault('pants_distdir') def execute(self): binaries = self.context.targets(self.is_binary) # Check for duplicate binary names, since we write the pexes to <dist>/<name>.pex. names = {} for binary in binaries: name = binary.name if name in names: raise TaskError('Cannot build two binaries with the same name in a single invocation. ' '%s and %s both have the name %s.' % (binary, names[name], name)) names[name] = binary for binary in binaries: self.create_binary(binary) def create_binary(self, binary): interpreter = self.select_interpreter_for_targets(binary.closure()) run_info = self.context.run_tracker.run_info build_properties = {} build_properties.update(run_info.add_basic_info(run_id=None, timestamp=time.time())) build_properties.update(run_info.add_scm_info()) pexinfo = binary.pexinfo.copy() pexinfo.build_properties = build_properties with self.temporary_pex_builder(pex_info=pexinfo, interpreter=interpreter) as builder: chroot = PythonChroot( targets=[binary], builder=builder, platforms=binary.platforms, interpreter=interpreter, conn_timeout=self.conn_timeout) pex_path = os.path.join(self._distdir, '%s.pex' % binary.name) chroot.dump() builder.build(pex_path)
apache-2.0
da1z/intellij-community
python/testData/inspections/PyStringFormatInspection/UnionCallType.py
8
1566
from collections import namedtuple def simple_func(cond): if cond: return 1 else: return 1, 2 Point = namedtuple('Point', ['x', 'y']) def named_tuple_func(cond): if cond: return 1 else: return Point(1, 1) def primitive_types_func(cond): if cond: return 1 else: return 2 def collection_func(cond): if cond: return [1, 2] else: return {1, 2} def list_tuple(cond): if cond: return [1, 2] else: return 1, 2 "%s %s" % simple_func(True) "%s %s" % simple_func(False) "%s %s %s" % <warning descr="Too few arguments for format string">simple_func(False)</warning> "%s %s" % named_tuple_func(True) "%s %s" % named_tuple_func(False) "%s %s %s" % named_tuple_func(False) "%s" % primitive_types_func(True) "%s %s" % <warning descr="Too few arguments for format string">primitive_types_func(True)</warning> "%s %s" % <warning descr="Too few arguments for format string">primitive_types_func(False)</warning> "%s %s %s" % <warning descr="Too few arguments for format string">primitive_types_func(False)</warning> "%s %s" % <warning descr="Too few arguments for format string">collection_func(True)</warning> "%s %s" % <warning descr="Too few arguments for format string">collection_func(False)</warning> "%s %s %s" % <warning descr="Too few arguments for format string">collection_func(False)</warning> "%s %s" % list_tuple(True) "%s %s" % list_tuple(True) "%s %s %s" % <warning descr="Too few arguments for format string">list_tuple(True)</warning>
apache-2.0